hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81fdb0e1136255e877c9ae2c151c33d3b0b0ee1d | 338 | py | Python | 1801-1900/1807.evaluate-thebracket-pairs-of-a-string.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
] | null | null | null | 1801-1900/1807.evaluate-thebracket-pairs-of-a-string.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
] | null | null | null | 1801-1900/1807.evaluate-thebracket-pairs-of-a-string.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=1807 lang=python3
#
# [1807] Evaluate the Bracket Pairs of a String
#
# @lc code=start
import re
# @lc code=end
| 18.777778 | 79 | 0.612426 |
81fdf45abe6a280ed6357190764ee304c85901c1 | 787 | py | Python | Math Functions/Uncategorized/Herons formula.py | adrikagupta/Must-Know-Programming-Codes | d403428bb9e619b855bde1ae9f46f41a2952b4fa | [
"MIT"
] | 13 | 2017-10-11T09:03:48.000Z | 2020-06-09T16:00:50.000Z | Math Functions/Uncategorized/Herons formula.py | adrikagupta/Must-Know-Programming-Codes | d403428bb9e619b855bde1ae9f46f41a2952b4fa | [
"MIT"
] | 4 | 2017-10-15T06:23:10.000Z | 2017-10-22T08:22:49.000Z | Math Functions/Uncategorized/Herons formula.py | adrikagupta/Must-Know-Programming-Codes | d403428bb9e619b855bde1ae9f46f41a2952b4fa | [
"MIT"
] | 23 | 2017-10-14T05:22:33.000Z | 2019-10-30T19:35:42.000Z | #Heron's formula#
import math
unit_of_measurement = "cm"
side1 = int(input("Enter the length of side A in cm: "))
side2 = int(input("Enter the length of side B in cm: "))
side3 = int(input("Enter the length of side C in cm: "))
braket1 = (side1 ** 2) * (side2**2) + (side1**2)*(side3**2) + (side2**2)*(side3**2)
braket2 = (side1**2)+(side2**2)+(side3**2)
function_braket1 = 4*braket1
function_braket2 = braket2**2
both_brakets = function_braket1 - function_braket2
result1 = math.sqrt(both_brakets)
area_of_triangle = result1 / 4
print("Side A", "=", side1, sep="")
print("Side B", "=", side2, sep="")
print("Side C", "=", side3, sep="")
print()
print("Calculated using Heron's Formula")
print()
print("Area of triangle"), print(area_of_triangle, unit_of_measurement, "2", sep="")
| 29.148148 | 84 | 0.672173 |
81fe4dff84aff0d592f6a1cba826b336e24d2573 | 4,605 | py | Python | viewer/bitmap_from_array.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | 2 | 2021-03-17T11:25:46.000Z | 2021-11-18T04:20:54.000Z | viewer/bitmap_from_array.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | null | null | null | viewer/bitmap_from_array.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
import numpy as np
import wx
from dials.array_family import flex
from dials_viewer_ext import rgb_img
| 33.369565 | 87 | 0.569381 |
81fea7350bf3a22df6647f4ff0e42232c0fd7743 | 191 | py | Python | spinesTS/utils/_validation.py | BirchKwok/spinesTS | b88ec333f41f58979e0570177d1fdc364d976056 | [
"Apache-2.0"
] | 2 | 2021-08-15T09:29:37.000Z | 2022-03-10T13:56:13.000Z | spinesTS/utils/_validation.py | BirchKwok/spinesTS | b88ec333f41f58979e0570177d1fdc364d976056 | [
"Apache-2.0"
] | null | null | null | spinesTS/utils/_validation.py | BirchKwok/spinesTS | b88ec333f41f58979e0570177d1fdc364d976056 | [
"Apache-2.0"
] | null | null | null | import numpy as np
| 23.875 | 67 | 0.612565 |
81feb43ec495c8683dc8e553db15a96568c7f33c | 34,772 | py | Python | sphinxsharp-pro/sphinxsharp.py | madTeddy/sphinxsharp-pro | e7a164214113cef33bca96e9fbbab3feafe16823 | [
"MIT"
] | 2 | 2019-04-22T12:59:26.000Z | 2021-07-30T21:32:44.000Z | sphinxsharp-pro/sphinxsharp.py | madTeddy/sphinxsharp-pro | e7a164214113cef33bca96e9fbbab3feafe16823 | [
"MIT"
] | null | null | null | sphinxsharp-pro/sphinxsharp.py | madTeddy/sphinxsharp-pro | e7a164214113cef33bca96e9fbbab3feafe16823 | [
"MIT"
] | null | null | null | """
CSharp (#) domain for sphinx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sphinxsharp Pro (with custom styling)
:copyright: Copyright 2021 by MadTeddy
"""
import re
import warnings
from os import path
from collections import defaultdict, namedtuple
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from sphinx.locale import get_translation
from sphinx.domains import Domain, Index, ObjType
from sphinx.roles import XRefRole
from sphinx.directives import ObjectDescription
from sphinx.util.docfields import DocFieldTransformer
from sphinx.util.nodes import make_refnode
from sphinx import addnodes
from sphinx.util.fileutil import copy_asset
MODIFIERS = ('public', 'private', 'protected', 'internal',
'static', 'sealed', 'abstract', 'const', 'partial',
'readonly', 'virtual', 'extern', 'new', 'override',
'unsafe', 'async', 'event', 'delegate')
VALUE_KEYWORDS = ('char', 'ulong', 'byte', 'decimal',
'double', 'bool', 'int', 'null', 'sbyte',
'float', 'long', 'object', 'short', 'string',
'uint', 'ushort', 'void')
PARAM_MODIFIERS = ('ref', 'out', 'params')
MODIFIERS_RE = '|'.join(MODIFIERS)
PARAM_MODIFIERS_RE = '|'.join(PARAM_MODIFIERS)
TYPE_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?(\w+)\s([\w\.]+)(?:<(.+)>)?(?:\s?\:\s?(.+))?$')
REF_TYPE_RE = re.compile(r'^(?:(new)\s+)?([\w\.]+)\s*(?:<(.+)>)*(\[\])*\s?(?:\((.*)\))?$')
METHOD_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?([^\s=\(\)]+\s+)?([^\s=\(\)]+)\s?(?:\<(.+)\>)?\s?(?:\((.+)*\))$')
PARAM_SIG_RE = re.compile(r'^(?:(?:(' + PARAM_MODIFIERS_RE + r')\s)*)?([^=]+)\s+([^=]+)\s*(?:=\s?(.+))?$')
VAR_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?([^=]+)\s+([^\s=]+)\s*(?:=\s*(.+))?$')
PROP_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?(.+)\s+([^\s]+)\s*(?:{(\s*get;\s*)?((?:'
+ MODIFIERS_RE + r')?\s*set;\s*)?})$')
ENUM_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(?:enum)\s?(\w+)$')
_ = get_translation('sphinxsharp')
def split_sig(params):
    """Split a C# signature's parameter list on top-level commas.

    Commas nested inside ``<>``, ``{}`` or ``[]`` (generic arguments,
    initializers, array ranks) are kept inside their fragment, so e.g.
    ``"Dictionary<string, int> map, float x"`` yields two entries.

    :param params: raw text between the parentheses, or ``None``/empty.
    :return: list of fragments (surrounding spaces preserved), or ``None``
        when *params* is falsy.
    """
    if not params:
        return None
    fragments = []
    buffer = ''
    depth = 0
    for symbol in params:
        if symbol in ('<', '{', '['):
            depth += 1
        elif symbol in ('>', '}', ']'):
            depth -= 1
        if symbol == ',' and depth == 0:
            # Top-level separator: flush the fragment collected so far.
            fragments.append(buffer)
            buffer = ''
        elif symbol != ',' or depth > 0:
            buffer += symbol
    if buffer.strip() != '':
        fragments.append(buffer)
    return fragments
def get_targets(target, node):
    """Collect every cross-reference lookup name for *target*.

    The bare name comes first; after it, the name is qualified with each
    prefix of the node's parent path, from the full path down to the
    first segment (e.g. parent ``a.b`` yields ``a.b.t`` then ``a.t``).
    """
    lookups = [target]
    parent_path = node[CSharpObject.PARENT_ATTR_NAME]
    if parent_path is not None:
        prefix = parent_path.split('.')
        while prefix:
            lookups.append('{}.{}'.format('.'.join(prefix), target))
            prefix.pop()
    return lookups
def copy_asset_files(app, exc):
    """Sphinx ``build-finished`` hook: install the extension's CSS.

    Copies ``_static/css/sphinxsharp.css`` (shipped next to this module)
    into the build output's ``_static`` directory, but only when the
    build finished without raising (*exc* is ``None``).
    """
    if exc is not None:  # build failed: leave the output tree alone
        return
    here = path.abspath(path.dirname(__file__))
    destination = path.join(app.outdir, '_static')
    for asset in (path.join(here, '_static/css/sphinxsharp.css'),):
        copy_asset(asset, destination)
| 38.085433 | 118 | 0.569884 |
81ff4f468611ece2f0ec909a6f48f5be0e5338fb | 404 | py | Python | articles/migrations/0003_article_published_at.py | mosalaheg/django3.2 | 551ecd0c8f633bcd9c37a95688e7bed958c0b91c | [
"MIT"
] | null | null | null | articles/migrations/0003_article_published_at.py | mosalaheg/django3.2 | 551ecd0c8f633bcd9c37a95688e7bed958c0b91c | [
"MIT"
] | null | null | null | articles/migrations/0003_article_published_at.py | mosalaheg/django3.2 | 551ecd0c8f633bcd9c37a95688e7bed958c0b91c | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-10-02 08:24
from django.db import migrations, models
| 21.263158 | 62 | 0.608911 |
81ff80a60fb6d3f51394a723adeda192add9c640 | 5,622 | py | Python | kratos/mpi/tests/test_data_communicator_factory.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | kratos/mpi/tests/test_data_communicator_factory.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | kratos/mpi/tests/test_data_communicator_factory.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | from KratosMultiphysics import ParallelEnvironment, IsDistributedRun
if IsDistributedRun():
from KratosMultiphysics.mpi import DataCommunicatorFactory
import KratosMultiphysics.KratosUnittest as UnitTest
import math
if __name__ == "__main__":
UnitTest.main()
| 52.055556 | 155 | 0.730523 |
81ffc4260214e21a8fbb8d247a68944ab547969b | 643 | py | Python | example/usage/example_kate.py | vodka2/vkaudiotoken-python | 5720e4cf77f5e1b20c3bf57f3df0717638a539e0 | [
"MIT"
] | 32 | 2020-07-21T18:32:59.000Z | 2022-03-20T21:16:11.000Z | example/usage/example_kate.py | vodka2/vkaudiotoken-python | 5720e4cf77f5e1b20c3bf57f3df0717638a539e0 | [
"MIT"
] | 1 | 2020-10-04T04:41:06.000Z | 2020-10-05T11:43:48.000Z | example/usage/example_kate.py | vodka2/vkaudiotoken-python | 5720e4cf77f5e1b20c3bf57f3df0717638a539e0 | [
"MIT"
] | 2 | 2021-09-21T01:17:05.000Z | 2022-03-17T10:17:22.000Z | from __future__ import print_function
try:
import vkaudiotoken
except ImportError:
import path_hack
from vkaudiotoken import supported_clients
import sys
import requests
import json
token = sys.argv[1]
user_agent = supported_clients.KATE.user_agent
sess = requests.session()
sess.headers.update({'User-Agent': user_agent})
prettyprint(sess.get(
"https://api.vk.com/method/audio.getById",
params=[('access_token', token),
('audios', '371745461_456289486,-41489995_202246189'),
('v', '5.95')]
))
| 21.433333 | 75 | 0.715397 |
c3013dccb7b2137642e33db7031ac540e1d949e4 | 811 | py | Python | create_order.py | behnam71/Crypto_P | 1196f06c611eac65dece323d62104233cf2386b1 | [
"MIT"
] | null | null | null | create_order.py | behnam71/Crypto_P | 1196f06c611eac65dece323d62104233cf2386b1 | [
"MIT"
] | null | null | null | create_order.py | behnam71/Crypto_P | 1196f06c611eac65dece323d62104233cf2386b1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
from pprint import pprint
# Make the repository's local ccxt package importable when this example is
# run straight from the source tree (examples live three levels down).
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt  # noqa: E402
# NOTE(review): credentials are hard-coded; the script targets the testnet
# below, but secrets should still come from the environment, not source.
exchange = ccxt.binance({
    'apiKey': 'SmweB9bNM2qpYkgl4zaQSFPpSzYpyoJ6B3BE9rCm0XYcAdIE0b7n6bm11e8jMwnI',
    'secret': '8x6LtJztmIeGPZyiJOC7lVfg2ixCUYkhVV7CKVWq2LVlPh8mo3Ab7SMkaC8qTZLt',
    'enableRateLimit': True,  # let ccxt throttle requests to the exchange limits
})
exchange.urls['api'] = exchange.urls['test']  # use the testnet
# NOTE: ``type`` shadows the builtin; kept as-is to preserve behavior.
symbol = 'BTC/USDT'; type = 'market'  # or limit
amount = 0.01; price = None; side = 'buy'  # or sell; price unused for market orders
# extra params and overrides if needed
params = {
    'test': True,  # test if it's valid, but don't actually place it
}
# NOTE(review): ``params`` is built but never passed to create_order(), so the
# 'test' flag has no effect -- confirm whether it should be the 6th argument.
order = exchange.create_order(symbol, type, side, amount, price)
pprint(order)
| 27.033333 | 83 | 0.713933 |
c301529eb7d8f8a6047d8e286ff806d7da8427d3 | 2,235 | py | Python | tools/testrunner/outproc/message.py | LancerWang001/v8 | 42ff4531f590b901ade0a18bfd03e56485fe2452 | [
"BSD-3-Clause"
] | 20,995 | 2015-01-01T05:12:40.000Z | 2022-03-31T21:39:18.000Z | tools/testrunner/outproc/message.py | Andrea-MariaDB-2/v8 | a0f0ebd7a876e8cb2210115adbfcffe900e99540 | [
"BSD-3-Clause"
] | 333 | 2020-07-15T17:06:05.000Z | 2021-03-15T12:13:09.000Z | tools/testrunner/outproc/message.py | Andrea-MariaDB-2/v8 | a0f0ebd7a876e8cb2210115adbfcffe900e99540 | [
"BSD-3-Clause"
] | 4,523 | 2015-01-01T15:12:34.000Z | 2022-03-28T06:23:41.000Z | # Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import os
import re
from . import base
| 32.867647 | 72 | 0.648322 |
c3027f734157db362e121ea8ce2b5d36ad4e6075 | 604 | py | Python | gemtown/users/urls.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
] | null | null | null | gemtown/users/urls.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
] | 5 | 2020-09-04T20:13:39.000Z | 2022-02-17T22:03:33.000Z | gemtown/users/urls.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = "users"
urlpatterns = [
path("all/", view=views.UserList.as_view(), name="all_user"),
path("<int:user_id>/password/", view=views.ChangePassword.as_view(), name="change password"),
path("<int:user_id>/follow/", view=views.FollowUser.as_view(), name="follow user"),
path("<int:user_id>/unfollow/", view=views.UnfollowUser.as_view(), name="unfollow user"),
path("<int:user_id>/", view=views.UserFeed.as_view(), name="user_detail_infomation"),
path("login/facebook/", view=views.FacebookLogin.as_view(), name="fb_login"),
] | 50.333333 | 97 | 0.701987 |
c302fe24cced11c5bc506098882205738bad2b79 | 3,132 | py | Python | Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import pytest
from Thycotic import Client, \
secret_password_get_command, secret_username_get_command, \
secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, \
secret_delete_command, folder_create_command, folder_delete_command, folder_update_command
from test_data.context import GET_PASSWORD_BY_ID_CONTEXT, GET_USERNAME_BY_ID_CONTENT, \
SECRET_GET_CONTENT, SECRET_PASSWORD_UPDATE_CONTEXT, SECRET_CHECKOUT_CONTEXT, SECRET_CHECKIN_CONTEXT, \
SECRET_DELETE_CONTEXT, FOLDER_CREATE_CONTEXT, FOLDER_DELETE_CONTEXT, FOLDER_UPDATE_CONTEXT
from test_data.http_responses import GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_RAW_RESPONSE, \
SECRET_GET_RAW_RESPONSE, SECRET_PASSWORD_UPDATE_RAW_RESPONSE, SECRET_CHECKOUT_RAW_RESPONSE, \
SECRET_CHECKIN_RAW_RESPONSE, SECRET_DELETE_RAW_RESPONSE, FOLDER_CREATE_RAW_RESPONSE, FOLDER_DELETE_RAW_RESPONSE, \
FOLDER_UPDATE_RAW_RESPONSE
# Canned argument dicts for each Thycotic command under test.  The literal
# IDs ("4", "9", "12") presumably line up with the imported *_CONTEXT /
# *_RAW_RESPONSE fixtures from test_data -- confirm before changing them.
GET_PASSWORD_BY_ID_ARGS = {"secret_id": "4"}
GET_USERNAME_BY_ID_ARGS = {"secret_id": "4"}
SECRET_GET_ARGS = {"secret_id": "4"}
SECRET_PASSWORD_UPDATE_ARGS = {"secret_id": "4", "newpassword": "NEWPASSWORD1"}
SECRET_CHECKOUT_ARGS = {"secret_id": "4"}
SECRET_CHECKIN_ARGS = {"secret_id": "4"}
SECRET_DELETE_ARGS = {"id": "9"}
FOLDER_CREATE_ARGS = {"folderName": "xsoarFolderTest3", "folderTypeId": "1", "parentFolderId": "3"}
FOLDER_DELETE_ARGS = {"folder_id": "9"}
FOLDER_UPDATE_ARGS = {"id": "12", "folderName": "xsoarTF3New"}
| 60.230769 | 120 | 0.814815 |
c3044c3a6846d86e6151eb00472156db75ba2d69 | 1,046 | py | Python | xml_to_csv.py | bhavdeepsingh33/blood-cell-detection | 1afe0ce7aba7c621eb13fc055cc706981fcf4962 | [
"MIT"
] | null | null | null | xml_to_csv.py | bhavdeepsingh33/blood-cell-detection | 1afe0ce7aba7c621eb13fc055cc706981fcf4962 | [
"MIT"
] | null | null | null | xml_to_csv.py | bhavdeepsingh33/blood-cell-detection | 1afe0ce7aba7c621eb13fc055cc706981fcf4962 | [
"MIT"
] | null | null | null | import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
main()
| 29.885714 | 78 | 0.543977 |
c304c12fe37620c738efd7817690de209aad07c4 | 1,190 | py | Python | src/pynnet/test.py | RalphMao/kaldi-pynnet | a8c050e976a138b43ff0c2ea2a1def72f51f9177 | [
"Apache-2.0"
] | null | null | null | src/pynnet/test.py | RalphMao/kaldi-pynnet | a8c050e976a138b43ff0c2ea2a1def72f51f9177 | [
"Apache-2.0"
] | null | null | null | src/pynnet/test.py | RalphMao/kaldi-pynnet | a8c050e976a138b43ff0c2ea2a1def72f51f9177 | [
"Apache-2.0"
] | null | null | null | import _nnet
import numpy as np
import IPython
net = _nnet.Nnet()
net.read('/home/maohz12/online_50h_Tsinghua/exp_train_50h/lstm_karel_bak/nnet/nnet_iter14_learnrate7.8125e-07_tr1.2687_cv1.6941')
# Test1
blobs = net.layers[0].get_params()
x = blobs[1].data.flatten()
x_test = np.fromfile('test/1.bin', 'f')
assert np.sum(abs(x-x_test)) < 1e-5
x = blobs[4].data.flatten()
x_test = np.fromfile('test/4.bin', 'f')
assert np.sum(abs(x-x_test)) < 1e-5
blobs[1].data[:] = np.arange(blobs[1].data.size).reshape(blobs[1].data.shape)
blobs[4].data[:] = np.arange(blobs[4].data.size).reshape(blobs[4].data.shape)
net.layers[0].set_params(blobs)
net.write('test/test_nnet', 0)
pointer, read_only_flag = blobs[1].data.__array_interface__['data']
# Test 2
data_copy = blobs[1].data.copy()
del net
pointer, read_only_flag = blobs[1].data.__array_interface__['data']
assert np.sum(abs(blobs[1].data - data_copy)) < 1e-5
# Test 3
net = _nnet.Nnet()
net.read('test/test_nnet')
blobs_new = net.layers[0].get_params()
x = blobs[1].data
x_test = blobs_new[1].data
assert np.sum(abs(x-x_test)) < 1e-5
x = blobs[4].data
x_test = blobs_new[4].data
assert np.sum(abs(x-x_test)) < 1e-5
print "Test passed"
| 27.045455 | 129 | 0.715966 |
c304d7f7756e66ca85c0f39399c15dd4c7181588 | 1,329 | py | Python | collegiate-explorer-admin/cc_admin/cc_admin/test.py | Chit-Chaat/Collegiate_Explorer_APP | f30171d01fec62a836332b5508374144fbb487c7 | [
"MIT"
] | 3 | 2021-05-24T23:06:40.000Z | 2021-11-08T10:32:42.000Z | collegiate-explorer-admin/cc_admin/cc_admin/test.py | Chit-Chaat/Collegiate_Explorer_APP | f30171d01fec62a836332b5508374144fbb487c7 | [
"MIT"
] | 4 | 2020-10-12T03:00:43.000Z | 2020-11-17T01:47:56.000Z | collegiate-explorer-admin/cc_admin/cc_admin/test.py | Chit-Chaat/Collegiate_Explorer_APP | f30171d01fec62a836332b5508374144fbb487c7 | [
"MIT"
] | 2 | 2021-03-01T15:30:26.000Z | 2022-01-13T21:30:20.000Z | __author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '10/28/2020 4:52 PM'
# import re
#
#
# def format_qs_score(score_str):
# """
# help you generate a qs score
# 1 - 100 : 5
# 141-200 : 4
# =100: 4
# N/A 3
# :param score_str:
# :return:
# """
# score = 3
# if not score_str or score_str != "N/A":
# try:
# parts = int(list(filter(lambda val: val,
# list(re.split('-|=', score_str))))[0])
# except:
# return 3
# score = 5 - int(parts / 100)
# if score > 5 or score < 1:
# return 3
# return score
#
#
# print(format_qs_score("=100"))
#
# print(list(filter(lambda val: val, re.split('-|=', "=100"))))
# import csv
# import numpy as np
# import requests
#
# with open('./college_explorer.csv', newline='', encoding='utf-8') as file:
# data = list(csv.reader(file))
# data = np.array(data)
# img_list = data[1:, 33].tolist()
#
# img_list = list(filter(lambda url: url != 'N/A', img_list))
#
#
# for url in img_list:
# response = requests.get(url)
# if response.status_code == 200:
# school_name = url.split('/')[-1].split('_')[0]
# with open("./images/" + school_name + ".jpg", 'wb') as f:
# f.write(response.content) | 26.058824 | 76 | 0.527464 |
c306596434700224d0b28f389b38ad8d05d0205f | 4,311 | py | Python | djangocms_translations/utils.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | [
"BSD-3-Clause"
] | 3 | 2019-01-14T13:30:38.000Z | 2020-08-10T22:16:06.000Z | djangocms_translations/utils.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | [
"BSD-3-Clause"
] | 5 | 2018-12-20T13:56:47.000Z | 2021-07-20T07:13:01.000Z | djangocms_translations/utils.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from itertools import chain
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import BooleanField
from django.forms import modelform_factory
from django.utils.lru_cache import lru_cache
from django.utils.safestring import mark_safe
from django.utils.translation import get_language_info
from djangocms_transfer.utils import get_plugin_class, get_plugin_model
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import JsonLexer
from yurl import URL
from .conf import TRANSLATIONS_CONF
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
USE_HTTPS = getattr(settings, 'URLS_USE_HTTPS', False)
def get_language_name(lang_code):
    """Return a display name for *lang_code*.

    Uses Django's language info when it knows the code exactly; otherwise
    prefers the project's ``settings.LANGUAGES`` entry and only falls back
    to Django's best-effort name when the code is not configured.
    """
    info = get_language_info(lang_code)
    if info['code'] == lang_code:
        return info['name']
    configured = dict(settings.LANGUAGES)
    if lang_code in configured:
        return configured[lang_code]
    # fallback to the name Django derived for a related code
    return info['name']
def get_page_url(page, language, is_https=False):
    """Build the absolute URL of *page* for *language*.

    The host is taken from the page's site domain; the scheme is ``https``
    when *is_https* is true and ``http`` otherwise.
    """
    scheme = 'https' if is_https else 'http'
    origin = '{}://{}'.format(scheme, page.node.site.domain)
    return urljoin(origin, page.get_absolute_url(language=language))
| 28.549669 | 98 | 0.68128 |
c3068d09ac63699b36a60bd71dd55030585fc665 | 597 | py | Python | bin/render_ingress.py | phplaboratory/madcore-ai | ea866334480d77b084ce971506cfdb285405c122 | [
"MIT"
] | null | null | null | bin/render_ingress.py | phplaboratory/madcore-ai | ea866334480d77b084ce971506cfdb285405c122 | [
"MIT"
] | null | null | null | bin/render_ingress.py | phplaboratory/madcore-ai | ea866334480d77b084ce971506cfdb285405c122 | [
"MIT"
] | null | null | null | import sys, os, json, jinja2, redis
from jinja2 import Template
# Cluster metadata lives in local Redis db 2 under the "owner-info" key.
r_server = redis.StrictRedis('127.0.0.1', db=2)
i_key = "owner-info"
json_data = r_server.get(i_key)
if json_data is not None:
    data = json.loads(json_data)
    main_domain = data['Hostname']
# argv: [1]=subdomain, [2]=service name, [3]=service port, [4]=namespace.
# NOTE(review): if the Redis key is missing, ``main_domain`` is never bound
# and the next line raises NameError -- confirm whether that is intended.
fqdn = sys.argv[1] + ".ext." + main_domain
config_template = open('/opt/madcore/bin/templates/ingress.template').read()
template = Template(config_template)
# Render the ingress template and write it to /opt/ingress/<service>.yaml.
config = (template.render(HOST=fqdn, SERVICE_NAME=sys.argv[2], SERVICE_PORT=sys.argv[3], NAMESPACE=sys.argv[4]))
open("/opt/ingress/" + sys.argv[2] + ".yaml", "w").write(config)
| 35.117647 | 112 | 0.715243 |
c307055a5d64c20c7212a67b032444ffbf9d764a | 569 | py | Python | Linear_Insertion_Sort.py | toppassion/python-master-app | 21d854186664440f997bfe53010b242f62979e7f | [
"MIT"
] | null | null | null | Linear_Insertion_Sort.py | toppassion/python-master-app | 21d854186664440f997bfe53010b242f62979e7f | [
"MIT"
] | null | null | null | Linear_Insertion_Sort.py | toppassion/python-master-app | 21d854186664440f997bfe53010b242f62979e7f | [
"MIT"
] | 1 | 2021-12-08T11:38:20.000Z | 2021-12-08T11:38:20.000Z |
if __name__ == "__main__":
Test_list = input("Enter the list of Numbers: ").split()
Test_list = [int(i) for i in Test_list]
print(f"Binary Insertion Sort: {Insertion_Sort(Test_list)}") | 27.095238 | 64 | 0.616872 |
c30749f6e672c3d0997217dae6e0ef97c37975d8 | 631 | py | Python | scripts/tests/snapshots/snap_keywords_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 105 | 2018-02-07T22:07:47.000Z | 2022-03-31T18:16:47.000Z | scripts/tests/snapshots/snap_keywords_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 57 | 2018-02-07T23:07:41.000Z | 2021-11-21T17:14:06.000Z | scripts/tests/snapshots/snap_keywords_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 10 | 2018-02-24T23:44:51.000Z | 2022-03-02T07:52:27.000Z | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_keywords 1'] = '[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'
| 57.363636 | 462 | 0.505547 |
c307664c89867d683750f1b12c8b33f9be0a22ae | 443 | py | Python | pyside/lesson_10_main.py | LueyEscargot/pyGuiTest | c072fe29a7c94dc60ec54344a5d4a91253d25f3f | [
"MIT"
] | null | null | null | pyside/lesson_10_main.py | LueyEscargot/pyGuiTest | c072fe29a7c94dc60ec54344a5d4a91253d25f3f | [
"MIT"
] | null | null | null | pyside/lesson_10_main.py | LueyEscargot/pyGuiTest | c072fe29a7c94dc60ec54344a5d4a91253d25f3f | [
"MIT"
] | null | null | null | import sys
from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2.QtCore import QFile
from lesson_10_mainWidget import Ui_MainWindow
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| 23.315789 | 55 | 0.708804 |
c308e55ef9a8f6ca2122399901177b70c65eef30 | 1,208 | py | Python | test/test_everything.py | jameschapman19/Eigengame | 165d1bf35076fbfc6e65a987cb2e09a174776927 | [
"MIT"
] | null | null | null | test/test_everything.py | jameschapman19/Eigengame | 165d1bf35076fbfc6e65a987cb2e09a174776927 | [
"MIT"
] | null | null | null | test/test_everything.py | jameschapman19/Eigengame | 165d1bf35076fbfc6e65a987cb2e09a174776927 | [
"MIT"
] | null | null | null | import jax.numpy as jnp
import numpy as np
from jax import random
from algorithms import Game, GHA, Oja, Krasulina, Numpy
def test_pca():
    """Smoke-test every PCA solver on a small random problem.

    Fits the closed-form ``Numpy`` solver and each iterative solver
    (``Game``, ``GHA``, ``Oja``, ``Krasulina``) on the same
    column-normalized data, then checks the iterative solvers' scores
    agree with the exact solution to ~1 decimal place.
    """
    n = 10            # number of samples
    p = 2             # number of features
    n_components = 2  # components to recover
    batch_size = 2
    epochs = 10
    key = random.PRNGKey(0)  # fixed seed keeps the test deterministic
    X = random.normal(key, (n, p))
    X = X / jnp.linalg.norm(X, axis=0)  # normalize each column
    # Reference solution from the closed-form solver.
    numpy = Numpy(n_components=n_components).fit(X)
    game = Game(
        n_components=n_components, batch_size=batch_size, epochs=epochs
    ).fit(X)
    gha = GHA(n_components=n_components, batch_size=batch_size, epochs=epochs).fit(
        X
    )
    oja = Oja(n_components=n_components, batch_size=batch_size, epochs=epochs).fit(
        X
    )
    krasulina = Krasulina(
        n_components=n_components, batch_size=batch_size, epochs=epochs
    ).fit(X)
    # assert_almost_equal raises AssertionError on mismatch and always
    # returns None, so wrapping it in an outer ``assert ... is None`` (as
    # the original did) was redundant -- call it directly.
    np.testing.assert_almost_equal(
        [
            game.score(X),
            gha.score(X),
            oja.score(X),
            krasulina.score(X),
        ],
        numpy.score(X),
        decimal=0,
    )
| 24.16 | 83 | 0.543874 |
c309029b235f5e5d51eecba38f135cf9e46dd8c7 | 971 | py | Python | script/python/result_get.py | yztong/LeNet_RTL | 0f714522dd7adc491bfbcf34b22f9f594e7442af | [
"MIT"
] | 29 | 2018-03-22T07:26:37.000Z | 2022-03-21T08:28:36.000Z | script/python/result_get.py | honorpeter/LeNet_RTL | 0f714522dd7adc491bfbcf34b22f9f594e7442af | [
"MIT"
] | 1 | 2021-04-12T13:28:46.000Z | 2021-04-21T10:00:53.000Z | script/python/result_get.py | honorpeter/LeNet_RTL | 0f714522dd7adc491bfbcf34b22f9f594e7442af | [
"MIT"
] | 9 | 2019-04-06T06:27:41.000Z | 2021-12-28T12:11:18.000Z | import numpy as np
from radix import radixConvert
c = radixConvert()
a = np.load("../../data/5/layer4.npy")
print(a.shape)
a = a*128
a = np.around(a).astype(np.int16)
print(a)
a = np.load('../../data/6.npy')
a = a*128
a = np.around(a).astype(np.int8)
print(a.shape)
for i in range(84):
print(i)
print(a[i])
'''
a = a*128
print(a)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
if a[i][j] > 127:
a[i][j] = 127
a = np.around(a).astype(np.int8)
print(a)
print(a[4][17])
weight_file = open('f1_rom.coe', 'w')
weight_file.write('MEMORY_INITIALIZATION_RADIX=2;\n')
weight_file.write('MEMORY_INITIALIZATION_VECTOR=\n')
for i in range(32):
for j in range(32):
if(i < 2 or i > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
elif(j < 2 or j > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
else:
weight_file.write(c.dec2Bincmpmt(str(a[i-2][j-2]), 8)+',\n')
'''
| 23.682927 | 72 | 0.582904 |
c3092b2cfa6e3e6a80652151da3eb7e1dffe233e | 1,237 | py | Python | saifooler/classifiers/image_net_classifier.py | sailab-code/SAIFooler | 76f91c33624273227d8ee2d974aa5b7b90ace5ac | [
"MIT"
] | null | null | null | saifooler/classifiers/image_net_classifier.py | sailab-code/SAIFooler | 76f91c33624273227d8ee2d974aa5b7b90ace5ac | [
"MIT"
] | null | null | null | saifooler/classifiers/image_net_classifier.py | sailab-code/SAIFooler | 76f91c33624273227d8ee2d974aa5b7b90ace5ac | [
"MIT"
] | null | null | null | from saifooler.classifiers.classifier import Classifier
import torch
import json
import os
| 29.452381 | 104 | 0.600647 |
c3093f2af126d570c81bda760a6f55d8df7bb8fb | 12,130 | py | Python | bertsification-multi-lstm.py | linhd-postdata/alberti | 4006eb0b97fe9e9bf3d8d1b014b1080713496da1 | [
"Apache-2.0"
] | null | null | null | bertsification-multi-lstm.py | linhd-postdata/alberti | 4006eb0b97fe9e9bf3d8d1b014b1080713496da1 | [
"Apache-2.0"
] | null | null | null | bertsification-multi-lstm.py | linhd-postdata/alberti | 4006eb0b97fe9e9bf3d8d1b014b1080713496da1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# conda install pytorch>=1.6 cudatoolkit=10.2 -c pytorch
# wandb login XXX
import json
import logging
import os
import re
import sklearn
import time
from itertools import product
import numpy as np
import pandas as pd
import wandb
#from IPython import get_ipython
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from simpletransformers.classification import MultiLabelClassificationModel
from sklearn.model_selection import train_test_split
# --- Experiment configuration, read from environment variables. ---
# Strings interpreted as boolean "true" for the EVAL/OVERWRITE flags.
truthy_values = ("true", "1", "y", "yes")
# Experiment tag used to name log files and model output directories.
TAG = os.environ.get("TAG", "bertsification")
# Languages to train on: any of "es", "ge", "en", "multi" (comma-separated).
LANGS = [lang.strip() for lang in os.environ.get("LANGS", "es,ge,en,multi").lower().split(",")]
# Optional override of the default model list: ";"-separated
# "model_type,model_name" pairs (parsed further below).
MODELNAMES = os.environ.get("MODELNAMES")
# Whether to hold out an eval split and evaluate during training.
EVAL = os.environ.get("EVAL", "True").lower() in truthy_values
# Whether to re-train models whose output directory already exists.
OVERWRITE = os.environ.get("OVERWRITE", "False").lower() in truthy_values
# Log to a timestamped file under models/ so separate runs do not clash.
logging.basicConfig(level=logging.INFO, filename=time.strftime("models/{}-%Y-%m-%dT%H%M%S.log".format(TAG)))
# Record our PID so the long-running job can be monitored/killed externally.
with open('pid', 'w') as pid:
    pid.write(str(os.getpid()))
logging.info("Experiment '{}' on {}, (eval = {}, pid = {})".format(
    TAG, LANGS, str(EVAL), str(os.getpid()),
))
# SimpleTransformers (based on HuggingFace/Transformers) for Multilingual Scansion
# We will be using `simpletransformers`, a wrapper of `huggingface/transformers` to fine-tune different BERT-based and other architecture models with support for Spanish.
# Utils
# Spanish
# if not os.path.isfile("adso100.json"):
# get_ipython().system("averell export adso100 --filename adso100.json")
# if not os.path.isfile("adso.json"):
# get_ipython().system("averell export adso --filename adso.json")
# Held-out Spanish test set (manually checked lines only).
# NOTE(review): clean_text/metric2binary are not defined in the visible part
# of this file -- presumably text normalization and meter-to-multilabel
# encoding helpers; confirm against the full source.
es_test = (pd
    .read_json(open("adso100.json"))
    .query("manually_checked == True")[["line_text", "metrical_pattern"]]
    .assign(
        line_text=lambda x: x["line_text"].apply(clean_text),
        length=lambda x: x["metrical_pattern"].str.len()
    )
    .drop_duplicates("line_text")
    .rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
# Keep only hendecasyllables (11-position metrical patterns).
es_test = es_test[es_test["length"] == 11]
es = (pd
    .read_json(open("adso.json"))
    .query("manually_checked == True")[["line_text", "metrical_pattern"]]
    .assign(
        line_text=lambda x: x["line_text"].apply(clean_text),
        length=lambda x: x["metrical_pattern"].str.len()
    )
    .drop_duplicates("line_text")
    .rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
# Drop any line that also appears in the test set, then keep 11-syllable
# lines only.  NOTE(review): the second indexer chains a boolean mask built
# from the unfiltered frame -- pandas may warn about misaligned indexing;
# confirm this selects the intended rows.
es = es[~es["text"].isin(es_test["text"])][es["length"] == 11]
es["labels"] = es.meter.apply(metric2binary)
# 75/25 train/eval split, fixed seed for reproducibility.
es_train, es_eval = train_test_split(
    es[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("Spanish")
logging.info("- Lines: {} train, {} eval, {} test".format(es_train.shape[0], es_eval.shape[0], es_test.shape[0]))
# English
# English test set from the "For Better For Verse" CSV; the CSV's
# prosodic-meter column is kept as the state-of-the-art baseline ("sota").
en_test = (pd
    .read_csv("4b4v_prosodic_meter.csv")
    .assign(
        text=lambda x: x["text"].apply(clean_text),
        length=lambda x: x["meter"].str.len()
    )
    .drop_duplicates("text")
    .rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
# English covers a range of line lengths (5-11 metrical positions).
en_test = en_test.query("length in (5,6,7,8,9,10,11)")
# if not os.path.isfile("ecpa.json"):
#     get_ipython().system("averell export ecpa --filename ecpa.json")
en = (pd
    .read_json(open("ecpa.json"))
    .query("manually_checked == True")[["line_text", "metrical_pattern"]]
    .assign(
        line_text=lambda x: x["line_text"].apply(clean_text),
        # Strip foot separators and grouping marks from the annotation.
        metrical_pattern=lambda x: x["metrical_pattern"].str.replace("|", "").str.replace("(", "").str.replace(")", "")
    )
    .assign(
        length=lambda x: x["metrical_pattern"].str.len(),
    )
    .drop_duplicates("line_text")
    .rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
# Exclude test-set lines, keep supported lengths only.
en = en[~en["text"].isin(en_test["text"])].query("length in (5,6,7,8,9,10,11)")
en["labels"] = en.meter.apply(metric2binary)
en_train, en_eval = train_test_split(
    en[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("English")
logging.info("- Lines: {} train, {} eval, {} test".format(en_train.shape[0], en_eval.shape[0], en_test.shape[0]))
# Baseline accuracy of the existing (sota) annotator on the test set.
en_sota = sum(en_test.meter == en_test.sota) / en_test.meter.size
# German
# German data from PO-EMO; "metricalizer" predictions serve as the baseline.
ge = (pd
    .read_csv("po-emo-metricalizer.csv")
    .rename(columns={"verse": "text", "annotated_pattern": "meter", "metricalizer_pattern": "sota"})
    .assign(
        text=lambda x: x["text"].apply(clean_text),
        length=lambda x: x["meter"].str.len()
    )
    .drop_duplicates("text")
    .query("length in (5, 6, 7, 8, 9, 10, 11)")
)
ge["labels"] = ge.meter.apply(metric2binary)
# German has no separate test file, so carve one out: 15% test, then
# an approximate 70/15 train/eval split of the remainder (0.176 of 85%).
ge_train_eval, ge_test = train_test_split(ge, test_size=0.15, random_state=42)
ge_train, ge_eval = train_test_split(
    ge_train_eval[["text", "labels"]], test_size=0.176, random_state=42)
logging.info("German")
logging.info("- Lines: {} train, {} eval, {} test".format(ge_train.shape[0], ge_eval.shape[0], ge_test.shape[0]))
# Baseline accuracy of metricalizer on the held-out German test set.
ge_sota = sum(ge_test.meter == ge_test.sota) / ge_test.meter.size
# training
# Multilingual inputs
# - bert bert-base-multilingual-cased
# - distilbert distilbert-base-multilingual-cased
# - xlmroberta, xlm-roberta-base
# - xlmroberta, xlm-roberta-large
# Only English
# - roberta roberta-base
# - roberta roberta-large
# - albert albert-xxlarge-v2
# You can set class weights by using the optional weight argument
# (model_type, model_name) pairs to fine-tune.  See the comments above for
# which checkpoints accept multilingual input and which are English-only.
models = (
    # ("xlnet", "xlnet-base-cased"),
    ("bert", "bert-base-multilingual-cased"),
    ("distilbert", "distilbert-base-multilingual-cased"),
    ("roberta", "roberta-base"),
    ("roberta", "roberta-large"),
    ("xlmroberta", "xlm-roberta-base"),
    ("xlmroberta", "xlm-roberta-large"),
    ("electra", "google/electra-base-discriminator"),
    ("albert", "albert-base-v2"),
    ("albert", "albert-large-v2"),
)
# The MODELNAMES env var overrides the defaults: a ";"-separated list of
# "model_type,model_name" pairs (whitespace around the comma is stripped).
if MODELNAMES:
    models = [list(map(str.strip, modelname.split(",")))
              for modelname in MODELNAMES.split(";")]
# Languages to iterate over; fall back to all four if LANGS is empty.
langs = LANGS or ("es", "ge", "en", "multi")
# One fine-tuning run per (language, model) combination.
for lang, (model_type, model_name) in product(langs, models):
    model_output = 'models/{}-{}-{}-{}'.format(TAG, lang, model_type, model_name.replace("/", "-"))
    # Resume-friendly: skip combinations already trained unless OVERWRITE.
    if OVERWRITE is False and os.path.exists(model_output):
        logging.info("Skipping training of {} for {}".format(model_name, lang))
        continue
    logging.info("Starting training of {} for {}".format(model_name, lang))
    run = wandb.init(project=model_output.split("/")[-1], reinit=True)
    # num_labels=11 matches the maximum metrical-pattern length used above.
    model = MultiLabelClassificationModel(
        model_type, model_name, num_labels=11, args={
            'output_dir': model_output,
            'best_model_dir': '{}/best'.format(model_output),
            'reprocess_input_data': True,
            'overwrite_output_dir': True,
            'use_cached_eval_features': True,
            'num_train_epochs': 100, # For BERT, 2, 3, 4
            'save_steps': 10000,
            'early_stopping_patience': 5,
            'evaluate_during_training': EVAL,
            #'early_stopping_metric': "accuracy_score",
            'evaluate_during_training_steps': 1000,
            'early_stopping_delta': 0.00001,
            'manual_seed': 42,
            # 'learning_rate': 2e-5, # For BERT, 5e-5, 3e-5, 2e-5
            # For BERT 16, 32. It could be 128, but with gradient
            # accumulation set to 2 (below) this is equivalent.
            'train_batch_size': 16 if "large" in model_name else 32,
            'eval_batch_size': 16 if "large" in model_name else 32,
            # Doubles the effective batch size: gradients and weights are
            # only applied once every 2 steps for the "large" checkpoints.
            'gradient_accumulation_steps': 2 if "large" in model_name else 1,
            'max_seq_length': 32,
            'use_early_stopping': True,
            'wandb_project': model_output.split("/")[-1],
            #'wandb_kwargs': {'reinit': True},
            # "adam_epsilon": 3e-5, # 1e-8
            "silent": False,
            "fp16": False,
            "n_gpu": 2,
        })
    # Pick the train/eval frames for this language ("multi" pools all three).
    # NOTE(review): a lang value outside these four would leave train_df
    # undefined and raise NameError below -- confirm LANGS is validated.
    if lang == "multi":
        train_df = pd.concat([es_train, en_train, ge_train], ignore_index=True)
        eval_df = pd.concat([es_eval, en_eval, ge_eval], ignore_index=True)
    elif lang == "es":
        train_df = es_train
        eval_df = es_eval
    elif lang == "en":
        train_df = en_train
        eval_df = en_eval
    elif lang == "ge":
        train_df = ge_train
        eval_df = ge_eval
    if EVAL:
        model.train_model(train_df, eval_df=eval_df)
        # Evaluate on the held-out eval split.
        result, model_outputs, wrong_predictions = model.eval_model(eval_df)
        logging.info(str(result))
        #logging.info(str(model_outputs))
    else:
        # No eval split: train on everything.
        # NOTE(review): ge_train is concatenated here for *every* lang,
        # duplicating German rows (twice for lang == "ge") -- this looks
        # like a copy/paste slip; confirm it is intentional.
        train_eval_df = pd.concat([train_df, eval_df, ge_train], ignore_index=True)
        model.train_model(train_eval_df)
    # Per-language test accuracy: predictions are converted back to a meter
    # string and truncated to each line's annotated length before comparing.
    if lang in ("es", "multi"):
        es_test["predicted"], *_ = model.predict(es_test.text.values)
        es_test["predicted"] = es_test["predicted"].apply(label2metric)
        es_test["pred"] = es_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
        es_bert = sum(es_test.meter == es_test.pred) / es_test.meter.size
        logging.info("Accuracy [{}:es]: {} ({})".format(lang, es_bert, model_name))
        wandb.log({"accuracy_es": es_bert})
    if lang in ("en", "multi"):
        en_test["predicted"], *_ = model.predict(en_test.text.values)
        en_test["predicted"] = en_test["predicted"].apply(label2metric)
        en_test["pred"] = en_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
        en_bert = sum(en_test.meter == en_test.pred) / en_test.meter.size
        logging.info("Accuracy [{}:en]: {} ({})".format(lang, en_bert, model_name))
        wandb.log({"accuracy_en": en_bert})
    if lang in ("ge", "multi"):
        ge_test["predicted"], *_ = model.predict(ge_test.text.values)
        ge_test["predicted"] = ge_test["predicted"].apply(label2metric)
        ge_test["pred"] = ge_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
        ge_bert = sum(ge_test.meter == ge_test.pred) / ge_test.meter.size
        logging.info("Accuracy [{}:ge]: {} ({})".format(lang, ge_bert, model_name))
        wandb.log({"accuracy_ge": ge_bert})
    # Pooled accuracy across all three test sets (multilingual runs only).
    if lang in ("multi", ):
        test_df = pd.concat([es_test, en_test, ge_test], ignore_index=True)
        test_df["predicted"], *_ = model.predict(test_df.text.values)
        test_df["predicted"] = test_df["predicted"].apply(label2metric)
        test_df["pred"] = test_df.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
        multi_bert = sum(test_df.meter == test_df.pred) / test_df.meter.size
        logging.info("Accuracy [{}:multi]: {} ({})".format(lang, multi_bert, model_name))
        wandb.log({"accuracy_multi": multi_bert})
    run.finish()
    logging.info("Done training '{}'".format(model_output))
# get_ipython().system("rm -rf `ls -dt models/{}-*/checkpoint*/ | awk 'NR>5'`".format(TAG))
logging.info("Done training")
| 41.683849 | 170 | 0.639077 |
c309cc940b59cd3830a59d4a46d48907f9c3e32d | 515 | py | Python | go_server_app/views.py | benjaminaaron/simple-go-server | 0ebe6756f72f896fd014d060252c27c2907e7ae8 | [
"MIT"
] | 1 | 2017-11-29T22:39:05.000Z | 2017-11-29T22:39:05.000Z | go_server_app/views.py | benjaminaaron/simple-go-server | 0ebe6756f72f896fd014d060252c27c2907e7ae8 | [
"MIT"
] | 1 | 2017-11-09T18:41:41.000Z | 2017-11-09T19:14:08.000Z | go_server_app/views.py | benjaminaaron/simple-go-server | 0ebe6756f72f896fd014d060252c27c2907e7ae8 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import GameMeta
| 24.52381 | 98 | 0.747573 |
c30a8241bc4eb176e2d35bfc53ddbf79b7ca685f | 77 | py | Python | test/settings/test_kafka_consumer_config.py | DebasishMaji/PI | e293982cae8f8755d28d7b3de22966dc74759b90 | [
"Apache-2.0"
] | null | null | null | test/settings/test_kafka_consumer_config.py | DebasishMaji/PI | e293982cae8f8755d28d7b3de22966dc74759b90 | [
"Apache-2.0"
] | null | null | null | test/settings/test_kafka_consumer_config.py | DebasishMaji/PI | e293982cae8f8755d28d7b3de22966dc74759b90 | [
"Apache-2.0"
] | null | null | null | import unittest
| 12.833333 | 49 | 0.805195 |
c30ea52dd60b15b77f690236c9544837627ac0f7 | 7,684 | py | Python | Pycraft/StartupAnimation.py | demirdogukan/InsiderPycraft | 5567107326fbd222a7df6aabf4ab265e0a157636 | [
"MIT"
] | 22 | 2021-03-25T17:47:45.000Z | 2022-03-29T01:56:12.000Z | Pycraft/StartupAnimation.py | demirdogukan/InsiderPycraft | 5567107326fbd222a7df6aabf4ab265e0a157636 | [
"MIT"
] | 1 | 2021-12-22T16:12:59.000Z | 2021-12-22T16:12:59.000Z | Pycraft/StartupAnimation.py | demirdogukan/InsiderPycraft | 5567107326fbd222a7df6aabf4ab265e0a157636 | [
"MIT"
] | 3 | 2021-09-05T14:10:05.000Z | 2022-01-10T12:57:34.000Z | if not __name__ == "__main__":
print("Started <Pycraft_StartupAnimation>")
else:
print("You need to run this as part of Pycraft")
import tkinter as tk
from tkinter import messagebox
root = tk.Tk()
root.withdraw()
messagebox.showerror("Startup Fail", "You need to run this as part of Pycraft, please run the 'main.py' file")
quit() | 53.361111 | 141 | 0.511322 |
c30f35ba35fbd12a0fe79d62b724f5343db144f1 | 2,985 | py | Python | kattishunter/kattis/submission.py | ParksProjets/kattis-hunter | c4990edf59fba6d91d22fdc126673781ab423d0f | [
"MIT"
] | null | null | null | kattishunter/kattis/submission.py | ParksProjets/kattis-hunter | c4990edf59fba6d91d22fdc126673781ab423d0f | [
"MIT"
] | null | null | null | kattishunter/kattis/submission.py | ParksProjets/kattis-hunter | c4990edf59fba6d91d22fdc126673781ab423d0f | [
"MIT"
] | null | null | null | """
Submit files for a Kattis problem.
Copyright (C) 2019, Guillaume Gonnet
This project is under the MIT license.
"""
import os.path as path
import re
from typing import Dict, List, Text
import requests
import logging
from .login import login
logger = logging.getLogger(__name__)
# Base headers to use.
HEADERS = {
"Accept": "text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
}
def retreive_csrf_token(config: Dict, pid: Text, retry: bool = True):
    """Retrieve the CSRF token embedded in the Kattis submit page.

    Performs a GET on the problem's submit URL with the cached cookies,
    refreshes the cookie cache from the response, and extracts the hidden
    ``csrf_token`` form value.  If the page is not reachable (typically
    because we are not logged in), it logs in and retries exactly once.

    Args:
        config: Tool configuration; uses ``config["url"]["submit"]`` and
            the ``config["cache"]`` cookie/user-agent store.
        pid: Kattis problem ID the token is requested for.
        retry: Internal flag; ``False`` on the second (post-login) attempt.

    Returns:
        The CSRF token string.

    Raises:
        RuntimeError: If the submit page is still unreachable after logging
            in, or if no CSRF token can be found in the page.
    """
    # Setup headers to send.
    headers = HEADERS.copy()
    headers["User-Agent"] = config["cache"]["user-agent"]

    # Make the GET request (redirects disabled: a redirect means the
    # session is not authenticated).
    url = config["url"]["submit"].format(pid=pid)
    cookies = config["cache"].get("cookies", {})
    res = requests.get(url, headers=headers, cookies=cookies,
                       allow_redirects=False)
    config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()}

    # Not logged in: login once and retry.
    if res.status_code != 200:
        if not retry:
            # Fix: previously this branch only logged and then fell through
            # to login()/recursion again, recursing forever when login kept
            # failing.  Fail explicitly instead.
            logger.critical("Can't retrieve submit page from Kattis.")
            raise RuntimeError("Can't retrieve submit page from Kattis.")
        login(config)
        return retreive_csrf_token(config, pid, False)

    # Find the CSRF token in the response body.
    pattern = r"name=\"csrf_token\".*?value=\"([0-9a-z]+)\""
    match = re.search(pattern, res.text)
    if match is None:
        # Fix: previously execution continued to ``match.group(1)`` and died
        # with an opaque AttributeError; raise a clear error instead.
        logger.critical("Can't find CSRF token in submit page.")
        raise RuntimeError("Can't find CSRF token in submit page.")
    return match.group(1)
def read_file(filename: Text):
    """Return the raw binary content of ``filename``."""
    with open(filename, "rb") as stream:
        content = stream.read()
    return content
def read_files(files: List[Text]):
    """Build the multipart form entries Kattis expects for each file."""
    entries = []
    for filename in files:
        payload = (path.basename(filename), read_file(filename),
                   "application/octet-stream")
        entries.append(("sub_file[]", payload))
    return entries
def submit_kattis(config: Dict, pid: Text, files: List[Text]):
    """Submit files to a Kattis problem.

    Scrapes a CSRF token from the submit page, POSTs the files as a
    multipart C++ submission, refreshes the cookie cache, and extracts the
    submission ID from the redirect URL.

    Args:
        config: Tool configuration (URLs and the ``cache`` cookie store).
        pid: Kattis problem ID to submit to.
        files: Paths of the source files to upload.

    Returns:
        The submission ID as a string.

    Raises:
        RuntimeError: If the response URL does not contain a submission ID.
    """
    # Setup headers to send.
    headers = HEADERS.copy()
    headers["User-Agent"] = config["cache"]["user-agent"]

    # Setup form data to send; the CSRF token comes from the submit page.
    data = {
        "csrf_token": retreive_csrf_token(config, pid),
        "type": "files",
        "sub_code": "",
        "problem": pid,
        "language": "C++",
        "submit": "Submit",
        "submit_ctr": 10
    }

    # URL, files and cookies to use.
    url = config["url"]["submit"].format(pid=pid)
    files = read_files(files)
    cookies = config["cache"]["cookies"]

    # Make the POST request.
    logger.debug("Submitting %d files for '%s'.", len(files), pid)
    res = requests.post(url, data=data, files=files, headers=headers,
                        cookies=cookies)
    config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()}

    # On success Kattis redirects to .../submissions/<id>.
    match = re.match(r"^.*/submissions/([0-9]+)$", res.url)
    if not match:
        # Fix: previously execution continued to ``match.group(1)`` and died
        # with an opaque AttributeError; raise a clear error instead.
        logger.critical("Can't find submission ID from URL '%s'.", res.url)
        raise RuntimeError(
            "Can't find submission ID from URL '{}'.".format(res.url))
    sid = match.group(1)
    logger.debug("Files sent to submission %s.", sid)
    return sid
| 26.415929 | 83 | 0.622781 |
c30f9cd42abaa561a1c9cda944cad65e60a4fabe | 785 | py | Python | scripts/set_health_led.py | alanmitchell/mini-monitor | 5d60e1f69fc61d53f3090a159445595a9987c36a | [
"Apache-2.0"
] | 7 | 2016-01-11T23:54:31.000Z | 2022-02-16T11:58:16.000Z | scripts/set_health_led.py | alanmitchell/mini-monitor | 5d60e1f69fc61d53f3090a159445595a9987c36a | [
"Apache-2.0"
] | null | null | null | scripts/set_health_led.py | alanmitchell/mini-monitor | 5d60e1f69fc61d53f3090a159445595a9987c36a | [
"Apache-2.0"
] | 5 | 2015-12-17T15:22:45.000Z | 2018-08-13T17:40:38.000Z | #!/usr/bin/env python3
"""Script to do basic health checks of the system and turn on an LED on
BCM pin 12 (pin 32 on header) if they pass, turn Off otherwise.
"""
import time
import RPi.GPIO as GPIO
import subprocess
# The BCM pin number that the LED is wired to. When the pin
# is at 3.3V the LED is On.
LED_PIN = 12
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LED_PIN, GPIO.OUT)
# ----- Test for Internet availability.
# Try to ping for a minute before declaring that the Internet
# is not available
internet_available = False
for i in range(12):
if subprocess.call('/bin/ping -q -c1 8.8.8.8', shell=True) == 0:
internet_available = True
break
time.sleep(5)
# Set LED according to results of test
GPIO.output(LED_PIN, internet_available)
| 25.322581 | 71 | 0.71465 |
c30fa03b89f6de54fae9f895f13e66390afbeacc | 2,676 | py | Python | vqa_txt_data/compare_experiment_results.py | billyang98/UNITER | c7f0833f14aa9dcb1e251a986c72e49edde1bdd4 | [
"MIT"
] | null | null | null | vqa_txt_data/compare_experiment_results.py | billyang98/UNITER | c7f0833f14aa9dcb1e251a986c72e49edde1bdd4 | [
"MIT"
] | null | null | null | vqa_txt_data/compare_experiment_results.py | billyang98/UNITER | c7f0833f14aa9dcb1e251a986c72e49edde1bdd4 | [
"MIT"
] | null | null | null | import json
import numpy as np
from tqdm import tqdm
# Change these based on experiment
#exp_dataset = 'mask_char_oov_test_set.db'
#exp_name = 'results_test_mask_char'
#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_mask_2'
#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_ensemble_all_5'
#exp_dataset = 'synonyms_mask_char_l03_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_l03'
#exp_dataset = 'synonyms_mask_char_03m_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_03m'
#exp_dataset = 'synonyms_mask_2_03l_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_03l'
exp_dataset = 'mask_2_oov_test_set.db'
exp_name = 'results_test_synonyms_mask_2_fixed'
q_list_file = '/scratch/cluster/billyang/vqa_dataset/txt_db/oov_datasets/{}/questions_changed.json'.format(exp_dataset)
exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_trained/{}/results_3000_all.json'.format(exp_name)
#exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/{}/results_3000_all.json'.format(exp_name)
q_list = json.load(open(q_list_file))
exp_ans_list = json.load(open(exp_ans_file))
baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_trained/results_test_normal_test/results_3000_all.json'))
#baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/results_test_normal_test_fixed/results_3000_all.json'))
exp_ans = {o['question_id']: o['answer'] for o in exp_ans_list}
baseline_ans = {o['question_id']: o['answer'] for o in baseline_ans_list}
gt_ans = json.load(open('oov_test_full_answers.json'))
results = {}
results['num_questions'] = len(q_list)
exp_tot_score = 0
bl_tot_score = 0
rtw = []
wtr = []
for qid in tqdm(q_list):
exp_score = getscore(exp_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
exp_tot_score += exp_score
bl_score = getscore(baseline_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
bl_tot_score += bl_score
if exp_score > 0 and bl_score == 0:
wtr.append(qid)
if bl_score > 0 and exp_score == 0:
rtw.append(qid)
results['exp_score'] = exp_tot_score / len(q_list)
results['bl_score'] = bl_tot_score / len(q_list)
results['rtw'] = rtw
results['wtr'] = wtr
results['rtw_count'] = len(rtw)
results['wtr_count'] = len(wtr)
print("dumping")
json.dump(results, open('{}.json'.format(exp_name), 'w'))
# get new scores
# find answers wrong to right
# find answers right to wrong
| 32.240964 | 155 | 0.763079 |
c31181ed7742f029eee26ce5c90c82ae4b887fbd | 528 | py | Python | app/articles/forms.py | AlexRAV/flask-blog | df8036e01794914ca0e88856ed93f8a91cc1d47a | [
"BSD-3-Clause"
] | null | null | null | app/articles/forms.py | AlexRAV/flask-blog | df8036e01794914ca0e88856ed93f8a91cc1d47a | [
"BSD-3-Clause"
] | null | null | null | app/articles/forms.py | AlexRAV/flask-blog | df8036e01794914ca0e88856ed93f8a91cc1d47a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Article forms."""
from flask_wtf import Form, FlaskForm
from wtforms import PasswordField, StringField, TextAreaField
from wtforms.validators import DataRequired, Email, EqualTo, Length
| 35.2 | 93 | 0.746212 |
c3119f2506c627ca857b498eb0bfe45c4bd66fbc | 9,582 | py | Python | dataanalysis.py | Rev-Jiang/Python | c91d5724a6843f095bfe1a05f65d9fc885e01b88 | [
"MIT"
] | null | null | null | dataanalysis.py | Rev-Jiang/Python | c91d5724a6843f095bfe1a05f65d9fc885e01b88 | [
"MIT"
] | null | null | null | dataanalysis.py | Rev-Jiang/Python | c91d5724a6843f095bfe1a05f65d9fc885e01b88 | [
"MIT"
] | null | null | null | #-*- coding: UTF-8 -*-
#ASCII
# Filename : dataanalysis.py
# author by : Rev_997
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#if it is not list or NumPy, transfer it
if not isinstance(x,list) and isiterable(x):
x=list(x)
#is and is not are used to judge if the varible is None, as None is unique.
a=None
a is None
import datetime
dt=datetime(2011,10,29,20,30,21)
dt.day
dt.minute
dt.date()
dt.time()
#datetime could be transfered to string by function striftime
dt.strftime('%m/%d/%Y %H:%M')
#string could be transfered to datetime by function strptime
datetime.strptime('20091031','%Y%m%d')
#substitute 0 for minutes and seconds
dt.replace(minute=0,second=0)
#the difference of two datetime objects produce a datetime.timedelta
dt2=datetime(2011,11,15,22,30)
delta=dt2-dt
delta
type(delta)
#add a timedelta to a datetime -- get a now datetime
dt+delta
#if elif else
if x:
pass
elif:
pass
else:
pass
#for
for value in collection:
#do something wuth value
#continue
#break
for a,b,c in iterator:
#do something
#while
x=256
total=0
while x>0:
if total>500:
break
total+=x
x=x//2
#once the float(x) is invalid, the except works
#catch the abnormity
#value=true-expr if condition else false-expr
#same as
'''
if condition:
value=true-expr
else:
value=false-expr
'''
#about tuple
tup=4,5,6
tup
#(4,5,6)
#transfer to tuple
tuple([4,0,2])
tuple('string')
#tuple use + to generate longer tuple
#tuple.append()
#tuple.count()
#list.append()
#list.insert()
#list.pop()
#list.remove()
#list.extend()
#list.sort()
import bisect
c=[1,2,2,2,3,4,7]
#find the suitable position
bisect.bisect(c,2)
#insert the new number
bisect.insort(c,6)
###attention: bisect is suitable for ordered sequence
#----------------------------------------------------------------
#some function of list
#enumerate
for i,value in enumerate(collection):
#do something with value
some_list=['foo','bar','baz']
mapping=dict((v,i) for i,v in enumerate(some_list))
mapping
#sorted
sorted([7,2,4,6,3,5,2])
sorted('horse race')
#powerful with set
sorted(set('this is just some string'))
#zip
seq1=['foo','bar','baz']
seq2=['one','two','three']
zip(seq1,seq2)
seq3=[False,True]
zip(seq1,seq2,seq3)
#several arrays iterate together with zip
for i,(a,b) in enumerate(zip(seq1,seq2)):
print('%d: %s, %s' % (i,a,b))
#unzip
pitchers=[('Nolan','Ryan'),('Roger','Clemens'),('Schilling','Curt')]
first_names,last_names=zip(*pitchers)# * is meant zip(seq[0],seq[1],...,seq[len(seq)-1])
first_names
last_names
#reversed
list(reversed(range(10)))
#dictionary
empty_dict={}d1={'a':'some value','b':[1,2,3,4]}
d1
#delete
del d1[5]
#or
ret=d1.pop('dummy')
ret
#get keys and values
d1.keys()
d1.values()
#combine two dictionaries
d1.update({'b':'foo','c':12})
d1
#match two list to be dictionary
'''
mapping={}
for key,value in zip(key_list,value_list):
mapping[key]=value
'''
mapping=dict(zip(range(5),reversed(range(5))))
mapping
#brief way to express circulation by dict
'''
if key in some_dict:
value=some_dict[key]
else:
value=default_value
'''
value=some_dict.get(key,default_values)
#the vlaue of dictionary is set as other list
'''
words=['apple','bat','bar','atom','book']
by_letter={}
for word in words:
letter=word[0]
if letter not in by_letter:
by_letter[letter]=[word]
else:
by_letter[letter].append(word)
by_letter
'''
by_letter.setdefault(letter,[]).append(word)
#or use defaultdict class in Module collections
from collections import defaultdict
by_letter=defaultdict(list)
for word in words:
by_letter[word[0]].append(word)
#the key of dictionary should be of hashability--unchangable
hash('string')
hash((1,2,(2,3)))
hash((1,2,[3,4]))#no hashability as list is changable
#to change a list to tuple is the easiest way to make it a key
d={}
d[tuple([1,2,3])]=5
d
#set
set([2,2,2,1,3,3])
{2,2,2,1,3,3}
a={1,2,3,4,5}
b={3,4,5,6,7,8}
#intersection
a|b
#union
a&b
#difference
a-b
#symmetric difference
a^b
#if is subset
a_set={1,2,3,4,5}
{1,2,3}.issubset(a_set)
a_set.issuperset({1,2,3})
#set could use the == to judge if the same
{1,2,3}=={3,2,1}
#the operation of the sets
a.add(x)
a.remove(x)
a.union(b)
a.intersection(b)
a.difference(b)
a.symmetric_difference(b)
a.issubset(b)
a.issuperset(b)
a.isdisjoint(b)
#the derivative of list&set&dictionary
'''
[expr for val in collection if condition]
is the same as
result=[]
for val in collection:
if condition:
result.append(expr)
'''
#list
#[expr for val in collection if condition]
strings=['a','as','bat','car','dove','python']
[x.upper() for x in strings if len(x)>2]
#dicrionary
#dict_comp={key-expr:value-expr for value in collection if condition}
loc_mapping={val:index for index, val in enumerate(string)}
loc_mapping
#or
loc_mapping=dict((val,idx) for idx, val in enumerate(string))
#set
#set_comp={expr for value in collection if condition}
unique_lengths={len(x) for x in strings}
unique_lengths
#list nesting derivative
all_data=[['Tom','Billy','Jeffery','Andrew','Wesley','Steven','Joe'],
['Susie','Casey','Jill','Ana','Eva','Jennifer','Stephanie']]
#find the names with two 'e' and put them in a new list
names_of_interest=[]
for name in all_data:
enough_es=[name for name in names if name.count('e')>2]
names_of_interest.extend(enough_es)
#which could be shorten as below:
result=[name for names in all_data for name in names
if name.count('e')>=2]
result
#flat a list consist of tuples
some_tuples=[(1,2,3),(4,5,6),(7,8,9)]
flattened=[x for tup in some_tuples for x in tup]
flattened
'''
flattened=[]
for tup in some_tuples:
for x in tup:
flattened.append(x)
'''
#which is different from:
[[x for x in tup] for tup in some_tuples]
#clean function
import re
states=[' Alabama ','Georgia!','Georgia','georgia','FlOrIda','south carolina##','West virginia?']
clean_strings(states)
#or
clean_ops=[str.strip,remove_punctuation,str.title]
clean_strings(states,clean_ops)
#anonymous function
#lambda [arg1[, arg2, ... argN]]: expression
#exmaple 1
#use def define function
#use lambda expression
lambda x, y: x + y
#lambda permits default parameter
lambda x, y = 2: x + y
lambda *z: z
#call lambda function
a = lambda x, y: x + y
a( 1, 3 )
b = lambda x, y = 2: x + y
b( 1 )
b( 1, 3 )
c = lambda *z: z
c( 10, 'test')
#example2
#use def define function
#use lambda expression
lambda x, y: x + y
#lambda permits default parameter
lambda x, y = 2: x + y
lambda *z: z
#call lambda function
a = lambda x, y: x + y
a( 1, 3 )
b = lambda x, y = 2: x + y
b( 1 )
b( 1, 3 )
c = lambda *z: z
c( 10, 'test')
#example 3
ints=[4,0,1,5,6]
apply_to_list(ints,lambda x:x*2)
#example 4
strings=['foo','card','bar','aaaa','abab']
strings.sort(key=lambda x: len(set(list(x))))
strings
#currying
'''
def add_numbers(x,y):
return x+y
add_five=lambda y:add_numbers(5,y)
'''
#partial function is to simplify the process
from functools import partial
add_five=partial(add_numbers,5)
#generator expression
gen=(x**2 for x in xxrange(100))
gen
#the same:
gen=_make_gen()
#generator expression could be used in any python function acceptable of generator
sum(x**2 for x in xrange(100))
dict((i,i**2) for i in xrange(5))
#itertools module
import itertools
first_letter=lambda x:x[0]
names=['Alan','Adam','Wes','Will','Albert','Steven']
for letter,names in itertools.groupby(names,first_letter):
print letter,list(names) #names is a genetator
#some functions in itertools
imap(func,*iterables)
ifilter(func,iterable)
combinations(iterable,k)
permutations(iterable,k)
groupby(iterable[,keyfunc])
#documents and operation system
path='xxx.txt'
f=open(path)
for line in f:
pass
#remove EOL of every line
lines=[x.rstrip() for x in open(path)]
lines
#set a empty-lineproof doc
with open('tmp.txt','w') as handle:
handle.writelines(x for x in open(path) if len(x)>1)
open('tmp.txt').readlines()
#some function to construct documents
read([size])
readlines([size])
write(str)
close()
flush()
seek(pos)
tell()
closed
| 20.08805 | 100 | 0.644124 |
c311dcd3f870bbdf6b67118d6ccc561653945f40 | 259 | py | Python | show_model_info.py | panovr/Brain-Tumor-Segmentation | bf1ac2360af46a484d632474ce93de339ad2b496 | [
"MIT"
] | null | null | null | show_model_info.py | panovr/Brain-Tumor-Segmentation | bf1ac2360af46a484d632474ce93de339ad2b496 | [
"MIT"
] | null | null | null | show_model_info.py | panovr/Brain-Tumor-Segmentation | bf1ac2360af46a484d632474ce93de339ad2b496 | [
"MIT"
] | null | null | null | import bts.model as model
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 6
FILTER_LIST = [16,32,64,128,256]
unet_model = model.DynamicUNet(FILTER_LIST)
unet_model.summary(batch_size=BATCH_SIZE, device=device)
| 28.777778 | 69 | 0.783784 |
c31289e9e024e29ebc9122d648b85bcf484eedb5 | 1,031 | py | Python | docs/source/conf.py | andriis/bravado | 0d2ef182df4eb38641282e2f839c4dc813ee4349 | [
"BSD-3-Clause"
] | 600 | 2015-05-20T00:37:21.000Z | 2022-03-09T03:48:38.000Z | docs/source/conf.py | andriis/bravado | 0d2ef182df4eb38641282e2f839c4dc813ee4349 | [
"BSD-3-Clause"
] | 323 | 2015-05-19T22:35:29.000Z | 2021-12-09T12:55:09.000Z | docs/source/conf.py | andriis/bravado | 0d2ef182df4eb38641282e2f839c4dc813ee4349 | [
"BSD-3-Clause"
] | 137 | 2015-05-14T19:51:58.000Z | 2022-01-31T19:36:32.000Z | # -*- coding: utf-8 -*-
import sphinx_rtd_theme
# -- General configuration -----------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bravado'
copyright = u'2013, Digium, Inc.; 2014-2015, Yelp, Inc'
exclude_patterns = []
pygments_style = 'sphinx'
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
htmlhelp_basename = 'bravado-pydoc'
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'bravado-core': ('https://bravado-core.readthedocs.io/en/latest/', None),
}
| 22.413043 | 77 | 0.645975 |
c313212d51b9e2cc91e003a4faa89dafdee74dd8 | 13,900 | py | Python | edgedb/_testbase.py | Fogapod/edgedb-python | 377805660e3455bef536412bd5467b435753b3a5 | [
"Apache-2.0"
] | null | null | null | edgedb/_testbase.py | Fogapod/edgedb-python | 377805660e3455bef536412bd5467b435753b3a5 | [
"Apache-2.0"
] | null | null | null | edgedb/_testbase.py | Fogapod/edgedb-python | 377805660e3455bef536412bd5467b435753b3a5 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import atexit
import contextlib
import functools
import inspect
import json
import logging
import os
import re
import unittest
import edgedb
from edgedb import _cluster as edgedb_cluster
_default_cluster = None
class TestCase(unittest.TestCase, metaclass=TestCaseMeta):
    # NOTE(review): TestCaseMeta is defined elsewhere in this module (not
    # visible in this chunk) -- confirm its contract before relying on it.
    def add_fail_notes(self, **kwargs):
        """Merge *kwargs* into ``self.fail_notes``, creating the dict lazily.

        The notes are arbitrary key/value annotations attached to the test
        instance; the consumer of ``fail_notes`` is not visible in this chunk.
        """
        if not hasattr(self, 'fail_notes'):
            self.fail_notes = {}
        self.fail_notes.update(kwargs)
    # Class-level counter; presumably used by locking helpers defined
    # elsewhere in the class -- TODO confirm.
    _lock_cnt = 0
| 30.151844 | 79 | 0.566403 |
c315663a28ae143f4027a8b0b899801904c9cfc7 | 824 | py | Python | konwledge_extraction/ner/bert_crf_ner/losses/focal_loss.py | mlshenkai/KGQA | 08e72d68da6519aaca7f39fabf8c0194bebd0314 | [
"Apache-2.0"
] | null | null | null | konwledge_extraction/ner/bert_crf_ner/losses/focal_loss.py | mlshenkai/KGQA | 08e72d68da6519aaca7f39fabf8c0194bebd0314 | [
"Apache-2.0"
] | null | null | null | konwledge_extraction/ner/bert_crf_ner/losses/focal_loss.py | mlshenkai/KGQA | 08e72d68da6519aaca7f39fabf8c0194bebd0314 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Kai Shen
# @Created Time: 2022/2/23 10:14 AM
# @Organization: YQN
# @Email: mlshenkai@163.com
import torch
import torch.nn as nn
import torch.nn.functional as F
| 28.413793 | 85 | 0.598301 |
c3159e702eacd0f494cdd9cb0e3428247b34b8ae | 669 | py | Python | tests/biology/test_join_fasta.py | shandou/pyjanitor | d7842613b4e4a7532a88f673fd54e94c3ba5a96b | [
"MIT"
] | 1 | 2021-03-25T10:46:57.000Z | 2021-03-25T10:46:57.000Z | tests/biology/test_join_fasta.py | shandou/pyjanitor | d7842613b4e4a7532a88f673fd54e94c3ba5a96b | [
"MIT"
] | null | null | null | tests/biology/test_join_fasta.py | shandou/pyjanitor | d7842613b4e4a7532a88f673fd54e94c3ba5a96b | [
"MIT"
] | null | null | null | import importlib
import os
import pytest
from helpers import running_on_ci
import janitor.biology # noqa: F403, F401
# Skip all tests if Biopython not installed
# Ensure the submodule is loaded: `import importlib` alone does not guarantee
# that the `importlib.util` attribute is available.
import importlib.util

# Use boolean `and not` instead of the original bitwise `& ~`: the bitwise
# form only worked because bools are ints (`~True` == -2), and would break
# for non-bool truthy returns; `and not` states the intent directly.
pytestmark = pytest.mark.skipif(
    (importlib.util.find_spec("Bio") is None) and not running_on_ci(),
    reason="Biology tests relying on Biopython only required for CI",
)
| 25.730769 | 71 | 0.714499 |
c3166bb775db3bf02b0cf82cc41168152ed9ad5b | 12,182 | py | Python | test/test_tilepyramid.py | ungarj/tilematrix | f5797cf2056f7de8de8f284db40b10943e5e40fb | [
"MIT"
] | 16 | 2016-07-27T22:21:12.000Z | 2022-01-15T18:13:43.000Z | test/test_tilepyramid.py | ungarj/tilematrix | f5797cf2056f7de8de8f284db40b10943e5e40fb | [
"MIT"
] | 30 | 2015-10-09T18:10:13.000Z | 2022-03-15T16:56:54.000Z | test/test_tilepyramid.py | ungarj/tilematrix | f5797cf2056f7de8de8f284db40b10943e5e40fb | [
"MIT"
] | 2 | 2021-07-30T07:23:13.000Z | 2021-09-13T12:24:11.000Z | """TilePyramid creation."""
import pytest
from shapely.geometry import Point
from shapely.ops import unary_union
from types import GeneratorType
from tilematrix import TilePyramid, snap_bounds
def test_init():
    """Known grid names construct fine; bad or missing names raise ValueError."""
    for grid_name in ("geodetic", "mercator"):
        assert TilePyramid(grid_name)
        with pytest.raises(ValueError):
            TilePyramid("invalid")
        with pytest.raises(ValueError):
            TilePyramid()
        assert hash(TilePyramid(grid_name))
def test_metatiling():
    """Valid metatiling factors are accepted; invalid ones raise ValueError."""
    # powers of two up to 16 are the accepted metatile factors
    for metatiling in [1, 2, 4, 8, 16]:
        assert TilePyramid("geodetic", metatiling=metatiling)
    # 5 is not a power of two: the constructor must reject it. Use
    # pytest.raises (as test_init does) instead of the manual
    # try/raise/except idiom -- behavior is identical, intent is clearer.
    with pytest.raises(ValueError):
        TilePyramid("geodetic", metatiling=5)
def test_tile_size():
    """The tile_size constructor argument is reflected by the attribute."""
    for size in (128, 256, 512, 1024):
        assert TilePyramid("geodetic", tile_size=size).tile_size == size
def test_intersect():
    """Get intersecting Tiles.

    Checks the tile IDs returned by TilePyramid.intersecting() for equal,
    smaller and bigger metatiling, and that mixing CRSes raises ValueError.
    """
    # same metatiling: a tile intersects exactly itself
    tp = TilePyramid("geodetic")
    intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
    control = {(5, 1, 1)}
    test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
    assert control == test_tiles
    # smaller metatiling: one metatile covers four plain tiles
    tp = TilePyramid("geodetic")
    intersect_tile = TilePyramid("geodetic", metatiling=2).tile(5, 1, 1)
    control = {(5, 2, 2), (5, 2, 3), (5, 3, 3), (5, 3, 2)}
    test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
    assert control == test_tiles
    # bigger metatiling: a plain tile falls into exactly one metatile
    tp = TilePyramid("geodetic", metatiling=2)
    intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
    control = {(5, 0, 0)}
    test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
    assert control == test_tiles
    intersect_tile = TilePyramid("geodetic").tile(4, 12, 31)
    control = {(4, 6, 15)}
    test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
    assert control == test_tiles
    # different CRSes cannot be intersected; use pytest.raises for
    # consistency with test_init instead of the manual try/raise/except idiom
    tp = TilePyramid("geodetic")
    intersect_tile = TilePyramid("mercator").tile(5, 1, 1)
    with pytest.raises(ValueError):
        {tile.id for tile in tp.intersecting(intersect_tile)}
def test_tilepyramid_compare(grid_definition_proj, grid_definition_epsg):
    """Comparison operators.

    TilePyramid equality must account for metatiling and tile_size: changing
    either breaks equality. The grid_definition_* arguments are pytest
    fixtures (defined outside this chunk) providing proj- and EPSG-based
    grid definitions.
    """
    gproj, gepsg = grid_definition_proj, grid_definition_epsg
    # predefined
    assert TilePyramid("geodetic") == TilePyramid("geodetic")
    assert TilePyramid("geodetic") != TilePyramid("geodetic", metatiling=2)
    assert TilePyramid("geodetic") != TilePyramid("geodetic", tile_size=512)
    assert TilePyramid("mercator") == TilePyramid("mercator")
    assert TilePyramid("mercator") != TilePyramid("mercator", metatiling=2)
    assert TilePyramid("mercator") != TilePyramid("mercator", tile_size=512)
    # epsg based
    assert TilePyramid(gepsg) == TilePyramid(gepsg)
    assert TilePyramid(gepsg) != TilePyramid(gepsg, metatiling=2)
    assert TilePyramid(gepsg) != TilePyramid(gepsg, tile_size=512)
    # proj based
    assert TilePyramid(gproj) == TilePyramid(gproj)
    assert TilePyramid(gproj) != TilePyramid(gproj, metatiling=2)
    assert TilePyramid(gproj) != TilePyramid(gproj, tile_size=512)
    # altered bounds: same definition except bounds must not compare equal
    abounds = dict(**gproj)
    abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
    assert TilePyramid(abounds) == TilePyramid(abounds)
    assert TilePyramid(gproj) != TilePyramid(abounds)
    # other type: comparison with a non-TilePyramid is unequal, not an error
    assert TilePyramid("geodetic") != "string"
def test_grid_compare(grid_definition_proj, grid_definition_epsg):
    """Comparison operators on the .grid attribute.

    Unlike TilePyramid equality, the underlying grid ignores metatiling and
    tile_size (those only change tiling on top of the same grid), but it does
    reflect altered bounds.
    """
    gproj, gepsg = grid_definition_proj, grid_definition_epsg
    # predefined
    assert TilePyramid("geodetic").grid == TilePyramid("geodetic").grid
    assert TilePyramid("geodetic").grid == TilePyramid("geodetic", metatiling=2).grid
    assert TilePyramid("geodetic").grid == TilePyramid("geodetic", tile_size=512).grid
    assert TilePyramid("mercator").grid == TilePyramid("mercator").grid
    assert TilePyramid("mercator").grid == TilePyramid("mercator", metatiling=2).grid
    assert TilePyramid("mercator").grid == TilePyramid("mercator", tile_size=512).grid
    # epsg based
    assert TilePyramid(gepsg).grid == TilePyramid(gepsg).grid
    assert TilePyramid(gepsg).grid == TilePyramid(gepsg, metatiling=2).grid
    assert TilePyramid(gepsg).grid == TilePyramid(gepsg, tile_size=512).grid
    # proj based
    assert TilePyramid(gproj).grid == TilePyramid(gproj).grid
    assert TilePyramid(gproj).grid == TilePyramid(gproj, metatiling=2).grid
    assert TilePyramid(gproj).grid == TilePyramid(gproj, tile_size=512).grid
    # altered bounds: different bounds yield a different grid
    abounds = dict(**gproj)
    abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
    assert TilePyramid(abounds).grid == TilePyramid(abounds).grid
    assert TilePyramid(gproj).grid != TilePyramid(abounds).grid
| 34.315493 | 86 | 0.64111 |
c316f38f732cf3a6b4ada4ff98b624a3bbbf8f67 | 1,180 | py | Python | setup.py | giovannicuriel/report_builder | e728e77d7647f248198e39521278ed246171b256 | [
"BSD-2-Clause"
] | null | null | null | setup.py | giovannicuriel/report_builder | e728e77d7647f248198e39521278ed246171b256 | [
"BSD-2-Clause"
] | null | null | null | setup.py | giovannicuriel/report_builder | e728e77d7647f248198e39521278ed246171b256 | [
"BSD-2-Clause"
] | 1 | 2019-11-25T12:51:29.000Z | 2019-11-25T12:51:29.000Z | # -*- coding: utf-8 -*-
"""
setup.py script
"""
import io
from collections import OrderedDict
from setuptools import setup, find_packages
# Long description for PyPI is taken verbatim from the README.
with io.open('README.md', 'rt', encoding='utf8') as f:
    README = f.read()
setup(
    name='reportbuilder',
    version='0.0.1',
    url='http://github.com/giovannicuriel/report-builder',
    project_urls=OrderedDict((
        ('Code', 'https://github.com/giovannicuriel/report-builder.git'),
        ('Issue tracker', 'https://github.com/giovannicuriel/report-builder/issues'),
    )),
    license='BSD-2-Clause',
    author='Giovanni Curiel dos Santos',
    author_email='giovannicuriel@gmail.com',
    description='Sample package for Python training courses',
    long_description=README,
    packages=["reportbuilder"],
    include_package_data=True,
    zip_safe=False,
    # BUG FIX: the original passed `platforms=[any]`, i.e. the *builtin
    # function* `any`, which setuptools would stringify into garbage
    # metadata. The platform specifier must be the string "any".
    platforms=["any"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'flask==1.1.1'
    ],
    entry_points={
        # exposes the `report-builder` console command
        'console_scripts': [
            'report-builder = reportbuilder.app:main'
        ]
    }
)
| 26.818182 | 85 | 0.634746 |
c3184306de4eb3bd08a9f52149a34046ea7333f3 | 2,296 | py | Python | pyfunds/option.py | lucaruzzola/pyfunds | 498c5a0a3eb9423ca9f267b8d8c47f0f23987f3d | [
"MIT"
] | 6 | 2021-08-16T16:15:05.000Z | 2022-03-21T15:46:29.000Z | pyfunds/option.py | lucaruzzola/pyfunds | 498c5a0a3eb9423ca9f267b8d8c47f0f23987f3d | [
"MIT"
] | null | null | null | pyfunds/option.py | lucaruzzola/pyfunds | 498c5a0a3eb9423ca9f267b8d8c47f0f23987f3d | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
# NOTE(review): these look like methods of the Option class; the class header
# and the abstract members get()/_is_empty() are not visible in this chunk.
def map(self, f: Callable[[T], U]) -> Option[U]:
    """Return ``Some(f(value))`` when a value is present, else self unchanged."""
    return Some(f(self.get())) if not self._is_empty() else self
def flat_map(self, f: Callable[[T], Option[U]]) -> Option[U]:
    """Apply *f* (which itself returns an Option) to the value when present."""
    return f(self.get()) if not self._is_empty() else self
def fold(self, default: U, fs: Callable[[T], U]) -> U:
    """Return ``fs(value)`` when a value is present, otherwise *default*."""
    return default if self._is_empty() else fs(self.get())
def __str__(self) -> str:
    """Human-readable form: 'Option is Some/Nothing', plus value and type when present."""
    return f"Option is {'Some' if not self._is_empty() else 'Nothing'}" + (
        f", with value: {self.get().__repr__()} of type {type(self.get())}"
        if not self._is_empty()
        else ""
    )
def __repr__(self) -> str:
    """Fixed repr shared by all Option instances (does not include the value)."""
    return "pyfunds.Option"
def __eq__(self, other: Option[T]) -> bool:
    """Two Options are equal when both are empty, or both hold equal values."""
    if self._is_empty():
        return other._is_empty()
    elif other._is_empty():
        # self is non-empty here, so an empty other can never be equal
        return False
    else:
        return self.get() == other.get()
def __ne__(self, other: Option[T]) -> bool:
    """Logical inverse of __eq__."""
    return not self == other
class Some(Option[T]):
class Nothing(Option[T]):
| 23.916667 | 79 | 0.582753 |
c318b45aea4400e446baec5f077cb60419864b6f | 1,237 | py | Python | generate_dataset/visualize_mask.py | Kaju-Bubanja/PoseCNN | c2f7c4e8f98bc7c67d5cbc0be3167d3cb3bea396 | [
"MIT"
] | 20 | 2018-08-30T08:02:56.000Z | 2021-09-15T12:22:22.000Z | generate_dataset/visualize_mask.py | Kaju-Bubanja/PoseCNN | c2f7c4e8f98bc7c67d5cbc0be3167d3cb3bea396 | [
"MIT"
] | null | null | null | generate_dataset/visualize_mask.py | Kaju-Bubanja/PoseCNN | c2f7c4e8f98bc7c67d5cbc0be3167d3cb3bea396 | [
"MIT"
] | 5 | 2018-10-16T15:01:15.000Z | 2020-08-29T03:52:51.000Z | import cv2
import rosbag
import rospy
from cv_bridge import CvBridge
if __name__ == "__main__":
main() | 35.342857 | 100 | 0.595796 |
c318c41ae02a5b1bce71b7e42ebcd848cf95e1f3 | 909 | py | Python | itembase/core/urls/location_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | null | null | null | itembase/core/urls/location_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | 9 | 2020-01-17T14:16:08.000Z | 2020-02-18T15:07:40.000Z | itembase/core/urls/location_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | null | null | null | from django.urls import path
from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, \
LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, \
LocationUpdateView
# URL namespace used for reversing, e.g. reverse("locations:list").
app_name = "locations"
urlpatterns = [
    # Location CRUD
    path("", LocationListView.as_view(), name="list"),
    path("new/", LocationCreateView.as_view(), name="new"),
    path("edit/<int:pk>/", LocationUpdateView.as_view(), name="edit"),
    path("delete/<int:pk>/", LocationDeleteView.as_view(), name="delete"),
    path("<int:pk>/", LocationDetailView.as_view(), name="view"),
    # Addresses attached to a location (pk is the location for creation,
    # the address for view/edit)
    path('<int:pk>/address-new/', LocationAddressCreateView.as_view(), name='address-new'),
    path('address/<int:pk>', LocationAddressDetailView.as_view(), name='address-view'),
    path('address/edit/<int:pk>', LocationAddressUpdateView.as_view(), name='address-edit'),
]
| 50.5 | 110 | 0.729373 |
c318d66c0ef14a2821d36ef3adf7ffcb264139ea | 3,455 | py | Python | web/addons/product_margin/wizard/product_margin.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/addons/product_margin/wizard/product_margin.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | odoo/addons/product_margin/wizard/product_margin.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 35.618557 | 81 | 0.589001 |
c31b0a85e27980acdba6410b67f84602e15446a0 | 1,038 | py | Python | scripts/analysis_one.py | VikkiMba/Programmable-matter | d340c0b370a7e610892ffd8351f7aa576928d05c | [
"MIT"
] | null | null | null | scripts/analysis_one.py | VikkiMba/Programmable-matter | d340c0b370a7e610892ffd8351f7aa576928d05c | [
"MIT"
] | null | null | null | scripts/analysis_one.py | VikkiMba/Programmable-matter | d340c0b370a7e610892ffd8351f7aa576928d05c | [
"MIT"
] | null | null | null | name = input('Enter file name: ')
lst=list()
lst2=list()
with open(name) as f:
for line in f:
#print(line)
blops=line.rstrip()
blop=blops.split()
#for val in blop:
my_lst = [float(val) for val in blop]#list_comprehension
for num in my_lst:
if num <= 3.5:
lst.append(num)
if num >=4: lst2.append(num)
#num = float(val)
#print(num)
#text = f.read()
#print(text)
#print(type(text))
#print(type(line))
#print(blop)
#print(type(blop))
#print(lst)
#print(lst2)
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
#for (f, b) in zip(lst2 ,lst):
#print (f, b)
#print(type(my_lst))
with open('neu_sam_4b.csv', 'w') as fh:
for (f, b) in zip(lst, lst2):
print(f,',',b, file=fh)
ext=lst
force=lst2
plt.plot(ext, force)
plt.xlabel('Extension')
plt.ylabel('Force')
plt.title('sample with 0.25wt%')
plt.tight_layout()
plt.show()
#for digit in lst:
#print(digit, file=fh) | 20.352941 | 64 | 0.578998 |
c31bd0f2505a1c4be1c52fbd6469723bb696bfa9 | 2,470 | py | Python | account/models.py | Hasanozzaman-Khan/Django-User-Authentication | 96482a51ed01bbdc7092d6ca34383054967a8aa0 | [
"MIT"
] | null | null | null | account/models.py | Hasanozzaman-Khan/Django-User-Authentication | 96482a51ed01bbdc7092d6ca34383054967a8aa0 | [
"MIT"
] | null | null | null | account/models.py | Hasanozzaman-Khan/Django-User-Authentication | 96482a51ed01bbdc7092d6ca34383054967a8aa0 | [
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from PIL import Image
# Create your models here.
| 30.493827 | 90 | 0.676923 |
c31dc50c8e3e9b895471f34a5cb531f2da5f9d94 | 316 | py | Python | Numeric Patterns/numericpattern37.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | [
"MIT"
] | null | null | null | Numeric Patterns/numericpattern37.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | [
"MIT"
] | 471 | 2022-01-15T07:07:18.000Z | 2022-02-28T16:01:42.000Z | Numeric Patterns/numericpattern37.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | [
"MIT"
] | 2 | 2022-01-17T09:43:16.000Z | 2022-01-29T15:15:47.000Z | height = int(input())
for i in range(1,height+1) :
for j in range(1, i+1):
m = i*j
if(m <= 9):
print("",m,end = " ")
else:
print(m,end = " ")
print()
# Sample Input :- 5
# Output :-
# 1
# 2 4
# 3 6 9
# 4 8 12 16
# 5 10 15 20 25
| 13.73913 | 33 | 0.379747 |
c31e95b8404220c927502906ac5a4aee6be489dd | 2,617 | py | Python | reviewboard/search/testing.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | reviewboard/search/testing.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | reviewboard/search/testing.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | """Search-related testing utilities."""
import tempfile
import time
from contextlib import contextmanager
import haystack
from django.conf import settings
from django.core.management import call_command
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.admin.siteconfig import load_site_config
def reindex_search():
"""Rebuild the search index."""
call_command('rebuild_index', interactive=False)
# On Whoosh, the above is asynchronous, and we can end up trying to read
# before we end up writing, occasionally breaking tests. We need to
# introduce just a bit of a delay.
#
# Yeah, this is still sketchy, but we can't turn off the async behavior
# or receive notification that the write has completed.
time.sleep(0.1)
| 30.788235 | 78 | 0.659534 |
c31e9aaa2f138851e26e15e8a729a624dea9ce5b | 6,434 | py | Python | pydron/config/config.py | DelphianCalamity/pydron | 1518dc71b5cf64fde563b864db2a4de74e092c8e | [
"MIT"
] | 5 | 2020-04-06T15:20:56.000Z | 2022-01-05T23:11:13.000Z | pydron/config/config.py | mahmoudimus/pydron | a7b484dec8bcc2730ba9bd76bc63bf3362c05e4d | [
"MIT"
] | null | null | null | pydron/config/config.py | mahmoudimus/pydron | a7b484dec8bcc2730ba9bd76bc63bf3362c05e4d | [
"MIT"
] | 2 | 2020-11-27T20:21:34.000Z | 2021-02-26T23:02:11.000Z | # Copyright (C) 2015 Stefan C. Mueller
import json
import os.path
from remoot import pythonstarter, smartstarter
import anycall
from pydron.backend import worker
from pydron.interpreter import scheduler, strategies
from twisted.internet import defer
preload_packages = []
def create_pool(config, rpcsystem, error_handler):
"""
starts workers and returns a pool of them.
Returns two callbacks:
* The first callbacks with the pool as
soon as there is one worker. Errbacks if all starters
failed to create a worker.
* The second calls back once all workers have been
started. This one can be cancelled.
The given `error_handler` is invoked for every failed start.
"""
starters = []
for starter_conf in config["workers"]:
starters.extend(_create_starters(starter_conf, rpcsystem))
pool = worker.Pool()
ds = []
for i, starter in enumerate(starters):
d = starter.start()
d.addCallback(success, i, starter)
ds.append(d)
d = defer.DeferredList(ds, fireOnOneErrback=True, consumeErrors=True)
d.addCallbacks(on_success, on_fail)
return d
| 32.331658 | 96 | 0.562791 |
c31eedaebb01514423f430e25d6d4f8b0f2cba6b | 4,678 | py | Python | astropy/tests/plugins/display.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | [
"BSD-3-Clause"
] | null | null | null | astropy/tests/plugins/display.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | [
"BSD-3-Clause"
] | 10 | 2017-03-15T16:14:43.000Z | 2018-11-22T14:40:54.000Z | astropy/tests/plugins/display.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | [
"BSD-3-Clause"
] | 1 | 2020-01-23T00:41:10.000Z | 2020-01-23T00:41:10.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of the header displayed by pytest for
reporting purposes.
"""
import os
import sys
import datetime
import locale
import math
from collections import OrderedDict
from astropy.tests.helper import ignore_warnings
from astropy.utils.introspection import resolve_name
# Packages reported in the pytest header, {display name: importable module name}.
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
                                     ('Scipy', 'scipy'),
                                     ('Matplotlib', 'matplotlib'),
                                     ('h5py', 'h5py'),
                                     ('Pandas', 'pandas')])
# This always returns with Astropy's version
from astropy import __version__
# Versions reported for tested packages, {display name: version string}.
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
def pytest_terminal_summary(terminalreporter):
    """Output a warning to IPython users in case any tests failed."""
    # get_ipython is only defined when running inside IPython; a NameError
    # means we are in a plain interpreter and there is nothing to warn about.
    try:
        get_ipython()
    except NameError:
        return
    # Only issue the warning when there are actually failures.
    if terminalreporter.stats.get('failed'):
        terminalreporter.ensure_newline()
        terminalreporter.write_line(
            'Some tests are known to fail when run from the IPython prompt; '
            'especially, but not limited to tests involving logging and warning '
            'handling. Unless you are certain as to the cause of the failure, '
            'please check that the failure occurs outside IPython as well. See '
            'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
            'tests-when-running-the-tests-in-ipython for more information.',
            yellow=True, bold=True)
| 32.943662 | 78 | 0.614793 |
c31f343b321d0b0d195053ec8d68315783bcc174 | 2,782 | py | Python | packages/api-server/api_server/routes/lifts.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | [
"Apache-2.0"
] | null | null | null | packages/api-server/api_server/routes/lifts.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | [
"Apache-2.0"
] | null | null | null | packages/api-server/api_server/routes/lifts.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, cast
from fastapi import Depends
from rx import operators as rxops
from api_server.base_app import BaseApp
from api_server.fast_io import FastIORouter, WatchRequest
from api_server.models import Lift, LiftHealth, LiftRequest, LiftState
from api_server.repositories import RmfRepository
from .utils import rx_watcher
| 36.12987 | 86 | 0.586988 |
c320f2c59ba3ca84a73f6a79313b9f9398f03283 | 5,228 | py | Python | src/opnsense/scripts/suricata/queryAlertLog.py | ass-a2s/opnsense-core | a0634d180325f6afe3be7f514b4470e47ff5eb75 | [
"BSD-2-Clause"
] | 2 | 2019-03-15T03:35:54.000Z | 2019-03-15T07:50:36.000Z | src/opnsense/scripts/suricata/queryAlertLog.py | ass-a2s/opnsense-core | a0634d180325f6afe3be7f514b4470e47ff5eb75 | [
"BSD-2-Clause"
] | null | null | null | src/opnsense/scripts/suricata/queryAlertLog.py | ass-a2s/opnsense-core | a0634d180325f6afe3be7f514b4470e47ff5eb75 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/local/bin/python3.6
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
query suricata alert log
"""
import sys
import os.path
import re
import sre_constants
import shlex
import ujson
sys.path.insert(0, "/usr/local/opnsense/site-python")
from log_helper import reverse_log_reader
from params import update_params
from lib import suricata_alert_log
if __name__ == '__main__':
    # handle parameters (defaults; update_params fills them in, presumably
    # from the command line -- its implementation is in a sibling module)
    parameters = {'limit': '0', 'offset': '0', 'filter': '', 'fileid': ''}
    update_params(parameters)
    # choose logfile by number: a numeric fileid selects a rotated log file
    if parameters['fileid'].isdigit():
        suricata_log = '%s.%d' % (suricata_alert_log, int(parameters['fileid']))
    else:
        suricata_log = suricata_alert_log
    # non-numeric limit/offset fall back to 0 (i.e. unlimited / no offset)
    if parameters['limit'].isdigit():
        limit = int(parameters['limit'])
    else:
        limit = 0
    if parameters['offset'].isdigit():
        offset = int(parameters['offset'])
    else:
        offset = 0
    # parse "field/pattern" filter tokens; '*' acts as a wildcard and
    # matching is case-insensitive (both sides lowered)
    data_filters = {}
    data_filters_comp = {}
    for filter_txt in shlex.split(parameters['filter']):
        filterField = filter_txt.split('/')[0]
        if filter_txt.find('/') > -1:
            data_filters[filterField] = '/'.join(filter_txt.split('/')[1:])
            filter_regexp = data_filters[filterField]
            filter_regexp = filter_regexp.replace('*', '.*')
            filter_regexp = filter_regexp.lower()
            try:
                data_filters_comp[filterField] = re.compile(filter_regexp)
            except re.error:
                # re.error is the public name of the regex compile error
                # (sre_constants is a private module, deprecated since 3.12);
                # fall back to match-anything instead of dropping the filter
                # del data_filters[filterField]
                data_filters_comp[filterField] = re.compile('.*')
    # filter one specific log line
    if 'filepos' in data_filters and data_filters['filepos'].isdigit():
        log_start_pos = int(data_filters['filepos'])
    else:
        log_start_pos = None
    # query suricata eve log (reverse_log_reader presumably yields lines
    # starting from the end of the file -- newest alerts first)
    result = {'filters': data_filters, 'rows': [], 'total_rows': 0, 'origin': suricata_log.split('/')[-1]}
    if os.path.exists(suricata_log):
        for line in reverse_log_reader(filename=suricata_log, start_pos=log_start_pos):
            try:
                record = ujson.loads(line['line'])
            except ValueError:
                # can not handle line
                record = {}
            # only process valid alert items
            if 'alert' in record:
                # add position in file
                record['filepos'] = line['pos']
                record['fileid'] = parameters['fileid']
                # flatten structure
                record['alert_sid'] = record['alert']['signature_id']
                record['alert_action'] = record['alert']['action']
                record['alert'] = record['alert']['signature']
                # use filters on data (using regular expressions);
                # comma-separated keys within one filter act as OR,
                # separate filters AND together
                do_output = True
                for filterKeys in data_filters:
                    filter_hit = False
                    for filterKey in filterKeys.split(','):
                        if filterKey in record and data_filters_comp[filterKeys].match(
                                ('%s' % record[filterKey]).lower()):
                            filter_hit = True
                    if not filter_hit:
                        do_output = False
                if do_output:
                    result['total_rows'] += 1
                    if (len(result['rows']) < limit or limit == 0) and result['total_rows'] >= offset:
                        result['rows'].append(record)
                    elif result['total_rows'] > offset + limit:
                        # past the requested page; stop scanning the log
                        break
            # only try to fetch one line when filepos is given
            if log_start_pos is not None:
                break
    # output results as a single JSON document on stdout
    print(ujson.dumps(result))
| 39.606061 | 106 | 0.599273 |
c3214282673aaeda28c84c61fc6d8f9be877c23b | 3,592 | py | Python | cairis/gui/DictionaryListCtrl.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | cairis/gui/DictionaryListCtrl.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | cairis/gui/DictionaryListCtrl.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from DictionaryEntryDialog import DictionaryEntryDialog
| 37.030928 | 133 | 0.733018 |
c32367d43e08138167f815beb65fbee346856f66 | 1,965 | py | Python | old_test/test-large.py | briandobbins/pynio | 1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c | [
"Apache-2.0"
] | null | null | null | old_test/test-large.py | briandobbins/pynio | 1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c | [
"Apache-2.0"
] | null | null | null | old_test/test-large.py | briandobbins/pynio | 1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function, division
import numpy as np
import Nio
import time, os

# time.clock() was deprecated since Python 3.3 and removed in 3.8; prefer
# perf_counter and keep a fallback chain so the script still runs on py2.
_clock = getattr(time, "perf_counter", getattr(time, "clock", time.time))

#
# Creating a file
#
init_time = _clock()
ncfile = 'test-large.nc'
# start from a clean slate so create_dimension/create_variable cannot collide
if os.path.exists(ncfile):
    os.system("/bin/rm -f " + ncfile)
opt = Nio.options()
opt.Format = "LargeFile"   # needed for files/dimensions beyond 32-bit limits
opt.PreFill = False        # skip prefill; every value gets overwritten below
nc = Nio.open_file(ncfile, 'w', options=opt)   # renamed from `file` (shadowed builtin)
nc.title = "Testing large files and dimensions"
nc.create_dimension('big', 2500000000)
bigvar = nc.create_variable('bigvar', "b", ('big',))
print("created bigvar")
# note it is incredibly slow to write a scalar to a large file variable
# so create a temporary variable x that will get assigned in steps
x = np.empty(1000000, dtype='int8')
x[:] = 42
# chunk boundaries: 0, 1e6, 2e6, ..., 2.499e9; write one million values at a time
t = list(range(0, 2500000000, 1000000))
for start, stop in zip(t, t[1:]):
    print(start, stop)
    bigvar[start:stop] = x[:]
# mark the final million values (and explicitly the last element) with 84
x[:] = 84
bigvar[2499000000:2500000000] = x[:]
bigvar[-1] = 84
bigvar.units = "big var units"
print(bigvar.dimensions)
# check unlimited status
for dim in list(nc.dimensions.keys()):
    print(dim, " unlimited: ", nc.unlimited(dim))
print(nc)
print("closing file")
print('elapsed time: ', _clock() - init_time)
nc.close()

#
# Reading a file
#
print('opening file for read')
print('elapsed time: ', _clock() - init_time)
nc = Nio.open_file(ncfile, 'r')
print('file is open')
print('elapsed time: ', _clock() - init_time)
print(nc.dimensions)
print(list(nc.variables.keys()))
print(nc)
print("reading variable")
print('elapsed time: ', _clock() - init_time)
x = nc.variables['bigvar']
# spot-check the first element, chunk boundaries and the last element
print(x[0], x[1000000], x[249000000], x[2499999999])
print("max and min")
vmin = x[:].min()   # renamed from min/max to stop shadowing the builtins
vmax = x[:].max()
print(vmin, vmax)
print('elapsed time: ', _clock() - init_time)
# check unlimited status
for dim in list(nc.dimensions.keys()):
    print(dim, " unlimited: ", nc.unlimited(dim))
print("closing file")
print('elapsed time: ', _clock() - init_time)
nc.close()
| 23.674699 | 71 | 0.689567 |
c323fd0a281a1d87543bdabbbe5b4427e5eec191 | 36,178 | py | Python | eeauditor/auditors/aws/Amazon_ECS_Auditor.py | kbhagi/ElectricEye | 31960e1e1cfb75c5d354844ea9e07d5295442823 | [
"Apache-2.0"
] | 442 | 2020-03-15T20:56:36.000Z | 2022-03-31T22:13:07.000Z | eeauditor/auditors/aws/Amazon_ECS_Auditor.py | kbhagi/ElectricEye | 31960e1e1cfb75c5d354844ea9e07d5295442823 | [
"Apache-2.0"
] | 57 | 2020-03-15T22:09:56.000Z | 2022-03-31T13:17:06.000Z | eeauditor/auditors/aws/Amazon_ECS_Auditor.py | kbhagi/ElectricEye | 31960e1e1cfb75c5d354844ea9e07d5295442823 | [
"Apache-2.0"
] | 59 | 2020-03-15T21:19:10.000Z | 2022-03-31T15:01:31.000Z | #This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# import boto3 clients
ecs = boto3.client("ecs")
# loop through ECS Clusters
| 57.60828 | 254 | 0.409199 |
c324c7d6ffabe1bf0c4f2f6e3eba09b511032c92 | 7,470 | py | Python | Mask/Interpolate slider without prepolate.py | typedev/RoboFont-1 | 307c3c953a338f58cd0070aa5b1bb737bde08cc9 | [
"MIT"
] | 1 | 2016-03-27T17:07:16.000Z | 2016-03-27T17:07:16.000Z | Mask/Interpolate slider without prepolate.py | typedev/RoboFont-1 | 307c3c953a338f58cd0070aa5b1bb737bde08cc9 | [
"MIT"
] | null | null | null | Mask/Interpolate slider without prepolate.py | typedev/RoboFont-1 | 307c3c953a338f58cd0070aa5b1bb737bde08cc9 | [
"MIT"
] | null | null | null | """
This slider controls interpolation between foreground and mask layers.
Initial position for slider is at 1.0 (current foreground outline)
Sliding left to 0.0 interpolates to mask
Sliding right to 3.0 extrapolates away from mask.
NOTE:
Running this script opens an observer on the current glyph in the Glyph View window.
The slider window must then be closed before it can be used on another glyph.
"""
from fontTools.misc.transform import Transform
from vanilla import *
g = CurrentGlyph()
g.prepareUndo('interpolate with mask')
################### PREPOLATION ###################################
## Auto contour order and startpoints for foreground:
#g.autoContourOrder()
#for c in g:
# c.autoStartSegment()
## Auto contour order and startpoints for mask:
g.flipLayers("foreground", "mask")
#g.autoContourOrder()
#for c in g:
# c.autoStartSegment()
## Gather point info for mask layer:
maskpoints = []
for i in range(len(g)):
maskpoints.append([])
for j in range(len(g[i])):
maskpoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y))
## Gather point info for foreground layer:
g.flipLayers("mask", "foreground")
forepoints = []
for i in range(len(g)):
forepoints.append([])
for j in range(len(g[i])):
forepoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y))
## Compare length of each contour in mask and foreground:
n = 0
print '-------------------------------'
print 'Checking ' + str(g.name) + ' without auto ordering'
mismatched = []
if len(maskpoints) == len(forepoints):
for i in range(len(forepoints)):
print '-------------------------------'
if len(forepoints[i]) == len(maskpoints[i]):
print 'Contour ' + str(i) + ' matches'
else:
n = n + 1
print 'Contour ' + str(i) + ':'
print str(len(forepoints[i])) + ' points in foreground'
print str(len(maskpoints[i])) + ' points in mask'
print '-------------------------------'
if len(forepoints[i]) > len(maskpoints[i]):
count = len(maskpoints[i])
prob = 'mask'
else:
count = len(forepoints[i])
prob = 'foreground'
for j in range(-1,count - 1):
foregrad = foregradient(i,j)
maskgrad = maskgradient(i,j)
if foregrad > 20:
foregrad = 100
if maskgrad > 20:
maskgrad = 100
if foregrad < -20:
foregrad = -100
if maskgrad < -20:
maskgrad = -100
if abs(foregrad - maskgrad) > 0.4:
mismatched.append(j+1)
mismatched = [mismatched[0]]
## Find second problem:
if prob == 'foreground':
foregrad = foregradient(i,j)
maskgrad = maskgradient(i,j+1)
else:
foregrad = foregradient(i,j+1)
maskgrad = maskgradient(i,j)
if foregrad > 20:
foregrad = 100
if maskgrad > 20:
maskgrad = 100
if foregrad < -20:
foregrad = -100
if maskgrad < -20:
maskgrad = -100
if abs(foregrad - maskgrad) > 0.4:
mismatched.append(j+1)
if abs(len(forepoints[i]) - len(maskpoints[i])) == 1:
if len(mismatched) == 1:
print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1)
else:
print 'Check amongst the last few points'
else:
if len(mismatched) == 2:
print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1)
print 'Check between points ' + str(mismatched[1]) + ' and ' + str(mismatched[1] + 1)
elif len(mismatched) == 1:
print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1)
print 'Check amongst the last few points'
else:
print 'Check amongst the last few points'
else:
print '-------------------------------'
print 'Foreground has ' + str(len(forepoints)) + ' contours'
print 'Mask has ' + str(len(maskpoints)) + ' contours'
print '-------------------------------'
################### INTERP SLIDER ###################################
## Collect mask points:
g.flipLayers("foreground", "mask")
all_mask_points = []
all_mask_points_length = []
for i in range(len(g)):
all_mask_points.append([])
for j in range(len(g[i].points)):
all_mask_points[i].append((g[i].points[j].x, g[i].points[j].y))
all_mask_points_length.append(j)
## Collect initial foreground points:
g.flipLayers("mask", "foreground")
all_fore_points = []
all_fore_points_length = []
for i in range(len(g)):
all_fore_points.append([])
for j in range(len(g[i].points)):
all_fore_points[i].append((g[i].points[j].x, g[i].points[j].y))
all_fore_points_length.append(j)
## Check for compatibility:
if n > 0:
pass
else:
## if compatible, interpolate:
OpenWindow(InterpWithMaskWindow, CurrentGlyph())
g.update()
g.performUndo()
t = Transform().translate(0, 0)
g.transform(t, doComponents=True)
g.update()
| 30.614754 | 105 | 0.493574 |
c3261a4d2211366618f8d261cfec66b8e3825641 | 429 | py | Python | ex062.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | ex062.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | ex062.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | # Continuao do ex061 (Termos de PA)
print('Gerador de PA')
print('-=' * 10)
primeiro = int(input('Primeiro termo: '))
razo = int(input('Razo: '))
i = 0
n = 10
novos = 10
total = 0
while novos != 0:
total = total + novos
while i < total:
termo = primeiro + razo * i
i += 1
print(termo, end=' -> ')
print('PAUSA')
novos = int(input('Deseja mostrar mais termos? Quantos? '))
print('FIM') | 23.833333 | 63 | 0.578089 |
c3266d2fbd585c2d178a2034896380f763b83e0c | 4,151 | py | Python | order/tests.py | DanLivassan/bookstore | f054c3dcb7d6b57c24f98ea28a23de0061d2ccf2 | [
"MIT"
] | null | null | null | order/tests.py | DanLivassan/bookstore | f054c3dcb7d6b57c24f98ea28a23de0061d2ccf2 | [
"MIT"
] | 1 | 2022-02-25T01:38:50.000Z | 2022-02-25T01:38:50.000Z | order/tests.py | DanLivassan/bookstore | f054c3dcb7d6b57c24f98ea28a23de0061d2ccf2 | [
"MIT"
] | null | null | null | from random import randint
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from order.serializers import OrderSerializer
from product.models import Product
from order.models import Order
from rest_framework import status
from rest_framework.test import APIClient
MAX_PER_PAGE = 5
def sample_user(email='test@mail.com', password='Sstring1'):
"""Creste a sample user"""
return get_user_model().objects.create_user(email, password)
| 33.747967 | 113 | 0.645387 |
c326bebf1fd8cf9fedf46e490c5cf11624fd3c7e | 6,950 | py | Python | sam-app/tests/unit/test_apns.py | mgacy/Adequate-Backend | 7f62f692a3fff53f825e597289515bffadb8f25c | [
"MIT"
] | 1 | 2021-06-03T07:27:18.000Z | 2021-06-03T07:27:18.000Z | sam-app/tests/unit/test_apns.py | mgacy/Adequate-Backend | 7f62f692a3fff53f825e597289515bffadb8f25c | [
"MIT"
] | 3 | 2021-04-06T18:36:02.000Z | 2021-06-16T04:22:27.000Z | sam-app/tests/unit/test_apns.py | mgacy/Adequate-Backend | 7f62f692a3fff53f825e597289515bffadb8f25c | [
"MIT"
] | null | null | null | import unittest
from .mocks import BotoSessionMock
from push_notification import apns
| 29.079498 | 109 | 0.489784 |
c327543b799027a0d190954bd8149ab8b7d7603f | 809 | py | Python | scrapets/extract.py | ownport/scrapets | e52609aae4d55fb9d4315f90d4e2fe3804ef8ff6 | [
"MIT"
] | 2 | 2017-06-22T15:45:52.000Z | 2019-08-23T03:34:40.000Z | scrapets/extract.py | ownport/scrapets | e52609aae4d55fb9d4315f90d4e2fe3804ef8ff6 | [
"MIT"
] | 9 | 2016-10-23T17:56:34.000Z | 2016-12-12T10:39:23.000Z | scrapets/extract.py | ownport/scrapets | e52609aae4d55fb9d4315f90d4e2fe3804ef8ff6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from HTMLParser import HTMLParser
# -------------------------------------------------------
#
# LinkExtractor: extract links from html page
#
| 20.74359 | 80 | 0.566131 |
c327b1f258b5961f3d1085b7f824f0cb5ee2f32a | 2,227 | py | Python | tour/forms.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
] | null | null | null | tour/forms.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
] | null | null | null | tour/forms.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
] | null | null | null | ## Django Packages
from django import forms
from django_select2 import forms as s2forms
## App packages
from .models import *
from datetime import datetime
from bootstrap_datepicker_plus import DatePickerInput, TimePickerInput, DateTimePickerInput, MonthPickerInput, YearPickerInput
from tags_input import fields
from lib.classes import CustomTagsInputField
############################################################################
############################################################################
| 31.814286 | 133 | 0.562191 |
c327d2cfdab4947c294367dcb469b4dd7dc0ab92 | 485 | py | Python | bots/test_analyseGithub.py | RSE2-D2/RSE2-D2 | eb535669cbc476b67d7cb6e1092eb0babe2f24df | [
"MIT"
] | 3 | 2020-04-02T09:39:44.000Z | 2020-04-02T14:26:48.000Z | bots/test_analyseGithub.py | RSE2-D2/RSE2-D2 | eb535669cbc476b67d7cb6e1092eb0babe2f24df | [
"MIT"
] | 16 | 2020-04-02T08:21:16.000Z | 2020-04-02T15:44:29.000Z | bots/test_analyseGithub.py | RSE2-D2/RSE2-D2 | eb535669cbc476b67d7cb6e1092eb0babe2f24df | [
"MIT"
] | 1 | 2020-04-02T08:36:41.000Z | 2020-04-02T08:36:41.000Z | import analyseGithub
| 30.3125 | 77 | 0.762887 |
c3283cdb2fefed11f9dc322c324670fa2d4fbccd | 1,069 | py | Python | tests/unit/utils/filebuffer_test.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
] | 2 | 2019-03-30T02:12:56.000Z | 2021-03-08T18:59:46.000Z | tests/unit/utils/filebuffer_test.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/filebuffer_test.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
] | 1 | 2020-12-04T11:28:06.000Z | 2020-12-04T11:28:06.000Z | # -*- coding: utf-8 -*-
'''
tests.unit.utils.filebuffer_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: 2012 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import salt libs
from saltunittest import TestCase, TestLoader, TextTestRunner
from salt.utils.filebuffer import BufferedReader, InvalidFileMode
if __name__ == "__main__":
loader = TestLoader()
tests = loader.loadTestsFromTestCase(TestFileBuffer)
TextTestRunner(verbosity=1).run(tests)
| 30.542857 | 75 | 0.663237 |
c328c8c92438a707d941d9592d5c8e996b8cd217 | 2,881 | py | Python | ranking_baselines/ARCII/rank_metrics.py | dileep1996/mnsrf_ranking_suggestion | 5bd241fb49f08fa4937539991e12e5a502d5a072 | [
"MIT"
] | 1 | 2020-02-04T18:27:25.000Z | 2020-02-04T18:27:25.000Z | ranking_baselines/DRMM/rank_metrics.py | dileep1996/mnsrf_ranking_suggestion | 5bd241fb49f08fa4937539991e12e5a502d5a072 | [
"MIT"
] | null | null | null | ranking_baselines/DRMM/rank_metrics.py | dileep1996/mnsrf_ranking_suggestion | 5bd241fb49f08fa4937539991e12e5a502d5a072 | [
"MIT"
] | null | null | null | ###############################################################################
# Author: Wasi Ahmad
# Project: ARC-II: Convolutional Matching Model
# Date Created: 7/18/2017
#
# File Description: This script contains ranking evaluation functions.
###############################################################################
import torch, numpy
def mean_average_precision(logits, target):
    """
    Compute mean average precision (MAP) over a batch of ranked lists.

    :param logits: 2d tensor [batch_size x num_clicks_per_query], relevance scores
    :param target: 2d tensor [batch_size x num_clicks_per_query], binary (0/1) labels
    :return: mean average precision [a float value]
    """
    assert logits.size() == target.size()
    # Rank the documents of each query by descending score; only the ranking
    # (indices) is needed, so the sorted values are discarded.
    _, indices = torch.sort(logits, 1, descending=True)
    total_ap = 0.0
    for i in range(indices.size(0)):
        precision_sum = 0.0
        num_rel = 0
        for j in range(indices.size(1)):
            # .item() replaces the deprecated `.data[0]` indexing, which
            # raises on 0-dim tensors in PyTorch >= 0.5.
            if target[i, indices[i, j].item()].item() == 1:
                num_rel += 1
                precision_sum += num_rel / (j + 1)
        # A query with no relevant documents contributes 0 instead of
        # raising ZeroDivisionError (a latent bug in the original).
        if num_rel > 0:
            total_ap += precision_sum / num_rel
    return total_ap / indices.size(0)
def NDCG(logits, target, k):
    """
    Compute normalized discounted cumulative gain at rank k (NDCG@k),
    averaged over the batch.

    :param logits: 2d tensor [batch_size x rel_docs_per_query], relevance scores
    :param target: 2d tensor [batch_size x rel_docs_per_query], binary (0/1) labels
    :param k: cutoff rank
    :return: mean NDCG@k [a float value]
    """
    assert logits.size() == target.size()
    assert logits.size(1) >= k, 'NDCG@K cannot be computed, invalid value of K.'
    # Rank the documents of each query by descending score.
    _, indices = torch.sort(logits, 1, descending=True)
    total_ndcg = 0.0
    for i in range(indices.size(0)):
        num_rel_docs = torch.nonzero(target[i]).size(0)
        # DCG of the predicted ranking, truncated at rank k.
        # (.item() replaces the deprecated `.data[0]` 0-dim indexing.)
        dcg_pred = 0.0
        for j in range(min(k, indices.size(1))):
            if target[i, indices[i, j].item()].item() == 1:
                dcg_pred += 1 / numpy.log2(j + 2)
        # Ideal DCG: all relevant documents ranked first, truncated at k.
        dcg_ideal = 0.0
        for j in range(min(k, num_rel_docs)):
            dcg_ideal += 1 / numpy.log2(j + 2)
        # A query with no relevant documents contributes 0 instead of
        # raising ZeroDivisionError (a latent bug in the original).
        if dcg_ideal > 0:
            total_ndcg += dcg_pred / dcg_ideal
    return total_ndcg / indices.size(0)
def MRR(logits, target):
    """
    Compute mean reciprocal rank (MRR) over a batch of ranked lists.

    :param logits: 2d tensor [batch_size x rel_docs_per_query], relevance scores
    :param target: 2d tensor [batch_size x rel_docs_per_query], binary (0/1) labels
    :return: mean reciprocal rank [a float value]
    """
    assert logits.size() == target.size()
    # Rank the documents of each query by descending score.
    _, indices = torch.sort(logits, 1, descending=True)
    total_rr = 0.0
    for i in range(indices.size(0)):
        for j in range(indices.size(1)):
            # Only the first (highest-ranked) relevant document counts.
            # .item() replaces the deprecated `.data[0]` 0-dim indexing.
            if target[i, indices[i, j].item()].item() == 1:
                total_rr += 1.0 / (j + 1)
                break
    return total_rr / logits.size(0)
c3292201406d3697087e8916c4dd2621e50dc55a | 192 | py | Python | src/wwucs/bot/__init__.py | reillysiemens/wwucs-bot | 9e48ba5dc981e36cd8b18345bcbd3768c3deeeb8 | [
"0BSD"
] | null | null | null | src/wwucs/bot/__init__.py | reillysiemens/wwucs-bot | 9e48ba5dc981e36cd8b18345bcbd3768c3deeeb8 | [
"0BSD"
] | null | null | null | src/wwucs/bot/__init__.py | reillysiemens/wwucs-bot | 9e48ba5dc981e36cd8b18345bcbd3768c3deeeb8 | [
"0BSD"
] | null | null | null | """WWUCS Bot module."""
__all__ = [
"__author__",
"__email__",
"__version__",
]
__author__ = "Reilly Tucker Siemens"
__email__ = "reilly@tuckersiemens.com"
__version__ = "0.1.0"
| 16 | 38 | 0.651042 |
c3295559778e2a7c61a68e36cb3971cb3e83f7f7 | 10,638 | py | Python | deploy/python/det_keypoint_unite_infer.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
] | null | null | null | deploy/python/det_keypoint_unite_infer.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
] | null | null | null | deploy/python/det_keypoint_unite_infer.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import cv2
import math
import numpy as np
import paddle
import yaml
from det_keypoint_unite_utils import argsparser
from preprocess import decode_image
from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log
from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint
from visualize import visualize_pose
from benchmark_utils import PaddleInferBenchmark
from utils import get_current_memory_mb
from keypoint_postprocess import translate_to_ori_images
# Maps a keypoint model architecture name to the pipeline family it uses.
KEYPOINT_SUPPORT_MODELS = {
    'HigherHRNet': 'keypoint_bottomup',
    'HRNet': 'keypoint_topdown'
}

if __name__ == '__main__':
    paddle.enable_static()
    # Parse command-line flags and echo them for the run log.
    parser = argsparser()
    FLAGS = parser.parse_args()
    print_arguments(FLAGS)
    # Normalize the device string before validating it.
    FLAGS.device = FLAGS.device.upper()
    assert FLAGS.device in ['CPU', 'GPU', 'XPU'], "device should be CPU, GPU or XPU"
    # NOTE(review): main() is defined elsewhere in this file (outside this chunk).
    main()
| 39.254613 | 112 | 0.611957 |
c329db170d0245164f12a99cffcce2a4d1c0ef5a | 551 | py | Python | plugins/google_cloud_compute/komand_google_cloud_compute/actions/disk_detach/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/google_cloud_compute/komand_google_cloud_compute/actions/disk_detach/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/google_cloud_compute/komand_google_cloud_compute/actions/disk_detach/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import insightconnect_plugin_runtime
from .schema import DiskDetachInput, DiskDetachOutput, Input, Component
| 34.4375 | 117 | 0.718693 |
c32a765467990449b567bcb5b74b49876b530290 | 431 | py | Python | troupon/payment/serializers.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
] | 14 | 2016-01-12T07:31:09.000Z | 2021-11-20T19:29:35.000Z | troupon/payment/serializers.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
] | 52 | 2015-09-02T14:54:43.000Z | 2016-08-01T08:22:21.000Z | troupon/payment/serializers.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
] | 17 | 2015-09-30T13:18:48.000Z | 2021-11-18T16:25:12.000Z | """Serializers for the payment app."""
from rest_framework import serializers
from models import Purchases
| 26.9375 | 61 | 0.654292 |
c32ab97ad989123fa02793d4bdfb1b13b2fa964a | 4,817 | py | Python | mla/kmeans.py | anshulg5/MLAlgorithms | 6c12ebe64016eabb9527fb1f18be81cd3ff0c599 | [
"MIT"
] | 1 | 2020-04-22T22:03:51.000Z | 2020-04-22T22:03:51.000Z | mla/kmeans.py | anshulg5/MLAlgorithms | 6c12ebe64016eabb9527fb1f18be81cd3ff0c599 | [
"MIT"
] | 1 | 2021-06-25T15:40:35.000Z | 2021-06-25T15:40:35.000Z | mla/kmeans.py | anshulg5/MLAlgorithms | 6c12ebe64016eabb9527fb1f18be81cd3ff0c599 | [
"MIT"
] | 2 | 2019-07-21T13:19:17.000Z | 2020-12-28T05:46:37.000Z | import random
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from mla.base import BaseEstimator
from mla.metrics.distance import euclidean_distance
random.seed(1111)
| 34.654676 | 119 | 0.622379 |
c32ad26a1993eb568f93f3377b6a0497a0eab914 | 10,741 | py | Python | train_classifier.py | justusmattern/dist-embeds | 2a5fd97bcfc3eed5c7f11e76d82c4ff49709cbe8 | [
"MIT"
] | null | null | null | train_classifier.py | justusmattern/dist-embeds | 2a5fd97bcfc3eed5c7f11e76d82c4ff49709cbe8 | [
"MIT"
] | null | null | null | train_classifier.py | justusmattern/dist-embeds | 2a5fd97bcfc3eed5c7f11e76d82c4ff49709cbe8 | [
"MIT"
] | null | null | null | import os
import sys
import argparse
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# from sru import *
import dataloader
import modules
if __name__ == "__main__":
    # Command-line interface for the text-classification trainer.
    argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')
    # Model architecture switches (mutually independent flags).
    argparser.add_argument("--cnn", action='store_true', help="whether to use cnn")
    argparser.add_argument("--lstm", action='store_true', help="whether to use lstm")
    # Data and embeddings.
    argparser.add_argument("--dataset", type=str, default="mr", help="which dataset")
    argparser.add_argument("--embedding", type=str, required=True, help="word vectors")
    # Optimization hyper-parameters.
    argparser.add_argument("--batch_size", "--batch", type=int, default=32)
    argparser.add_argument("--max_epoch", type=int, default=70)
    argparser.add_argument("--d", type=int, default=150)
    argparser.add_argument("--dropout", type=float, default=0.3)
    argparser.add_argument("--depth", type=int, default=1)
    argparser.add_argument("--lr", type=float, default=0.001)
    argparser.add_argument("--lr_decay", type=float, default=0)
    # Cross-validation fold index and output options.
    argparser.add_argument("--cv", type=int, default=0)
    argparser.add_argument("--save_path", type=str, default='')
    argparser.add_argument("--save_data_split", action='store_true', help="whether to save train/test split")
    argparser.add_argument("--gpu_id", type=int, default=0)
    argparser.add_argument("--kl_weight", type=float, default = 0.001)
    argparser.add_argument("--dist_embeds", action='store_true')
    args = argparser.parse_args()
    # args.save_path = os.path.join(args.save_path, args.dataset)
    print (args)
    torch.cuda.set_device(args.gpu_id)
    # NOTE(review): main(args) is defined elsewhere in this file (outside this chunk).
    main(args)
| 34.536977 | 147 | 0.553114 |
c32b3afbd77078a6af646cb681c0da4280c9fc0a | 1,844 | py | Python | app/request/queue.py | infrared5/massroute-pi | c2e16d655058c6c5531ec66f8a82fe41ad4e8427 | [
"MIT"
] | null | null | null | app/request/queue.py | infrared5/massroute-pi | c2e16d655058c6c5531ec66f8a82fe41ad4e8427 | [
"MIT"
] | null | null | null | app/request/queue.py | infrared5/massroute-pi | c2e16d655058c6c5531ec66f8a82fe41ad4e8427 | [
"MIT"
] | null | null | null | import logging
from time import sleep
logger = logging.getLogger(__name__)
| 28.8125 | 96 | 0.679501 |
c32c528f23adfd98c6057b14b36d3ef97d2f6fbf | 5,826 | py | Python | trimap_module.py | lnugraha/trimap_generator | a279562b0d0f387896330cf88549e67618d1eb7f | [
"MIT"
] | 168 | 2018-04-14T09:46:03.000Z | 2022-03-29T08:14:11.000Z | trimap_module.py | lnugraha/trimap_generator | a279562b0d0f387896330cf88549e67618d1eb7f | [
"MIT"
] | 7 | 2018-05-14T12:54:23.000Z | 2021-10-12T01:16:20.000Z | trimap_module.py | lnugraha/trimap_generator | a279562b0d0f387896330cf88549e67618d1eb7f | [
"MIT"
] | 35 | 2019-05-13T03:13:11.000Z | 2022-03-22T11:55:58.000Z | #!/usr/bin/env python
import cv2, os, sys
import numpy as np
def checkImage(image):
    """
    Validate that *image* is a strictly binary, single-channel image
    containing both pure black (0) and pure white (255) pixels.

    Args:
        image: input image to be checked
    Returns:
        True when the image is binary
    Raises:
        SystemExit (via sys.exit) for RGB, all-black, all-white,
        or grayscale input
    """
    # Guard clause: more than two dimensions means a color image.
    if len(image.shape) > 2:
        print("ERROR: non-binary image (RGB)")
        sys.exit()

    lo = image.min(axis=0).min(axis=0)  # darkest pixel value (0 = black)
    hi = image.max(axis=0).max(axis=0)  # brightest pixel value (255 = white)

    if lo == 0 and hi == 0:
        print("ERROR: non-binary image (all black)")
        sys.exit()
    if lo == 255 and hi == 255:
        print("ERROR: non-binary image (all white)")
        sys.exit()
    if lo > 0 or hi < 255:
        print("ERROR: non-binary image (grayscale)")
        sys.exit()
    return True
def morph_open(self, image, kernel):
    """
    Remove all white noises or speckles outside images
    Need to tune the kernel size
    Instruction:
        unit01 = Toolbox(image);
        kernel = np.ones( (9,9), np.uint8 );
        morph = unit01.morph_open(input_image, kernel);
    """
    # NOTE(review): the `image` parameter is ignored — the opening is applied
    # to self.image (the image this Toolbox was constructed with). Confirm
    # whether the parameter should be used instead.
    bin_open = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, kernel)
    return bin_open
def morph_close(self, image, kernel):
    """
    Remove all black noises or speckles inside images
    Need to tune the kernel size
    Instruction:
        unit01 = Toolbox(image);
        kernel = np.ones( (11,11)_, np.uint8 );
        morph = unit01.morph_close(input_image, kernel);
    """
    # NOTE(review): the `image` parameter is ignored — the closing is applied
    # to self.image (the image this Toolbox was constructed with). Confirm
    # whether the parameter should be used instead.
    bin_close = cv2.morphologyEx(self.image, cv2.MORPH_CLOSE, kernel)
    return bin_close
def trimap(image, name, size, number, erosion=False):
    """
    Create a trimap from a binary mask via morphological dilation:
    foreground stays 255 (white), the dilated band becomes 127 (gray,
    the "unknown" region), everything else is 0 (black).

    Inputs [5]: a binary image (black & white only), name of the image,
                dilation radius in pixels, an index appended to the output
                file name, and optionally how many erosion iterations are
                applied to the foreground first.
    Output    : writes the trimap as a PNG under ./images/results/
    """
    checkImage(image)
    pixels = 2 * size + 1  ## Double and plus 1 to have an odd-sized kernel
    kernel = np.ones((pixels, pixels), np.uint8)  ## Pixel of extension I get

    if erosion is not False:
        erosion = int(erosion)
        erosion_kernel = np.ones((3, 3), np.uint8)  ## Odd-sized erosion kernel
        image = cv2.erode(image, erosion_kernel, iterations=erosion)
        image = np.where(image > 0, 255, image)  ## Any gray pixel becomes white (smoothing)
        # Error-handler to prevent entire foreground annihilation
        if cv2.countNonZero(image) == 0:
            print("ERROR: foreground has been entirely eroded")
            sys.exit()

    dilation = cv2.dilate(image, kernel, iterations=1)
    dilation = np.where(dilation == 255, 127, dilation)  ## WHITE to GRAY
    # (A dead `remake = np.where(dilation != 127, 0, dilation)` assignment
    # that was immediately overwritten has been removed.)
    remake = np.where(image > 127, 200, dilation)  ## mark the foreground inside GRAY
    remake = np.where(remake < 127, 0, remake)     ## Embelishment
    remake = np.where(remake > 200, 0, remake)     ## Embelishment
    remake = np.where(remake == 200, 255, remake)  ## GRAY to WHITE

    # Ensure only the three trimap pixel values remain: vectorized
    # replacement for the original O(rows*cols) Python double loop.
    remake[(remake != 0) & (remake != 255)] = 127

    path = "./images/results/"  ## Change the directory
    new_name = '{}px_'.format(size) + name + '_{}.png'.format(number)
    cv2.imwrite(os.path.join(path, new_name), remake)
#############################################
### TESTING SECTION ###
#############################################
if __name__ == '__main__':
    # Smoke test: build a trimap for one bundled test image.
    path = "./images/test_images/test_image_11.png"
    # NOTE(review): extractImage and Toolbox are defined elsewhere in this file.
    image = extractImage(path)
    size = 10             # dilation radius in pixels
    number = path[-5]     # index digit taken from the file name ("...11.png" -> '1')
    title = "test_image"
    unit01 = Toolbox(image);
    kernel1 = np.ones( (11,11), np.uint8 )
    unit01.displayImage
    # Close the mask (fill black speckles) before generating the trimap.
    opening = unit01.morph_close(image,kernel1)
    trimap(opening, title, size, number, erosion=False)
    unit02 = Toolbox(opening)
    unit02.displayImage
########################################################
## Default instruction (no binary opening or closing ##
## trimap(image, title, size, number, erosion=False); ##
########################################################
| 35.309091 | 120 | 0.562993 |
c32c82635b0888813302ed2b3bc7efe4aeb79fdb | 4,047 | py | Python | integration/keeper_secrets_manager_ansible/tests/keeper_init.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
] | 9 | 2022-01-10T18:39:45.000Z | 2022-03-06T03:51:41.000Z | integration/keeper_secrets_manager_ansible/tests/keeper_init.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
] | 10 | 2022-01-27T00:51:05.000Z | 2022-03-30T08:42:01.000Z | integration/keeper_secrets_manager_ansible/tests/keeper_init.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
] | 6 | 2021-12-17T18:59:26.000Z | 2022-03-28T16:47:28.000Z | import unittest
from unittest.mock import patch
import os
from .ansible_test_framework import AnsibleTestFramework, RecordMaker
import keeper_secrets_manager_ansible.plugins
import tempfile
records = {
"TRd_567FkHy-CeGsAzs8aA": RecordMaker.make_record(
uid="TRd_567FkHy-CeGsAzs8aA",
title="JW-F1-R1",
fields={
"password": "ddd"
}
),
"A_7YpGBUgRTeDEQLhVRo0Q": RecordMaker.make_file(
uid="A_7YpGBUgRTeDEQLhVRo0Q",
title="JW-F1-R2-File",
files=[
{"name": "nailing it.mp4", "type": "video/mp4", "url": "http://localhost/abc", "data": "ABC123"},
{"name": "video_file.mp4", "type": "video/mp4", "url": "http://localhost/xzy", "data": "XYZ123"},
]
)
}
| 37.472222 | 111 | 0.596244 |
c32db06fd5b31b4a1c0ead0ae4470b3896648d58 | 1,180 | py | Python | wonambi/attr/__init__.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
] | 63 | 2017-12-30T08:11:17.000Z | 2022-01-28T10:34:20.000Z | wonambi/attr/__init__.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
] | 23 | 2017-09-08T08:29:49.000Z | 2022-03-17T08:19:13.000Z | wonambi/attr/__init__.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
] | 12 | 2017-09-18T12:48:36.000Z | 2021-09-22T07:16:07.000Z | """Packages containing all the possible attributes to recordings, such as
- channels (module "chan") with class:
- Chan
- anatomical info (module "anat") with class:
- Surf
- annotations and sleep scores (module "annotations") with class:
- Annotations
Possibly include forward and inverse models.
These attributes are only "attached" to the DataType, there should not be any
consistency check when you load them. The risk is that attributes do not refer
to the correct datatype, but the advantage is that we cannot keep track of all
the possible inconsistencies (f.e. if the channel names are not the same
between the actual channels and those stored in the Channels class).
In addition, these classes are often used in isolation, even without a dataset,
so do not assume that any of the classes in the module can call the main
dataset. In other words, these classes shouldn't have methods calling the
datatype, but there can be functions in the modules that use both the
dataset and the classes below.
"""
from .chan import Channels
from .anat import Brain, Surf, Freesurfer
from .annotations import Annotations, create_empty_annotations
| 43.703704 | 79 | 0.766102 |
c32fe65d24a5f464b2f3a2a3ac48a2c68f408fd3 | 1,418 | py | Python | Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py | LCS2-IIITD/summarization_bias | d66846bb7657439347f4714f2672350447474c5a | [
"MIT"
] | 1 | 2020-11-11T19:48:10.000Z | 2020-11-11T19:48:10.000Z | Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py | LCS2-IIITD/summarization_bias | d66846bb7657439347f4714f2672350447474c5a | [
"MIT"
] | null | null | null | Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py | LCS2-IIITD/summarization_bias | d66846bb7657439347f4714f2672350447474c5a | [
"MIT"
] | null | null | null | """
=========== What is Matter Parameters ===================
"""
#tups = [(125.0, 1.0), (125.0, 1.5), (125.0, 2.0), (125.0, 2.5), (125.0, 3.0), (150.0, 1.0), (150.0, 1.5), (150.0, 2.0), (150.0, 2.5), (150.0, 3.0), (175.0, 1.0), (175.0, 1.5), (175.0, 2.0), (175.0, 2.5), (175.0, 3.0), (200.0, 1.0), (200.0, 1.5), (200.0, 2.0), (200.0, 2.5), (200.0, 3.0), (225.0, 1.0), (225.0, 1.5), (225.0, 2.0), (225.0, 2.5), (225.0, 3.0), (250.0, 1.0), (250.0, 1.5), (250.0, 2.0), (250.0, 2.5), (250.0, 3.0)]
"""
=========== DUC Data ==========
"""
#tups = [(64.0, 1.0), (64.0, 1.5), (64.0, 2.0), (64.0, 2.5), (70.0, 1.0), (70.0, 1.5), (70.0, 2.0), (70.0, 2.5), (76.0, 1.0), (76.0, 1.5), (76.0, 2.0), (76.0, 2.5), (82.0, 1.0), (82.0, 1.5), (82.0, 2.0), (82.0, 2.5), (88.0, 1.0), (88.0, 1.5), (88.0, 2.0), (88.0, 2.5), (96.0, 1.0), (96.0, 1.5), (96.0, 2.0), (96.0, 2.5), (100.0, 1.0), (100.0, 1.5), (100.0, 2.0), (100.0, 2.5)]
#b = [1.0,1.5,2.0,2.5,3.0]
# alpha should be from [10,40]
#a = range(len(segpool)+10,len(segpool)+60,10)
#tups = list(itertools.product(a,b))
#print "Alll combinations ", tups
#tups = [(125, 1.0), (125, 1.5), (125, 2.0), (125, 2.5), (125, 3.0), (135, 1.0), (135, 1.5), (135, 2.0), (135, 2.5), (135, 3.0), (145, 1.0), (145, 1.5), (145, 2.0), (145, 2.5), (145, 3.0), (155, 1.0), (155, 1.5), (155, 2.0), (155, 2.5), (155, 3.0), (165, 1.0), (165, 1.5), (165, 2.0), (165, 2.5), (165, 3.0)]
#thresholds = [83] | 78.777778 | 428 | 0.43512 |
c33017f8651ee60d0cc6f759fc632d532c899c80 | 3,213 | py | Python | deploy/terraform/tasks.py | kinecosystem/blockchain-ops | fc21bbd2d3d405844857a8b3413718bacbaad294 | [
"MIT"
] | 15 | 2018-08-08T23:47:53.000Z | 2020-02-13T17:14:15.000Z | deploy/terraform/tasks.py | kinfoundation/stellar-ops | fc21bbd2d3d405844857a8b3413718bacbaad294 | [
"MIT"
] | 21 | 2018-10-16T09:20:32.000Z | 2019-12-15T19:01:56.000Z | deploy/terraform/tasks.py | yonikashi/blocktest | db044d74afc62f80f8f74060830347e82dd03adb | [
"MIT"
] | 9 | 2018-11-05T17:28:55.000Z | 2019-08-02T20:10:14.000Z | """Call various Terraform actions."""
import os
import os.path
from invoke import task
import jinja2
import yaml
TERRAFORM_VERSION = '0.11.7'
MAIN_TF_FILE = 'stellar-network.tf'
| 24.157895 | 81 | 0.62714 |
c331cb67fa44126ad7899136fc1a363b37ea7fe2 | 263 | py | Python | gdal/swig/python/scripts/gdal2xyz.py | Sokigo-GLS/gdal | 595f74bf60dff89fc5df53f9f4c3e40fc835e909 | [
"MIT"
] | null | null | null | gdal/swig/python/scripts/gdal2xyz.py | Sokigo-GLS/gdal | 595f74bf60dff89fc5df53f9f4c3e40fc835e909 | [
"MIT"
] | null | null | null | gdal/swig/python/scripts/gdal2xyz.py | Sokigo-GLS/gdal | 595f74bf60dff89fc5df53f9f4c3e40fc835e909 | [
"MIT"
] | null | null | null | import sys
# import osgeo.utils.gdal2xyz as a convenience to use as a script
from osgeo.utils.gdal2xyz import * # noqa
from osgeo.utils.gdal2xyz import main
from osgeo.gdal import deprecation_warn
deprecation_warn('gdal2xyz', 'utils')
sys.exit(main(sys.argv))
| 26.3 | 65 | 0.787072 |
c33241bd3d20aeeac4e2cda557798ad660937ce2 | 587 | py | Python | inferencia/task/person_reid/body_reid/model/body_reid_model_factory.py | yuya-mochimaru-np/inferencia | e09f298d0a80672fc5bb9383e23c941290eff334 | [
"Apache-2.0"
] | null | null | null | inferencia/task/person_reid/body_reid/model/body_reid_model_factory.py | yuya-mochimaru-np/inferencia | e09f298d0a80672fc5bb9383e23c941290eff334 | [
"Apache-2.0"
] | 5 | 2021-07-25T23:19:29.000Z | 2021-07-26T23:35:13.000Z | inferencia/task/person_reid/body_reid/model/body_reid_model_factory.py | yuya-mochimaru-np/inferencia | e09f298d0a80672fc5bb9383e23c941290eff334 | [
"Apache-2.0"
] | 1 | 2021-09-18T12:06:13.000Z | 2021-09-18T12:06:13.000Z | from .body_reid_model_name import BodyReidModelName
| 32.611111 | 66 | 0.577513 |
c3327d62d6a5e087ae5d5a099ea856c563dc576f | 3,931 | py | Python | cheatingbee/twitter.py | exoskellyman/cheatingbee | 1dd0710f9be8f40c3f23aa5bcac568588ac8feeb | [
"MIT"
] | null | null | null | cheatingbee/twitter.py | exoskellyman/cheatingbee | 1dd0710f9be8f40c3f23aa5bcac568588ac8feeb | [
"MIT"
] | null | null | null | cheatingbee/twitter.py | exoskellyman/cheatingbee | 1dd0710f9be8f40c3f23aa5bcac568588ac8feeb | [
"MIT"
] | null | null | null | import datetime
import io
import os
import tweepy
from dotenv import load_dotenv
from PIL import Image, ImageDraw, ImageFont
| 34.787611 | 86 | 0.58204 |
c332b852b9ea902789bff01e3510374ac9b4407d | 106 | py | Python | variables.py | MuhweziDeo/python_refresher | 0d100f88524ff780f1cee8afabfee1025c648f8b | [
"MIT"
] | null | null | null | variables.py | MuhweziDeo/python_refresher | 0d100f88524ff780f1cee8afabfee1025c648f8b | [
"MIT"
] | null | null | null | variables.py | MuhweziDeo/python_refresher | 0d100f88524ff780f1cee8afabfee1025c648f8b | [
"MIT"
] | null | null | null | x = 2
print(x)
# multiple assignment
a, b, c, d = (1, 2, 5, 9)
print(a, b, c, d)
print(type(str(a)))
| 8.833333 | 25 | 0.528302 |
c332e2fe6b727044df2454bc3e05a8e3dca73a1d | 4,773 | py | Python | examples/authentication/demo_auth.py | jordiyeh/safrs | eecfaf6d63ed44b9dc44b7b86c600db02989b512 | [
"MIT"
] | null | null | null | examples/authentication/demo_auth.py | jordiyeh/safrs | eecfaf6d63ed44b9dc44b7b86c600db02989b512 | [
"MIT"
] | null | null | null | examples/authentication/demo_auth.py | jordiyeh/safrs | eecfaf6d63ed44b9dc44b7b86c600db02989b512 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# This is a demo application to demonstrate the functionality of the safrs_rest REST API with authentication
#
# you will have to install the requirements:
# pip3 install passlib flask_httpauth flask_login
#
# This script can be ran standalone like this:
# python3 demo_auth.py [Listener-IP]
# This will run the example on http://Listener-Ip:5000
#
# - A database is created and a item is added
# - User is created and the User endpoint is protected by user:admin & pass: adminPASS
# - swagger2 documentation is generated
#
import sys
import os
import logging
import builtins
from functools import wraps
from flask import Flask, redirect, jsonify, make_response
from flask import abort, request, g, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String
from safrs import SAFRSBase, SAFRSJSONEncoder, Api, jsonapi_rpc
from flask_swagger_ui import get_swaggerui_blueprint
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPBasicAuth
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from flask.ext.login import LoginManager, UserMixin, \
login_required, login_user, logout_user
db = SQLAlchemy()
auth = HTTPBasicAuth()
# Example sqla database object
def start_app(app):
api = Api(app, api_spec_url = '/api/swagger', host = '{}:{}'.format(HOST,PORT), schemes = [ "http" ] )
item = Item(name='test',email='em@il')
user = User(username='admin')
user.hash_password('adminPASS')
api.expose_object(Item)
api.expose_object(User)
# Set the JSON encoder used for object to json marshalling
app.json_encoder = SAFRSJSONEncoder
# Register the API at /api/docs
swaggerui_blueprint = get_swaggerui_blueprint('/api', '/api/swagger.json')
app.register_blueprint(swaggerui_blueprint, url_prefix='/api')
print('Starting API: http://{}:{}/api'.format(HOST,PORT))
app.run(host=HOST, port = PORT)
#
# APP Initialization
#
app = Flask('demo_app')
app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite://',
SQLALCHEMY_TRACK_MODIFICATIONS = False,
SECRET_KEY = b'sdqfjqsdfqizroqnxwc',
DEBUG = True)
HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'
PORT = 5000
db.init_app(app)
#
# Authentication and custom routes
#
# Start the application
with app.app_context():
db.create_all()
start_app(app)
| 31.82 | 109 | 0.673581 |
c3335a14a14888a29737d6b5d92bb38bedb9c886 | 2,045 | py | Python | ev3/sensors/color.py | NewThingsCo/ev3-controller | 70d30617fa3ea6ef73a39a8c5360e8e4c72a9e98 | [
"BSD-2-Clause"
] | 1 | 2019-08-06T10:16:39.000Z | 2019-08-06T10:16:39.000Z | ev3/sensors/color.py | NewThingsCo/ev3-controller | 70d30617fa3ea6ef73a39a8c5360e8e4c72a9e98 | [
"BSD-2-Clause"
] | null | null | null | ev3/sensors/color.py | NewThingsCo/ev3-controller | 70d30617fa3ea6ef73a39a8c5360e8e4c72a9e98 | [
"BSD-2-Clause"
] | 1 | 2018-03-06T10:59:50.000Z | 2018-03-06T10:59:50.000Z | import goless
import time
from sys import platform
if platform == "linux" or platform == "linux2":
import brickpi3
if __name__ == '__main__':
print('for local testing read 100 color readings from port 1')
brick = brickpi3.BrickPi3()
readings = goless.chan()
start_color_sensor(brick, brick.PORT_3, readings)
for i in range(100):
case, val = goless.select([goless.rcase(readings)])
print(case, val)
print('100 reading are done, time to clean and exit')
brick.reset_all()
| 30.984848 | 89 | 0.642543 |
c333f525069086ebb8689eece355d91dd6b64f69 | 8,757 | py | Python | model/BPE.py | djmhunt/TTpy | 0f0997314bf0f54831494b2ef1a64f1bff95c097 | [
"MIT"
] | null | null | null | model/BPE.py | djmhunt/TTpy | 0f0997314bf0f54831494b2ef1a64f1bff95c097 | [
"MIT"
] | 4 | 2020-04-19T11:43:41.000Z | 2020-07-21T09:57:51.000Z | model/BPE.py | djmhunt/TTpy | 0f0997314bf0f54831494b2ef1a64f1bff95c097 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:Author: Dominic Hunt
"""
import logging
import numpy as np
import scipy as sp
import collections
import itertools
from model.modelTemplate import Model
| 33.680769 | 145 | 0.628183 |
c336028d3170491bb761554d05258241830c82fc | 1,688 | py | Python | affiliates/banners/tests/__init__.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
] | 15 | 2015-01-01T07:17:44.000Z | 2020-11-09T06:28:29.000Z | affiliates/banners/tests/__init__.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
] | 16 | 2015-02-25T23:17:27.000Z | 2015-08-20T10:28:18.000Z | affiliates/banners/tests/__init__.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
] | 12 | 2015-01-17T20:57:03.000Z | 2019-11-03T15:04:31.000Z | from django.db.models.signals import post_init
from factory import DjangoModelFactory, Sequence, SubFactory
from factory.django import mute_signals
from affiliates.banners import models
| 25.575758 | 70 | 0.776659 |
c337200f464a7d012b7b952e50ed5709111473ef | 7,996 | py | Python | cradlepy/framework/http.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
] | null | null | null | cradlepy/framework/http.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
] | null | null | null | cradlepy/framework/http.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
] | null | null | null | from .request import Request
from .response import Response
| 23.380117 | 83 | 0.617184 |
c3372092201a1e6f33ba16a8e3cd911550232f4d | 326 | py | Python | src/plugins/command/main.py | AlexCaranha/MyLauncher | d15037d5e26eee61e851a938b432ee1107f441ab | [
"MIT"
] | null | null | null | src/plugins/command/main.py | AlexCaranha/MyLauncher | d15037d5e26eee61e851a938b432ee1107f441ab | [
"MIT"
] | null | null | null | src/plugins/command/main.py | AlexCaranha/MyLauncher | d15037d5e26eee61e851a938b432ee1107f441ab | [
"MIT"
] | null | null | null |
import pluggy
hookimpl = pluggy.HookimplMarker('mylauncher')
| 15.52381 | 46 | 0.631902 |
c3374bd201ea3739cfe629bae1ecfda55d32a4e4 | 5,022 | py | Python | setup.py | kmike/UnbalancedDataset | 777f26cee73c04ae2f3d59e43c990cbfd1725b23 | [
"MIT"
] | 6 | 2016-06-02T09:27:41.000Z | 2021-04-21T06:46:12.000Z | setup.py | kmike/UnbalancedDataset | 777f26cee73c04ae2f3d59e43c990cbfd1725b23 | [
"MIT"
] | null | null | null | setup.py | kmike/UnbalancedDataset | 777f26cee73c04ae2f3d59e43c990cbfd1725b23 | [
"MIT"
] | 1 | 2018-08-25T03:11:05.000Z | 2018-08-25T03:11:05.000Z | #! /usr/bin/env python
"""Toolbox for unbalanced dataset in machine learning."""
from setuptools import setup, find_packages
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
descr = """Toolbox for unbalanced dataset in machine learning."""
DISTNAME = 'unbalanced_dataset'
DESCRIPTION = 'Toolbox for unbalanced dataset in machine learning.'
LONG_DESCRIPTION = descr
MAINTAINER = 'Fernando Nogueira, Guillaume Lemaitre'
MAINTAINER_EMAIL = 'fmfnogueira@gmail.com, g.lemaitre58@gmail.com'
URL = 'https://github.com/fmfn/UnbalancedDataset'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset'
# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by UnbalancedDataset to
# recursively build the compiled extensions in sub-packages is based on
# the Python import machinery.
builtins.__UNBALANCED_DATASET_SETUP__ = True
with open('unbalanced_dataset/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install UnbalancedDataset when Numpy is not yet
# present in the system.
from setuptools import setup
extra = {}
else:
print('To install UnbalancedDataset from source, you need numpy.' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager.')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=INSTALL_REQUIRES,
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
cmdclass={'build_py': build_py},
**extra
)
| 37.477612 | 79 | 0.597172 |
c33796a44fa6fcd9a0852b7e8f6f953771655ffa | 64 | py | Python | ores/scoring/models/__init__.py | elukey/ores | 18f6e2da642dd124daf4dc122d58032eb15485c9 | [
"MIT"
] | 69 | 2015-07-15T15:04:12.000Z | 2018-08-20T15:00:27.000Z | ores/scoring/models/__init__.py | elukey/ores | 18f6e2da642dd124daf4dc122d58032eb15485c9 | [
"MIT"
] | 146 | 2015-06-13T18:56:49.000Z | 2018-08-17T22:38:52.000Z | ores/scoring/models/__init__.py | elukey/ores | 18f6e2da642dd124daf4dc122d58032eb15485c9 | [
"MIT"
] | 34 | 2018-10-15T16:58:50.000Z | 2022-03-08T20:01:34.000Z | from .rev_id_scorer import RevIdScorer
__all__ = [RevIdScorer]
| 16 | 38 | 0.8125 |
c3386296bb9f34b0112bf5ce7c89306471d38bbf | 4,311 | py | Python | conjuntos.py | Tiesco789/guppe | 464702a2d618e149439a9b5c763f82c5376d2c32 | [
"MIT"
] | null | null | null | conjuntos.py | Tiesco789/guppe | 464702a2d618e149439a9b5c763f82c5376d2c32 | [
"MIT"
] | null | null | null | conjuntos.py | Tiesco789/guppe | 464702a2d618e149439a9b5c763f82c5376d2c32 | [
"MIT"
] | null | null | null | """
Conjuntos
Conjunto em qualquer linguagem de programao, estamos fazendo referncia teoria de conjuntos da matemtica
Aqui no Python, os conjuntos so chamados de sets
Dito isto, da mesma forma que na matemtica:
Sets (conjuntos) no possuem valores duplicados;
Sets (conjuntos) no possuem valores ordenados;
Elementos no so acessados via ndice, ou seja, conjuntos no so indexados;
Conjuntos so bons para se utilizar quando precisamos armazenar elementos mas no nos importamos com a ordenao deles.
Quando no precisamos se preocupar com chaves, valores e itens duplicados
Os conjuntos (sets) so referenciados em python com chaves {}
Diferena entre conjutnos (sets) e mapas (dicionrios) em python:
Um dicionrio tem chave/valor
Um conjunto tem apenas valor
# Definindo um conjunto
# Forma 1
s = set({1, 2, 3, 4, 5, 6, 7, 2, 3}) # Repare que temos valores repetidos
print(s)
print(type(s))
# OBS: Ao criar uim conjunto, caso seja adicionado um valor j existente, o mesmo ser ignorado sem gerar error e no far parde do conjunto
# Forma 2
s = {1, 2, 3, 4, 5, 5}
print(s)
print(type(s))
# Podemos verificar se um determinado valor est contido em um conjunto
if 3 in s:
print('Encontrei o valor 3')
else:
print('No encontrei o valor 3')
# Importante lembrar que, alem de no termos valores duplicados, os valores no so ordenados
dados = 99, 2, 34, 23, 2, 12, 1, 44, 5, 34
# Listas aceitam valores duplicados, ento temos 10 elementos
lista = list(dados)
print(f"Lista: {lista} com {len(lista)} elementos")
# Tuplas aceitam valores duplicados, ento temos 10 elementos
tupla = tuple(dados)
print(f"Tupla: {tupla} com {len(tupla)} elementos")
# Dicionrios no aceitam chaves duplicadas, ento temos 8 elementos
dicionario = {}.fromkeys(dados, 'dict')
print(f"Dicionrio: {dicionario} com {len(dicionario)} elementos")
# Conjuntos no aceitam valores duplicados, ento temos 8 elementos
conjunto = set(dados)
print(f"Conjunto: {conjunto} com {len(conjunto)} elementos")
# Assim como os outros conjuntos python, podemos colocar tipos de dados misturados em Sets
s = {1, 'b', True, 1.23, 44}
print(s)
print(type(s))
# Podemos iterar em um set normalmente
for valor in s:
print(valor)
# Usos interessantes com sets
# Imagine que fizemos um formulrio de cadastro de visitantes em uma feira ou museu,
# os visitantes informam manualmente a cidade de onde vieram
# Ns adicionamos cada cidade em uma lista Python, j que em uma lista podemos adicionar novos elmentos e ter repeties
cidades = ['Belo Horizante', 'So Paulo', 'Campo Grande',
'Cuiaba', 'Campo Grande', 'So Paulo', 'Cuiaba']
print(cidades)
print(len(cidades))
# Agora precisamos saber quantas cidades distintas, ou seja, nicas, temos.
# O que voc faria? Faria um loop na lista?
# Podemos utilizar o set para isso
print(len(set(cidades)))
s = {1, 2, 3}
s.add(4)
print(s)
s = {1, 2, 3}
s.remove(3)
print(s)
s.discard(2)
print(s)
# Copiando um conjunto para outro
# Forma 1 - Deep Copy
novo = s.copy()
print(novo)
novo.add(4)
print(novo)
print(s)
# Forma 2 - Shallow Copy
novo = s
novo.add(4)
print(novo)
print(s)
s = {1, 2, 3}
print(s)
s.clear()
print(s)
# Precisamos gerar qum conjunto com nomes de estudantes nicos
# Forma 1 - Utilizando union
# unicos1 = estudantes_python.union(estudantes_java)
# print(unicos1)
# Forma 2 - Utilizando o | pipe
unicos2 = estudantes_python | estudantes_java
print(unicos2)
# Gerar um conjunto de estudantes que esto em ambos os cursos
# Forma 1 - Utilizando union
ambos1 = estudantes_python.intersection(estudantes_java)
print(ambos1)
# Forma 2 - utilizando o &
ambos2 = estudantes_python & estudantes_java
print(ambos2)
# Mtodos matemticos de conjuntos
# Imagine que temos dois conjuntos: um contendo estudantes do curso Python e um
# Contendo estudantes do curso Java
estudantes_python = {'Pedro', 'Maria', 'Cludia', 'Joo', 'Marcos', 'Patricia'}
estudantes_java = {'Ana', 'Maria', 'Cludia', 'Joo', 'Marcos', 'Patricia'}
# Veja que alguns alins que estudam python tambm estudam java.
# Gerar um conjunto de estudantes que no esto no outro curso
so_python = estudantes_python.difference(estudantes_java)
print(so_python)
so_java = estudantes_java.difference(estudantes_python)
print(so_java)
"""
| 27.812903 | 140 | 0.740895 |
c338d3a2b98ef137f9b2463dce7a00499cad0407 | 1,346 | py | Python | tests/test_gpreg.py | cdgreenidge/gdec | 1ee6ab0156fa8f74683f5b7a7dfcb2c3f2a57d7f | [
"MIT"
] | null | null | null | tests/test_gpreg.py | cdgreenidge/gdec | 1ee6ab0156fa8f74683f5b7a7dfcb2c3f2a57d7f | [
"MIT"
] | null | null | null | tests/test_gpreg.py | cdgreenidge/gdec | 1ee6ab0156fa8f74683f5b7a7dfcb2c3f2a57d7f | [
"MIT"
] | null | null | null | """Test gpreg.py."""
from typing import Tuple
import numpy as np
import pytest
from gdec import gpreg, npgp
| 26.392157 | 80 | 0.665676 |
c33952e0e337955829e818701b7429be3b750ed1 | 1,008 | py | Python | gizer/all_schema_engines.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
] | null | null | null | gizer/all_schema_engines.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
] | null | null | null | gizer/all_schema_engines.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
__author__ = "Yaroslav Litvinov"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "yaroslav.litvinov@rackspace.com"
from mongo_schema import schema_engine
import os
def get_schema_files(schemas_dirpath):
""" get list of js / json files resided in dirpath param. """
res = []
for fname in os.listdir(schemas_dirpath):
if fname.endswith('json') or fname.endswith('js'):
res.append(fname)
res.sort()
return res
def get_schema_engines_as_dict(schemas_dirpath):
""" Load schema engines into dict.
Basename of schema file should be the name of collection"""
js_schema_files = get_schema_files(schemas_dirpath)
schemas = {}
for fname in js_schema_files:
collection_name = os.path.splitext(os.path.basename(fname))[0]
schema_path = os.path.join(schemas_dirpath, fname)
schemas[collection_name] = \
schema_engine.create_schema_engine(collection_name, schema_path)
return schemas
| 33.6 | 76 | 0.709325 |
c33a308f4b93fcb1577dde001c5501e493e62b57 | 146 | py | Python | WeatherPy/config.py.py | Brownc03/python-api-challenge | 24af57a6652b2990e8bdbc1e8e01566a2e7878b8 | [
"ADSL"
] | null | null | null | WeatherPy/config.py.py | Brownc03/python-api-challenge | 24af57a6652b2990e8bdbc1e8e01566a2e7878b8 | [
"ADSL"
] | null | null | null | WeatherPy/config.py.py | Brownc03/python-api-challenge | 24af57a6652b2990e8bdbc1e8e01566a2e7878b8 | [
"ADSL"
] | null | null | null | # OpenWeatherMap API Key
weather_api_key = "ae41fcf95db0d612b74e2b509abe9684"
# Google API Key
g_key = "AIzaSyCuF1rT6NscWq62bcBm0tZM7hKlaeWfONQ" | 29.2 | 52 | 0.842466 |
c33b670e9c5af9440c581f7412728d80706d9eb8 | 5,240 | py | Python | bin/runinterpret.py | christine-liu/somaticCNVpipeline | 254b709e611e56e5c891c663508ac79fa1093c07 | [
"MIT"
] | null | null | null | bin/runinterpret.py | christine-liu/somaticCNVpipeline | 254b709e611e56e5c891c663508ac79fa1093c07 | [
"MIT"
] | 2 | 2018-03-09T00:22:18.000Z | 2019-03-12T11:26:42.000Z | bin/runinterpret.py | christine-liu/somaticCNVpipeline | 254b709e611e56e5c891c663508ac79fa1093c07 | [
"MIT"
] | 6 | 2018-03-09T02:10:49.000Z | 2020-05-14T09:19:11.000Z | #!usr/bin/python
import os
import numpy as np
import common
from interpret import qcfile, funcfile, analyzefiles
| 31.566265 | 202 | 0.698473 |
c33c7a593798637e5989678bfdadfbeb83157154 | 29,527 | py | Python | mbio/EM/mrc.py | wzmao/mbio | af78cfdb47577199585179c3b04cc6cf3d6b401c | [
"MIT"
] | 2 | 2015-05-28T12:23:02.000Z | 2018-05-25T14:01:17.000Z | mbio/EM/mrc.py | wzmao/mbio | af78cfdb47577199585179c3b04cc6cf3d6b401c | [
"MIT"
] | null | null | null | mbio/EM/mrc.py | wzmao/mbio | af78cfdb47577199585179c3b04cc6cf3d6b401c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module contains the MRC file class.
"""
__author__ = 'Wenzhi Mao'
__all__ = ['MRC']
| 44.602719 | 265 | 0.507739 |
c33c9ccdc0ba66d9833eb045f4eb9b0711984aa5 | 2,416 | py | Python | links/management/commands/seed_data.py | darth-dodo/hackernews-backend | 402497a47271a90402624ed2c34b46ac08638440 | [
"MIT"
] | 3 | 2020-04-20T09:15:39.000Z | 2020-05-25T18:27:44.000Z | links/management/commands/seed_data.py | darth-dodo/hackernews-backend | 402497a47271a90402624ed2c34b46ac08638440 | [
"MIT"
] | null | null | null | links/management/commands/seed_data.py | darth-dodo/hackernews-backend | 402497a47271a90402624ed2c34b46ac08638440 | [
"MIT"
] | 1 | 2022-01-29T06:05:15.000Z | 2022-01-29T06:05:15.000Z | from random import randint
from django.core.management.base import BaseCommand
from django.db import transaction
from faker import Faker
from hn_users.models import HNUser, User
from links.models import Link, Vote
faker = Faker()
| 31.789474 | 105 | 0.624586 |
c33fdea58a4606282019dc0ca418482457a10cef | 3,010 | py | Python | locations/spiders/cenex.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | locations/spiders/cenex.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | locations/spiders/cenex.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
| 40.675676 | 88 | 0.469435 |
c3402d0f4328c3cbff771ca36bde6cdd1c05dd43 | 6,794 | py | Python | core/dbt/flags.py | tskleonard/dbt-core | c112050455e1f7b984c5c0d42a57a90a0d4d7053 | [
"Apache-2.0"
] | null | null | null | core/dbt/flags.py | tskleonard/dbt-core | c112050455e1f7b984c5c0d42a57a90a0d4d7053 | [
"Apache-2.0"
] | null | null | null | core/dbt/flags.py | tskleonard/dbt-core | c112050455e1f7b984c5c0d42a57a90a0d4d7053 | [
"Apache-2.0"
] | null | null | null | import os
import multiprocessing
if os.name != "nt":
# https://bugs.python.org/issue41567
import multiprocessing.popen_spawn_posix # type: ignore
from pathlib import Path
from typing import Optional
# PROFILES_DIR must be set before the other flags
# It also gets set in main.py and in set_from_args because the rpc server
# doesn't go through exactly the same main arg processing.
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")
PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR))
STRICT_MODE = False # Only here for backwards compatibility
FULL_REFRESH = False # subcommand
STORE_FAILURES = False # subcommand
# Global CLI commands
USE_EXPERIMENTAL_PARSER = None
STATIC_PARSER = None
WARN_ERROR = None
WRITE_JSON = None
PARTIAL_PARSE = None
USE_COLORS = None
DEBUG = None
LOG_FORMAT = None
VERSION_CHECK = None
FAIL_FAST = None
SEND_ANONYMOUS_USAGE_STATS = None
PRINTER_WIDTH = 80
WHICH = None
INDIRECT_SELECTION = None
LOG_CACHE_EVENTS = None
EVENT_BUFFER_SIZE = 100000
QUIET = None
# Global CLI defaults. These flags are set from three places:
# CLI args, environment variables, and user_config (profiles.yml).
# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR
flag_defaults = {
"USE_EXPERIMENTAL_PARSER": False,
"STATIC_PARSER": True,
"WARN_ERROR": False,
"WRITE_JSON": True,
"PARTIAL_PARSE": True,
"USE_COLORS": True,
"PROFILES_DIR": DEFAULT_PROFILES_DIR,
"DEBUG": False,
"LOG_FORMAT": None,
"VERSION_CHECK": True,
"FAIL_FAST": False,
"SEND_ANONYMOUS_USAGE_STATS": True,
"PRINTER_WIDTH": 80,
"INDIRECT_SELECTION": "eager",
"LOG_CACHE_EVENTS": False,
"EVENT_BUFFER_SIZE": 100000,
"QUIET": False,
}
def env_set_truthy(key: str) -> Optional[str]:
"""Return the value if it was set to a "truthy" string value, or None
otherwise.
"""
value = os.getenv(key)
if not value or value.lower() in ("0", "false", "f"):
return None
return value
MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING")
DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE")
ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH")
ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER")
# This is not a flag, it's a place to store the lock
MP_CONTEXT = _get_context()
| 35.94709 | 97 | 0.704592 |
c341709c7b99e4263e43265985148f2594b1d447 | 2,223 | py | Python | DataShine/DataShine.py | monk-after-90s/DataShine | e707d5a737ad1aca84a2646aa6d39fcfe430b58d | [
"MIT"
] | null | null | null | DataShine/DataShine.py | monk-after-90s/DataShine | e707d5a737ad1aca84a2646aa6d39fcfe430b58d | [
"MIT"
] | null | null | null | DataShine/DataShine.py | monk-after-90s/DataShine | e707d5a737ad1aca84a2646aa6d39fcfe430b58d | [
"MIT"
] | null | null | null | import asyncio
import functools
from copy import deepcopy
from ensureTaskCanceled import ensureTaskCanceled
def _no_closed(method):
'''
Can not be run when closed.
:return:
'''
return wrapper
if __name__ == '__main__':
asyncio.create_task(test())
| 24.977528 | 123 | 0.604139 |
c343356ef27f41702366f05b06f61bd4669c4a8d | 13,886 | py | Python | src/python/deepseq2.py | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
] | 8 | 2017-10-01T14:34:25.000Z | 2021-04-27T13:18:00.000Z | src/python/deepseq2.py | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
] | 1 | 2020-01-23T17:17:18.000Z | 2020-01-23T17:17:18.000Z | src/python/deepseq2.py | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
] | 1 | 2018-05-04T04:54:32.000Z | 2018-05-04T04:54:32.000Z | import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from src.python.baselines import *
from pymongo import MongoClient
from tqdm import tqdm
import tensorflow as tf
### Keras
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Embedding, Activation
from keras.layers import Conv2D, Conv1D
from keras.layers import Dropout, BatchNormalization
from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import Concatenate, Flatten, Reshape
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler
# from keras.losses import hinge, binary_crossentropy
from keras import backend as K
from sklearn.metrics import log_loss
import math
import argparse
sess = tf.Session()
K.set_session(sess)
LR = 0.001
BATCH_SIZE = 32
LONG_EXPOSURE = True
t0 = datetime(2014, 1, 1, 0, 0)
t1 = datetime(2014, 9, 1, 0, 0)
MAX_LENGTH = 2000
MIN_LENGTH = 30
def train(model, gen_xy, length_xy, epoch, num_epochs,
history=LossHistory(), lrate=LearningRateScheduler(step_decay)):
pbar = tqdm(total=length_xy)
for _, (X, Y) in gen_xy:
model.fit(x=X, y=Y,
batch_size=BATCH_SIZE,
epochs=num_epochs if LONG_EXPOSURE else epoch + 1,
verbose=0,
validation_data=None,
initial_epoch=epoch,
callbacks=[history])
pbar.set_description("Training Loss:%.5f" % np.mean(history.losses))
pbar.update(len(Y))
pbar.close()
def zeroone2oneminusone(vec):
return np.add(np.multiply(np.array(vec), 2), -1)
def oneminusone2zeroone(vec):
return np.divide(np.add(np.array(vec), 1), 2)
def calc_loss(y_true, y_pred):
return np.mean([log_loss(y, y_hat) for y, y_hat in zip(y_true, y_pred) if np.any(y)])
def predict(model, gen_xy, length_xy, classes):
pbar = tqdm(total=length_xy, desc="Predicting...")
i, m, n = 0, length_xy, len(classes)
ids = list()
y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
for i, (keys, (X, Y)) in enumerate(gen_xy):
k = len(Y)
ids.extend(keys)
y_hat, y = model.predict(X), Y
y_pred[i:i + k, ], y_true[i:i + k, ] = y_hat, y
pbar.update(k)
pbar.close()
return ids, y_true, y_pred
def evaluate(y_true, y_pred, classes):
y_pred = y_pred[~np.all(y_pred == 0, axis=1)]
y_true = y_true[~np.all(y_true == 0, axis=1)]
prs, rcs, f1s = performance(y_pred, y_true, classes)
return calc_loss(y_true, y_pred), prs, rcs, f1s
def add_arguments(parser):
    """Register this script's command-line options on *parser*."""
    add = parser.add_argument
    add("--mongo_url", type=str, default='mongodb://localhost:27017/',
        help="Supply the URL of MongoDB")
    add("--aspect", type=str, choices=['F', 'P', 'C'],
        default="F", help="Specify the ontology aspect.")
    add("--init_epoch", type=int, default=0,
        help="Which epoch to start training the model?")
    add("--arch", type=str, choices=['deepseq', 'motifnet', 'inception'],
        default="deepseq", help="Specify the model arch.")
    add('-r', '--resume', default='', type=str, metavar='PATH',
        help='path to latest checkpoint (default: none)')
# Script entry point: train one of three Keras architectures to predict
# Gene Ontology annotations for one ontology aspect, evaluating and
# checkpointing after every epoch.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args()
    ASPECT = args.aspect  # default: Molecular Function
    client = MongoClient(args.mongo_url)
    db = client['prot2vec']
    print("Loading Ontology...")
    onto = get_ontology(ASPECT)
    # classes = get_classes(db, onto)
    # The ontology root is removed from the label set — as an ancestor of
    # every term it carries no information as a prediction target.
    classes = onto.classes
    classes.remove(onto.root)
    assert onto.root not in classes
    opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    # Architecture selection. Each branch also sets the LONG_EXPOSURE
    # module global (read inside train()) and its own epoch budget.
    if args.arch == 'inception':
        model = ProteinInception(classes, opt)
        LONG_EXPOSURE = False
        num_epochs = 200
    elif args.arch == 'deepseq':
        model = DeeperSeq(classes, opt)
        LONG_EXPOSURE = True
        num_epochs = 20
    elif args.arch == 'motifnet':
        model = MotifNet(classes, opt)
        LONG_EXPOSURE = False
        num_epochs = 200
    else:
        print('Unknown model arch')
        exit(0)
    # Optionally resume from a previously saved weights checkpoint.
    if args.resume:
        model.load_weights(args.resume)
        print("Loaded model from disk")
    model.summary()
    for epoch in range(args.init_epoch, num_epochs):
        # Fresh training/validation streams are drawn from MongoDB each
        # epoch; train on one, predict + score on the other.
        trn_stream, tst_stream = get_training_and_validation_streams(db)
        train(model, batch_generator(trn_stream, onto, classes), len(trn_stream), epoch, num_epochs)
        _, y_true, y_pred = predict(model, batch_generator(tst_stream, onto, classes), len(tst_stream), classes)
        loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes)
        # F_max = best F1 across the evaluated operating points; report the
        # precision/recall at that same index.
        i = np.argmax(f1s)
        f_max = f1s[i]
        print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)"
              % (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i]))
        # Checkpoint weights (.hdf5), architecture (.json) and the label
        # ordering (.npy) every epoch, embedding loss and F_max in the name.
        model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max)
        model.save_weights("checkpoints/%s.hdf5" % model_str)
        with open("checkpoints/%s.json" % model_str, "w+") as f:
            f.write(model.to_json())
        np.save("checkpoints/%s.npy" % model_str, np.asarray(classes))
| 32.983373 | 124 | 0.631931 |
c343f679c520b2ae7bc168a588760750faee9e80 | 5,977 | py | Python | inbima.py | SkoltechAI/inbima | 4c22a864208091e3fb41ea7703c463c4189e78d1 | [
"MIT"
] | null | null | null | inbima.py | SkoltechAI/inbima | 4c22a864208091e3fb41ea7703c463c4189e78d1 | [
"MIT"
] | null | null | null | inbima.py | SkoltechAI/inbima | 4c22a864208091e3fb41ea7703c463c4189e78d1 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import openpyxl
import sys
from fs import FS
from journals import Journals
from utils import load_sheet
from utils import log
from word import Word
YEARS = [2017, 2018, 2019, 2020, 2021]
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) == 0:
ibm = InBiMa()
elif len(args) == 1 and args[0] == '-f':
ibm = InBiMa(is_new_folder=True)
elif len(args) == 2 and args[0] == '-j':
journals = Journals()
journals.load_ref()
journals.log_ref(title=args[1])
else:
raise ValueError('Invalid arguments for script')
| 32.840659 | 79 | 0.543918 |
c3462530e3c62749cd08fd4db0ee3cc3926324bb | 2,183 | py | Python | UkDatabaseAPI/UkDatabaseAPI/database/mongo_db.py | kplachkov/UkDatabase | 51db3183a86d3b07e0f97cc685f6f47ad4a8fc2e | [
"Apache-2.0"
] | null | null | null | UkDatabaseAPI/UkDatabaseAPI/database/mongo_db.py | kplachkov/UkDatabase | 51db3183a86d3b07e0f97cc685f6f47ad4a8fc2e | [
"Apache-2.0"
] | 3 | 2018-04-02T20:32:51.000Z | 2019-02-09T16:19:39.000Z | UkDatabaseAPI/UkDatabaseAPI/database/mongo_db.py | kplachkov/UkDatabase | 51db3183a86d3b07e0f97cc685f6f47ad4a8fc2e | [
"Apache-2.0"
] | null | null | null | import pymongo
from bson.json_util import dumps
from pymongo import MongoClient
from UkDatabaseAPI.database.database import Database
from UkDatabaseAPI.database.query_builder.mongo_query_builder import MongoQueryBuilder
MONGO_URI = "mongodb://localhost:27017"
"""str: The MongoDB URI."""
| 38.982143 | 103 | 0.639945 |