hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38fb3ca62ed5924a18e726aa270114cbccbf7a59 | 10,062 | py | Python | tests/integration/test_k8s.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | [
"MIT"
] | 95 | 2018-05-19T14:35:08.000Z | 2022-01-08T23:31:40.000Z | tests/integration/test_k8s.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | [
"MIT"
] | 179 | 2017-09-12T11:14:30.000Z | 2018-04-26T05:36:13.000Z | tests/integration/test_k8s.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | [
"MIT"
] | 16 | 2018-05-09T14:15:32.000Z | 2021-08-02T21:11:33.000Z | # -*- coding: utf-8 -*-
#
# Copyright Contributors to the Conu project.
# SPDX-License-Identifier: MIT
#
"""
Tests for Kubernetes backend
"""
import urllib3
import pytest
from conu import DockerBackend, \
K8sBackend, K8sCleanupPolicy
from conu.backend.k8s.pod import Pod, PodPhase
from conu.backend.k8s.service import Service
from conu.backend.k8s.deployment import Deployment
from conu.backend.k8s.client import get_core_api
from conu.utils import get_oc_api_token, oc_command_exists, is_oc_cluster_running
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
| 35.305263 | 100 | 0.550487 |
38fbd974e03682bea68c5248fd58ad877e5fa2e2 | 747 | py | Python | fanyi.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | [
"MIT"
] | 1 | 2019-05-30T08:08:34.000Z | 2019-05-30T08:08:34.000Z | fanyi.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | [
"MIT"
] | null | null | null | fanyi.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | [
"MIT"
] | null | null | null | #!/usr/bin/python
#coding:utf-8
# Minimal Baidu-translate client: POSTs an English phrase to the mobile
# "basetrans" endpoint and prints the Chinese translation from the reply.
import requests
import json
# Mobile Safari user agent plus a translate-page referer; presumably
# required for the endpoint to answer — TODO confirm against the current
# fanyi.baidu.com behaviour.
headers ={"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1","Referer": "http://fanyi.baidu.com/translate?aldtype=16047&query=&keyfrom=baidu&smartresult=dict&lang=auto2zh"}
url = "http://fanyi.baidu.com/basetrans"
# NOTE(review): raw_input() exists only on Python 2, so this script needs a
# Python 2 interpreter as written (the parenthesised print() calls run on
# either version) — confirm the intended interpreter.
words = raw_input(":")
# Form payload: translate *words* from English ("en") to Chinese ("zh").
requestdic ={"query":words,"from":"en","to":"zh"}
response = requests.post(url,data=requestdic,headers =headers)
# response.encoding = "utf-8"
print(response)
print(response.content.decode())
htmlstr = response.content.decode()
str1 = json.loads(htmlstr)
print(str1)
print(type(str1))
# The reply JSON carries the translated text at trans[0]["dst"].
str2 = str1["trans"][0]["dst"]
print(str2)
| 20.75 | 273 | 0.716198 |
38fbddf88e24f691af151a049dc107ebe16a7e13 | 13,228 | py | Python | frads/radmtx.py | LBNL-ETA/frads | dbd9980c7cfebd363089180d8fb1b7107e73ec92 | [
"BSD-3-Clause-LBNL"
] | 8 | 2019-11-13T22:26:45.000Z | 2022-03-23T15:30:37.000Z | frads/radmtx.py | LBNL-ETA/frads | dbd9980c7cfebd363089180d8fb1b7107e73ec92 | [
"BSD-3-Clause-LBNL"
] | null | null | null | frads/radmtx.py | LBNL-ETA/frads | dbd9980c7cfebd363089180d8fb1b7107e73ec92 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-08-10T18:22:04.000Z | 2021-08-30T23:16:27.000Z | """ Support matrices generation.
radmtx module contains two class objects: sender and receiver, representing
the ray sender and receiver in the rfluxmtx operation. sender object is can
be instantiated as a surface, a list of points, or a view, and these are
typical forms of a sender. Similarly, a receiver object can be instantiated as
a surface, sky, or suns.
"""
from __future__ import annotations
import os
import copy
import subprocess as sp
import tempfile as tf
import logging
from frads import makesky
from frads import radgeom
from frads import radutil, util
from typing import Optional
logger = logging.getLogger('frads.radmtx')
def prepare_surface(*, prims, basis, left, offset, source, out) -> str:
    """Prepare the sender or receiver surface, adding appropriate tags.

    Builds the ``#@rfluxmtx`` control header and re-emits every primitive
    under a shared ``rflx``-prefixed modifier, optionally shifted along the
    surface normal (the offset path assumes polygon primitives).

    Args:
        prims(list): list of primitives
        basis(str): sampling basis
        left(bool): use instead the left-hand rule
        offset(float): offset surface in its normal direction
        source(str): surface light source for receiver
        out: output path
    Returns:
        The receiver as string
    Raises:
        ValueError: if *basis* is None.
    """
    if basis is None:
        raise ValueError('Sampling basis cannot be None')
    # rfluxmtx expects the up vector as comma-separated components;
    # negate it when the left-hand rule is requested.
    upvector = str(radutil.up_vector(prims)).replace(' ', ',')
    upvector = "-" + upvector if left else upvector
    # All primitives are expected to share one modifier; warn otherwise
    # (only the first primitive's modifier is actually used below).
    modifier_set = {p.modifier for p in prims}
    if len(modifier_set) != 1:
        logger.warning("Primitives don't share modifier")
    src_mod = f"rflx{prims[0].modifier}"
    header = f'#@rfluxmtx h={basis} u={upvector}\n'
    if out is not None:
        header += f'#@rfluxmtx o="{out}"\n\n'
    if source is not None:
        # Declare the glow/source primitive feeding a receiver surface.
        source_line = f"void {source} {src_mod}\n0\n0\n4 1 1 1 0\n\n"
        header += source_line
    modifiers = [p.modifier for p in prims]
    content = ''
    for prim in prims:
        # Rename identifiers that collide with a modifier name used by
        # the set, so the emitted scene stays unambiguous.
        if prim.identifier in modifiers:
            _identifier = 'discarded'
        else:
            _identifier = prim.identifier
        _modifier = src_mod
        if offset is not None:
            # Shift every vertex along the polygon normal by *offset*.
            poly = radutil.parse_polygon(prim.real_arg)
            offset_vec = poly.normal().scale(offset)
            moved_pts = [pt + offset_vec for pt in poly.vertices]
            _real_args = radgeom.Polygon(moved_pts).to_real()
        else:
            _real_args = prim.real_arg
        new_prim = radutil.Primitive(
            _modifier, prim.ptype, _identifier, prim.str_arg, _real_args)
        content += str(new_prim) + '\n'
    return header + content
def rfluxmtx(*, sender, receiver, env, opt=None, out=None):
    """Call rfluxmtx to generate the matrices.

    Args:
        sender: Sender object; its ``form`` attribute selects the mode
            ('s' surface, 'p' point list, 'v' view)
        receiver: Receiver object
        env: model environment, either a list of primitives or a list of
            file paths; basically anything that's not the sender or receiver
        opt: option string forwarded to rfluxmtx
        out: output directory (only used for view senders)

    Returns:
        The stdout of the rfluxmtx command.

    Raises:
        ValueError: if sender or receiver is None.
    """
    if None in (sender, receiver):
        raise ValueError("Sender/Receiver object is None")
    opt = '' if opt is None else opt
    with tf.TemporaryDirectory() as tempd:
        receiver_path = os.path.join(tempd, 'receiver')
        with open(receiver_path, 'w') as wtr:
            wtr.write(receiver.receiver)
        if isinstance(env[0], dict):
            # Environment given as primitives: write them all into one
            # temporary file.  (Plain loop instead of the old throwaway
            # list comprehension used purely for its side effect.)
            env_path = os.path.join(tempd, 'env')
            with open(env_path, 'w') as wtr:
                for prim in env:
                    wtr.write(str(prim))
            env_paths = [env_path]
        else:
            env_paths = env
        cmd = ['rfluxmtx'] + opt.split()
        stdin = None
        if sender.form == 's':
            # Surface sender: rfluxmtx reads it from a file argument.
            sender_path = os.path.join(tempd, 'sender')
            with open(sender_path, 'wb') as wtr:
                wtr.write(sender.sender)
            cmd.extend([sender_path, receiver_path])
        elif sender.form == 'p':
            # Point sender: rays are piped in through stdin.
            cmd.extend(['-I+', '-faa', '-y', str(sender.yres), '-', receiver_path])
            stdin = sender.sender
        elif sender.form == 'v':
            # View sender: render HDR images, optionally into *out*.
            cmd.extend(["-ffc", "-x", str(sender.xres), "-y", str(sender.yres), "-ld-"])
            if out is not None:
                util.mkdir_p(out)
                out = os.path.join(out, '%04d.hdr')
                cmd.extend(["-o", out])
            cmd.extend(['-', receiver_path])
            stdin = sender.sender
        cmd.extend(env_paths)
        return util.spcheckout(cmd, inp=stdin)
def rcvr_oct(receiver, env, oct_path):
    """Build an octree combining the environment files with the receiver.

    Args:
        receiver: receiver object whose ``receiver`` attribute holds the
            rfluxmtx-tagged surface description
        env: list of environment file paths
        oct_path: destination path for the binary octree

    Returns:
        None; the octree bytes are written to *oct_path*.
    """
    with tf.TemporaryDirectory() as workdir:
        rcvr_file = os.path.join(workdir, 'rcvr_path')
        with open(rcvr_file, 'w') as fp:
            fp.write(receiver.receiver)
        command = ['oconv', '-f'] + env + [rcvr_file]
        octree_data = util.spcheckout(command)
        with open(oct_path, 'wb') as fp:
            fp.write(octree_data)
def rcontrib(*, sender, modifier: str, octree, out, opt) -> None:
    """Run rcontrib to generate matrices against a receiver octree.

    Args:
        sender: Sender object ('p' point list or 'v' view)
        modifier: modifier string listing the receivers inside the octree
        octree: octree containing both the environment and the receiver
        out: output path (treated as a directory for view senders)
        opt: option string forwarded to rcontrib

    Returns:
        None
    """
    options = opt.split()
    options.append('-fo+')
    with tf.TemporaryDirectory() as workdir:
        modifier_file = os.path.join(workdir, 'modifier')
        with open(modifier_file, 'w') as fp:
            fp.write(modifier)
        command = ['rcontrib'] + options
        stdin_data = sender.sender
        if sender.form == 'p':
            command += ['-I+', '-faf', '-y', str(sender.yres)]
        elif sender.form == 'v':
            util.mkdir_p(out)
            out = os.path.join(out, '%04d.hdr')
            command += ['-ffc', '-x', str(sender.xres), '-y', str(sender.yres)]
        command += ['-o', out, '-M', modifier_file, octree]
        util.spcheckout(command, inp=stdin_data)
| 36.043597 | 93 | 0.584971 |
38ffdd9e33324c5571bb8c9331725ff628bfcf97 | 7,354 | py | Python | QGrain/ui/NNResolverSettingWidget.py | yuriok/QGrain | 39a136d4e89716a26a88d68ffa00d36ef6036279 | [
"MIT"
] | 4 | 2021-09-04T11:00:07.000Z | 2022-01-06T12:50:09.000Z | QGrain/ui/NNResolverSettingWidget.py | yuriok/QGrain | 39a136d4e89716a26a88d68ffa00d36ef6036279 | [
"MIT"
] | null | null | null | QGrain/ui/NNResolverSettingWidget.py | yuriok/QGrain | 39a136d4e89716a26a88d68ffa00d36ef6036279 | [
"MIT"
] | 2 | 2021-08-23T02:39:07.000Z | 2021-12-30T03:04:07.000Z |
__all__ = ["NNResolverSettingWidget"]
import pickle
import numpy as np
import torch
from PySide2.QtCore import QSettings, Qt
from PySide2.QtWidgets import (QCheckBox, QComboBox, QDialog, QDoubleSpinBox,
QGridLayout, QLabel, QSpinBox)
from QGrain.models.NNResolverSetting import (NNResolverSetting,
built_in_distances)
if __name__ == "__main__":
    # Manual smoke test: show the settings widget, then round-trip a
    # customised setting object through its ``setting`` property.
    # (NNResolverSettingWidget is presumably defined earlier in this
    # module — not visible here; confirm before relying on this note.)
    import sys
    from QGrain.entry import setup_app
    app, splash = setup_app()
    main = NNResolverSettingWidget()
    main.show()
    splash.finish(main)
    # Read the current setting, tweak solver hyper-parameters, and write
    # it back so the UI should reflect the new values.
    setting = main.setting
    setting.device = "cuda"
    setting.tol = 1e-9
    setting.ftol = 1e-23
    setting.lr = 3e-2
    setting.eps = 1e-12
    main.setting = setting
    # Enter the Qt event loop; the process exits with Qt's return code.
    sys.exit(app.exec_())
| 43.77381 | 147 | 0.663992 |
ac012aecb07834c5fb29cd6c1e9f0c6f5ac9d379 | 284 | py | Python | week3_greedy_algorithms/6_maximum_number_of_prizes/different_summands.py | thegautamkumarjaiswal/Algorithm-s_ToolBox_Solutions | bb265647ed183f44e0d56f14a4b8b966af73dfd2 | [
"Apache-2.0"
] | null | null | null | week3_greedy_algorithms/6_maximum_number_of_prizes/different_summands.py | thegautamkumarjaiswal/Algorithm-s_ToolBox_Solutions | bb265647ed183f44e0d56f14a4b8b966af73dfd2 | [
"Apache-2.0"
] | null | null | null | week3_greedy_algorithms/6_maximum_number_of_prizes/different_summands.py | thegautamkumarjaiswal/Algorithm-s_ToolBox_Solutions | bb265647ed183f44e0d56f14a4b8b966af73dfd2 | [
"Apache-2.0"
] | null | null | null | # Uses python3
def optimal_summands(total):
    """Return a maximal-length list of distinct positive ints summing to *total*.

    Greedy scheme: take 1, 2, 3, ... while the remainder stays larger than
    twice the next candidate (so all summands remain distinct); the final
    summand absorbs whatever is left.  Returns [] for total <= 0, matching
    the original loop's behaviour.
    """
    summands = []
    remainder = total
    candidate = 1
    while remainder > 0:
        if remainder > 2 * candidate:
            summands.append(candidate)
            remainder -= candidate
        else:
            # Last summand: the remainder itself (guaranteed distinct).
            summands.append(remainder)
            remainder = 0
        candidate += 1
    return summands


if __name__ == "__main__":
    # The general algorithm already covers n == 1 ([1]), so the old
    # special case with quit() is unnecessary.
    n = int(input())
    prizes = optimal_summands(n)
    print(len(prizes))
print(' '.join([str(i) for i in prizes])) | 15.777778 | 41 | 0.5 |
ac015c8aae392f0c5e8f71cda13e428662656402 | 1,158 | py | Python | town/migrations/0002_shop.py | Snowman88/DODOTOWN | a817d645b3002d75a25dc543e8f253648f007ca7 | [
"Apache-2.0"
] | 1 | 2015-01-07T14:54:19.000Z | 2015-01-07T14:54:19.000Z | town/migrations/0002_shop.py | Snowman88/DODOTOWN | a817d645b3002d75a25dc543e8f253648f007ca7 | [
"Apache-2.0"
] | 2 | 2015-01-12T13:48:52.000Z | 2015-01-12T13:49:45.000Z | town/migrations/0002_shop.py | Snowman88/DODOTOWN | a817d645b3002d75a25dc543e8f253648f007ca7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
| 35.090909 | 114 | 0.603627 |
ac01a8777ade5c22566c19425f199dbb6101a624 | 8,700 | py | Python | testing/scipy_distutils-0.3.3_34.586/command/build_clib.py | fireballpoint1/fortranTOpy | 55843a62c6f0a2f8e2a777ef70193940d3d2d141 | [
"Apache-2.0"
] | 1 | 2018-08-26T05:10:56.000Z | 2018-08-26T05:10:56.000Z | testing/scipy_distutils-0.3.3_34.586/command/build_clib.py | fireballpoint1/fortranTOpy | 55843a62c6f0a2f8e2a777ef70193940d3d2d141 | [
"Apache-2.0"
] | null | null | null | testing/scipy_distutils-0.3.3_34.586/command/build_clib.py | fireballpoint1/fortranTOpy | 55843a62c6f0a2f8e2a777ef70193940d3d2d141 | [
"Apache-2.0"
] | 1 | 2018-06-26T18:06:44.000Z | 2018-06-26T18:06:44.000Z | """ Modified version of build_clib that handles fortran source files.
"""
import os
import string
import sys
import re
from glob import glob
from types import *
from distutils.command.build_clib import build_clib as old_build_clib
from distutils.command.build_clib import show_compilers
from scipy_distutils import log, misc_util
from distutils.dep_util import newer_group
from scipy_distutils.misc_util import filter_sources, \
has_f_sources, has_cxx_sources
| 38.666667 | 83 | 0.545057 |
ac024dc71c56bdd789c2d35559cf132d917d749c | 1,405 | py | Python | mayan/apps/web_links/migrations/0004_make_labes_unique.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/web_links/migrations/0004_make_labes_unique.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/web_links/migrations/0004_make_labes_unique.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 257 | 2019-05-14T10:26:37.000Z | 2022-03-30T03:37:36.000Z | from django.db import migrations
| 34.268293 | 80 | 0.661922 |
ac0252914b7769682c26f81c801eeba081b42d28 | 6,251 | py | Python | ansible-devel/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py | satishcarya/ansible | ed091e174c26316f621ac16344a95c99f56bdc43 | [
"MIT"
] | null | null | null | ansible-devel/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py | satishcarya/ansible | ed091e174c26316f621ac16344a95c99f56bdc43 | [
"MIT"
] | null | null | null | ansible-devel/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py | satishcarya/ansible | ed091e174c26316f621ac16344a95c99f56bdc43 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Matt Martz <matt@sivel.net>
# Copyright (C) 2016 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import runpy
import json
import os
import subprocess
import sys
from contextlib import contextmanager
from ansible.executor.powershell.module_manifest import PSModuleDepFinder
from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
from ansible.module_utils.six import reraise
from ansible.module_utils._text import to_bytes, to_text
from .utils import CaptureStd, find_executable, get_module_name_from_filename
def _fake_load_params():
    # Deliberate no-op; presumably patched over AnsibleModule's parameter
    # loading so modules can be imported/validated without real input —
    # TODO confirm against where this is monkey-patched in.
    pass
| 36.555556 | 117 | 0.699408 |
ac0406d097b2c425817270a16cec9aaa0dab57d1 | 425 | py | Python | events/migrations/0003_invitation_detail.py | ebar0n/mishteh | dd025add9b80dff2253c1ee976fc656dff3abc03 | [
"MIT"
] | null | null | null | events/migrations/0003_invitation_detail.py | ebar0n/mishteh | dd025add9b80dff2253c1ee976fc656dff3abc03 | [
"MIT"
] | null | null | null | events/migrations/0003_invitation_detail.py | ebar0n/mishteh | dd025add9b80dff2253c1ee976fc656dff3abc03 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-10-13 19:09
from django.db import migrations, models
| 23.611111 | 70 | 0.623529 |
ac045942a364b8e9223f019c563338e05ffde92d | 1,954 | py | Python | pygitscrum/print.py | thib1984/pygitscrum | 4f5f726e5b3c95f93da33c21da51953657bd0349 | [
"MIT"
] | 2 | 2021-04-23T11:10:32.000Z | 2021-04-23T11:10:41.000Z | pygitscrum/print.py | thib1984/pygitscrum | 4f5f726e5b3c95f93da33c21da51953657bd0349 | [
"MIT"
] | 2 | 2021-11-23T09:26:50.000Z | 2021-11-23T09:27:02.000Z | pygitscrum/print.py | thib1984/pygitscrum | 4f5f726e5b3c95f93da33c21da51953657bd0349 | [
"MIT"
] | null | null | null | """
print scripts
"""
from termcolor import colored
from pygitscrum.args import compute_args
import colorama
def print_resume_list(list_to_print, message):
    """
    print a list summary: a green header, one yellow line per element,
    and a green total; an empty list prints nothing
    """
    if not list_to_print:
        return
    print("")
    header = my_colored(message + " : ", "green")
    print(header)
    body = "\n".join(str(item) for item in list_to_print)
    print(my_colored(body, "yellow"))
    footer = my_colored("total : " + str(len(list_to_print)), "green")
    print(footer)
def print_resume_map(dict_to_print, message):
    """
    print a dict summary: a green header, one yellow line per key with
    its element count, and a green total line; an empty dict prints nothing
    """
    if not dict_to_print:
        return
    print("")
    print(my_colored(message + " : ", "green"))
    for key, count in dict_to_print.items():
        line = key + " --> " + str(count) + " elements"
        print(my_colored(line, "yellow"))
    total_elements = sum(dict_to_print.values())
    footer = (
        "total : " + str(len(dict_to_print))
        + " --> " + str(total_elements) + " elements "
    )
    print(my_colored(footer, "green"))
def print_debug(message):
    """
    print a debug line, but only when debug output is enabled
    """
    if not compute_args().debug:
        return
    print("debug : " + message)
def print_y(message):
    """
    display a message in yellow
    """
    text = my_colored(message, "yellow")
    print(text)
def print_g(message):
    """
    display a message in green
    """
    text = my_colored(message, "green")
    print(text)
def print_r(message):
    """
    display a message in red
    """
    text = my_colored(message, "red")
    print(text)
| 20.14433 | 53 | 0.449335 |
ac062225c63cd5c3323bbc8f4dcab95e8e43641a | 261 | py | Python | test/_test_compute_m.py | yiruiliu110/eegnn | 253773c301681bb00b4789c34f48c82468ad16da | [
"MIT"
] | null | null | null | test/_test_compute_m.py | yiruiliu110/eegnn | 253773c301681bb00b4789c34f48c82468ad16da | [
"MIT"
] | null | null | null | test/_test_compute_m.py | yiruiliu110/eegnn | 253773c301681bb00b4789c34f48c82468ad16da | [
"MIT"
] | null | null | null | import torch
from estimation import compute_m
i = [[0, 1, 1, 2],
[2, 0, 2, 1]]
v_z = [3, 4, 5, 2]
v_c = [0, 1, 1, 0]
z = torch.sparse_coo_tensor(i, v_z, (3, 3))
c = torch.sparse_coo_tensor(i, v_c, (3, 3))
max_K = 10
m = compute_m(z, c, max_K)
print(m) | 15.352941 | 43 | 0.578544 |
ac07185d13ca3e632e2ca4e17fcc91869d099238 | 3,677 | py | Python | test/SMSGateway_test.py | S2Innovation/ds-s2i-smsgateway | eed5ce3d630c26b0fd73117d79c84606a12bc783 | [
"MIT"
] | null | null | null | test/SMSGateway_test.py | S2Innovation/ds-s2i-smsgateway | eed5ce3d630c26b0fd73117d79c84606a12bc783 | [
"MIT"
] | null | null | null | test/SMSGateway_test.py | S2Innovation/ds-s2i-smsgateway | eed5ce3d630c26b0fd73117d79c84606a12bc783 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the SMSGateway project
#
#
#
# Distributed under the terms of the MIT license.
# See LICENSE.txt for more info.
"""Contain the tests for the SMSGateway for PANIC."""
# Path
import sys
import os
path = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.insert(0, os.path.abspath(path))
# Imports
from time import sleep
from mock import MagicMock
from PyTango import DevFailed, DevState
from devicetest import DeviceTestCase, main
from SMSGateway import SMSGateway
# Note:
#
# Since the device uses an inner thread, it is necessary to
# wait during the tests in order the let the device update itself.
# Hence, the sleep calls have to be secured enough not to produce
# any inconsistent behavior. However, the unittests need to run fast.
# Here, we use a factor 3 between the read period and the sleep calls.
#
# Look at devicetest examples for more advanced testing
# Device test case
# Main execution
if __name__ == "__main__":
    # main() comes from the devicetest package (imported above);
    # presumably it discovers and runs the test cases in this module —
    # TODO confirm against devicetest's documentation.
    main()
| 32.830357 | 77 | 0.661409 |
ac07adb5420f61816fe1726ed429cadf16e37902 | 505 | py | Python | simfile/_private/serializable.py | garcia/simfile | 4e15660c964d8d3c0e6d1f69431138e7eb4db288 | [
"MIT"
] | 22 | 2017-04-24T05:37:13.000Z | 2022-03-08T00:41:37.000Z | simfile/_private/serializable.py | garcia/simfile | 4e15660c964d8d3c0e6d1f69431138e7eb4db288 | [
"MIT"
] | 10 | 2021-05-31T01:21:56.000Z | 2022-03-17T04:26:54.000Z | simfile/_private/serializable.py | garcia/simfile | 4e15660c964d8d3c0e6d1f69431138e7eb4db288 | [
"MIT"
] | 3 | 2019-06-05T15:23:53.000Z | 2021-09-11T02:39:36.000Z | from abc import ABCMeta, abstractmethod
from io import StringIO
from typing import TextIO
| 24.047619 | 61 | 0.615842 |
ac07f9a51ba5bae2e9b9b9afd0ca35481fa33be3 | 214 | py | Python | Flask/Lezione4/webapp/project/serate/templates/serate/forms.py | nick87ds/MaterialeSerate | 51627e47ff1d3c3ecfc9ce6741c04b91b3295359 | [
"MIT"
] | 12 | 2021-12-12T22:19:52.000Z | 2022-03-18T11:45:17.000Z | Flask/Lezione4/webapp/project/serate/templates/serate/forms.py | nick87ds/MaterialeSerate | 51627e47ff1d3c3ecfc9ce6741c04b91b3295359 | [
"MIT"
] | 1 | 2022-03-23T13:58:33.000Z | 2022-03-23T14:05:08.000Z | Flask/Lezione4/webapp/project/serate/templates/serate/forms.py | nick87ds/MaterialeSerate | 51627e47ff1d3c3ecfc9ce6741c04b91b3295359 | [
"MIT"
] | 7 | 2021-02-01T22:09:14.000Z | 2021-06-22T08:30:16.000Z | from time import strftime
from flask_wtf import FlaskForm
from wtforms import (
Form,
validators,
StringField,
IntegerField,
SubmitField,
BooleanField,
SelectField,
TextAreaField,
)
| 16.461538 | 31 | 0.705607 |
ac0aebc29b01daccd8a1302b366f51ee3eb40958 | 190 | py | Python | intered/bin/django-admin.py | allenallen/interedregistration | d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9 | [
"MIT"
] | null | null | null | intered/bin/django-admin.py | allenallen/interedregistration | d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9 | [
"MIT"
] | 6 | 2020-02-11T23:05:13.000Z | 2021-06-10T20:43:51.000Z | intered/bin/django-admin.py | allenallen/interedregistration | d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9 | [
"MIT"
] | null | null | null | #!/home/allen/Documents/TamarawTechProjects/interedregistration/intered/bin/python3
from django.core import management
if __name__ == "__main__":
    # Dispatch the process's command-line arguments to Django's
    # management system, mirroring the stock django-admin entry point.
    management.execute_from_command_line()
| 31.666667 | 83 | 0.821053 |
ac0b33a69404bee3fc2c70f72e63ffeda7d74b5d | 746 | py | Python | generate_hamming_command.py | zoeleeee/mnist_challenge | 8a98f7dde35ee1d7a1fb77e85ca931000fb71631 | [
"MIT"
] | null | null | null | generate_hamming_command.py | zoeleeee/mnist_challenge | 8a98f7dde35ee1d7a1fb77e85ca931000fb71631 | [
"MIT"
] | null | null | null | generate_hamming_command.py | zoeleeee/mnist_challenge | 8a98f7dde35ee1d7a1fb77e85ca931000fb71631 | [
"MIT"
] | null | null | null | import numpy as np
import os
path = 'preds'
files = os.listdir(path)

# Keep only the CW prediction dumps of the epsilon-0 shard ('_0_HASH')
# that carry a low/high/mix tag and end in 'show.npy'.  ('x in s'
# replaces the old 's.find(x) == -1' checks; same semantics.)
lst = []
for f in files:
    if '_0_HASH' not in f:
        continue
    if 'CW' not in f:
        continue
    if 'low' not in f and 'high' not in f and 'mix' not in f:
        continue
    if f.endswith('show.npy'):
        lst.append(f)

for f in lst:
    strs = f.split('_0_HASH_')
    print(strs)
    # Load the sibling shards for tags 0/20/40/60 (one loop instead of
    # four duplicated load statements) and save their horizontal
    # concatenation under the next tag, 80.
    shards = [
        np.load(os.path.join(path, strs[0] + '_%d_HASH_' % tag + strs[1]))
        for tag in (0, 20, 40, 60)
    ]
    np.save(os.path.join(path, strs[0] + '_80_HASH_' + strs[1]),
            np.hstack(shards))
| 25.724138 | 82 | 0.567024 |
ac0bb6d2bc717836589dada86f31d1c73d9161be | 3,416 | py | Python | benchmarks/benchmarks/reflect.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 32 | 2016-04-18T15:29:59.000Z | 2022-03-27T08:35:29.000Z | benchmarks/benchmarks/reflect.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 116 | 2015-10-27T04:33:09.000Z | 2022-02-22T02:02:47.000Z | benchmarks/benchmarks/reflect.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 22 | 2015-09-29T23:21:15.000Z | 2022-02-27T18:12:18.000Z | import os.path
import numpy as np
import pickle
from .common import Benchmark
from refnx.analysis import CurveFitter, Objective, Parameter
import refnx.reflect
from refnx.reflect._creflect import abeles as c_abeles
from refnx.reflect._reflect import abeles
from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity
from refnx.dataset import ReflectDataset as RD
| 30.774775 | 77 | 0.603923 |
ac0d30f40fdb142e0b5b6ff9a6caa98ff58e125e | 1,257 | py | Python | app/__init__.py | annerachael/fifth_year_project | 3843b4e6315e9a5374f80a2aabc0bcb8423fd0d9 | [
"Apache-2.0"
] | null | null | null | app/__init__.py | annerachael/fifth_year_project | 3843b4e6315e9a5374f80a2aabc0bcb8423fd0d9 | [
"Apache-2.0"
] | null | null | null | app/__init__.py | annerachael/fifth_year_project | 3843b4e6315e9a5374f80a2aabc0bcb8423fd0d9 | [
"Apache-2.0"
] | null | null | null | # app/__init__.py
from flask import Flask
from redis import Redis
from rq_scheduler import Scheduler
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
"""
This file shall contain configurations for the web app
"""
# create app
app = Flask(__name__)
# Flask extension singletons.  They are instantiated here without an app
# argument, so presumably each is bound later via ``init_app`` — TODO
# confirm (note ``app`` itself is created directly above rather than via
# an application factory).
db = SQLAlchemy()
migrate = Migrate()
bootstrap = Bootstrap()
# Handles login functionality eg creating and removing login sessions
login = LoginManager()
| 25.14 | 69 | 0.731106 |
ac0e680fa5ad08e1900fc7ebe2eb246aebdc7e1d | 148 | py | Python | automation/openwebsites.py | abrahammachuki/dnav3-code | d278bf4facbc0702342f9c86a3845f0fb1c247bf | [
"MIT"
] | null | null | null | automation/openwebsites.py | abrahammachuki/dnav3-code | d278bf4facbc0702342f9c86a3845f0fb1c247bf | [
"MIT"
] | null | null | null | automation/openwebsites.py | abrahammachuki/dnav3-code | d278bf4facbc0702342f9c86a3845f0fb1c247bf | [
"MIT"
] | null | null | null | import webbrowser
# Bare host names without a scheme; each gets opened in the browser below.
website = ['site1', 'site2', 'site3', 'site4']
# NOTE(review): iterating the list directly (``for name in website``)
# would be more idiomatic than indexing through range(len(...)).
for i in range(len(website)):
    # Prefix with http:// so the browser treats the entry as a URL.
    site = 'http://' + website[i]
webbrowser.open(site) | 29.6 | 46 | 0.662162 |
ac105d162c447186bd1f92785b821628a3aa1ff5 | 1,865 | py | Python | hedger/tournament.py | dmalison/hedger | 8db634a484769fb4f3feb945c1847ef50803fafe | [
"MIT"
] | null | null | null | hedger/tournament.py | dmalison/hedger | 8db634a484769fb4f3feb945c1847ef50803fafe | [
"MIT"
] | null | null | null | hedger/tournament.py | dmalison/hedger | 8db634a484769fb4f3feb945c1847ef50803fafe | [
"MIT"
] | null | null | null | import hedger
from hedger import Result
| 27.835821 | 65 | 0.641287 |
ac110f6329cbb307af9cb472a6aa5c74e249a2ef | 5,294 | py | Python | src/tentaclio/clients/sqla_client.py | datavaluepeople/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 12 | 2019-04-30T16:07:42.000Z | 2021-12-08T08:02:09.000Z | src/tentaclio/clients/sqla_client.py | octoenergy/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 74 | 2019-04-25T11:18:22.000Z | 2022-01-18T11:31:14.000Z | src/tentaclio/clients/sqla_client.py | datavaluepeople/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 4 | 2019-05-05T13:13:21.000Z | 2022-01-14T00:33:07.000Z | """Provide sql connection using sqlalchemy.
This client is used for convinience when using different sql
providers and unifying the client creation. We do not intent to rewriter sqlalchemy.
"""
import contextlib
from typing import Container, Generator, Optional, Union
import pandas as pd
from sqlalchemy.engine import Connection, create_engine, result
from sqlalchemy.engine import url as sqla_url
from sqlalchemy.orm import session, sessionmaker
from sqlalchemy.sql.schema import MetaData
from tentaclio import urls
from . import base_client, decorators
__all__ = ["SQLAlchemyClient", "bound_session", "atomic_session"]
SessionGenerator = Generator[None, session.Session, None]
# Session context managers:
| 29.909605 | 98 | 0.657537 |
ac119758f2d8245405f37028d9f047dacd5dfbff | 1,678 | py | Python | polls/views.py | agustinsuana/mysite | 03e196470520d5b93f60d6be40358332490f349e | [
"MIT"
] | null | null | null | polls/views.py | agustinsuana/mysite | 03e196470520d5b93f60d6be40358332490f349e | [
"MIT"
] | null | null | null | polls/views.py | agustinsuana/mysite | 03e196470520d5b93f60d6be40358332490f349e | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import loader
from .models import Question
from django.http import Http404
#def detail(request, question_id):
# return HttpResponse("Estas viendo el detalle de %s." % question_id)
#def detail(request, question_id):
# try:
# question = Question.objects.get(pk = question_id)
# except Question.DoesNotExist:
# raise Http404("La pagina no existe")
# return render(request, 'polls/detail.html', {"question" : question})
| 34.958333 | 73 | 0.722288 |
ac12e55df64cb0e22fe581ffef24c9ea5ad38135 | 2,343 | py | Python | cctk/file.py | ekwan/cctk | 85cb8d0b714a80e8e353987dc24006695f1d0532 | [
"Apache-2.0"
] | 10 | 2020-01-16T15:26:57.000Z | 2022-01-15T23:12:00.000Z | cctk/file.py | ekwan/cctk | 85cb8d0b714a80e8e353987dc24006695f1d0532 | [
"Apache-2.0"
] | 2 | 2020-05-27T21:04:36.000Z | 2020-09-26T20:49:53.000Z | cctk/file.py | ekwan/cctk | 85cb8d0b714a80e8e353987dc24006695f1d0532 | [
"Apache-2.0"
] | 2 | 2020-09-24T18:44:18.000Z | 2021-08-05T20:35:51.000Z | import os
from abc import ABC, abstractmethod
| 28.573171 | 94 | 0.545881 |
ac14c5baab8284824cd35d4e64729e5b1523569f | 582 | py | Python | elif_bayindir/phase_1/python_basic_1/day_4/q8.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | elif_bayindir/phase_1/python_basic_1/day_4/q8.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | elif_bayindir/phase_1/python_basic_1/day_4/q8.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | # Question 8
# Print even numbers in a list, stop printing when the number is 237
numbers = [
    386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
    399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
    815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
    958, 743, 527
]


def evens_until_sentinel(nums, sentinel=237):
    """Return the even values of *nums*, in order, stopping at *sentinel*.

    Mirrors the original loop exactly: an even value is collected (237 is
    odd, so it can never be collected itself), and iteration stops as soon
    as the sentinel is encountered.
    """
    collected = []
    for value in nums:
        if value % 2 == 0:
            collected.append(value)
        elif value == sentinel:
            break
    return collected


if __name__ == "__main__":
    # Iterate the list directly instead of indexing via range(len(...));
    # the commented-out duplicate "alternative" block is now the real code.
    for value in evens_until_sentinel(numbers):
        print(value)
| 22.384615 | 85 | 0.573883 |
ac1751b7ad47eb3e921543a2e5f6b1310543b55f | 1,377 | py | Python | encode_morse.py | cmanagoli/github-upload | 9759b8ee905e1fd37b169231d2150de31e143191 | [
"MIT"
] | null | null | null | encode_morse.py | cmanagoli/github-upload | 9759b8ee905e1fd37b169231d2150de31e143191 | [
"MIT"
] | 4 | 2020-10-14T21:30:35.000Z | 2020-10-14T21:43:06.000Z | encode_morse.py | cmanagoli/github-upload | 9759b8ee905e1fd37b169231d2150de31e143191 | [
"MIT"
] | null | null | null | # Author: Chinmai Managoli
import sys as sys
# Morse code dictionary
char_to_dots = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..', ' ': ' ', '0': '-----',
'1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..', '9': '----.',
'&': '.-...', "'": '.----.', '@': '.--.-.', ')': '-.--.-', '(': '-.--.',
':': '---...', ',': '--..--', '=': '-...-', '!': '-.-.--', '.': '.-.-.-',
'-': '-....-', '+': '.-.-.', '"': '.-..-.', '?': '..--..', '/': '-..-.'
}
if __name__ == "__main__":
print("This program will encode a string into Morse. Unicode characters are not supported.")
string = input("Enter the message to be encoded: ")
encode_morse(string)
sys.exit()
| 34.425 | 96 | 0.336964 |
ac18c099c076caa8b9713e7d997b71df79627791 | 22,442 | py | Python | self_implement_learning_to_adapt/maml_rl.py | chi6/Model-based-meta-learning-rl | fda134dcbd87ef3e91f339ea2f836f28ec5f7784 | [
"MIT"
] | 2 | 2019-09-10T10:13:20.000Z | 2020-07-17T01:37:27.000Z | self_implement_learning_to_adapt/maml_rl.py | chi6/Model-based-meta-learning-rl | fda134dcbd87ef3e91f339ea2f836f28ec5f7784 | [
"MIT"
] | null | null | null | self_implement_learning_to_adapt/maml_rl.py | chi6/Model-based-meta-learning-rl | fda134dcbd87ef3e91f339ea2f836f28ec5f7784 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from self_implement_learning_to_adapt.model import construct_fc_weights,construct_inputs,construct_loss,forward_fc
from self_implement_learning_to_adapt.batch_sampler import ParrallelSampler
from self_implement_learning_to_adapt.vectorized_sampler import VectorizedSampler
from rllab.misc import ext
import matplotlib.pyplot as plt
import scipy.signal as signal
from rllab.sampler.stateful_pool import singleton_pool
| 43.832031 | 142 | 0.549238 |
ac1910c0faa98f5af77a97256be74c749f17725a | 279 | py | Python | DeepRTS/__init__.py | cair/deep-rts | 7aa5dde0c5df10ae3a3d057e7b89641aec58e115 | [
"MIT"
] | 144 | 2018-07-13T07:47:50.000Z | 2022-03-31T06:29:50.000Z | DeepRTS/__init__.py | cair/DeepRTS | 2ea4de0993ea0ca2677fdb36a172779db4ce7868 | [
"MIT"
] | 18 | 2019-03-29T10:37:01.000Z | 2022-03-02T12:47:34.000Z | DeepRTS/__init__.py | cair/DeepRTS | 2ea4de0993ea0ca2677fdb36a172779db4ce7868 | [
"MIT"
] | 23 | 2018-11-02T18:12:51.000Z | 2022-02-15T20:32:18.000Z | try:
from DeepRTS import Engine
except ImportError:
import Engine
try:
from DeepRTS.Engine import Map, UnitManager, Constants, Player
from DeepRTS.Engine import Constants
except ImportError:
from Engine import Map, UnitManager, Constants, Player, Constants
| 23.25 | 69 | 0.763441 |
ac19348529e2bf02c00c7ac8ed17b55456b351e7 | 4,196 | py | Python | openprocurement/auctions/swiftsure/utils.py | bdmbdsm/openprocurement.auctions.swiftsure | f5b93555eb12212c69c8168f861376eae85f4648 | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/swiftsure/utils.py | bdmbdsm/openprocurement.auctions.swiftsure | f5b93555eb12212c69c8168f861376eae85f4648 | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/swiftsure/utils.py | bdmbdsm/openprocurement.auctions.swiftsure | f5b93555eb12212c69c8168f861376eae85f4648 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from logging import getLogger
from pkg_resources import get_distribution
from openprocurement.auctions.core.plugins.contracting.base.utils import (
check_auction_status
)
from openprocurement.auctions.core.utils import (
cleanup_bids_for_cancelled_lots, check_complaint_status,
remove_draft_bids,
context_unpack,
get_now,
TZ,
)
PKG = get_distribution(__package__)
LOGGER = getLogger(PKG.project_name)
| 42.383838 | 147 | 0.651335 |
ac1a66d846bb2dcb096e3c89c166dcb12db849c3 | 12,989 | py | Python | src/plugins/bilibili/__init__.py | shinnenijou/JiBot | c468a67e301597ca702170aded96c5145d864115 | [
"MIT"
] | null | null | null | src/plugins/bilibili/__init__.py | shinnenijou/JiBot | c468a67e301597ca702170aded96c5145d864115 | [
"MIT"
] | null | null | null | src/plugins/bilibili/__init__.py | shinnenijou/JiBot | c468a67e301597ca702170aded96c5145d864115 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Python STL
from time import time, sleep
import asyncio
from collections import deque
# Third-party
from bilibili_api import Credential, comment
import nonebot
from nonebot.log import logger
from nonebot import on_command, require
from nonebot.permission import SUPERUSER, USER
from nonebot.adapters.onebot.v11 import GROUP_ADMIN, GROUP_OWNER
from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message, MessageSegment
# Self
import src.plugins.bilibili.dynamics as dynamics
import src.plugins.bilibili.db as db
import src.plugins.bilibili.users as users
from src.plugins.bilibili.live import LiveStatus, Room
# Initiate Database
db.init()
# Credential
SESSDATA = nonebot.get_driver().config.dict()['bili_sessdata']
BILI_JCT = nonebot.get_driver().config.dict()['bili_jct']
BUVID3 = nonebot.get_driver().config.dict()['bili_buvid3']
CREDENTIAL = Credential(SESSDATA, BILI_JCT, BUVID3)
# CONSTANT
BILI_SOURCE = nonebot.get_driver().config.dict()['bili_source']
BILI_TARGET = nonebot.get_driver().config.dict()['bili_target']
DYNAMIC_LISTEN_INTERVAL = nonebot.get_driver().config.dict()['dynamic_listen_interval']
LIVE_LISTEN_INTERVAL = nonebot.get_driver().config.dict()['live_listen_interval']
COMMENT_EXPIRATION = nonebot.get_driver().config.dict()['dynamic_comment_expiration']
# GLOBAL VIRIABLES
#UID_LIST, ROOM_LIST, NAME_LIST, NEWEST_DYNAMICS = db.get_user_list()
USER_LIST = db.get_user_list()
for uid, info in USER_LIST.items(): # Initialize Room list
info['room'] = Room(uid, info['room'], info['name'], CREDENTIAL)
TRANSLATOR_LIST = db.get_translator_list()
DYNAMIC_QUEUE = deque()
##########################
######### #########
helper = on_command(cmd='bili', priority=2, temp=False, block=True,
permission=GROUP_OWNER|GROUP_ADMIN|SUPERUSER)
#
scheduler = require('nonebot_plugin_apscheduler').scheduler
###########################
######### #########
###########################
######### #########
###########################
######### #########
send_comment = on_command(cmd='', priority=2, temp=False, block=True,
permission=USER(*TRANSLATOR_LIST.keys()))
###########################
######### #########
#
userlist = on_command(cmd='bili', priority=2, temp=False, block=True,
permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)
#
follow_user = on_command(cmd='bili', priority=2, temp=False, block=True,
permission=GROUP_OWNER|GROUP_ADMIN|SUPERUSER)
#
unfollow_user = on_command('bili', priority=2, temp=False, block=True,
permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)
#
translate_on = on_command('', priority=2, temp=False, block=True,
permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)
#
translate_off = on_command('', priority=2, temp=False, block=True,
permission=GROUP_ADMIN|GROUP_OWNER|SUPERUSER)
###########################
######### #########
#
show_translator = on_command(cmd='', priority=2, temp=False, block=True,
permission=SUPERUSER)
#
add_translator = on_command(cmd='', priority=2, temp=False, block=True,
permission=SUPERUSER)
#
remove_translator = on_command(cmd='', priority=2, temp=False, block=True,
permission=SUPERUSER)
| 37.111429 | 96 | 0.614212 |
ac1aafb69f1a23988f2e6e269f2b272b5e40a652 | 6,593 | py | Python | rlzoo/common/build_rlbench_env.py | tensorlayer/RLzoo | 9a587b97f706b2a59ac98555945822bf3987b1d1 | [
"Apache-2.0"
] | 750 | 2019-07-26T10:56:28.000Z | 2022-03-25T08:36:38.000Z | rlzoo/common/build_rlbench_env.py | tensorlayer/RLzoo | 9a587b97f706b2a59ac98555945822bf3987b1d1 | [
"Apache-2.0"
] | 29 | 2019-10-25T02:46:19.000Z | 2022-02-09T23:41:24.000Z | rlzoo/common/build_rlbench_env.py | tensorlayer/RLzoo | 9a587b97f706b2a59ac98555945822bf3987b1d1 | [
"Apache-2.0"
] | 101 | 2019-08-04T12:21:25.000Z | 2022-03-18T18:06:50.000Z | import sys
from collections import OrderedDict
import numpy as np
from gym import spaces
from pyrep.const import RenderMode
from pyrep.objects.dummy import Dummy
from pyrep.objects.vision_sensor import VisionSensor
from rlbench.environment import Environment
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.observation_config import ObservationConfig
from rlbench.tasks import *
# Don't forget to add: export PYTHONPATH=PATH_TO_YOUR_LOCAL_RLBENCH_REPO
# list of state types
state_types = ['left_shoulder_rgb',
'left_shoulder_depth',
'left_shoulder_mask',
'right_shoulder_rgb',
'right_shoulder_depth',
'right_shoulder_mask',
'wrist_rgb',
'wrist_depth',
'wrist_mask',
'joint_velocities',
'joint_velocities_noise',
'joint_positions',
'joint_positions_noise',
'joint_forces',
'joint_forces_noise',
'gripper_pose',
'gripper_touch_forces',
'task_low_dim_state']
| 40.447853 | 108 | 0.574245 |
ac1ab705364244ba4229d2c3d2d14676f947e30d | 4,117 | py | Python | scripts/generate/xgboost_.py | thomas-young-2013/automl_space | f6267dac27ae6b17123ec17c4a6c843a40e4e963 | [
"MIT"
] | 2 | 2021-04-23T10:31:47.000Z | 2021-11-25T07:59:05.000Z | scripts/generate/xgboost_.py | thomas-young-2013/automl_space | f6267dac27ae6b17123ec17c4a6c843a40e4e963 | [
"MIT"
] | null | null | null | scripts/generate/xgboost_.py | thomas-young-2013/automl_space | f6267dac27ae6b17123ec17c4a6c843a40e4e963 | [
"MIT"
] | 1 | 2021-08-11T15:03:13.000Z | 2021-08-11T15:03:13.000Z | import numpy as np
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, UnParametrizedHyperparameter
import argparse
import pickle as pkl
import os
import sys
sys.path.insert(0, '.')
from scripts.utils import check_none, check_for_bool
from scripts.generate.generate_utils import run_exp
parser = argparse.ArgumentParser()
parser.add_argument('--datasets', type=str, default='None')
parser.add_argument('--rep_num', type=int, default=1000)
cs = XGBoost.get_hyperparameter_search_space()
if __name__ == '__main__':
args = parser.parse_args()
datasets = args.datasets.split(',')
rep_num = args.rep_num
algo_id = 'xgboost'
run_exp(datasets, cs, rep_num, objective_func, algo_id, data_dir='../soln-ml/')
| 37.770642 | 107 | 0.683022 |
ac1adabe581fd30a1766857374cb20cf0b69b1b2 | 2,362 | py | Python | OnStage/player_chair.py | IanDCarroll/xox | 38feac84e81e8c00a397f7f976efee15756cd3ac | [
"MIT"
] | null | null | null | OnStage/player_chair.py | IanDCarroll/xox | 38feac84e81e8c00a397f7f976efee15756cd3ac | [
"MIT"
] | 30 | 2016-11-25T05:34:34.000Z | 2017-02-11T00:10:17.000Z | OnStage/player_chair.py | IanDCarroll/tik-tak-toe | 38feac84e81e8c00a397f7f976efee15756cd3ac | [
"MIT"
] | 1 | 2016-11-26T01:41:37.000Z | 2016-11-26T01:41:37.000Z | import sys
from Training.observer_abilities import *
from Training.cortex_3x3_caddy import *
| 27.149425 | 58 | 0.595682 |
ac1b3ebd8ffb64179ceb128585149b4d27bf039c | 575 | py | Python | content_f_conditions/ex45_multiple_conditions.py | Alyssonmach/learning-python-with-codes | e5ef70f3b56712e98449b3053eb34416b8025cb1 | [
"MIT"
] | 3 | 2020-11-28T08:26:54.000Z | 2020-12-23T18:37:37.000Z | content_f_conditions/ex45_multiple_conditions.py | Alyssonmach/learning-python-with-codes | e5ef70f3b56712e98449b3053eb34416b8025cb1 | [
"MIT"
] | 1 | 2021-02-12T12:17:49.000Z | 2021-02-12T12:17:49.000Z | content_f_conditions/ex45_multiple_conditions.py | Alyssonmach/learning-python-with-codes | e5ef70f3b56712e98449b3053eb34416b8025cb1 | [
"MIT"
] | null | null | null | requested_toppings = ['mushrooms', 'extra cheese']
if 'mushrooms' in requested_toppings:
print("Adding mushrooms.")
if 'pepperoni' in requested_toppings:
print("Adding pepperoni.")
if 'extra cheese' in requested_toppings:
print("Adding extra cheese.")
print("\nFinished making your first pizza!")
if 'mushrooms' in requested_toppings:
print("Adding mushrooms.")
elif 'pepperoni' in requested_toppings:
print("Adding pepperoni.")
elif 'extra cheese' in requested_toppings:
print("Adding extra cheese.")
print("\nFinished making your second pizza!") | 30.263158 | 50 | 0.73913 |
ac1cc4d7aa81ff92764789ce626b2f4acf382ec9 | 4,000 | py | Python | blog/models.py | njuaplusplus/j0shua | d14c657c72df157aaf2e471010b06bd85f415296 | [
"Apache-2.0"
] | null | null | null | blog/models.py | njuaplusplus/j0shua | d14c657c72df157aaf2e471010b06bd85f415296 | [
"Apache-2.0"
] | null | null | null | blog/models.py | njuaplusplus/j0shua | d14c657c72df157aaf2e471010b06bd85f415296 | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python
# coding=utf-8
from django.db import models
from django.utils.translation import ugettext as _
from markdown import markdown
from django.contrib.auth.models import User
from uuslug import uuslug
from django import forms
from pagedown.widgets import PagedownWidget
# from bootstrap3_datetime.widgets import DateTimePicker
from datetimewidget.widgets import DateTimeWidget
| 31.007752 | 131 | 0.58325 |
ac1cf53bf0793269cb8c9b3fe1ee4967ef2b9385 | 4,788 | py | Python | sw_edit.py | nellore/deidentify | e82befbf9d45e70df739ab2aaafaa1a5513e4aeb | [
"MIT"
] | null | null | null | sw_edit.py | nellore/deidentify | e82befbf9d45e70df739ab2aaafaa1a5513e4aeb | [
"MIT"
] | null | null | null | sw_edit.py | nellore/deidentify | e82befbf9d45e70df739ab2aaafaa1a5513e4aeb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
sw_edit.py
Deidentifies SW_SUMMARY.csv and SW_MINUTE.csv in LABS 2 data; these files
cannot be deidentified properly by date_eliminator.py. This script replaces
dates with days since first day.
This software is licensed under the MIT License.
Copyright (c) 2016 Abhinav Nellore
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import sys
import os
import csv
import datetime
import errno
import itertools
_date_formats = ['%m/%d/%Y', '%d/%m/%Y']
if __name__ == '__main__':
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--input-dir', '-i', type=str, required=True,
help=('input directory; should contain SW_MINUTE.csv and '
'SW_SUMMARY.csv')
)
parser.add_argument('--output-dir', '-o', type=str, required=True,
help='output directory'
)
args = parser.parse_args()
try:
os.makedirs(args.output_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Use sorted input file list to ensure reproducibility
with open(
os.path.join(args.input_dir, 'SW_MINUTE.csv')
) as minute_stream, open(
os.path.join(args.output_dir, 'SW_MINUTE.csv'), 'w'
) as output_stream:
try:
dialect = csv.Sniffer().sniff(minute_stream.read(1000000))
except csv.Error as e:
print >>sys.stderr, (
'Could not determine delimiter for SW_MINUTE.csv; '
'skipping....'
)
minute_stream.seek(0)
csv_reader = csv.reader(minute_stream, dialect)
# Print header
print >>output_stream, ','.join(csv_reader.next())
for key, group in itertools.groupby(csv_reader, lambda x:x[0]):
zero_date = None
for tokens in group:
if zero_date is None:
zero_date = datetime.datetime.strptime(tokens[7],
'%m/%d/%Y')
print >>output_stream, ','.join(tokens[:6] + [
tokens[6].partition('-')[0] + (
(' ' + ' '.join(tokens[6].split(' ')[-2:]))
if tokens[6].endswith('M') else ''), str(
(datetime.datetime.strptime(tokens[7],
'%m/%d/%Y')
- zero_date).days
)
] + tokens[8:])
with open(
os.path.join(args.input_dir, 'SW_SUMMARY.csv')
) as summary_stream, open(
os.path.join(args.output_dir, 'SW_SUMMARY.csv'), 'w'
) as output_stream:
try:
dialect = csv.Sniffer().sniff(summary_stream.read(1000000))
except csv.Error as e:
print >>sys.stderr, (
'Could not determine delimiter for SW_SUMMARY.csv; '
'skipping....'
)
summary_stream.seek(0)
csv_reader = csv.reader(summary_stream, dialect)
''' Print header; note field 8 is excluded because it's day of week,
which is more specific than year.'''
print >>output_stream, ','.join([token for i, token in enumerate(
csv_reader.next()
) if i != 8])
for tokens in csv_reader:
print >>output_stream, ','.join(tokens[:6] + [
tokens[6].rpartition('/')[-1],
tokens[7].rpartition('/')[-1]
] + tokens[9:]
)
| 41.634783 | 78 | 0.581662 |
ac1d0576b9d96127b532e1ac5e9548932d7f9611 | 39 | py | Python | pwas/__init__.py | cgreencode/pwas | e65901e115491ad9661832c7b622b01b1e81c934 | [
"MIT"
] | 19 | 2020-06-22T02:39:25.000Z | 2022-02-21T14:37:33.000Z | pwas/__init__.py | cgreencode/pwas | e65901e115491ad9661832c7b622b01b1e81c934 | [
"MIT"
] | 5 | 2020-09-28T11:26:01.000Z | 2021-05-06T15:34:16.000Z | pwas/__init__.py | cgreencode/pwas | e65901e115491ad9661832c7b622b01b1e81c934 | [
"MIT"
] | 4 | 2020-06-25T18:19:58.000Z | 2022-01-29T04:02:20.000Z | from .genotype import GenotypingManager | 39 | 39 | 0.897436 |
ac1ea04b12bfdc49c4c9cf7624e0e9869351d1bb | 6,191 | py | Python | CNN_freq.py | vishnubk/ml_tutorial_pulsars | 1a1b1eabbce43c39222b32974e29dfff5a722601 | [
"MIT"
] | null | null | null | CNN_freq.py | vishnubk/ml_tutorial_pulsars | 1a1b1eabbce43c39222b32974e29dfff5a722601 | [
"MIT"
] | null | null | null | CNN_freq.py | vishnubk/ml_tutorial_pulsars | 1a1b1eabbce43c39222b32974e29dfff5a722601 | [
"MIT"
] | null | null | null | from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from keras import backend as K
from keras import optimizers
import numpy as np
import math
from sklearn.model_selection import train_test_split
from keras.callbacks import TensorBoard
#x, x_test, y, y_test = = train_test_split(xtrain,labels,test_size=0.2,train_size=0.8)
img_width, img_height = 48, 48
input_shape = (48, 48, 1)
batch_size = 200
tensor_board = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
if K.image_data_format() == 'channels_first':
input_shape = (1, img_width, img_height)
else:
input_shape = (img_width, img_height, 1)
# Load all data
time_phase_pulsars = np.load('time_phase_data_pulsars.npy')
time_phase_nonpulsars = np.load('time_phase_data_nonpulsars.npy')
freq_phase_pulsars = np.load('freq_phase_data_pulsars.npy')
freq_phase_nonpulsars = np.load('freq_phase_data_nonpulsars.npy')
pulse_profile_pulsars = np.load('pulse_profile_data_pulsars.npy')
pulse_profile_nonpulsars = np.load('pulse_profile_data_nonpulsars.npy')
dm_curve_pulsars = np.load('dm_curve_data_pulsars.npy')
dm_curve_nonpulsars = np.load('dm_curve_data_nonpulsars.npy')
reshaped_time_phase_pulsars = [np.reshape(f,(48,48,1)) for f in time_phase_pulsars]
reshaped_time_phase_nonpulsars = [np.reshape(f,(48,48,1)) for f in time_phase_nonpulsars]
reshaped_freq_phase_pulsars = [np.reshape(f,(48,48,1)) for f in freq_phase_pulsars]
reshaped_freq_phase_nonpulsars = [np.reshape(f,(48,48,1)) for f in freq_phase_nonpulsars]
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# After these layers, we convert our 3D feature maps to 1D feature vectors ith the help of 'flatten'. We use 'dropout' layer to prevent overfitting
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
#print(model.summary())
label_reshaped_time_phase_pulsars = np.ones(len(reshaped_time_phase_pulsars))
label_reshaped_time_phase_nonpulsars = np.zeros(len(reshaped_time_phase_nonpulsars))
time_phase_data_combined = np.concatenate((reshaped_time_phase_pulsars, reshaped_time_phase_nonpulsars), axis = 0)
time_phase_label_combined = np.concatenate((label_reshaped_time_phase_pulsars, label_reshaped_time_phase_nonpulsars), axis = 0)
time_phase_train, time_phase_test, time_phase_label_train, time_phase_label_test = train_test_split(time_phase_data_combined, time_phase_label_combined, test_size=0.2, random_state=42)
model.fit_generator(generator(batch_size, time_phase_train, time_phase_label_train), steps_per_epoch=len(time_phase_train)//batch_size, epochs=100, callbacks=[tensor_board])
model.save_weights('first_try.h5')
#number_of_examples = len(time_phase_test)
#number_of_generator_calls = math.ceil(number_of_examples / (1.0 * batch_size))
predict = model.predict_generator(generator(batch_size, time_phase_test, time_phase_label_test), steps=math.ceil(len(time_phase_test)/batch_size))
np.save('predictions.npy', predict)
classified_results = np.rint(predict)
f_score = f1_score(time_phase_label_test, classified_results, average='binary')
precision = precision_score(time_phase_label_test, classified_results, average='binary')
recall = recall_score(time_phase_label_test, classified_results, average='binary')
print('F Score:', f_score, 'Precision:', precision, 'Recall:', recall)
print('Start testing GBNCC data')
gbncc_pulsars = np.load('time_phase_gbncc_test_data_pulsars.npy')
gbncc_nonpulsars = np.load('time_phase_gbncc_test_data_nonpulsars_part3.npy')
reshaped_time_phase_gbncc_pulsars = [np.reshape(f,(48,48,1)) for f in gbncc_pulsars]
reshaped_time_phase_gbncc_nonpulsars = [np.reshape(f,(48,48,1)) for f in gbncc_nonpulsars]
label_reshaped_time_phase_gbncc_pulsars = np.ones(len(gbncc_pulsars), dtype=np.int8)
label_reshaped_time_phase_gbncc_nonpulsars = np.zeros(len(gbncc_nonpulsars), dtype=np.int8)
time_phase_gbncc_data_combined = np.concatenate((reshaped_time_phase_gbncc_pulsars, reshaped_time_phase_gbncc_nonpulsars), axis = 0)
time_phase_gbncc_label_combined = np.concatenate((label_reshaped_time_phase_gbncc_pulsars, label_reshaped_time_phase_gbncc_nonpulsars), axis = 0)
predict = model.predict_generator(generator(batch_size, time_phase_gbncc_data_combined, time_phase_gbncc_label_combined), steps=math.ceil(len(time_phase_gbncc_data_combined)/batch_size))
np.save('predictions_gbncc.npy', predict)
#test = np.rint(predict)
#test = np.reshape(test, (22709))
test = np.random.uniform(0,1,22709)
test = np.rint(test)
f_score = f1_score(time_phase_gbncc_label_combined, test, average='binary')
precision = precision_score(time_phase_gbncc_label_combined, test, average='binary')
recall = recall_score(time_phase_gbncc_label_combined, test, average='binary')
print('F Score:', f_score, 'Precision:', precision, 'Recall:', recall)
accuracy = np.sum(test == time_phase_gbncc_label_combined)
print('Accuracy:', accuracy)
#generator(batch_size, time_phase_data_combined, time_phase_label_combined)
#train_datagen = ImageDataGenerator(rotation_range = 0)
#train_generator = train_datagen.flow_from_directory('train/', target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
#print(train_generator)
| 43.598592 | 186 | 0.800517 |
ac1eac77532b97e37684d7282cd7c2a9da13f188 | 1,332 | py | Python | src/config/contents.py | miloszowi/everyone-mention-telegram-bot | a6b441b197b743f57e089dbe32d262b87a155140 | [
"MIT"
] | 13 | 2021-09-20T17:04:28.000Z | 2022-03-15T09:27:25.000Z | src/config/contents.py | miloszowi/everyone-mention-telegram-bot | a6b441b197b743f57e089dbe32d262b87a155140 | [
"MIT"
] | null | null | null | src/config/contents.py | miloszowi/everyone-mention-telegram-bot | a6b441b197b743f57e089dbe32d262b87a155140 | [
"MIT"
] | null | null | null | # markdownv2 python-telegram-bot specific
joined = '{} joined group `{}`'
not_joined = '{} is already in group `{}`'
left = '{} left group `{}`'
not_left = '{} did not join group `{}` before'
mention_failed = 'There are no users to mention'
no_groups = 'There are no groups for this chat'
# html python-telegram-bot specific
start_text = """
Hello!
@everyone_mention_bot here.
I am here to help you with multiple user mentions.
<b>Usage</b>:
Users that joined the group by <code>/join</code> command,
can be mentioned after typing one of those in your message:
<code>@all</code>, <code>@channel</code>, <code>@chat</code>, <code>@everyone</code>, <code>@group</code> or <code>@here</code>.
If you did create a group named <code>gaming</code>, simply use <code>@gaming</code> to call users from that group.
You can also use <code>/everyone</code> command.
<b>Commands</b>:
<pre>/join {group-name}</pre>
Joins (or creates if group did not exist before) group.
<pre>/leave {group-name}</pre>
Leaves (or deletes if no other users are left) the group
<pre>/everyone {group-name}</pre>
Mentions everyone that joined the group.
<pre>/groups</pre>
Show all created groups in this chat.
<pre>/start</pre>
Show start & help text
<b>Please note</b>
<code>{group-name}</code> is not required, <code>default</code> if not given.
"""
| 30.976744 | 128 | 0.701201 |
ac202e2b18e2572dfa3962aaeb16577bcf9a3ce4 | 3,298 | py | Python | portal/libs/utils.py | Artis-Physis/utopia-cms | 5cb8d941d0b2df53fddc566a52e9d3baee4a007e | [
"BSD-3-Clause"
] | 8 | 2020-12-15T17:11:08.000Z | 2021-12-13T22:08:33.000Z | portal/libs/utils.py | Artis-Physis/utopia-cms | 5cb8d941d0b2df53fddc566a52e9d3baee4a007e | [
"BSD-3-Clause"
] | 28 | 2020-12-15T17:34:03.000Z | 2022-02-01T04:09:10.000Z | portal/libs/utils.py | Artis-Physis/utopia-cms | 5cb8d941d0b2df53fddc566a52e9d3baee4a007e | [
"BSD-3-Clause"
] | 7 | 2020-12-15T19:59:17.000Z | 2021-11-24T16:47:06.000Z | # -*- coding: utf-8 -*-
from hashlib import md5
import re
import smtplib
from django.conf import settings
from django.http import HttpResponseBadRequest
def md5file(filename):
"""
Re-implementation of md5sum in python. Return the hex digest of a file
without loading it all into memory.
By Nick Craig-Wood <nick@craig-wood.com>
"""
fh = open(filename)
digest = md5.new()
while 1:
buf = fh.read(4096)
if buf == "":
break
digest.update(buf)
fh.close()
return digest.hexdigest()
def smtp_connect(alternative=False):
"""
Authenticate to SMTP (if any auth needed) and return the conn instance.
If alternative is True, connect to the alternative SMTP instead of the default.
"""
email_conf = {}
for setting in ('HOST', 'PORT', 'HOST_USER', 'HOST_PASSWORD', 'USE_TLS'):
email_conf[setting] = getattr(settings, ('EMAIL_%s' + setting) % ('ALTERNATIVE_' if alternative else ''), None)
s = smtplib.SMTP(email_conf['HOST'], email_conf['PORT'])
if email_conf['USE_TLS']:
s.starttls()
if email_conf['HOST_USER']:
try:
s.login(email_conf['HOST_USER'], email_conf['HOST_PASSWORD'])
except smtplib.SMTPException:
pass
return s
| 28.678261 | 119 | 0.632201 |
ac20aa316bc8bb6155930c3ea46cc8a431427a33 | 382 | py | Python | Python_OO/DelegarAcessoAtributos.py | Madara701/Python_OO | 8d67569a8c4771dd82f5259c2ed5e782cd4e4036 | [
"Apache-2.0"
] | null | null | null | Python_OO/DelegarAcessoAtributos.py | Madara701/Python_OO | 8d67569a8c4771dd82f5259c2ed5e782cd4e4036 | [
"Apache-2.0"
] | null | null | null | Python_OO/DelegarAcessoAtributos.py | Madara701/Python_OO | 8d67569a8c4771dd82f5259c2ed5e782cd4e4036 | [
"Apache-2.0"
] | null | null | null |
b = B()
print(b.fazer_algo())
print(b.outro())
| 17.363636 | 34 | 0.565445 |
ac20ce5e35b873341e2f84f5642a7ee91537325e | 777 | py | Python | app/main.py | sebastian-hatala-leanix/chain-reaction-event | 15e34ce450b2c267eb9dd2ec5981fbfc51caac23 | [
"Apache-2.0"
] | null | null | null | app/main.py | sebastian-hatala-leanix/chain-reaction-event | 15e34ce450b2c267eb9dd2ec5981fbfc51caac23 | [
"Apache-2.0"
] | null | null | null | app/main.py | sebastian-hatala-leanix/chain-reaction-event | 15e34ce450b2c267eb9dd2ec5981fbfc51caac23 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
from numpy.linalg import norm
import requests
if __name__ == "__main__":
main() | 22.852941 | 77 | 0.655084 |
ac2256d9966602bb7091a7d1aec1644daf7e0196 | 8,134 | py | Python | habitat_baselines/utils/gym_adapter.py | srama2512/habitat-api | bc85d0961cef3b4a08bc9263869606109fb6ff0a | [
"MIT"
] | 355 | 2020-08-18T03:48:26.000Z | 2022-03-30T00:22:50.000Z | habitat_baselines/utils/gym_adapter.py | srama2512/habitat-api | bc85d0961cef3b4a08bc9263869606109fb6ff0a | [
"MIT"
] | 328 | 2020-08-12T21:25:09.000Z | 2022-03-31T10:39:21.000Z | habitat_baselines/utils/gym_adapter.py | srama2512/habitat-api | bc85d0961cef3b4a08bc9263869606109fb6ff0a | [
"MIT"
] | 159 | 2020-08-12T22:23:36.000Z | 2022-03-30T22:56:52.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Union
import gym
import numpy as np
from gym import spaces
from habitat.core.simulator import Observations
from habitat.utils.visualizations.utils import observations_to_image
| 36.475336 | 99 | 0.630194 |
ac25119082b547f7c1b12e27563aa843d253ac6b | 1,279 | py | Python | TopicExtractor/src/SampleFlaskApp/app.py | npnkbabu/mymlproject | 9b9aaeef4a5dac2d967262166ca8cdf4fa09cd5d | [
"Apache-2.0"
] | null | null | null | TopicExtractor/src/SampleFlaskApp/app.py | npnkbabu/mymlproject | 9b9aaeef4a5dac2d967262166ca8cdf4fa09cd5d | [
"Apache-2.0"
] | null | null | null | TopicExtractor/src/SampleFlaskApp/app.py | npnkbabu/mymlproject | 9b9aaeef4a5dac2d967262166ca8cdf4fa09cd5d | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, Response
from topicsconsumer import TopicsConsumer
import math
import time
import queue
import threading
import json
app = Flask(__name__)
if __name__ == '__main__':
app.run(debug=True,port=5050)
| 31.975 | 249 | 0.532447 |
ac28cc037b37abb77ef41622f5d43a997fc33033 | 690 | bzl | Python | runsc/test/build_defs.bzl | dna2fork/gvisor | eefa817cfdb04ff07e7069396f21bd6ba2c89957 | [
"Apache-2.0"
] | 2 | 2020-07-24T04:06:44.000Z | 2021-06-28T00:49:20.000Z | runsc/test/build_defs.bzl | dna2fork/gvisor | eefa817cfdb04ff07e7069396f21bd6ba2c89957 | [
"Apache-2.0"
] | null | null | null | runsc/test/build_defs.bzl | dna2fork/gvisor | eefa817cfdb04ff07e7069396f21bd6ba2c89957 | [
"Apache-2.0"
] | 1 | 2020-10-07T12:33:19.000Z | 2020-10-07T12:33:19.000Z | """Defines a rule for runsc test targets."""
load("@io_bazel_rules_go//go:def.bzl", _go_test = "go_test")
# runtime_test is a macro that will create targets to run the given test target
# with different runtime options.
def runtime_test(**kwargs):
"""Runs the given test target with different runtime options."""
name = kwargs["name"]
_go_test(**kwargs)
kwargs["name"] = name + "_hostnet"
kwargs["args"] = ["--runtime-type=hostnet"]
_go_test(**kwargs)
kwargs["name"] = name + "_kvm"
kwargs["args"] = ["--runtime-type=kvm"]
_go_test(**kwargs)
kwargs["name"] = name + "_overlay"
kwargs["args"] = ["--runtime-type=overlay"]
_go_test(**kwargs)
| 34.5 | 79 | 0.649275 |
ac2bbb2fda911605c3e823751d84b99cb9f30d2f | 918 | py | Python | python/akg/ms/utils.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | python/akg/ms/utils.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | python/akg/ms/utils.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils"""
# input format begin
DEFAULT = "DefaultFormat"
NCHW = "NCHW"
NHWC = "NHWC"
HWCN = "HWCN"
NC1HWC0 = "NC1HWC0"
FRAC_Z = "FracZ"
# input format end
# fusion type begin
ELEMWISE = "ELEMWISE"
CONVLUTION = "CONVLUTION"
COMMREDUCE = "COMMREDUCE"
SEGMENT = "SEGMENT"
OPAQUE = "OPAQUE"
# fusion type end
BINDS = "binds" | 27.818182 | 74 | 0.740741 |
ac2c508dc7ed127c37e761fe6253d817c522f603 | 5,254 | py | Python | foursight_core/sqs_utils.py | 4dn-dcic/foursight-core | 2e5ea594d38d04ad58f63ee42e5fb4b920bfb63c | [
"MIT"
] | null | null | null | foursight_core/sqs_utils.py | 4dn-dcic/foursight-core | 2e5ea594d38d04ad58f63ee42e5fb4b920bfb63c | [
"MIT"
] | 3 | 2021-08-11T07:09:24.000Z | 2022-02-16T18:58:45.000Z | foursight_core/sqs_utils.py | 4dn-dcic/foursight-core | 2e5ea594d38d04ad58f63ee42e5fb4b920bfb63c | [
"MIT"
] | null | null | null | from datetime import datetime
import boto3
import json
from .stage import Stage
| 33.896774 | 87 | 0.592882 |
ac2c93e2be1b6adb27d68e1973a9207d9ea0da74 | 1,476 | py | Python | setup.py | sabraha2/shadho | c17109a4526961113933d1189f0ca98eb8119ac1 | [
"MIT"
] | null | null | null | setup.py | sabraha2/shadho | c17109a4526961113933d1189f0ca98eb8119ac1 | [
"MIT"
] | null | null | null | setup.py | sabraha2/shadho | c17109a4526961113933d1189f0ca98eb8119ac1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
from setuptools import setup
from setuptools.command.install import install
LONG_DESCRIPTION = ""
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
LONG_DESCRIPTION = f.read()
setup(
name='shadho',
version='0.4.3.post2',
description='Hyperparameter optimizer with distributed hardware at heart',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/jeffkinnison/shadho',
author='Jeff Kinnison',
author_email='jkinniso@nd.edu',
python_requires='>=3.5',
packages=['shadho',
'shadho.installers',
'shadho.managers',
'shadho.workers',],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: POSIX',
'Operating System :: Unix',
],
keywords='machine_learning hyperparameters distributed_computing',
install_requires=[
'numpy',
'scipy',
'scikit-learn',
'pyrameter'
],
tests_require=['pytest'],
include_package_data=True,
)
| 31.404255 | 78 | 0.630759 |
ac2e50e2c0ed0f0cb69c0d20141ee9476a2e3b0c | 20,837 | py | Python | foyer/tests/test_gmso_forcefield.py | jennyfothergill/foyer | 60a738da43fbb18c7207821662688361a1e6017d | [
"MIT"
] | 62 | 2017-02-14T19:38:31.000Z | 2022-02-25T16:09:54.000Z | foyer/tests/test_gmso_forcefield.py | jennyfothergill/foyer | 60a738da43fbb18c7207821662688361a1e6017d | [
"MIT"
] | 384 | 2017-02-10T05:56:41.000Z | 2022-03-30T21:47:22.000Z | foyer/tests/test_gmso_forcefield.py | jennyfothergill/foyer | 60a738da43fbb18c7207821662688361a1e6017d | [
"MIT"
] | 65 | 2017-02-24T16:43:20.000Z | 2022-01-06T21:01:28.000Z | import difflib
import glob
import os
import gmso
import mbuild as mb
import pytest
from pkg_resources import resource_filename
from foyer.exceptions import FoyerError
from foyer.general_forcefield import Forcefield
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn, register_mock_request
FF_DIR = resource_filename("foyer", "forcefields")
FORCEFIELDS = glob.glob(os.path.join(FF_DIR, "xml/*.xml"))
RESPONSE_BIB_ETHANE_JA962170 = """@article{Jorgensen_1996,
doi = {10.1021/ja9621760},
url = {https://doi.org/10.1021%2Fja9621760},
year = 1996,
month = {jan},
publisher = {American Chemical Society ({ACS})},
volume = {118},
number = {45},
pages = {11225--11236},
author = {William L. Jorgensen and David S. Maxwell and Julian Tirado-Rives},
title = {Development and Testing of the {OPLS} All-Atom Force Field on Conformational Energetics and Properties of Organic Liquids},
journal = {Journal of the American Chemical Society}
}"""
RESPONSE_BIB_ETHANE_JP0484579 = """@article{Jorgensen_2004,
doi = {10.1021/jp0484579},
url = {https://doi.org/10.1021%2Fjp0484579},
year = 2004,
month = {oct},
publisher = {American Chemical Society ({ACS})},
volume = {108},
number = {41},
pages = {16264--16270},
author = {William L. Jorgensen and Jakob P. Ulmschneider and Julian Tirado-Rives},
title = {Free Energies of Hydration from a Generalized Born Model and an All-Atom Force Field},
journal = {The Journal of Physical Chemistry B}
}"""
| 41.757515 | 133 | 0.625474 |
ac2e60dace04a2bf9deac82a5edb304e62120e11 | 2,315 | py | Python | visualize/usecases/get_user_info.py | RevanthRyo/Alize | 60f4153c0c4b665e60c02bc90f99f833bf3173c8 | [
"Unlicense"
] | 160 | 2018-05-08T09:12:35.000Z | 2021-11-08T14:45:18.000Z | visualize/usecases/get_user_info.py | RevanthRyo/Alize | 60f4153c0c4b665e60c02bc90f99f833bf3173c8 | [
"Unlicense"
] | 15 | 2018-05-08T09:13:53.000Z | 2022-03-11T23:20:39.000Z | visualize/usecases/get_user_info.py | RevanthRyo/Alize | 60f4153c0c4b665e60c02bc90f99f833bf3173c8 | [
"Unlicense"
] | 12 | 2018-05-08T16:19:11.000Z | 2021-11-08T14:45:58.000Z | import requests
from django.conf import settings
from visualize.utils.api import Client
| 31.283784 | 84 | 0.651404 |
ac2ed7f7134d3ec9fcd2d668ed386c6b314f071b | 1,306 | py | Python | FusionIIIT/applications/counselling_cell/migrations/0002_auto_20210501_1036.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 1 | 2021-08-05T10:31:35.000Z | 2021-08-05T10:31:35.000Z | FusionIIIT/applications/counselling_cell/migrations/0002_auto_20210501_1036.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 1 | 2021-05-05T09:50:22.000Z | 2021-05-05T09:50:22.000Z | FusionIIIT/applications/counselling_cell/migrations/0002_auto_20210501_1036.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 4 | 2021-03-16T08:11:42.000Z | 2021-05-06T11:03:44.000Z | # Generated by Django 3.1.5 on 2021-05-01 10:36
from django.db import migrations, models
import django.db.models.deletion
| 31.853659 | 128 | 0.624043 |
ac2f7b7236d2c24e643b4339626cb713bad31a0c | 2,650 | py | Python | config.py | KodeInWork/openue_exp | 31b46999188bf73160af84c0e6f5d3e75c0c52ea | [
"MIT"
] | null | null | null | config.py | KodeInWork/openue_exp | 31b46999188bf73160af84c0e6f5d3e75c0c52ea | [
"MIT"
] | null | null | null | config.py | KodeInWork/openue_exp | 31b46999188bf73160af84c0e6f5d3e75c0c52ea | [
"MIT"
] | null | null | null | import os
| 38.970588 | 347 | 0.441132 |
ac3000c2861cfc54577cebc22127160c6d8e95fb | 86 | py | Python | ipfsApi/__init__.py | dardevelin/python-ipfs-api | 841595f7d2b07db511eef34aa046163ee4a3020e | [
"MIT"
] | 1 | 2018-08-14T02:10:35.000Z | 2018-08-14T02:10:35.000Z | ipfsApi/__init__.py | ipfs-client-libraries/python-ipfs-api | 841595f7d2b07db511eef34aa046163ee4a3020e | [
"MIT"
] | null | null | null | ipfsApi/__init__.py | ipfs-client-libraries/python-ipfs-api | 841595f7d2b07db511eef34aa046163ee4a3020e | [
"MIT"
] | 1 | 2022-01-28T13:37:31.000Z | 2022-01-28T13:37:31.000Z | from __future__ import absolute_import
from .client import *
__version__ = '0.2.2'
| 12.285714 | 38 | 0.755814 |
ac31a080b5043ec20246c62e223e567243898b56 | 440 | py | Python | leprikon/models/useragreement.py | leprikon-cz/leprikon | b1bec36fb6bcf0220bffccca53b6f200f9e95910 | [
"BSD-3-Clause"
] | 4 | 2018-10-29T17:46:09.000Z | 2021-12-16T08:57:48.000Z | leprikon/models/useragreement.py | leprikon-cz/leprikon | b1bec36fb6bcf0220bffccca53b6f200f9e95910 | [
"BSD-3-Clause"
] | 68 | 2016-07-11T07:48:54.000Z | 2022-03-18T01:32:06.000Z | leprikon/models/useragreement.py | leprikon-cz/leprikon | b1bec36fb6bcf0220bffccca53b6f200f9e95910 | [
"BSD-3-Clause"
] | 2 | 2016-07-12T20:39:53.000Z | 2020-10-10T03:14:42.000Z | from django.db import models
from django.utils.translation import ugettext_lazy as _
from ..conf import settings
| 27.5 | 89 | 0.706818 |
ac3349d8016af9897cd5989665c953a2b04c0963 | 2,076 | py | Python | fair-api-datasets-update.py | RossBarnie/fair-api-beta-utilities | 4642d2d0cf5f601c55b343e788f79824acf227ee | [
"MIT"
] | 3 | 2020-09-04T08:21:09.000Z | 2021-03-04T00:19:57.000Z | fair-api-datasets-update.py | RossBarnie/fair-api-beta-utilities | 4642d2d0cf5f601c55b343e788f79824acf227ee | [
"MIT"
] | 9 | 2020-09-02T11:23:04.000Z | 2021-09-27T15:12:56.000Z | fair-api-datasets-update.py | RossBarnie/fair-api-beta-utilities | 4642d2d0cf5f601c55b343e788f79824acf227ee | [
"MIT"
] | 2 | 2021-03-25T16:48:53.000Z | 2021-05-19T10:00:16.000Z | import json
import sys
import os
import requests
from datasets.diff_helper import DiffHelper
from common.constants import BASE_HEADERS, FAIR_API_ENDPOINT, SSL_VERIFY, FAIR_URL, DRY_RUN
# Script must be run with at least 1 argument
if len(sys.argv) < 2:
print(f'Usage: {sys.argv[0]} <path to dataset definition json file> <--dry-run>')
exit(1)
# First argument must be a path to a file
definition_file = sys.argv[1]
if not os.path.isfile(definition_file):
print(f'Provided path "{definition_file}" does not seem to be a file, ensure the path is correct and try again')
exit(1)
with open(definition_file) as fh:
payload=fh.read()
data=json.loads(payload)
patch_request(data)
| 34.032787 | 121 | 0.67341 |
ac343909a3ad8704870bfb000e6780fc9350783a | 18,207 | py | Python | scripts/convert_excel_files_to_json.py | sheecegardezi/sifra | a7af896159ea7db231e23aeab187b7493887a080 | [
"Apache-2.0"
] | null | null | null | scripts/convert_excel_files_to_json.py | sheecegardezi/sifra | a7af896159ea7db231e23aeab187b7493887a080 | [
"Apache-2.0"
] | null | null | null | scripts/convert_excel_files_to_json.py | sheecegardezi/sifra | a7af896159ea7db231e23aeab187b7493887a080 | [
"Apache-2.0"
] | null | null | null | import os
import json
from collections import OrderedDict
import pandas as pd
# replace " with ' if the occur within brackets
# eg {"key":"["Key":"value"]"} => {"key":"['Key':'value']"}
if __name__ == "__main__":
main()
| 43.766827 | 79 | 0.543417 |
ac36030e8e89e493e372409b81e3e6f1ab9b3e03 | 1,173 | py | Python | QUANTAXIS/example/DataFetcher.py | cyy1229/QUANTAXIS | 320eff53dfa2cde8032a5e066499f4da0b5064a2 | [
"MIT"
] | null | null | null | QUANTAXIS/example/DataFetcher.py | cyy1229/QUANTAXIS | 320eff53dfa2cde8032a5e066499f4da0b5064a2 | [
"MIT"
] | null | null | null | QUANTAXIS/example/DataFetcher.py | cyy1229/QUANTAXIS | 320eff53dfa2cde8032a5e066499f4da0b5064a2 | [
"MIT"
] | null | null | null | from QUANTAXIS import QA_fetch_stock_day_adv, QA_fetch_stock_list_adv, QA_fetch_stock_day_full_adv, QA_Setting
import pandas as pd
QASETTING = QA_Setting()
DATABASE = QASETTING.client.quantaxis
# def getAllTradeCal():
# return pd.DataFrame(DATABASE.trade_date.find({"is_open": 1}))
if __name__ == '__main__':
print(MongoDataLoader().load_tushare_stock_day(end='20210630'))
| 27.27907 | 110 | 0.627451 |
ac36a8837c04b4a05f6772531a3bc4400bce36fc | 2,934 | py | Python | temp-uplift-submission/sparkml/adult_spark.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
] | 4 | 2021-12-10T17:20:26.000Z | 2021-12-27T14:38:40.000Z | temp-uplift-submission/sparkml/adult_spark.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
] | null | null | null | temp-uplift-submission/sparkml/adult_spark.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
] | null | null | null | import sys
import time
import numpy as np
import scipy as sp
import pandas as pd
import math
import warnings
from pyspark.sql import SparkSession
from pyspark import StorageLevel
from pyspark.ml.feature import Normalizer
from pyspark.ml.feature import StringIndexer, StandardScaler
from pyspark.ml.feature import OneHotEncoder, VectorAssembler
from pyspark.ml.feature import QuantileDiscretizer
from pyspark.sql.types import StringType, DoubleType
from pyspark.ml import Pipeline
spark = SparkSession\
.builder\
.master("local[*]")\
.config("spark.driver.memory", "110g")\
.config("spark.kryoserializer.buffer.max", "1024m")\
.appName("CriteoBySparkML")\
.getOrCreate()
spark.sparkContext.setLogLevel('ERROR')
X = readNprep(spark)
# The 1st call may read the dataset. Don't count the 1st call
t1 = time.time()
X_prep1 = transform(X)
print("Elapsed time for transformations via sparkml = %s sec" % (time.time() - t1))
# Average of three calls
totTime = 0
t1 = time.time()
X_prep2 = transform(X)
totTime = totTime + ((time.time() - t1) * 1000) #millisec
print("Elapsed time for transformations via sparkml = %s sec" % (time.time() - t1))
t1 = time.time()
X_prep3 = transform(X)
totTime = totTime + ((time.time() - t1) * 1000) #millisec
print("Elapsed time for transformations via sparkml = %s sec" % (time.time() - t1))
t1 = time.time()
X_prep4 = transform(X)
totTime = totTime + ((time.time() - t1) * 1000) #millisec
print("Elapsed time for transformations via sparkml = %s sec" % (time.time() - t1))
print("Average elapsed time = %s millisec" % (totTime/3))
| 36.675 | 106 | 0.720177 |
ac373cedd278bb0dc68d30fdfd250e87a1a074b4 | 800 | py | Python | populate/entities/SmellEmission.py | Odeuropa/knowledge-graph | c9c10cb984e79760b202325fe4314c8706de26fa | [
"Apache-2.0"
] | null | null | null | populate/entities/SmellEmission.py | Odeuropa/knowledge-graph | c9c10cb984e79760b202325fe4314c8706de26fa | [
"Apache-2.0"
] | null | null | null | populate/entities/SmellEmission.py | Odeuropa/knowledge-graph | c9c10cb984e79760b202325fe4314c8706de26fa | [
"Apache-2.0"
] | null | null | null | import re
from .Entity import Entity
from .SmellSource import SmellSource
from .ontologies import ODEUROPA
| 30.769231 | 88 | 0.57125 |
ac37bd5c4fb9186e20a9020b619a9482fbce7644 | 489 | py | Python | constants.py | duongntbk/ToyMachineLearning | 0a9cb02ddaebc3e4064d7ecb04bc654a08f4b1ee | [
"MIT"
] | null | null | null | constants.py | duongntbk/ToyMachineLearning | 0a9cb02ddaebc3e4064d7ecb04bc654a08f4b1ee | [
"MIT"
] | null | null | null | constants.py | duongntbk/ToyMachineLearning | 0a9cb02ddaebc3e4064d7ecb04bc654a08f4b1ee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
MNIST_DATASET_PATH = 'raw_data/mnist.pkl.gz'
TEST_FOLDER = 'test/'
TRAIN_FOLDER = 'train/'
MODEL_FILE_PATH = 'model/recognizer.pickle'
LABEL_ENCODER_FILE_PATH = 'model/label_encoder.pickle'
# Manual
DEMO_HELP_MSG = '\n' + \
'Input parameter is incorrect\n' + \
'Display help: \'python demo.py -h\''
TRAINER_HELP_MSG = '\n' + \
'Input parameter is incorrect\n' + \
'Display help: \'python extractor.py -h\''
| 30.5625 | 58 | 0.619632 |
ac3856d097cb7252fe1c11137bb19dafaaf4bd31 | 435 | py | Python | python3/cut_the_sticks.py | ahavrylyuk/hackerrank | a8be83c8166a05f6f91bdd86cca3d4c544428b4b | [
"MIT"
] | null | null | null | python3/cut_the_sticks.py | ahavrylyuk/hackerrank | a8be83c8166a05f6f91bdd86cca3d4c544428b4b | [
"MIT"
] | null | null | null | python3/cut_the_sticks.py | ahavrylyuk/hackerrank | a8be83c8166a05f6f91bdd86cca3d4c544428b4b | [
"MIT"
] | null | null | null | #! /usr/bin/env python
if __name__ == '__main__':
_ = input()
value = map(int, input().split(' '))
res = cut_the_sticks(sorted(value, reverse=True))
for v in res:
print(v)
| 20.714286 | 53 | 0.508046 |
ac3afa06674bd280c370406d538274f60a4acaa0 | 2,330 | py | Python | ex01_search.py | tbaptista/pacman | f30213e1104b794996204fa0a4ac90c583f8a2e4 | [
"Apache-2.0"
] | 1 | 2019-01-10T05:37:10.000Z | 2019-01-10T05:37:10.000Z | ex01_search.py | tbaptista/pacman | f30213e1104b794996204fa0a4ac90c583f8a2e4 | [
"Apache-2.0"
] | null | null | null | ex01_search.py | tbaptista/pacman | f30213e1104b794996204fa0a4ac90c583f8a2e4 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Tiago Baptista
# All rights reserved.
# -----------------------------------------------------------------------------
"""
Path-finding exercise using the pac-man game. Using the mouse, choose a target
location for the pac-man agent. Given this target the agent should compute the
path to that location.
"""
from __future__ import division
__docformat__ = 'restructuredtext'
__author__ = 'Tiago Baptista'
__version__ = '1.0'
import pacman
import pyafai
from pyglet.window import mouse
class SearchDisplay(pacman.PacmanDisplay):
if __name__ == '__main__':
setup()
pyafai.run() | 28.072289 | 82 | 0.584979 |
ac3bc1d1f68c8f2adb204c9c5f0374180c3d4c1e | 3,867 | py | Python | site_search/tests/test_permissions.py | AccentDesign/djangocms-site-search | 90ed1e5ab5fe96be8f1a4a74994f18164a7363aa | [
"MIT"
] | 1 | 2019-06-06T12:56:30.000Z | 2019-06-06T12:56:30.000Z | site_search/tests/test_permissions.py | AccentDesign/djangocms-site-search | 90ed1e5ab5fe96be8f1a4a74994f18164a7363aa | [
"MIT"
] | null | null | null | site_search/tests/test_permissions.py | AccentDesign/djangocms-site-search | 90ed1e5ab5fe96be8f1a4a74994f18164a7363aa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.test import TestCase
from cms.api import assign_user_to_page, create_page
from ..helpers import get_request
from ..views import SearchResultsView
| 40.28125 | 75 | 0.58495 |
ac3bc471644b6e8784c772369a7f273ad6a22e32 | 12,179 | py | Python | FSO_Comm_Demo.py | MansourM61/FSO-Comm-GnuRadio-Module | 44bfefaa95fb9af19f9817029f663892b0f84417 | [
"MIT"
] | 6 | 2019-10-31T10:02:49.000Z | 2022-03-03T21:42:19.000Z | FSO_Comm_Demo.py | MansourM61/FSO-Comm-GnuRadio-Module | 44bfefaa95fb9af19f9817029f663892b0f84417 | [
"MIT"
] | null | null | null | FSO_Comm_Demo.py | MansourM61/FSO-Comm-GnuRadio-Module | 44bfefaa95fb9af19f9817029f663892b0f84417 | [
"MIT"
] | 2 | 2022-01-03T07:59:44.000Z | 2022-01-30T11:25:21.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: FSO Communication Block Modules Test
# Author: M Mansour Abadi
# Description: Modules from FSO_Comm are used in a simple FSO comunication link including various channel effects.
# Generated: Tue Oct 29 17:50:38 2019
##################################################
from distutils.version import StrictVersion
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt5 import Qt
from PyQt5 import Qt, QtCore
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import FSO_Comm
import numpy
import sip
import sys
from gnuradio import qtgui
if __name__ == '__main__':
main()
| 36.247024 | 131 | 0.642417 |
ac3ca086610d59e10a3cca75b54708abf363a598 | 6,194 | py | Python | Script/WDI_writer_functions.py | Riemer1818/Cattlelyst_wikibase_2021 | 1f3e3199391844206e6621e63756461bf984bf36 | [
"MIT"
] | null | null | null | Script/WDI_writer_functions.py | Riemer1818/Cattlelyst_wikibase_2021 | 1f3e3199391844206e6621e63756461bf984bf36 | [
"MIT"
] | null | null | null | Script/WDI_writer_functions.py | Riemer1818/Cattlelyst_wikibase_2021 | 1f3e3199391844206e6621e63756461bf984bf36 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from wikidataintegrator import wdi_core, wdi_login
import logging
import pickle
__author__ = "Riemer van der Vliet"
__copyright__ = "Copyright 2020, Laboratory of Systems and Synthetic Biology"
__credits__ = ["Riemer van der Vliet", "Jasper Koehorst"]
__license__ = "GPL"
__version__ = "2.0.0"
__maintainer__ = "Riemer van der Vliet"
__email__ = "riemer.vandervliet@wur.nl"
__status__ = "Development"
"""
functions used by WDI writer file
"""
def get_properties(endpoint_url: str) -> dict:
"""Finds properties on the endpoint url and returns the IDs
:param endpoint_url: Wikibase SPARQL endpoint
:return: Property lookup dictionary of key property string and value property ID of Wikibase
"""
# placeholder for dictionary
property_lookup = {}
# creates query
query = """SELECT ?property ?label WHERE {
?property a wikibase:Property .
?property rdfs:label ?label .
FILTER (LANG(?label) = "en" )}
"""
# gets results
results = wdi_core.WDItemEngine.execute_sparql_query(query=query, endpoint=endpoint_url)
# iterates iterates data
for result in results["results"]["bindings"]:
label = result["label"]["value"].split("/")[-1]
property_lookup[label] = result["property"]["value"].split("/")[-1]
return property_lookup
def get_items(items: list, endpoint_url: str) -> dict:
"""Gets the IDs for each of the items in the item list. First tries to find it in the pickle file.
:param items: list of items of which IDs need to be traced
:param endpoint_url: Wikibase SPARQL endpoint
:return: item_lookup dictionary of key item string and value item ID of Wikibase
"""
if os.path.isfile("../Parts/item_lookup.pickle"):
with open('../Parts/item_lookup.pickle', 'rb') as handle:
item_lookup = pickle.load(handle)
else:
item_lookup = {}
for item_x in items:
logging.info("Retrieving item " + item_x)
if item_x in item_lookup: continue
item_lookup[item_x] = get_item_by_name(item_x, endpoint_url)
with open('../Parts/item_lookup.pickle', 'wb') as handle:
pickle.dump(item_lookup, handle, protocol=pickle.DEFAULT_PROTOCOL)
return item_lookup
def get_item_by_name(label: str, endpoint_url: str) -> str or None:
"""Finds items on the endpoint url and returns the IDs
:param label: Item label
:param endpoint_url: Wikibase SPARQL endpoint
:return: string of Wikibase ID or None
"""
# set query
query = """
SELECT DISTINCT ?item WHERE {
VALUES ?label { \"""" + label + """\"@en }
?item rdfs:label ?label .
}"""
# get results
try:
results = wdi_core.WDItemEngine.execute_sparql_query(query, endpoint=endpoint_url)
except:
print("Query failed: ")
raise Exception("Query failed")
# parse and return results
for result in results["results"]["bindings"]:
return result["item"]["value"].split("/")[-1]
return None
def prepare(items: list, endpoint_url: str) -> list:
"""Returns a list of lists of items ID and property IDs
:param items: list of items of which IDs need to be traced
:param endpoint_url: Wikibase SPARQL endpoint
:return: list of item dictionary and of property dictionary
"""
return [get_items(items, endpoint_url), get_properties(endpoint_url)]
def get_properties(endpoint_url: str) -> dict:
"""Finds properties on the endpoint url and returns the IDs
:param endpoint_url: Wikibase SPARQL endpoint
:return: property_lookup dictionary of key property string and value property ID of Wikibase
"""
# placeholder for dictionary
property_lookup = {}
# set query
query = """SELECT ?property ?label WHERE {
?property a wikibase:Property .
?property rdfs:label ?label .
FILTER (LANG(?label) = "en" )}
"""
# get results
results = wdi_core.WDItemEngine.execute_sparql_query(query=query, endpoint=endpoint_url)
# parse results
for result in results["results"]["bindings"]:
label = result["label"]["value"].split("/")[-1]
property_lookup[label] = result["property"]["value"].split("/")[-1]
return property_lookup
def get_items(items: list, endpoint_url: str) -> dict:
"""Gets the IDs for each of the items in the item list. First tries to find it in the pickle file.
:param items: list of items of which IDs need to be traced
:param endpoint_url: Wikibase SPARQL endpoint
:return: item_lookup dictionary with item strings and value IDs
"""
# checks if there is a pickle file under name item_lookup.pickle,
# otherwise creates dictionary placeholder
if os.path.isfile("../Parts/item_lookup.pickle"):
with open('../Parts/item_lookup.pickle', 'rb') as handle:
item_lookup = pickle.load(handle)
else:
item_lookup = {}
# iterates items and gets the item ID by name
for item_x in items:
logging.info("Retrieving item " + item_x)
if item_x in item_lookup: continue
item_lookup[item_x] = get_item_by_name(item_x, endpoint_url)
# dumps object as pickle file
with open('../Parts/item_lookup.pickle', 'wb') as handle:
pickle.dump(item_lookup, handle, protocol=pickle.DEFAULT_PROTOCOL)
return item_lookup
def get_item_by_name(label: str, endpoint_url: str) -> str or bool:
"""Finds items on the endpoint url and returns the IDs
:param label: Item label
:param endpoint_url: Wikibase SPARQL endpoint
:return: result string of wikibase ID or None
"""
# sets query
query = """
SELECT DISTINCT ?item WHERE {
VALUES ?label { \"""" + label + """\"@en }
?item rdfs:label ?label .
}"""
# gets results
try:
results = wdi_core.WDItemEngine.execute_sparql_query(query, endpoint=endpoint_url)
except:
print("Query failed: ")
raise Exception("Query failed")
# iterates results
for result in results["results"]["bindings"]:
return result["item"]["value"].split("/")[-1]
return None
| 30.512315 | 102 | 0.66516 |
ac3d7ed801511882792da9a883bfb2bc98512cd5 | 3,449 | py | Python | kitt/callbacks.py | David-Ciz/kitt | 44a6faf7fcf1bc6f3db082debeab0ef4dfcc9c4a | [
"MIT"
] | null | null | null | kitt/callbacks.py | David-Ciz/kitt | 44a6faf7fcf1bc6f3db082debeab0ef4dfcc9c4a | [
"MIT"
] | null | null | null | kitt/callbacks.py | David-Ciz/kitt | 44a6faf7fcf1bc6f3db082debeab0ef4dfcc9c4a | [
"MIT"
] | null | null | null | import heapq
import logging
import os
from tensorflow.keras.callbacks import Callback
| 35.556701 | 95 | 0.612351 |
ac3e07e4760f9a790f20faf0e15bb7e637cec1a9 | 3,851 | py | Python | migrations/versions/2018_03_30_1f4385bac8f9_change_activity_picture_paths_to_file_.py | tch1bo/viaduct | bfd37b0a8408b2dd66fb01138163b80ce97699ff | [
"MIT"
] | 11 | 2015-04-23T21:57:56.000Z | 2019-04-28T12:48:58.000Z | migrations/versions/2018_03_30_1f4385bac8f9_change_activity_picture_paths_to_file_.py | tch1bo/viaduct | bfd37b0a8408b2dd66fb01138163b80ce97699ff | [
"MIT"
] | 1 | 2016-10-05T14:10:58.000Z | 2016-10-05T14:12:23.000Z | migrations/versions/2018_03_30_1f4385bac8f9_change_activity_picture_paths_to_file_.py | tch1bo/viaduct | bfd37b0a8408b2dd66fb01138163b80ce97699ff | [
"MIT"
] | 3 | 2016-10-05T14:00:42.000Z | 2019-01-16T14:33:43.000Z | """Change activity picture paths to file ids.
Revision ID: 1f4385bac8f9
Revises: c8cd32037cde
Create Date: 2018-03-30 16:01:56.532893
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
import os
import re
from app import hashfs
from app.models.base_model import BaseEntity
from app.enums import FileCategory
# revision identifiers, used by Alembic.
revision = '1f4385bac8f9'
down_revision = 'c8cd32037cde'
Base = declarative_base()
db = sa
db.Model = Base
db.relationship = relationship
filename_regex = re.compile(r'(.+)\.([^\s.]+)')
def migrate_files():
picture_dir = 'app/static/activity_pictures/'
activities = db.session.query(IntermediateActivity).all()
total = len(activities)
stepsize = 10
for i, activity in enumerate(activities):
if (i + 1) % stepsize == 0:
print("{}/{}".format(i + 1, total))
if activity.picture is None:
continue
path = os.path.join(picture_dir, activity.picture)
if not os.path.isfile(path):
print("File does not exist:", path)
activity.picture_file = None
continue
with open(path, 'rb') as file_reader:
address = hashfs.put(file_reader)
f = File()
f.category = FileCategory.ACTIVITY_PICTURE
f.hash = address.id
m = filename_regex.match(activity.picture)
if m is not None:
f.extension = m.group(2).lower()
else:
f.extension = ""
activity.picture_file = f
db.session.add(f)
db.session.commit()
def create_session():
connection = op.get_bind()
session_maker = sa.orm.sessionmaker()
session = session_maker(bind=connection)
db.session = session
def upgrade():
create_session()
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('activity', sa.Column('picture_file_id', sa.Integer(), nullable=True))
op.create_foreign_key(op.f('fk_activity_picture_file_id_file'), 'activity', 'file', ['picture_file_id'], ['id'])
# Change ACTIVITY_PICTURES -> ACTIVITY_PICTURE
op.alter_column('file', 'category',
existing_type=mysql.ENUM('UPLOADS', 'EXAMINATION', 'ACTIVITY_PICTURE', 'ALV_DOCUMENT', 'COMPANY_LOGO', 'USER_AVATAR'),
nullable=False)
try:
migrate_files()
except:
op.drop_constraint(op.f('fk_activity_picture_file_id_file'), 'activity', type_='foreignkey')
op.drop_column('activity', 'picture_file_id')
raise
op.drop_column('activity', 'picture')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
raise Exception("Undoing this migration is impossible")
# op.add_column('activity', sa.Column('picture', mysql.VARCHAR(length=255), nullable=True))
# op.drop_constraint(op.f('fk_activity_picture_file_id_file'), 'activity', type_='foreignkey')
# op.drop_column('activity', 'picture_file_id')
# ### end Alembic commands ###
# vim: ft=python
| 27.905797 | 138 | 0.65879 |
ac3e3b4c92edc26c0c53ec7942b32aed32b778c8 | 1,165 | py | Python | fapistrano/signal.py | liwushuo/fapistrano | 2a31aad01a04d7ea9108dc6f95aee9a53290459f | [
"MIT"
] | 18 | 2016-03-25T09:40:20.000Z | 2022-02-23T02:09:50.000Z | fapistrano/signal.py | liwushuo/fapistrano | 2a31aad01a04d7ea9108dc6f95aee9a53290459f | [
"MIT"
] | null | null | null | fapistrano/signal.py | liwushuo/fapistrano | 2a31aad01a04d7ea9108dc6f95aee9a53290459f | [
"MIT"
] | 3 | 2016-03-22T07:41:15.000Z | 2021-02-25T04:27:53.000Z | # -*- coding: utf-8 -*-
from functools import wraps
from .utils import run_function
namespace = Namespace()
if __name__ == '__main__':
register('hello', handle_hello)
emit('hello', keyword='world')
| 22.843137 | 62 | 0.611159 |
ac3e6405e38364554897a98ee0697c92ce3335ab | 463 | py | Python | Pyon exercicios/Exercicios/011.py | alefbispo/Exercicios-do-curso-de-Python | 16cd569ab16542135b834ac8d0cfb0ae84836d53 | [
"MIT"
] | null | null | null | Pyon exercicios/Exercicios/011.py | alefbispo/Exercicios-do-curso-de-Python | 16cd569ab16542135b834ac8d0cfb0ae84836d53 | [
"MIT"
] | null | null | null | Pyon exercicios/Exercicios/011.py | alefbispo/Exercicios-do-curso-de-Python | 16cd569ab16542135b834ac8d0cfb0ae84836d53 | [
"MIT"
] | null | null | null | #pedir a altura e a largura de uma parede e dizer quantos litros de tinta vai gastar sabendo que cada litro de tinta pinta 2m2
# Ask for the wall dimensions and report how many litres of paint are
# needed, given that one litre of paint covers 2 m^2.
altura = float(input('Qual a altura da parede? '))
largura = float(input('Qual a largura da parede? '))
area = largura * altura
tinta = area / 2
print('Voce tem a area de {}x{} e sua parede tem a area de: {}M \n voce vai precisar de {:.2f} litros de tinta pra pintar a parede!!'.format(altura, largura, area, tinta))
| 42.090909 | 172 | 0.708423 |
ac3fd84e905bc1166a7d4dcb6bd2d1a33b2c8e12 | 148 | py | Python | textutils/pages/views.py | sohanur-shanto/Django-Play-With-Text | e81177c22e409a584daebd8a826e2aaee14fb59c | [
"BSD-3-Clause-Attribution"
] | 2 | 2021-04-09T12:54:26.000Z | 2021-04-10T07:36:22.000Z | textutils/pages/views.py | sohanur-shanto/Django-Play-With-Text | e81177c22e409a584daebd8a826e2aaee14fb59c | [
"BSD-3-Clause-Attribution"
] | null | null | null | textutils/pages/views.py | sohanur-shanto/Django-Play-With-Text | e81177c22e409a584daebd8a826e2aaee14fb59c | [
"BSD-3-Clause-Attribution"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render
| 24.666667 | 47 | 0.797297 |
ac40152e1997a785e6185c250577f8e42a50d310 | 1,155 | py | Python | main.py | fthome/repetier_ui | 49be402ac479f5e066ae27eaa2a8a02473bb8045 | [
"MIT"
] | null | null | null | main.py | fthome/repetier_ui | 49be402ac479f5e066ae27eaa2a8a02473bb8045 | [
"MIT"
] | null | null | null | main.py | fthome/repetier_ui | 49be402ac479f5e066ae27eaa2a8a02473bb8045 | [
"MIT"
] | null | null | null | # -*-coding:Utf-8 -*
from repetier_ui import *
import time
import set_ifttt
from FUTIL.my_logging import *
my_logging(console_level = DEBUG, logfile_level = INFO)
HD = repetier_printer (repetier_api(api_key='142a8eed-7d86-4bea-96bc-cfcf5b3ca742'),'HD')
sys.path.insert(0,'/home/pi')
import iftt_key
ifttt0 = set_ifttt.ifttt(iftt_key.key)
UI = repetier_ui(debug=False, wake_up = wake_up ) #debug = True : pas d'envoie des gcode
UI.add_action(22,repetier_file_action("extract.gcode",HD))
UI.add_action(27,repetier_file_action("extrude_100_vite.gcode",HD))
UI.add_action(17,repetier_file_action("extrude_50.gcode",HD))
UI.add_action(10,repetier_file_action("goto_z_max.gcode",HD, only_if_has_axis = True))
UI.add_action(19,repetier_file_action("stop_all.gcode",HD))
UI.add_action(18,repetier_file_action("pause.gcode", HD, only_if_printing = True)) # Detection de prsence fil
UI.add_successive_actions(26,repetier_file_action("pause.gcode",HD), repetier_action_action("continueJob",HD))
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
print('interrupted!')
finally:
UI.close()
| 31.216216 | 110 | 0.768831 |
ac40231ed6f638e7905b68291dba05edd0eca13c | 5,195 | py | Python | cows/model/wms.py | cedadev/cows | db9ed729c886b271ce85355b97e39243081e8246 | [
"BSD-2-Clause"
] | 2 | 2018-05-09T16:12:43.000Z | 2018-08-21T17:10:22.000Z | cows/model/wms.py | cedadev/cows | db9ed729c886b271ce85355b97e39243081e8246 | [
"BSD-2-Clause"
] | null | null | null | cows/model/wms.py | cedadev/cows | db9ed729c886b271ce85355b97e39243081e8246 | [
"BSD-2-Clause"
] | null | null | null | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
# Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
Extends cows classes where necessary for implementing WMS 1.3.0
:author: Stephen Pascoe
"""
from cows.model.contents import DatasetSummary
from cows.model.domain import Domain
#
#!TODO: Other objects referenced by WmsDatasetSummary
# | 30.739645 | 95 | 0.663523 |
ac42226253879560e3b7ec3fc4e10b477bc7a82f | 2,942 | py | Python | utilities/prev_projects/DATAFIT/VIEW/WINDOWSVIEW/GraphView.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | utilities/prev_projects/DATAFIT/VIEW/WINDOWSVIEW/GraphView.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | utilities/prev_projects/DATAFIT/VIEW/WINDOWSVIEW/GraphView.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | from PyQt5.Qt import QMainWindow
from prev_projects.DATAFIT.UTILITY.OBSERVER import GraphObserver
from prev_projects.DATAFIT.UTILITY.META import Meta
from prev_projects.DATAFIT.VIEW.UI import Ui_GraphWindow
| 40.30137 | 98 | 0.71516 |
ac435b3434ef327610b43e5ed8a12c8f4b36a43d | 1,205 | py | Python | src/normalizer.py | lucassouzamatos/water-potability-ai | 6714e894b4575a58e35cc6e1cac699f0f5f1e9bc | [
"MIT"
] | null | null | null | src/normalizer.py | lucassouzamatos/water-potability-ai | 6714e894b4575a58e35cc6e1cac699f0f5f1e9bc | [
"MIT"
] | null | null | null | src/normalizer.py | lucassouzamatos/water-potability-ai | 6714e894b4575a58e35cc6e1cac699f0f5f1e9bc | [
"MIT"
] | null | null | null | import pandas as pd
if __name__ == '__main__':
normalizer = Normalizer() | 40.166667 | 149 | 0.712863 |
ac44378df9c1e3bccd01c971db20ecdd0d460d5a | 541 | py | Python | CoProcessing/PythonCatalyst/Testing/Cxx/outputcheck.py | brown-ccv/paraview-scalable | 64b221a540737d2ac94a120039bd8d1e661bdc8f | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2019-09-27T08:04:34.000Z | 2019-10-16T22:30:54.000Z | CoProcessing/PythonCatalyst/Testing/Cxx/outputcheck.py | sakjain92/paraview | f3af0cd9f6750e24ad038eac573b870c88d6b7dd | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | CoProcessing/PythonCatalyst/Testing/Cxx/outputcheck.py | sakjain92/paraview | f3af0cd9f6750e24ad038eac573b870c88d6b7dd | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-03-13T03:35:01.000Z | 2021-03-13T03:35:01.000Z | import sys
if len(sys.argv) != 2:
print("command is 'python <vtk file>'")
sys.exit(1)
from paraview.simple import *
proxy = OpenDataFile(sys.argv[1])
r = proxy.GetClientSideObject()
r.Update()
g = r.GetOutput()
if g.GetNumberOfPoints() != 441 or g.GetNumberOfCells() != 800:
print('Output grid is incorrect. The number of points is %d '\
'but should be 441 and the number of cells is %d ' \
'but should be 800.' % (g.GetNumberOfPoints(), g.GetNumberOfCells()))
sys.exit(1)
else:
print("All's good!!!!")
| 25.761905 | 77 | 0.64695 |
ac455afb457dd1a64725218027c80809e20d17f1 | 3,594 | py | Python | mrl/g_models/generative_base.py | DarkMatterAI/mrl | e000c3570d4461c3054c882697cce55217ede552 | [
"MIT"
] | 4 | 2021-11-16T09:29:55.000Z | 2021-12-27T17:55:32.000Z | mrl/g_models/generative_base.py | DarkMatterAI/mrl | e000c3570d4461c3054c882697cce55217ede552 | [
"MIT"
] | null | null | null | mrl/g_models/generative_base.py | DarkMatterAI/mrl | e000c3570d4461c3054c882697cce55217ede552 | [
"MIT"
] | 3 | 2021-11-16T09:41:41.000Z | 2021-12-27T17:55:33.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09_generative_models.generative_base.ipynb (unless otherwise specified).
__all__ = ['GenerativeModel', 'beam_search']
# Cell
from ..imports import *
from ..torch_imports import *
from ..torch_core import *
from ..layers import *
# Cell
# Cell
def beam_search(model, seed_ints, k, beam_size, sl, temperature, pad_idx=None):
'''
beam_search - perform beam search using `model`
Inputs:
- `model nn.Module`: model
- `seed_ints torch.Longtensor`: seed sequence
- `k int`: top k beam sampling
- `beam_size int`: maximum number of beams to retain
- `sl int`: max sequence length
- `temperature float`: sample temperature
- `pad_idx Optional[int]`: pad index if applicable
'''
# currently only works for LSTM_LM. TODO: work for all generative models
current_device = next(model.parameters()).device
if seed_ints.ndim==1:
seed_ints = seed_ints.unsqueeze(0)
preds = seed_ints.repeat(k,1)
preds = to_device(preds, current_device)
idxs = preds[:,-1].unsqueeze(-1)
lps = idxs.new_zeros((k, 1)).float()
with torch.no_grad():
for i in range(sl):
x, hiddens, encoded = model._forward(idxs, hiddens)
x.div_(temperature)
log_probs = F.log_softmax(x, -1)
values, indices = log_probs.topk(k, dim=-1)
lps = torch.cat([lps.unsqueeze(-1).repeat(1,1,values.shape[-1]), -values], 1)
current_sl = lps.shape[1]
lps = lps.permute(0,2,1).reshape(-1,current_sl)
preds = torch.cat([preds[:,None].expand(preds.size(0), k , preds.size(1)),
indices.squeeze(1)[:,:,None].expand(preds.size(0), k, 1),], dim=2)
preds = preds.view(-1, preds.size(2))
scores = lps.sum(-1)
indices_idx = torch.arange(0,preds.size(0))[:,None].expand(preds.size(0), k).contiguous().view(-1)
sort_idx = scores.argsort()[:beam_size]
preds = preds[sort_idx]
lps = lps[sort_idx]
idxs = preds[:,-1].unsqueeze(-1)
hiddens = [(i[0][:, indices_idx[sort_idx], :],
i[1][:, indices_idx[sort_idx], :]) for i in hiddens]
if pad_idx is not None:
if (preds[:,-1]==pad_idx).all():
break
return preds, -lps | 28.299213 | 120 | 0.598219 |
ac45e6abbbb88f1ae1939bcf71db99437c006d19 | 154 | py | Python | desafios/des003/des003_p03.py | brenoedl0/python | 92ee4ea141584e0bd140449c093f871c2140b1a5 | [
"MIT"
] | null | null | null | desafios/des003/des003_p03.py | brenoedl0/python | 92ee4ea141584e0bd140449c093f871c2140b1a5 | [
"MIT"
] | null | null | null | desafios/des003/des003_p03.py | brenoedl0/python | 92ee4ea141584e0bd140449c093f871c2140b1a5 | [
"MIT"
] | null | null | null | nota1 = float(input('nota 1: '))
nota2 = float(input('nota 2: '))
media = (nota1 + nota2)/2
print('A media entre a nota 1 e a nota 2 {}'.format(media))
| 30.8 | 61 | 0.62987 |
ac460ebb2a0293670e0c132534dbc9da8b9efb13 | 5,588 | py | Python | rio_tiler/mosaic/methods/defaults.py | vincentsarago/rio-tiler | 21022a0766009a64acf0038dc6adae33d9831a41 | [
"BSD-3-Clause"
] | 77 | 2017-10-12T18:17:14.000Z | 2019-01-17T15:39:24.000Z | rio_tiler/mosaic/methods/defaults.py | vincentsarago/rio-tiler | 21022a0766009a64acf0038dc6adae33d9831a41 | [
"BSD-3-Clause"
] | 40 | 2017-10-17T08:31:51.000Z | 2019-01-11T22:00:44.000Z | rio_tiler/mosaic/methods/defaults.py | vincentsarago/rio-tiler | 21022a0766009a64acf0038dc6adae33d9831a41 | [
"BSD-3-Clause"
] | 23 | 2017-10-13T21:41:08.000Z | 2019-01-09T06:08:27.000Z | """rio_tiler.mosaic.methods.defaults: default mosaic filling methods."""
import numpy
from .base import MosaicMethodBase
| 29.256545 | 86 | 0.573193 |
ac4628419c7ff8fb2c36d8b816ed31e520537c50 | 13,442 | py | Python | apps/render_data_.py | ckxz/PIFu | d1cf528652ba538368ec114ddafcbea5c73d7e3d | [
"MIT"
] | null | null | null | apps/render_data_.py | ckxz/PIFu | d1cf528652ba538368ec114ddafcbea5c73d7e3d | [
"MIT"
] | null | null | null | apps/render_data_.py | ckxz/PIFu | d1cf528652ba538368ec114ddafcbea5c73d7e3d | [
"MIT"
] | null | null | null | import os, sys
import random
import argparse
from pathlib import Path
from tqdm import tqdm
import cv2
import math
import pyexr
import shutil
import numpy as np
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from lib.renderer.camera import Camera
from lib.renderer.mesh import load_obj_mesh, compute_tangent, compute_normal, load_obj_mesh_mtl
# from data.config import raw_dataset, render_dataset, archive_dataset, model_list, zip_path
# RUN
#wtight_bust = [x[:-1] for x in open('/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/decimated_obj-dataset/watertight_BUSTS.txt', 'r').readlines() if '.obj' in x] # Local
#wtight_statue = [x[:-1] for x in open('/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/decimated_obj-dataset/watertight_STATUES.txt', 'r').readlines() if '.obj' in x] # Local
wtight_bust = [x[:-1] for x in open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/decimated_reoriented_vt/watertight_BUST.txt', 'r').readlines() if '.obj' in x] # Camber
wtight_statue = [x[:-1] for x in open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/decimated_reoriented_vt/watertight_STATUE.txt', 'r').readlines() if '.obj' in x] # Camber
#file_src = '/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/decimated_obj-dataset_vt' # Local
file_src = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/decimated_reoriented_vt' # Camber
#prep_src = '/Volumes/CKXZ 1/@City/363, FP/AISculpture/PIFuHD/DS-Related/preprocessd_data/prt_util' # Local
prep_src = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/prt_util_reoriented' # Camber
#dst = '/Volumes/CKXZ 1/@City/363, FP/AISculpture/PIFuHD/DS-Related/preprocessd_data/output_tryitlocal' # Local
dst = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/pp_output_reoriented' # Camber
#env_sh = '/Users/ckxz/Desktop/@City/363, FP/PIFu/env_sh.npy' # Local
env_sh = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/PIFu/env_sh.npy'
folders = sorted([x for x in os.listdir(file_src) if not x.startswith('.') and not x.endswith(('.txt', '.zip'))], key=int)
shs = np.load(env_sh)
from lib.renderer.gl.init_gl import initialize_GL_context
initialize_GL_context(width=512, height=512, egl=True)
from lib.renderer.gl.prt_render import PRTRender
rndr = PRTRender(width=512, height=512, ms_rate=1, egl=True)
rndr_uv = PRTRender(width=512, height=512, uv_mode=True, egl=True)
#ccount = 0
#fcount = 0
#ftcount = 0
for folder in folders:
#if not os.path.exists(os.path.join(dst, folder)):
# os.mkdir(os.path.join(dst, folder))
reps = [x for x in os.listdir(f'{file_src}/{folder}') if not x.startswith('.')]
for rep in reps:
if not os.path.exists(os.path.join(dst, rep)):
os.mkdir(os.path.join(dst, rep))
files = [x for x in os.listdir(os.path.join(file_src, folder, rep)) if not x.startswith('.') and not x.endswith(('.mtl', '.png'))]
for fname in files:
if os.path.join(folder, rep, fname) not in wtight_bust and os.path.join(folder, rep, fname) not in wtight_statue:
#ccount += 1
#with open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/ccount.txt', 'w') as f:
# f.write(str(ccount))
continue
else:
#fcount += 1
#with open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/fcount.txt', 'w') as f:
# f.write(str(fcount))
objnuv_filepath = os.path.join(file_src, folder, rep, fname[:-4])
print(objnuv_filepath.split('/')[-1])
prep_filespath = os.path.join(prep_src, folder, rep, fname[:-4] + '__')
dst_path = os.path.join(dst, rep)
render_prt_ortho(dst_path, objnuv_filepath, prep_filespath, shs, rndr, rndr_uv, 512, 1, 1, pitch=[0])
#ftcount += 1
#with open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/ftcount.txt', 'w') as f:
# f.write(str(ftcount))
| 35.096606 | 185 | 0.668725 |
ac474bcb1cc36e8c400164e2a77001ca5f025265 | 498 | py | Python | venv/lib/python3.8/site-packages/webargs/__init__.py | mrunix1998/booking-flights-system | 4eab3d845c4ba6742bd550604fe69b7f101c8da4 | [
"MIT"
] | 1 | 2022-03-28T16:37:17.000Z | 2022-03-28T16:37:17.000Z | venv/venv/lib/python3.8/site-packages/webargs/__init__.py | mrunix1998/booking-flights-system | 4eab3d845c4ba6742bd550604fe69b7f101c8da4 | [
"MIT"
] | null | null | null | venv/venv/lib/python3.8/site-packages/webargs/__init__.py | mrunix1998/booking-flights-system | 4eab3d845c4ba6742bd550604fe69b7f101c8da4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from distutils.version import LooseVersion
from marshmallow.utils import missing
# Make marshmallow's validation functions importable from webargs
from marshmallow import validate
from webargs.core import dict2schema, ValidationError
from webargs import fields
__version__ = "5.3.2"
__version_info__ = tuple(LooseVersion(__version__).version)
__author__ = "Steven Loria"
__license__ = "MIT"
__all__ = ("dict2schema", "ValidationError", "fields", "missing", "validate")
| 27.666667 | 77 | 0.783133 |
ac483bee0ecf390755efd9546940d7a56a66bf85 | 483 | py | Python | scripts/imageio_remove_bin-script.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2021-11-25T02:14:23.000Z | 2021-11-25T02:14:23.000Z | scripts/imageio_remove_bin-script.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | null | null | null | scripts/imageio_remove_bin-script.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | null | null | null | #!C:\Users\stpny\Downloads\grasp_public-master\grasp_public-master\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'imageio==2.5.0','console_scripts','imageio_remove_bin'
__requires__ = 'imageio==2.5.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('imageio==2.5.0', 'console_scripts', 'imageio_remove_bin')()
)
| 37.153846 | 86 | 0.689441 |
ac4880579ecf2bb75288ce5118717d81f57be27a | 1,351 | py | Python | mw2fcitx/build_dict.py | outloudvi/mw2fcitx | a4fbbcd5e8068ee1f08714f0e18b46c8b289a42c | [
"Unlicense"
] | 67 | 2020-08-13T13:58:03.000Z | 2022-03-29T11:33:51.000Z | mw2fcitx/build_dict.py | outloudvi/fcitx5-pinyin-moegirl | c62d3f7d049143a4d8726f408bdd345f53ff3347 | [
"Unlicense"
] | 5 | 2020-11-16T01:48:32.000Z | 2022-02-18T08:04:32.000Z | mw2fcitx/build_dict.py | outloudvi/fcitx5-pinyin-moegirl | c62d3f7d049143a4d8726f408bdd345f53ff3347 | [
"Unlicense"
] | 3 | 2020-10-08T15:44:30.000Z | 2022-03-23T12:40:11.000Z | import logging
import sys
from .pipeline import MWFPipeline
| 39.735294 | 76 | 0.61658 |
ac495d1405722c44232ce6b138bdc896307b81e8 | 21,345 | py | Python | pythonweb/user/views.py | onwebbe/rasiberryPiWebManager | 14ff9f14f3f873457666fa1669fae715148538c9 | [
"Apache-2.0"
] | null | null | null | pythonweb/user/views.py | onwebbe/rasiberryPiWebManager | 14ff9f14f3f873457666fa1669fae715148538c9 | [
"Apache-2.0"
] | 7 | 2020-09-07T07:51:28.000Z | 2022-02-26T17:54:49.000Z | pythonweb/user/views.py | onwebbe/rasiberryPiWebManager | 14ff9f14f3f873457666fa1669fae715148538c9 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
import json
# Create your views here.
| 25.747889 | 357 | 0.469056 |
ac4bb093b09ad6b3234a1c157636387e7fbb5f98 | 3,278 | py | Python | WHI_long_term_size_distr_including_fresh_emissions_plotting.py | annahs/atmos_research | b5853c9b12e327492f8f8ba5069bca3fd2e981c8 | [
"MIT"
] | 2 | 2018-08-17T15:25:26.000Z | 2019-04-17T16:50:00.000Z | WHI_long_term_size_distr_including_fresh_emissions_plotting.py | annahs/atmos_research | b5853c9b12e327492f8f8ba5069bca3fd2e981c8 | [
"MIT"
] | null | null | null | WHI_long_term_size_distr_including_fresh_emissions_plotting.py | annahs/atmos_research | b5853c9b12e327492f8f8ba5069bca3fd2e981c8 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
import math
import traceback
import time
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/')
file = open('raw size and number distributions by air mass for 69.76nm to 220.11nm.binpickl', 'r')
distr_data = pickle.load(file)
file.close()
modified_distr_data = {}
interval_length = 5.0
fit_bins = []
for x in range (30,800,5):
fit_bins.append(x+2)
for air_mass, distribution_data in distr_data.iteritems():
print air_mass
#distribution_data.pop(70, None)
distr_bins_p = []
mass_distr_values = []
numb_distr_values = []
for bin, distr_values in distribution_data.iteritems(): #normalize
n_mass_val = distr_values[0]/(math.log(bin+interval_length)-math.log(bin)) #dM/dlog(VED)
mass_distr_values.append(n_mass_val)
n_numb_val = distr_values[1]/(math.log(bin+interval_length)-math.log(bin)) #d/dlog(VED)
numb_distr_values.append(n_numb_val)
distr_bins_p.append(bin+interval_length/2.0) #correction for our binning code recording bin starts as keys instead of midpoints
norm_mass_distr_values_p = []
for mass in mass_distr_values:
norm_mass = mass/np.max(mass_distr_values)
norm_mass_distr_values_p.append(norm_mass)
norm_mass_distr_values = np.array(norm_mass_distr_values_p)
norm_numb_distr_values_p = []
for numb in numb_distr_values:
norm_numb = numb/np.max(numb_distr_values)
norm_numb_distr_values_p.append(norm_numb)
norm_numb_distr_values = np.array(norm_numb_distr_values_p)
distr_bins = np.array(distr_bins_p)
fit_failure = False
try:
popt, pcov = curve_fit(lognorm, distr_bins, norm_numb_distr_values)
perr = np.sqrt(np.diag(pcov)) #from docs: To compute one standard deviation errors on the parameters use perr = np.sqrt(np.diag(pcov))
err_variables = [popt[0]-perr[0], popt[1]-perr[1], popt[2]-perr[2]]
except:
print 'fit_failure'
fit_failure = True
fit_y_vals = []
for bin in fit_bins:
if fit_failure == True:
fit_val = np.nan
else:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_y_vals.append(fit_val)
err_fit_y_vals = []
for bin in fit_bins:
if fit_failure == True:
err_fit_val = np.nan
else:
err_fit_val = lognorm(bin, err_variables[0], err_variables[1], err_variables[2])
err_fit_y_vals.append(err_fit_val)
modified_distr_data[air_mass] = [distr_bins,norm_numb_distr_values,fit_bins,fit_y_vals]
pprint(modified_distr_data['GBPS'])
#plotting
fig = plt.figure()
ax1 = fig.add_subplot(111)
colors=['magenta', 'red', 'green', 'cyan', 'blue', 'black']
i=0
for air_mass, distr in modified_distr_data.iteritems():
bins = modified_distr_data[air_mass][0]
data = modified_distr_data[air_mass][1]
fit_bins = modified_distr_data[air_mass][2]
fits = modified_distr_data[air_mass][3]
m_distr = ax1.scatter(bins,data, label = air_mass,color = colors[i])
f_distr = ax1.semilogx(fit_bins,fits,color = colors[i])
ax1.set_xlim(40,500)
ax1.set_ylim(0,1.1)
i+=1
plt.legend()
plt.show()
| 26.650407 | 137 | 0.742221 |
ac4e9a7286b947fb0b00f67815da9872ce954025 | 161 | py | Python | Estrutura_Decisao/pair_or_odd.py | M3nin0/supreme-broccoli | 186c1ea3b839ba3139f9301660dec8fbd27a162e | [
"Apache-2.0"
] | null | null | null | Estrutura_Decisao/pair_or_odd.py | M3nin0/supreme-broccoli | 186c1ea3b839ba3139f9301660dec8fbd27a162e | [
"Apache-2.0"
] | null | null | null | Estrutura_Decisao/pair_or_odd.py | M3nin0/supreme-broccoli | 186c1ea3b839ba3139f9301660dec8fbd27a162e | [
"Apache-2.0"
] | null | null | null | num = int(input("Insira um numero para descobrir se este par ou impar: "))
if num % 2 == 0:
print("Este numero par")
else:
print("Este numero impar")
| 17.888889 | 76 | 0.658385 |
ac4ee0a9c265d35fc43a606e8c10147a4a14ebe8 | 342 | py | Python | pos_multie_print/config/docs.py | ashish-greycube/pos_multie_print | f84f36cdf32f53b200c8fe7b9c754e199094d841 | [
"MIT"
] | null | null | null | pos_multie_print/config/docs.py | ashish-greycube/pos_multie_print | f84f36cdf32f53b200c8fe7b9c754e199094d841 | [
"MIT"
] | null | null | null | pos_multie_print/config/docs.py | ashish-greycube/pos_multie_print | f84f36cdf32f53b200c8fe7b9c754e199094d841 | [
"MIT"
] | null | null | null | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/pos_multie_print"
# docs_base_url = "https://[org_name].github.io/pos_multie_print"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
| 28.5 | 68 | 0.739766 |
ac4f1637d2da63115e2a93b02c3d3a4bb30ba74a | 56 | py | Python | src/import_hook/__init__.py | zthxxx/sniputils | e67f55dfa0689f1dde6b6e78d76f04022b4d4585 | [
"MIT"
] | null | null | null | src/import_hook/__init__.py | zthxxx/sniputils | e67f55dfa0689f1dde6b6e78d76f04022b4d4585 | [
"MIT"
] | null | null | null | src/import_hook/__init__.py | zthxxx/sniputils | e67f55dfa0689f1dde6b6e78d76f04022b4d4585 | [
"MIT"
] | null | null | null | from .import_track import *
from .reimportable import *
| 18.666667 | 27 | 0.785714 |
ac4f175249ba254c543b5b853dde50f7e4c40661 | 837 | py | Python | check_performance.py | 5laps2go/xbrr | 4c0824b53bfe971111d60e6c1ff4e36f4f4845a3 | [
"MIT"
] | null | null | null | check_performance.py | 5laps2go/xbrr | 4c0824b53bfe971111d60e6c1ff4e36f4f4845a3 | [
"MIT"
] | null | null | null | check_performance.py | 5laps2go/xbrr | 4c0824b53bfe971111d60e6c1ff4e36f4f4845a3 | [
"MIT"
] | null | null | null | import os
import shutil
import pyfbi
from xbrr.edinet.client.document_client import DocumentClient
from xbrr.edinet.reader.reader import Reader
from xbrr.edinet.reader.doc import Doc
from xbrr.edinet.reader.aspects.finance import Finance
with pyfbi.watch():
check()
pyfbi.dump("result")
pyfbi.show()
| 26.15625 | 78 | 0.710872 |
ac4f82e72b64166dbb545dc5a1c2ec940777bbee | 1,096 | py | Python | pe3.py | ChrisCalderon/project-euler | 96055343fc3ef7653184708fe350018ee751ea17 | [
"MIT"
] | 1 | 2015-12-16T05:13:30.000Z | 2015-12-16T05:13:30.000Z | pe3.py | ChrisCalderon/project-euler | 96055343fc3ef7653184708fe350018ee751ea17 | [
"MIT"
] | null | null | null | pe3.py | ChrisCalderon/project-euler | 96055343fc3ef7653184708fe350018ee751ea17 | [
"MIT"
] | null | null | null | PRIMES = [3]
if __name__ == '__main__':
main()
| 22.833333 | 83 | 0.469891 |
ac4fb7ef759fec615c1233d88bd6d5b5c8a82c1d | 144 | py | Python | backend/puzzle/apps.py | mductran/puzzle | c4598f5420dff126fa67db1e0adee1677a8baf8f | [
"Apache-2.0"
] | null | null | null | backend/puzzle/apps.py | mductran/puzzle | c4598f5420dff126fa67db1e0adee1677a8baf8f | [
"Apache-2.0"
] | null | null | null | backend/puzzle/apps.py | mductran/puzzle | c4598f5420dff126fa67db1e0adee1677a8baf8f | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
| 20.571429 | 56 | 0.756944 |
ac505c7c29aa070c9931ea9a50fc3af3c4aa490f | 10,701 | py | Python | cryptoquant/api/okex/futures_api.py | studyquant/StudyQuant | 24790634ac320b25361672754558c3797f4fc9e3 | [
"Apache-2.0"
] | 74 | 2018-08-10T17:05:57.000Z | 2022-03-26T07:06:02.000Z | cryptoquant/api/okex/futures_api.py | ezailwoo/studyquant | 24790634ac320b25361672754558c3797f4fc9e3 | [
"Apache-2.0"
] | 1 | 2022-03-24T06:42:00.000Z | 2022-03-24T06:42:00.000Z | cryptoquant/api/okex/futures_api.py | ezailwoo/studyquant | 24790634ac320b25361672754558c3797f4fc9e3 | [
"Apache-2.0"
] | 18 | 2020-09-22T09:03:49.000Z | 2022-03-31T20:48:54.000Z | from .client import Client
from .consts import *
| 43.149194 | 236 | 0.657882 |
ac52168f298fb9c551b44c7fca2f04721962c5e4 | 2,254 | py | Python | advent_of_code_2017/day 14/solution.py | jvanelteren/advent_of_code | 3c547645250adb2d95ebac43d5d2111cdf9b09e9 | [
"MIT"
] | 1 | 2021-12-23T11:24:11.000Z | 2021-12-23T11:24:11.000Z | advent_of_code_2017/day 14/solution.py | jvanelteren/advent_of_code | 3c547645250adb2d95ebac43d5d2111cdf9b09e9 | [
"MIT"
] | null | null | null | advent_of_code_2017/day 14/solution.py | jvanelteren/advent_of_code | 3c547645250adb2d95ebac43d5d2111cdf9b09e9 | [
"MIT"
] | null | null | null | #%%
# read full assignment
# think algo before implementing
# dont use a dict when you need a list
# assignment is still = and not ==
# dont use itertools when you can use np.roll
# check mathemathical functions if the parentheses are ok
# networkx is awesome
# sometimes while true is better than just too small for loop
# networkx addes nodes when adding edge to nonexistent node
# %%
import os
import re
import numpy as np
try:
os.chdir(os.path.join(os.getcwd(), 'day 14'))
print(os.getcwd())
except:
pass
from functools import reduce
import operator
import networkx as nx
import numpy as np
# f = open('input.txt','r').read().strip()
count= 0
f = 'stpzcrnm'
for r in range(128):
h = gethash('stpzcrnm-'+str(r))
count+=len(''.join([getbits(b) for b in h]).replace('0',''))
count
# %%
count= 0
grid = []
f = 'stpzcrnm'
for r in range(128):
h = gethash('stpzcrnm-'+str(r))
grid.append(list(''.join([getbits(b) for b in h])))
count+=len(''.join([getbits(b) for b in h]).replace('0',''))
# %%
grid = np.array(grid)
print(grid.shape)
G = nx.Graph()
for index,output in np.ndenumerate(grid):
if output == '1':
i,j = index[0], index[1]
G.add_edge((i,j),(i+1,j))
G.add_edge((i,j),(i-1,j))
G.add_edge((i,j),(i,j+1))
G.add_edge((i,j),(i,j-1))
for index,output in np.ndenumerate(grid):
if output == '0':
if G.has_node(index): G.remove_node(index)
nx.number_connected_components(G)
# %%
| 25.325843 | 72 | 0.613576 |
ac53a111991d6177a1eaab1a5dfb80e80e02f826 | 1,423 | py | Python | binyard/general_diff.py | giprayogo/binyard | c1cfa880cb9907416da2363fa0e4ca2de920543e | [
"MIT"
] | null | null | null | binyard/general_diff.py | giprayogo/binyard | c1cfa880cb9907416da2363fa0e4ca2de920543e | [
"MIT"
] | null | null | null | binyard/general_diff.py | giprayogo/binyard | c1cfa880cb9907416da2363fa0e4ca2de920543e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import math
import argparse
from fractions import Fraction
# Print each data row of the file as its difference from the first
# (reference) data row; comment lines are passed through unchanged.
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--column', '-c', required=True)
parser.add_argument('--error-column', '-e')
args = parser.parse_args()

filename = args.filename
# Comma-separated column indices to difference against the reference row.
columns = list(map(int, args.column.split(',')))
# Optional error columns, combined in quadrature with the reference errors.
err_columns = list(map(int, args.error_column.split(','))) if args.error_column else None

with open(filename, 'r') as data_file:
    reference_data = {}
    for line in data_file.readlines():
        if '#' in line:
            # Comment line: echo it untouched.
            print(line.rstrip())
            continue
        else:
            split = line.split()
            if not reference_data:
                # First data row becomes the reference.  Check dict
                # emptiness rather than any(values()): with the old
                # ``any(reference_data.values())`` test, an all-zero
                # reference row was silently overwritten by the next row.
                for column in columns:
                    reference_data[column] = float(split[column])
                if err_columns:
                    for err_column in err_columns:
                        reference_data[err_column] = float(split[err_column])
                print('ref: ',' '.join(map(str,split)))
            else:
                # Subsequent rows: subtract the reference value ...
                for column in columns:
                    split[column] = float(split[column]) - reference_data[column]
                if err_columns:
                    # ... and propagate the uncertainties in quadrature.
                    for err_column in err_columns:
                        split[err_column] = math.sqrt(float(split[err_column])**2 + reference_data[err_column]**2)
                print(' '.join(map(str,split)))
| 36.487179 | 114 | 0.583275 |
ac53b737b7f7790c24745dd75f232a7019768317 | 7,968 | py | Python | challenges/challenge1_test.py | jamiejamiebobamie/CS-2.2-Advanced-Recursion-and-Graphs | 3de50122ed783ee9dee251ae87173286a861f33d | [
"MIT"
] | null | null | null | challenges/challenge1_test.py | jamiejamiebobamie/CS-2.2-Advanced-Recursion-and-Graphs | 3de50122ed783ee9dee251ae87173286a861f33d | [
"MIT"
] | 5 | 2019-07-26T05:39:34.000Z | 2019-08-16T14:59:21.000Z | challenges/challenge1_test.py | jamiejamiebobamie/CS-2.2-Advanced-Recursion-and-Graphs | 3de50122ed783ee9dee251ae87173286a861f33d | [
"MIT"
] | null | null | null |
import unittest  # NOTE(review): file-level import retained; unused after dead-suite removal below
from challenge1 import *

# Dead code removed: this module previously carried a full unittest.TestCase
# suite that had been disabled by wrapping it in a module-level triple-quoted
# string, so it was never executed. It duplicated the assert-based smoke tests
# below and contained its own defects (a `selfAMLGraph` typo, `__init__`
# overrides calling an undefined `TestingClass`, and `assertTrue(a, b)` used
# as though it compared `a` to `b` — the second argument of assertTrue is the
# failure message, not an expected value). Recover it from version control if
# a unittest runner is ever needed.
from challenge1 import *  # NOTE(review): redundant re-import; kept so this edit stands alone

# --- read_graph smoke test ---------------------------------------------------
filepath = "graph_data.txt"
data = read_graph(filepath)
assert data[0] == ['1', '2', '3', '4']              # vertex labels
assert data[1] == [(1, 2), (1, 4), (2, 3), (2, 4)]  # edge pairs

# --- linked-list graph implementation smoke test -----------------------------
print("\ntesting linked list implementation...")
newGraph = LLGraph(data[0])  # adding the vertices
assert newGraph.numberOfVertices == 4
assert newGraph.get_vertices() == ['1', '2', '3', '4']
newGraph.add_edges(data[1])  # adding edges
assert newGraph.get_edges(1) == [(1, 2, 1), (1, 4, 1)]
assert newGraph.get_edges(2) == [(2, 3, 1), (2, 4, 1)]
# BUG FIX: the original `assert a and b == x` parses as `a and (b == x)`
# (comparison binds tighter than `and`), so get_edges(3) was only checked for
# truthiness and never compared to the expected message. Assert each call.
assert newGraph.get_edges(3) == "No out-going edges."
assert newGraph.get_edges(4) == "No out-going edges."
newGraph.add_edge(1, 3, 5)
assert newGraph.get_edges(1) == [(1, 2, 1), (1, 4, 1), (1, 3, 5)]
newGraph.add_edge(4, 3, 2)
assert newGraph.get_edges(4) == (4, 3, 2)
newGraph.add_edge(3, 4)  # default weight of 1 when weight is not given
assert newGraph.get_edges(3) == (3, 4, 1)
assert newGraph.get_vertices() == ['1', '2', '3', '4']
assert newGraph.numberOfVertices == 4
newGraph.add_vertex()
assert newGraph.get_vertices() == ['1', '2', '3', '4', '5']
assert newGraph.numberOfVertices == 5
# NOTE(review): __iter__ is called directly and compared to a list — this
# implementation appears to return a plain list rather than an iterator;
# confirm in challenge1.
assert newGraph.__iter__() == [['1', [(1, 2, 1), (1, 4, 1), (1, 3, 5)]], ['2', [(2, 3, 1), (2, 4, 1)]], ['3', (3, 4, 1)], ['4', (4, 3, 2)], ['5', 'No out-going edges.']]
assert newGraph.get_neighbors_of_a_vertex(1) == [2, 4, 3]
# Deliberately poke a raw LinkedList into the vertex table to exercise
# get_vertex without going through the public API.
linkedL = LinkedList()
newGraph.vertices.append(linkedL)
newGraph.numberOfVertices += 1  # hacking my graph to test getVertex method
assert newGraph.get_vertex(newGraph.numberOfVertices) == linkedL
print("all linked-list-graph tests pass")
# --- adjacency-matrix graph implementation smoke test ------------------------
# NOTE(review): the "adjacenecy" typo in the two print messages below is
# preserved — they are runtime output, not comments.
print("\ntesting adjacenecy matrix implementation...")
newGraph = AMGraph(len(data[0]))  # adding the vertices
assert newGraph.numberOfVertices == 4
assert newGraph.vertices == [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
assert newGraph.get_vertices() == [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
newGraph.add_edges(data[1])  # adding edges
assert newGraph.get_edges(1) == [0, 1, 0, 1]
assert newGraph.get_edges(2) == [0, 0, 1, 1]
# BUG FIX: the original `assert a and b == x` parses as `a and (b == x)`
# (comparison binds tighter than `and`), so row 3 was only truthiness-checked
# and never compared to the expected all-zero row. Assert each row separately.
assert newGraph.get_edges(3) == [0, 0, 0, 0]
assert newGraph.get_edges(4) == [0, 0, 0, 0]
newGraph.add_edge(1, 3, 5)
assert newGraph.get_edges(1) == [0, 1, 5, 1]
newGraph.add_edge(4, 3, 2)
assert newGraph.get_edges(4) == [0, 0, 2, 0]
newGraph.add_edge(3, 4)  # default weight of 1 when weight is not given
assert newGraph.get_edges(3) == [0, 0, 0, 1]
newGraph.add_vertex()
assert newGraph.numberOfVertices == 5
assert newGraph.vertices == [[0, 1, 5, 1, 0], [0, 0, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 2, 0, 0], [0, 0, 0, 0, 0]]
assert newGraph.get_vertices() == [[0, 1, 5, 1, 0], [0, 0, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 2, 0, 0], [0, 0, 0, 0, 0]]
print("all adjacenecy matrix graph tests pass")
| 41.717277 | 190 | 0.616717 |
ac53ceb43286b504010da1532640f80e8e04aec8 | 870 | py | Python | utils.py | twerkmeister/tacotron2 | 404c0758591dab8b72933f010f51c5c2b7490827 | [
"BSD-3-Clause"
] | null | null | null | utils.py | twerkmeister/tacotron2 | 404c0758591dab8b72933f010f51c5c2b7490827 | [
"BSD-3-Clause"
] | null | null | null | utils.py | twerkmeister/tacotron2 | 404c0758591dab8b72933f010f51c5c2b7490827 | [
"BSD-3-Clause"
] | 1 | 2020-04-30T11:21:15.000Z | 2020-04-30T11:21:15.000Z | import numpy as np
from scipy.io.wavfile import read
import torch
| 26.363636 | 74 | 0.687356 |
ac54c2f02d6419e0bb0da94bc53fab30b66b86a4 | 8,526 | py | Python | plugins/_Post_Process/_XAI/lime_tabular_batch.py | isabella232/nnc-plugin | 3bc71266696d0341e5e9a2ff2020980700f28719 | [
"Apache-2.0"
] | null | null | null | plugins/_Post_Process/_XAI/lime_tabular_batch.py | isabella232/nnc-plugin | 3bc71266696d0341e5e9a2ff2020980700f28719 | [
"Apache-2.0"
] | null | null | null | plugins/_Post_Process/_XAI/lime_tabular_batch.py | isabella232/nnc-plugin | 3bc71266696d0341e5e9a2ff2020980700f28719 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021,2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge
import csv
import collections
from nnabla import logger
import nnabla.utils.load as load
from nnabla.utils.cli.utility import let_data_to_variable
if __name__ == '__main__':
    # Script entry point; `main` is defined elsewhere in this file
    # (outside the visible chunk).
    main()
| 40.407583 | 144 | 0.59301 |
ac54d54663f6738aa459a24b061f95f72da27abe | 3,454 | py | Python | examples/applications/plot_circuits.py | ImportanceOfBeingErnest/networkx | eb3a675c5b2b15e33b0a4a35bcee34d6b81ed94d | [
"BSD-3-Clause"
] | 1 | 2020-03-06T05:04:14.000Z | 2020-03-06T05:04:14.000Z | examples/applications/plot_circuits.py | ImportanceOfBeingErnest/networkx | eb3a675c5b2b15e33b0a4a35bcee34d6b81ed94d | [
"BSD-3-Clause"
] | 1 | 2019-11-28T21:08:50.000Z | 2019-11-28T21:08:50.000Z | examples/applications/plot_circuits.py | ImportanceOfBeingErnest/networkx | eb3a675c5b2b15e33b0a4a35bcee34d6b81ed94d | [
"BSD-3-Clause"
] | 4 | 2019-07-19T15:06:37.000Z | 2021-03-17T22:29:04.000Z | #!/usr/bin/env python
# circuits.py - convert a Boolean circuit to an equivalent Boolean formula
#
# Copyright 2016 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""
========
Circuits
========
Convert a Boolean circuit to an equivalent Boolean formula.
A Boolean circuit can be exponentially more expressive than an
equivalent formula in the worst case, since the circuit can reuse
subcircuits multiple times, whereas a formula cannot reuse subformulas
more than once. Thus creating a Boolean formula from a Boolean circuit
in this way may be infeasible if the circuit is large.
"""
from networkx import dag_to_branching
from networkx import DiGraph
from networkx.utils import arbitrary_element
if __name__ == '__main__':
    # Script entry point; `main` is defined elsewhere in this file
    # (outside the visible chunk).
    main()
| 34.19802 | 76 | 0.675738 |
ac576d0fc1700d09b6ee7d12ba1be4753eca3284 | 4,669 | py | Python | torchtools/tensors/tensor_group.py | cjwcommuny/torch-tools | a64c0bdd87df065744fb49644f767165d3516b27 | [
"MIT"
] | null | null | null | torchtools/tensors/tensor_group.py | cjwcommuny/torch-tools | a64c0bdd87df065744fb49644f767165d3516b27 | [
"MIT"
] | null | null | null | torchtools/tensors/tensor_group.py | cjwcommuny/torch-tools | a64c0bdd87df065744fb49644f767165d3516b27 | [
"MIT"
] | null | null | null | from typing import Dict, Tuple
import torch
from torch import Tensor
from torchtools.tensors.function import unsqueeze
| 32.2 | 128 | 0.572714 |
ac57b9751b82e69ea88e5b4020e3f0156b95a4e8 | 5,029 | py | Python | merlinservices/urls.py | USGS-WiM/merlin_django | 880a5634736de36fbb48cbfe7f60305a83975dcf | [
"CC0-1.0"
] | null | null | null | merlinservices/urls.py | USGS-WiM/merlin_django | 880a5634736de36fbb48cbfe7f60305a83975dcf | [
"CC0-1.0"
] | 28 | 2019-08-20T20:06:32.000Z | 2021-12-17T23:08:05.000Z | merlinservices/urls.py | USGS-WiM/merlin_django | 880a5634736de36fbb48cbfe7f60305a83975dcf | [
"CC0-1.0"
] | 2 | 2020-02-21T17:52:18.000Z | 2020-05-08T09:05:55.000Z | from django.urls import path
from django.conf.urls import url, include
from django.views.generic.base import TemplateView
from merlinservices import views
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import get_schema_view
from rest_framework_bulk.routes import BulkRouter
#router = DefaultRouter()
router = BulkRouter()
router.register(r'acids', views.AcidViewSet, 'acids')
router.register(r'analyses', views.AnalysisTypeViewSet, 'analyses')
router.register(r'blankwaters', views.BlankWaterViewSet, 'blankwaters')
router.register(r'bottles', views.BottleViewSet, 'bottles')
router.register(r'bottleprefixes', views.BottlePrefixViewSet, 'bottleprefixes')
router.register(r'bottletypes', views.BottleTypeViewSet, 'bottletypes')
router.register(r'brominations', views.BrominationViewSet, 'brominations')
router.register(r'constituents', views.ConstituentTypeViewSet, 'constituents')
router.register(r'cooperators', views.CooperatorViewSet, 'cooperators')
router.register(r'detectionflags', views.DetectionFlagViewSet, 'detectionflags')
router.register(r'filters', views.FilterTypeViewSet, 'filters')
router.register(r'isotopeflags', views.IsotopeFlagViewSet, 'isotopeflags')
router.register(r'mediums', views.MediumTypeViewSet, 'mediums')
router.register(r'methods', views.MethodTypeViewSet, 'methods')
router.register(r'preservations', views.PreservationTypeViewSet, 'preservations')
router.register(r'projects', views.ProjectViewSet, 'projects')
router.register(r'projectssites', views.ProjectSiteViewSet, 'projectssites')
router.register(r'processings', views.ProcessingTypeViewSet, 'processings')
router.register(r'qualityassurances', views.QualityAssuranceViewSet, 'qualityassurances')
router.register(r'qualityassurancetypes', views.QualityAssuranceTypeViewSet, 'qualityassurancetypes')
router.register(r'results', views.ResultViewSet, 'results')
router.register(r'resultdatafiles', views.ResultDataFileViewSet, 'resultdatafiles')
router.register(r'samples', views.SampleViewSet, 'samples')
router.register(r'samplebottles', views.SampleBottleViewSet, 'samplebottles')
router.register(r'samplebottlebrominations', views.SampleBottleBrominationViewSet, 'samplebottlebrominations')
router.register(r'sites', views.SiteViewSet, 'sites')
router.register(r'units', views.UnitTypeViewSet, 'units')
router.register(r'users', views.UserViewSet, 'users')
router.register(r'fullresults', views.FullResultViewSet, 'fullresults')
router.register(r'fullsamplebottles', views.FullSampleBottleViewSet, 'fullsamplebottles')
router.register(r'bulkacids', views.AcidBulkUpdateViewSet, 'bulkacids')
router.register(r'bulkblankwaters', views.BlankWaterBulkUpdateViewSet, 'bulkblankwaters')
router.register(r'bulkbottles', views.BottleBulkCreateUpdateViewSet, 'bulkbottles')
router.register(r'bulkbottleprefixes', views.BottlePrefixBulkCreateUpdateViewSet, 'bulkbottleprefixes')
router.register(r'bulkbrominations', views.BrominationBulkUpdateViewSet, 'bulkbrominations')
router.register(r'bulkcooperators', views.CooperatorBulkUpdateViewSet, 'bulkcooperators')
router.register(r'bulkprojects', views.ProjectBulkUpdateViewSet, 'bulkprojects')
router.register(r'bulkprojectssites', views.ProjectBulkUpdateViewSet, 'bulkprojectssites')
router.register(r'bulkresults', views.ResultBulkCreateUpdateViewSet, 'bulkresults')
router.register(r'bulksamples', views.SampleBulkCreateUpdateViewSet, 'bulksamples')
router.register(r'bulksamplebottles', views.SampleBottleBulkCreateUpdateViewSet, 'bulksamplebottles')
router.register(r'bulksamplebottlebrominations',
views.SampleBottleBrominationBulkCreateUpdateViewSet, 'bulksamplebottlebrominations')
router.register(r'bulksites', views.SiteBulkUpdateViewSet, 'bulksites')
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('openapi', get_schema_view(title="CBRSServices", description="API for CBRS DMS"), name='openapi-schema'),
path('swagger-ui/', TemplateView.as_view(template_name='swagger-ui.html',
extra_context={'schema_url': 'openapi-schema'}), name='swagger-ui'),
path('redoc/', TemplateView.as_view(template_name='redoc.html',
extra_context={'schema_url': 'openapi-schema'}), name='redoc'),
url(r'^auth/$', views.AuthView.as_view(), name='authenticate'),
url(r'^batchupload', views.BatchUpload.as_view(), name='batchupload'),
url(r'^reportresultscooperator/',
views.ReportResultsCooperator.as_view(), name='reportresultscooperator'),
url(r'^reportresultsnwis/', views.ReportResultsNwis.as_view(), name='reportresultsnwis'),
url(r'^reportsamplesnwis/', views.ReportSamplesNwis.as_view(), name='reportsamplesnwis'),
url(r'^resultcountprojects/',
views.ReportResultsCountProjects.as_view(), name='resultcountprojects'),
url(r'^resultcountnawqa/', views.ReportResultsCountNawqa.as_view(), name='resultcountnawqa'),
]
| 65.311688 | 114 | 0.791211 |
ac5a48ef46f5c110ca09df0b1e0e5cb2859ebb3d | 1,134 | py | Python | model.py | bruchano/StockPricePrediction | 6fa3a643e9959fbf26ffd95af54981b077ddd33f | [
"MIT"
] | null | null | null | model.py | bruchano/StockPricePrediction | 6fa3a643e9959fbf26ffd95af54981b077ddd33f | [
"MIT"
] | null | null | null | model.py | bruchano/StockPricePrediction | 6fa3a643e9959fbf26ffd95af54981b077ddd33f | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
import datetime as dt
import pandas_datareader
| 32.4 | 90 | 0.614638 |