hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5370c3d3d7c64120cfceac3826e677a88c4d71af
| 3,556
|
py
|
Python
|
laia/data/transforms/vision/random_beta_morphology.py
|
eivtho/PyLaia
|
2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
|
[
"MIT"
] | 89
|
2018-12-12T23:06:26.000Z
|
2022-02-03T09:04:21.000Z
|
laia/data/transforms/vision/random_beta_morphology.py
|
eivtho/PyLaia
|
2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
|
[
"MIT"
] | 30
|
2019-03-06T14:29:48.000Z
|
2022-03-16T14:53:43.000Z
|
laia/data/transforms/vision/random_beta_morphology.py
|
jpuigcerver/PyLaia
|
1b2e864247f1bfb8d95ac1910de9c52df71c017a
|
[
"MIT"
] | 26
|
2018-12-13T17:48:19.000Z
|
2022-02-28T12:52:29.000Z
|
from typing import List, Tuple, Union
import numpy as np
import scipy.special
from PIL import Image, ImageFilter
if __name__ == "__main__":
import argparse
from PIL import ImageOps
parser = argparse.ArgumentParser()
parser.add_argument("--operation", choices=("dilate", "erode"), default="dilate")
parser.add_argument("images", type=argparse.FileType("rb"), nargs="+")
args = parser.parse_args()
transformer = Dilate() if args.operation == "dilate" else Erode()
for f in args.images:
x = Image.open(f, "r").convert("L")
x = ImageOps.invert(x)
y = transformer(x)
w, h = x.size
z = Image.new("L", (w, 2 * h))
z.paste(x, (0, 0))
z.paste(y, (0, h))
z = z.resize(size=(w // 2, h), resample=Image.BICUBIC)
z.show()
input()
| 32.327273
| 86
| 0.615298
|
537138998ce86bd69153421493a543bbc8be7c36
| 723
|
py
|
Python
|
hemp/internal/utils.py
|
Addvilz/hemp
|
2cd1d437fc59a8f7b24f5d150c623bf75c3b6747
|
[
"Apache-2.0"
] | 1
|
2020-08-13T22:28:28.000Z
|
2020-08-13T22:28:28.000Z
|
hemp/internal/utils.py
|
Addvilz/hemp
|
2cd1d437fc59a8f7b24f5d150c623bf75c3b6747
|
[
"Apache-2.0"
] | null | null | null |
hemp/internal/utils.py
|
Addvilz/hemp
|
2cd1d437fc59a8f7b24f5d150c623bf75c3b6747
|
[
"Apache-2.0"
] | null | null | null |
import sys
from fabric.utils import error, puts
from git import RemoteProgress
| 26.777778
| 76
| 0.637621
|
53713acb71d2f50fa7d7472d8e125a179f1d5d33
| 417
|
py
|
Python
|
backend/links/sentence.py
|
dla1635/hyLink
|
8f3d1b6b0cad57ce2f6861583eb2b523f9fceee7
|
[
"MIT"
] | 1
|
2020-07-17T05:57:47.000Z
|
2020-07-17T05:57:47.000Z
|
backend/links/sentence.py
|
dla1635/hyLink
|
8f3d1b6b0cad57ce2f6861583eb2b523f9fceee7
|
[
"MIT"
] | 11
|
2020-06-06T00:30:23.000Z
|
2022-02-26T19:59:06.000Z
|
backend/links/sentence.py
|
dla1635/hylink
|
8f3d1b6b0cad57ce2f6861583eb2b523f9fceee7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from collections import Counter
from konlpy.tag import Okt
| 18.954545
| 49
| 0.611511
|
53726406b1ce515956afb2308d74b2a4c7e1b255
| 4,227
|
py
|
Python
|
tests/arch/x86/test_x86parser.py
|
IMULMUL/barf-project
|
9547ef843b8eb021c2c32c140e36173c0b4eafa3
|
[
"BSD-2-Clause"
] | 1,395
|
2015-01-02T11:43:30.000Z
|
2022-03-30T01:15:26.000Z
|
tests/arch/x86/test_x86parser.py
|
IMULMUL/barf-project
|
9547ef843b8eb021c2c32c140e36173c0b4eafa3
|
[
"BSD-2-Clause"
] | 54
|
2015-02-11T05:18:05.000Z
|
2021-12-10T08:45:39.000Z
|
tests/arch/x86/test_x86parser.py
|
IMULMUL/barf-project
|
9547ef843b8eb021c2c32c140e36173c0b4eafa3
|
[
"BSD-2-Clause"
] | 207
|
2015-01-05T09:47:54.000Z
|
2022-03-30T01:15:29.000Z
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import unittest
from barf.arch import ARCH_X86_MODE_32
from barf.arch import ARCH_X86_MODE_64
from barf.arch.x86.parser import X86Parser
def main():
unittest.main()
if __name__ == '__main__':
main()
| 33.283465
| 80
| 0.666903
|
5374082003f5a0ab7717d7cbdda9e4ca3ac483ea
| 1,236
|
py
|
Python
|
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | 1
|
2018-05-30T02:36:46.000Z
|
2018-05-30T02:36:46.000Z
|
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | null | null | null |
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# sema_signal.py
#
# An example of using a semaphore for signaling between threads
import threading
import time
done = threading.Semaphore(0) # Resource control.
item = None
t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start()
t2.start()
"""
Semaphore Uses:
1. Resource control
You can limit the number of threads performing certain operations.For example, performing database queries making network connections
2. Signaling
Semaphores can be used to send "signals" between threads. For example, having one thread wake up another thread
"""
| 29.428571
| 145
| 0.669903
|
5375dec1385aae371f742bbb1feff08c0d14da3b
| 3,199
|
py
|
Python
|
temp_wc_analysis/analysis.py
|
KarrLab/wc_sim
|
5b0ee03c3d19193fa67a3797d4258b753e6bc576
|
[
"MIT"
] | 8
|
2018-03-27T21:35:25.000Z
|
2022-01-18T08:32:20.000Z
|
temp_wc_analysis/analysis.py
|
KarrLab/wc_sim
|
5b0ee03c3d19193fa67a3797d4258b753e6bc576
|
[
"MIT"
] | 114
|
2018-02-27T14:14:39.000Z
|
2020-12-30T15:06:51.000Z
|
temp_wc_analysis/analysis.py
|
KarrLab/wc_sim
|
5b0ee03c3d19193fa67a3797d4258b753e6bc576
|
[
"MIT"
] | 2
|
2019-04-05T16:17:28.000Z
|
2020-05-17T12:55:20.000Z
|
'''Analysis utility functions.
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-03-26
:Copyright: 2016-2018, Karr Lab
:License: MIT
'''
# TODO(Arthur): IMPORTANT: refactor and replace
from matplotlib import pyplot
from matplotlib import ticker
from wc_lang import Model, Submodel
from scipy.constants import Avogadro
import numpy as np
import re
| 29.081818
| 96
| 0.56924
|
5378047f0579efdd010c7d57b8aefd313753aa1d
| 907
|
py
|
Python
|
setup.py
|
bstuddard/bonsai
|
3610fc50a3b24818288d850048c2a23306215367
|
[
"MIT"
] | 26
|
2021-07-18T14:52:47.000Z
|
2022-01-27T10:35:44.000Z
|
setup.py
|
bstuddard/bonsai
|
3610fc50a3b24818288d850048c2a23306215367
|
[
"MIT"
] | null | null | null |
setup.py
|
bstuddard/bonsai
|
3610fc50a3b24818288d850048c2a23306215367
|
[
"MIT"
] | 3
|
2021-07-20T03:25:22.000Z
|
2021-08-17T04:06:27.000Z
|
from setuptools import setup, find_packages
with open("README.md", "r") as readme_file:
readme = readme_file.read()
requirements = [
'xgboost>=0.90',
'catboost>=0.26',
'bayesian-optimization>=1.2.0',
'numpy>=1.19.5',
'pandas>=1.1.5',
'matplotlib>=3.2.2',
'seaborn>=0.11.1',
'plotly>=4.4.1',
'pyyaml>=5.4.1'
]
setup(
name="bonsai-tree",
version="1.2",
author="Landon Buechner",
author_email="mechior.magi@gmail.com",
description="Bayesian Optimization + Gradient Boosted Trees",
long_description=readme,
url="https://github.com/magi-1/bonsai",
packages=find_packages(),
package_data={'': ['*.yml']},
install_requires=requirements,
license = 'MIT',
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 25.914286
| 65
| 0.607497
|
537b221bff7d480fcdf886ab83757cc48372b358
| 823
|
py
|
Python
|
_scripts/increment_version.py
|
clockhart/pathogen
|
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
|
[
"MIT"
] | null | null | null |
_scripts/increment_version.py
|
clockhart/pathogen
|
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
|
[
"MIT"
] | null | null | null |
_scripts/increment_version.py
|
clockhart/pathogen
|
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
|
[
"MIT"
] | null | null | null |
"""
increment_version.py
written in Python3
author: C. Lockhart <chris@lockhartlab.org>
"""
import yaml
# Read in version
with open('version.yml', 'r') as f:
version = yaml.safe_load(f.read())
# Strip "dev" out of micro
version['micro'] = int(str(version['micro']).replace('dev', ''))
# Update patch
version['micro'] += 1
# Add "dev" back to patch
if version['micro'] != 0:
version['micro'] = 'dev' + str(version['micro'])
# Output version
with open('version.yml', 'w') as f:
yaml.safe_dump(version, f, sort_keys=False)
# Transform version dict to string
version = '.'.join([str(version[key]) for key in ['major', 'minor', 'micro']])
# Write version string to pathogen/_version.py
with open('pathogen/version.py', 'w') as f:
f.write("__version__ = '{}'\n".format(version))
# Return
print(version)
| 22.243243
| 78
| 0.660996
|
537c67be5a305675d3c345fd99a5e6be9b4b00c1
| 15,725
|
py
|
Python
|
holoviews/core/data/ibis.py
|
TheoMathurin/holoviews
|
0defcef994d6dd6d2054f75a0e332d02d121f8b0
|
[
"BSD-3-Clause"
] | 1
|
2017-03-01T07:08:23.000Z
|
2017-03-01T07:08:23.000Z
|
holoviews/core/data/ibis.py
|
chrinide/holoviews
|
e1234a60ae0809ac561c204b1998dff0452b2bf0
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/core/data/ibis.py
|
chrinide/holoviews
|
e1234a60ae0809ac561c204b1998dff0452b2bf0
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import numpy
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from .. import util
from ..element import Element
from ..ndmapping import NdMapping, item_check, sorted_context
from .interface import Interface
from . import pandas
from .util import cached
Interface.register(IbisInterface)
| 35.022272
| 106
| 0.569348
|
537e41912df4cf73c680542167c1c109a8513d39
| 3,907
|
py
|
Python
|
chess/models/tournament.py
|
S0Imyr/Projet-4
|
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
|
[
"MIT"
] | null | null | null |
chess/models/tournament.py
|
S0Imyr/Projet-4
|
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
|
[
"MIT"
] | null | null | null |
chess/models/tournament.py
|
S0Imyr/Projet-4
|
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Handles the tournament logic
"""
import datetime
from chess.utils.utils import get_new_id
from chess.models.actors import Player
from chess.models.round import Round
TOURNAMENT_ID_WIDTH = 8
NB_ROUND = 4
NB_PLAYERS = 8
NB_MATCH = 4
| 33.393162
| 102
| 0.621449
|
537ea975bc8b1468e691c88bd35a36f7347e9442
| 1,895
|
py
|
Python
|
set-config.py
|
astubenazy/vrops-metric-collection
|
c4e5b8d7058759aa5eded74cc619d1dedcbc821a
|
[
"MIT"
] | 2
|
2020-04-08T13:03:00.000Z
|
2020-08-25T18:21:27.000Z
|
set-config.py
|
astubenazy/vrops-metric-collection
|
c4e5b8d7058759aa5eded74cc619d1dedcbc821a
|
[
"MIT"
] | 1
|
2019-08-15T11:19:18.000Z
|
2019-08-17T11:38:48.000Z
|
set-config.py
|
astubenazy/vrops-metric-collection
|
c4e5b8d7058759aa5eded74cc619d1dedcbc821a
|
[
"MIT"
] | 7
|
2018-06-06T13:47:52.000Z
|
2021-06-17T18:33:27.000Z
|
# !/usr/bin python
"""
#
# set-config - a small python program to setup the configuration environment for data-collect.py
# data-collect.py contain the python program to gather Metrics from vROps
# Author Sajal Debnath <sdebnath@vmware.com>
#
"""
# Importing the required modules
import json
import base64
import os,sys
# Getting the absolute path from where the script is being run
# Getting the path where config.json file should be kept
path = get_script_path()
fullpath = path+"/"+"config.json"
# Getting the data for the config.json file
final_data = get_the_inputs()
# Saving the data to config.json file
with open(fullpath, 'w') as outfile:
json.dump(final_data, outfile, sort_keys = True, indent = 2, separators=(',', ':'), ensure_ascii=False)
| 29.153846
| 107
| 0.701847
|
537f1ecf5b58054b91b3f560bcbfa1d5fc3ac88d
| 16,328
|
py
|
Python
|
tests/test_app.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 6
|
2021-03-09T10:24:02.000Z
|
2022-01-16T03:52:11.000Z
|
tests/test_app.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 1,319
|
2020-12-18T08:52:29.000Z
|
2022-03-31T18:17:32.000Z
|
tests/test_app.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 4
|
2021-03-03T15:36:50.000Z
|
2022-03-11T11:41:51.000Z
|
"""
Copyright 2018 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import os
import re
import signal
import subprocess
import sys
from subprocess import TimeoutExpired
from threading import Timer
import pytest
import inmanta.util
from inmanta import const
def run_with_tty(args, killtime=3, termtime=2):
"""Could not get code for actual tty to run stable in docker, so we are faking it """
env = {const.ENVIRON_FORCE_TTY: "true"}
return run_without_tty(args, env=env, killtime=killtime, termtime=termtime)
def test_verify_that_colorama_package_is_not_present():
"""
The colorama package turns the colored characters in TTY-based terminal into uncolored characters.
As such, this package should not be present.
"""
assert not is_colorama_package_available()
def check_logs(log_lines, regexes_required_lines, regexes_forbidden_lines, timed):
compiled_regexes_requires_lines = get_compiled_regexes(regexes_required_lines, timed)
compiled_regexes_forbidden_lines = get_compiled_regexes(regexes_forbidden_lines, timed)
for line in log_lines:
print(line)
for regex in compiled_regexes_requires_lines:
if not any(regex.match(line) for line in log_lines):
pytest.fail("Required pattern was not found in log lines: %s" % (regex.pattern,))
for regex in compiled_regexes_forbidden_lines:
if any(regex.match(line) for line in log_lines):
pytest.fail("Forbidden pattern found in log lines: %s" % (regex.pattern,))
def test_init_project(tmpdir):
args = [sys.executable, "-m", "inmanta.app", "project", "init", "-n", "test-project", "-o", tmpdir, "--default"]
(stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10)
test_project_path = os.path.join(tmpdir, "test-project")
assert return_code == 0
assert os.path.exists(test_project_path)
(stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10)
assert return_code != 0
assert len(stderr) == 1
assert "already exists" in stderr[0]
| 33.390593
| 125
| 0.614282
|
537fda8bf126c424a17def77a9e57731a1bb799c
| 449
|
py
|
Python
|
AtC_Beg_Con_021-030/ABC027/C.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
AtC_Beg_Con_021-030/ABC027/C.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
AtC_Beg_Con_021-030/ABC027/C.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
n = int(input())
row = 0
for i in range(100):
if 2 ** i <= n <= 2 ** (i + 1) - 1:
row = i
break
k = 0
if row % 2 != 0:
k = 2
cri = seki(k, row // 2)
if n < cri:
print("Aoki")
else:
print("Takahashi")
else:
k = 1
cri = seki(k, row // 2)
if n < cri:
print("Takahashi")
else:
print("Aoki")
| 14.966667
| 39
| 0.4098
|
5382d0895ddebaa840fcd4f4a2179b700c0dfe67
| 21,396
|
py
|
Python
|
extplugins/codvote.py
|
Desi-Boyz/cod4x-server-B3-configs
|
03a323d7ea293efe1831ed315001391b9aaf532a
|
[
"MIT"
] | 1
|
2017-07-17T22:21:10.000Z
|
2017-07-17T22:21:10.000Z
|
extplugins/codvote.py
|
Desi-Boyz/cod4x-server-B3-configs
|
03a323d7ea293efe1831ed315001391b9aaf532a
|
[
"MIT"
] | null | null | null |
extplugins/codvote.py
|
Desi-Boyz/cod4x-server-B3-configs
|
03a323d7ea293efe1831ed315001391b9aaf532a
|
[
"MIT"
] | null | null | null |
# CoDVote plugin for BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2015 ph03n1x
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Changelog:
# v1.0.1 - Fixed vote remaining in progress if requirements for vote unmet.
# v1.0.2 - Added "!vote maps" to show what maps can be called into vote.
# - Fixed issue where person who called vote needed to vote as well. Changed to automatic yes vote.
__version__ = '1.0.2'
__author__ = 'ph03n1x'
import b3, threading
import b3.plugin
import b3.events
| 37.081456
| 139
| 0.550804
|
538362192f9fc22f5fcaa82bb61990dd548e6c63
| 3,947
|
py
|
Python
|
utils.py
|
bianan/cfl
|
e09043d213c7330d5410e27ba90c943d4323dbe8
|
[
"Apache-2.0"
] | 4
|
2020-07-29T10:18:59.000Z
|
2021-06-27T22:57:37.000Z
|
utils.py
|
bianan/cfl
|
e09043d213c7330d5410e27ba90c943d4323dbe8
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
bianan/cfl
|
e09043d213c7330d5410e27ba90c943d4323dbe8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for manipulating variables in Federated personalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
TRAIN_NAME = "Train"
VALIDATION_NAME = "Validation"
TEST_NAME = "Test"
LOSS_NAME = "loss"
LOSS_SUMMARY_NAME = "perplexity"
# Vars type.
VARS_TYPE_ALL = "all"
VARS_TYPE_SHARED = "shared"
VARS_TYPE_PERSONAL = "personal"
def get_var_dict(vars_):
"""Gets a dict of var base_name (e.g. 'w') to the variable."""
var_dict = {}
for v in vars_:
var_base_name = get_base_name(v)
var_dict[var_base_name] = v
return var_dict
def generate_update_ops(vars_):
"""Generates update ops and placeholders.
For each var, it generates a placeholder to feed in the new values.
Then it takes the mean of the inputs along dimension 0.
Args:
vars_: Vars for which the update ops will be generated.
Returns:
update_ops: A list of update ops.
dict_update_placeholders: A dict of var base name to its update-placeholder.
"""
update_ops = []
dict_update_placeholders = {}
for v in vars_:
# For every var in the scope, add a placeholder to feed in the new values.
# The placeholder may need to hold multiple values, this happens
# when updating the server from many clients.
var_in_shape = [None] + v.shape.as_list()
var_in_name = get_update_placeholder_name(v)
var_in = tf.placeholder(v.dtype, shape=var_in_shape, name=var_in_name)
var_in_mean = tf.reduce_mean(var_in, 0)
update_op = v.assign(var_in_mean)
update_ops.append(update_op)
dict_update_placeholders[get_base_name(v)] = var_in
return update_ops, dict_update_placeholders
def add_prefix(prefix, name):
"""Adds prefix to name."""
return "/".join((prefix, name))
def add_suffix(suffix, name):
"""Adds subfix to name."""
return "/".join((name, suffix))
def get_attribute_dict(class_instance):
"""Gets a dict of attributeds of a class instance."""
# first start by grabbing the Class items
attribute_dict = dict((x, y)
for x, y in class_instance.__class__.__dict__.items()
if x[:2] != "__")
# then update the class items with the instance items
attribute_dict.update(class_instance.__dict__)
return attribute_dict
| 28.395683
| 80
| 0.727135
|
53840797fa9f83c58be0cb1122c4f31c4c62dc94
| 4,841
|
py
|
Python
|
unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 119
|
2016-04-14T14:16:22.000Z
|
2022-03-08T20:24:38.000Z
|
unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 9
|
2017-04-26T20:48:42.000Z
|
2021-09-07T01:52:44.000Z
|
unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 51
|
2016-07-20T05:06:48.000Z
|
2022-03-09T01:20:53.000Z
|
# Assumptions: validate_crud_functions available
# Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port>
from __future__ import print_function
from mysqlsh import mysqlx
mySession = mysqlx.get_session(__uripwd)
ensure_schema_does_not_exist(mySession, 'js_shell_test')
schema = mySession.create_schema('js_shell_test')
# Creates a test collection and inserts data into it
collection = schema.create_collection('collection1')
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA01", "name": 'jack', "age": 17, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA02", "name": 'adam', "age": 15, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA03", "name": 'brian', "age": 14, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA04", "name": 'alma', "age": 13, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA05", "name": 'carol', "age": 14, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA06", "name": 'donna', "age": 16, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA07", "name": 'angel', "age": 14, "gender": 'male'}).execute()
# ------------------------------------------------
# collection.remove Unit Testing: Dynamic Behavior
# ------------------------------------------------
#@ CollectionRemove: valid operations after remove
crud = collection.remove('some_condition')
validate_crud_functions(crud, ['sort', 'limit', 'bind', 'execute'])
#@ CollectionRemove: valid operations after sort
crud = crud.sort(['name'])
validate_crud_functions(crud, ['limit', 'bind', 'execute'])
#@ CollectionRemove: valid operations after limit
crud = crud.limit(1)
validate_crud_functions(crud, ['bind', 'execute'])
#@ CollectionRemove: valid operations after bind
crud = collection.remove('name = :data').bind('data', 'donna')
validate_crud_functions(crud, ['bind', 'execute'])
#@ CollectionRemove: valid operations after execute
result = crud.execute()
validate_crud_functions(crud, ['limit', 'bind', 'execute'])
#@ Reusing CRUD with binding
print('Deleted donna:', result.affected_items_count, '\n')
result=crud.bind('data', 'alma').execute()
print('Deleted alma:', result.affected_items_count, '\n')
# ----------------------------------------------
# collection.remove Unit Testing: Error Conditions
# ----------------------------------------------
#@# CollectionRemove: Error conditions on remove
crud = collection.remove()
crud = collection.remove(' ')
crud = collection.remove(5)
crud = collection.remove('test = "2')
#@# CollectionRemove: Error conditions sort
crud = collection.remove('some_condition').sort()
crud = collection.remove('some_condition').sort(5)
crud = collection.remove('some_condition').sort([])
crud = collection.remove('some_condition').sort(['name', 5])
crud = collection.remove('some_condition').sort('name', 5)
#@# CollectionRemove: Error conditions on limit
crud = collection.remove('some_condition').limit()
crud = collection.remove('some_condition').limit('')
#@# CollectionRemove: Error conditions on bind
crud = collection.remove('name = :data and age > :years').bind()
crud = collection.remove('name = :data and age > :years').bind(5, 5)
crud = collection.remove('name = :data and age > :years').bind('another', 5)
#@# CollectionRemove: Error conditions on execute
crud = collection.remove('name = :data and age > :years').execute()
crud = collection.remove('name = :data and age > :years').bind('years', 5).execute()
# ---------------------------------------
# collection.remove Unit Testing: Execution
# ---------------------------------------
#@ CollectionRemove: remove under condition
//! [CollectionRemove: remove under condition]
result = collection.remove('age = 15').execute()
print('Affected Rows:', result.affected_items_count, '\n')
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
//! [CollectionRemove: remove under condition]
#@ CollectionRemove: remove with binding
//! [CollectionRemove: remove with binding]
result = collection.remove('gender = :heorshe').limit(2).bind('heorshe', 'male').execute()
print('Affected Rows:', result.affected_items_count, '\n')
//! [CollectionRemove: remove with binding]
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
#@ CollectionRemove: full remove
//! [CollectionRemove: full remove]
result = collection.remove('1').execute()
print('Affected Rows:', result.affected_items_count, '\n')
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
//! [CollectionRemove: full remove]
# Cleanup
mySession.drop_schema('js_shell_test')
mySession.close()
| 41.732759
| 126
| 0.685602
|
538622f0e20beb2e31f0c54850a3e278464da569
| 1,323
|
py
|
Python
|
indian-flag.py
|
aditya270520/indian-flag
|
65851eefdd229cca150d2bbe8fa61c9e06e120e0
|
[
"MIT"
] | null | null | null |
indian-flag.py
|
aditya270520/indian-flag
|
65851eefdd229cca150d2bbe8fa61c9e06e120e0
|
[
"MIT"
] | null | null | null |
indian-flag.py
|
aditya270520/indian-flag
|
65851eefdd229cca150d2bbe8fa61c9e06e120e0
|
[
"MIT"
] | null | null | null |
import turtle
turtle.bgcolor('black')
wn=turtle.Screen()
tr=turtle.Turtle()
move=1
tr.speed("fastest")
for i in range (360):
tr.write("ADITYA",'false','center',font=('Showcard gothic',50))
tr.penup()
tr.goto(-200,100)
tr.pendown()
tr.color("orange")
tr.right(move)
tr.forward(100)
tr.penup()
tr.color("white")
tr.pendown()
tr.right(30)
tr.forward(60)
tr.pendown()
tr.color("light green")
tr.left(10)
tr.forward(50)
tr.right(70)
tr.penup()
tr.pendown()
tr.color('light blue')
tr.forward(50)
tr.color('light green')
tr.pu()
tr.pd()
tr.color("light blue")
tr.forward(100)
tr.color('brown')
tr.forward(200)
tr.pu()
tr.pd()
tr.color('light green')
tr.circle(2)
tr.color('light blue')
tr.circle(4)
tr.pu()
tr.fd(20)
tr.pd()
tr.circle(6)
tr.pu()
tr.fd(40)
tr.pd()
tr.circle(8)
tr.pu()
tr.fd(80)
tr.pd()
tr.circle(10)
tr.pu()
tr.fd(120)
tr.pd()
tr.circle(20)
tr.color('yellow')
tr.circle(10)
tr.pu()
tr.pd()
tr.color('white')
tr.forward(150)
tr.color('red')
tr.fd(50)
tr.color ('blue')
tr.begin_fill()
tr.penup()
tr.home()
move=move+1
tr.penup()
tr.forward(50)
turtle.done()
| 17.64
| 67
| 0.543462
|
538700fd5d58b1e117fad14517de686aecad4c56
| 171
|
py
|
Python
|
leaf/rbac/model/__init__.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 119
|
2020-01-30T04:25:03.000Z
|
2022-03-27T07:15:45.000Z
|
leaf/rbac/model/__init__.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 8
|
2020-02-02T05:49:47.000Z
|
2021-01-25T03:31:09.000Z
|
leaf/rbac/model/__init__.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 11
|
2020-01-31T15:07:11.000Z
|
2021-03-24T03:47:48.000Z
|
""", , """
from .group import Group
from .user import User
from .user import UserIndex
from .auth import Authentication
from .accesspoint import AccessPoint
| 21.375
| 36
| 0.783626
|
53898a41d0b3979d97ed59d9bf3e85e1664af2da
| 103
|
py
|
Python
|
programacao basica/7.py
|
m-brito/Neps-Academy
|
0d962fb921d74c5f97f10fcdd8a0f464c0ccdb14
|
[
"MIT"
] | null | null | null |
programacao basica/7.py
|
m-brito/Neps-Academy
|
0d962fb921d74c5f97f10fcdd8a0f464c0ccdb14
|
[
"MIT"
] | null | null | null |
programacao basica/7.py
|
m-brito/Neps-Academy
|
0d962fb921d74c5f97f10fcdd8a0f464c0ccdb14
|
[
"MIT"
] | null | null | null |
bino = int(input())
cino = int(input())
if (bino+cino)%2==0:
print("Bino")
else:
print("Cino")
| 14.714286
| 20
| 0.563107
|
5389a92b434b224efc0d211777895516ff271648
| 1,023
|
py
|
Python
|
update_readme.py
|
CalmScout/LeetCode
|
62720934b5906e6b255c7e91d3a6fa1d713e4391
|
[
"MIT"
] | null | null | null |
update_readme.py
|
CalmScout/LeetCode
|
62720934b5906e6b255c7e91d3a6fa1d713e4391
|
[
"MIT"
] | null | null | null |
update_readme.py
|
CalmScout/LeetCode
|
62720934b5906e6b255c7e91d3a6fa1d713e4391
|
[
"MIT"
] | null | null | null |
"""
Script updates `README.md` with respect to files at ./easy and ./medium folders.
"""
import os
curr_dir = os.path.dirname(__file__)
with open(os.path.join(curr_dir, "README.md"), 'w') as readme:
readme.write("# LeetCode\nDeliberate practice in coding.\n")
langs = [l for l in os.listdir(curr_dir) if os.path.isdir(os.path.join(curr_dir, l)) and l[0] != '.']
for lang in langs:
readme.write("## {}\n".format(lang))
readme.write("### Easy\n")
easy = sorted(os.listdir(f"{curr_dir}/{lang}/easy"))
easy = [x.split("_")[0] for x in easy]
easy_solved = ""
for el in easy:
easy_solved += "{}, ".format(el)
readme.write(easy_solved[:-2] + "\n")
readme.write("### Medium\n")
medium = sorted(os.listdir(f"{curr_dir}/{lang}/medium"))
medium = [x.split("_")[0] for x in medium]
medium_solved = ""
for el in medium:
medium_solved += "{}, ".format(el)
readme.write(medium_solved[:-2] + '\n')
| 39.346154
| 105
| 0.572825
|
538a493d99ff3d905d532327c5a14418aa3d3b7e
| 10,614
|
py
|
Python
|
scripts/biotimesql.py
|
Jay-Iam/retriever
|
26e321cdb86fcb4cb78184c4bf5c0c6902a97d2c
|
[
"MIT"
] | null | null | null |
scripts/biotimesql.py
|
Jay-Iam/retriever
|
26e321cdb86fcb4cb78184c4bf5c0c6902a97d2c
|
[
"MIT"
] | 1
|
2019-02-23T14:11:34.000Z
|
2019-02-28T21:18:51.000Z
|
scripts/biotimesql.py
|
harshitbansal05/retriever
|
a5b849ee5ed3cc8a92f8aff93e5ec2ba54599213
|
[
"MIT"
] | 1
|
2020-01-06T11:37:54.000Z
|
2020-01-06T11:37:54.000Z
|
# -*- coding: utf-8 -*-
#retriever
import csv
from pkg_resources import parse_version
from retriever.lib.models import Table
from retriever.lib.templates import Script
try:
from retriever.lib.defaults import VERSION
try:
from retriever.lib.tools import open_fr, open_fw, open_csvw
except ImportError:
from retriever.lib.scripts import open_fr, open_fw
except ImportError:
from retriever import open_fr, open_fw, VERSION
SCRIPT = main()
| 39.022059
| 216
| 0.531939
|
538b05195aa3c62cda3499af221928cc57bfb7bb
| 1,423
|
py
|
Python
|
alipay/aop/api/domain/KbAdvertSettleBillResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/KbAdvertSettleBillResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/KbAdvertSettleBillResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 25.410714
| 75
| 0.599438
|
538b8d9cb91e4b908b2574c10cefedcf90ea344f
| 6,356
|
py
|
Python
|
day5.py
|
PLCoster/adventofcode2019
|
7aad1503dcf80b127b21191850ad9c93f91a602a
|
[
"MIT"
] | 1
|
2019-12-09T21:26:22.000Z
|
2019-12-09T21:26:22.000Z
|
day5.py
|
PLCoster/adventofcode2019
|
7aad1503dcf80b127b21191850ad9c93f91a602a
|
[
"MIT"
] | null | null | null |
day5.py
|
PLCoster/adventofcode2019
|
7aad1503dcf80b127b21191850ad9c93f91a602a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 11:06:59 2019
@author: Paul
"""
def read_data(filename):
"""
Reads csv file into a list, and converts to ints
"""
data = []
f = open(filename, 'r')
for line in f:
data += line.strip('\n').split(',')
int_data = [int(i) for i in data]
f.close()
return int_data
def run_intcode(program, input_int):
"""
Takes data, list of ints to run int_code on.
Returns list of ints after intcode program has been run.
Running Intcode program looks reads in the integers sequentially in sets of 4:
data[i] == Parameter Mode + Opcode (last two digits)
data[i+1] == Entry 1
data[i+2] == Entry 2
data[i+3] == Entry 3
If Opcode == 1, the value of the opcode at index location = entry 1 and 2
in the program are summed and stored at the index location of entry 3.
If Opcode == 2, the value of the opcode at index location = entry 1 and 2
in the program are multiplied and stored at the index location of entry 3.
If Opcode == 3, the the single integer (input) is saved to the position given
by index 1.
If Opcode == 4, the program outputs the value of its only parameter. E.g. 4,50
would output the value at address 50.
If Opcode == 5 and entry 1 is != 0, the intcode position moves to the index stored
at entry 2. Otherwise it does nothing.
If Opcode == 6 and entry 1 is 0, the intcode postion moves to the index stored
at entry 2. Otherwise it does nothing.
If Opcode == 7 and entry 1> entry 2, store 1 in position given by third param,
otherwise store 0 at position given by third param.
If Opcode == 7 and entry 1 = entry 2, store 1 in position given by third param,
otherwise store 0 at position given by third param.
If Opcode == 99, the program is completed and will stop running.
Parameters are digits to the left of the opcode, read left to right:
Parameter 0 -> Position mode - the entry is treated as an index location
Parameter 1 -> Immediate mode - the entry is treated as a value
"""
data = program[:]
answer = -1
params = [0, 0, 0]
param_modes = ['', '', '']
i = 0
while (i < len(program)):
#print("i = ", i)
# Determine Opcode and parameter codes:
opcode_str = "{:0>5d}".format(data[i])
opcode = int(opcode_str[3:])
param_modes[0] = opcode_str[2]
param_modes[1] = opcode_str[1]
param_modes[2] = opcode_str[0]
#print(opcode_str)
for j in range(2):
if param_modes[j] == '0':
try:
params[j] = data[data[i+j+1]]
except IndexError:
continue
else:
try:
params[j] = data[i+j+1]
except IndexError:
continue
#print(params, param_modes)
# If opcode is 1, add relevant entries:
if opcode == 1:
data[data[i+3]] = params[0] + params[1]
i += 4;
# If opcode is 2, multiply the relevant entries:
elif opcode == 2:
data[data[i+3]] = params[0] * params[1]
i += 4;
# If opcode is 3, store input value at required location.
elif opcode == 3:
data[data[i+1]] = input_int
i += 2;
# If opcode is 4, print out the input stored at specified location.
elif opcode == 4:
answer = data[data[i+1]]
print("Program output: ", data[data[i+1]])
i += 2;
# If the opcode is 5 and the next parameter !=0, jump forward
elif opcode == 5:
if params[0] != 0:
i = params[1]
else:
i += 3
# If the opcode is 6 and next parameter is 0, jump forward
elif opcode == 6:
if params[0] == 0:
i = params[1]
else:
i += 3
# If the opcode is 7, carry out less than comparison and store 1/0 at loc 3
elif opcode == 7:
if params[0] < params[1]:
data[data[i+3]] = 1
else:
data[data[i+3]] = 0
i += 4
# If the opcode is 8, carry out equality comparison and store 1/0 at loc 3
elif opcode == 8:
if params[0] == params[1]:
data[data[i+3]] = 1
else:
data[data[i+3]] = 0
i += 4
# If the opcode is 99, halt the intcode
elif opcode == 99:
print("Program ended by halt code")
break
# If opcode is anything else something has gone wrong!
else:
print("Problem with the Program")
break
return data, answer
program = read_data("day5input.txt")
#print(program)
result1, answer1 = run_intcode(program, 1)
#print(result1)
print("Part 1: Answer is: ", answer1)
result2, answer2 = run_intcode(program, 5)
#print(result2)
print("Part 2: Answer is: ", answer2)
#test_program = [1002,4,3,4,33]
#test_program2 = [3,0,4,0,99]
#test_program3 = [1101,100,-1,4,0]
#test_program4 = [3,9,8,9,10,9,4,9,99,-1,8] # 1 if input = 8, 0 otherwise
#test_program5 = [3,9,7,9,10,9,4,9,99,-1,8] # 1 if input < 8, 0 otherwise
#test_program6 = [3,3,1108,-1,8,3,4,3,99] # 1 if input = 8, 0 otherwise
#test_program7 = [3,3,1107,-1,8,3,4,3,99] # 1 if input < 8, 0 otherwise
#test_program8 = [3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9] # 0 if input = 0, 1 otherwise
#test_program9 = [3,3,1105,-1,9,1101,0,0,12,4,12,99,1] # 0 if input = 0, 1 otherwise
#test_program10 = [3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,
#36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,
#1105,1,46,98,99] # 999 if input < 8, 1000 if input = 8, 1001 if input > 8
| 34.73224
| 92
| 0.522498
|
538bf59cdb6e50d49c8fe6d1f6a72767b79df904
| 3,333
|
py
|
Python
|
textvis/textprizm/models.py
|
scclab/textvisdrg-prototype
|
e912e4441b0e42e0f6c477edd03227b93b8ace73
|
[
"MIT"
] | null | null | null |
textvis/textprizm/models.py
|
scclab/textvisdrg-prototype
|
e912e4441b0e42e0f6c477edd03227b93b8ace73
|
[
"MIT"
] | null | null | null |
textvis/textprizm/models.py
|
scclab/textvisdrg-prototype
|
e912e4441b0e42e0f6c477edd03227b93b8ace73
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
| 28.245763
| 101
| 0.659166
|
538cf8a863a1cdd537656657d4741a5309d4d759
| 8,079
|
py
|
Python
|
test/test_purchasing.py
|
jacob22/accounting
|
e2fceea880e3f056703ba97b6cf52b73cd7af93b
|
[
"Apache-2.0"
] | null | null | null |
test/test_purchasing.py
|
jacob22/accounting
|
e2fceea880e3f056703ba97b6cf52b73cd7af93b
|
[
"Apache-2.0"
] | null | null | null |
test/test_purchasing.py
|
jacob22/accounting
|
e2fceea880e3f056703ba97b6cf52b73cd7af93b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if (sys.version_info >=(3, 0)):
PYT3 = True
import urllib.request
import urllib.parse
else:
PYT3 = False
import urllib2
import urlparse
import contextlib
import json
import os
import py
import subprocess
import time
import uuid
from . import support
here = os.path.dirname(__file__)
| 39.409756
| 89
| 0.564179
|
538d31ed98e59299719777fcb1330ca052cef24d
| 1,455
|
py
|
Python
|
iot/downstream/fog_processes.py
|
SENERGY-Platform/senergy-connector
|
7198f6b2ec08b3c09c53755f259a2711921fdcbe
|
[
"Apache-2.0"
] | null | null | null |
iot/downstream/fog_processes.py
|
SENERGY-Platform/senergy-connector
|
7198f6b2ec08b3c09c53755f259a2711921fdcbe
|
[
"Apache-2.0"
] | null | null | null |
iot/downstream/fog_processes.py
|
SENERGY-Platform/senergy-connector
|
7198f6b2ec08b3c09c53755f259a2711921fdcbe
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ("Router", )
from ..util import conf, get_logger, mqtt
import threading
import cc_lib
logger = get_logger(__name__.split(".", 1)[-1])
| 31.630435
| 96
| 0.648797
|
538d3918006c09254385e7ece91e4c11554aa399
| 462
|
py
|
Python
|
django_project/user_profile/migrations/0003_order_payment_method.py
|
aliyaandabekova/DJANGO_PROJECT
|
7b94f80fa56acf936da014aa5d91da79457bf4eb
|
[
"MIT"
] | null | null | null |
django_project/user_profile/migrations/0003_order_payment_method.py
|
aliyaandabekova/DJANGO_PROJECT
|
7b94f80fa56acf936da014aa5d91da79457bf4eb
|
[
"MIT"
] | null | null | null |
django_project/user_profile/migrations/0003_order_payment_method.py
|
aliyaandabekova/DJANGO_PROJECT
|
7b94f80fa56acf936da014aa5d91da79457bf4eb
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-05-27 13:34
from django.db import migrations, models
| 24.315789
| 116
| 0.603896
|
538daa45b22d9013e84ef526505b8753b513ae7f
| 2,522
|
py
|
Python
|
day07/test.py
|
mpirnat/aoc2016
|
1aec59aca01541d0d1c30f85d4668959c82fa35c
|
[
"MIT"
] | null | null | null |
day07/test.py
|
mpirnat/aoc2016
|
1aec59aca01541d0d1c30f85d4668959c82fa35c
|
[
"MIT"
] | null | null | null |
day07/test.py
|
mpirnat/aoc2016
|
1aec59aca01541d0d1c30f85d4668959c82fa35c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import unittest
from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings
from day07 import supports_tls, count_tls_addresses
from day07 import find_abas, supports_ssl, count_ssl_addresses
if __name__ == '__main__':
unittest.main()
| 27.714286
| 81
| 0.635607
|
538e1ba9c8f2894b4bdf8950c5cd9a8fa42ed826
| 4,787
|
py
|
Python
|
rlnets/PG.py
|
HTRPOCODES/HTRPO-v2
|
7e085e8077e6caa38d192bbd33b41c49b36ad6a6
|
[
"MIT"
] | 7
|
2020-02-24T15:05:20.000Z
|
2021-08-24T02:27:13.000Z
|
rlnets/PG.py
|
ZhangHanbo/Deep-Reinforcement-Learning-Package
|
10ab418fcb4807747ebe162920f3df1e80b80a2a
|
[
"MIT"
] | null | null | null |
rlnets/PG.py
|
ZhangHanbo/Deep-Reinforcement-Learning-Package
|
10ab418fcb4807747ebe162920f3df1e80b80a2a
|
[
"MIT"
] | 1
|
2020-04-11T13:08:23.000Z
|
2020-04-11T13:08:23.000Z
|
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from basenets.MLP import MLP
from basenets.Conv import Conv
from torch import nn
# TODO: support multi-layer value function in which action is concat before the final layer
| 34.192857
| 91
| 0.48569
|
538e7c69b579d9dbd9a344fd3df293fc4cfca562
| 10,057
|
py
|
Python
|
tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
|
m4rkl1u/tensorflow
|
90a8825c7ae9719e8969d45040b4155b0e7de130
|
[
"Apache-2.0"
] | 2
|
2018-12-05T10:58:40.000Z
|
2019-01-24T11:36:01.000Z
|
tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
|
m4rkl1u/tensorflow
|
90a8825c7ae9719e8969d45040b4155b0e7de130
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
|
m4rkl1u/tensorflow
|
90a8825c7ae9719e8969d45040b4155b0e7de130
|
[
"Apache-2.0"
] | 2
|
2019-02-26T16:21:15.000Z
|
2020-12-04T17:48:17.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
if __name__ == "__main__":
test.main()
| 42.079498
| 80
| 0.704484
|
538ed9ab23e9e71ee700c89f6a7e07b38fae61a0
| 50,485
|
py
|
Python
|
cloudroast/objectstorage/smoke/object_smoke.py
|
RULCSoft/cloudroast
|
30f0e64672676c3f90b4a582fe90fac6621475b3
|
[
"Apache-2.0"
] | null | null | null |
cloudroast/objectstorage/smoke/object_smoke.py
|
RULCSoft/cloudroast
|
30f0e64672676c3f90b4a582fe90fac6621475b3
|
[
"Apache-2.0"
] | null | null | null |
cloudroast/objectstorage/smoke/object_smoke.py
|
RULCSoft/cloudroast
|
30f0e64672676c3f90b4a582fe90fac6621475b3
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import calendar
import time
import zlib
from hashlib import md5
import unittest
from cafe.drivers.unittest.decorators import (
DataDrivenFixture, data_driven_test)
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudroast.objectstorage.generators import (
ObjectDatasetList, CONTENT_TYPES)
CONTAINER_DESCRIPTOR = 'object_smoke_test'
STATUS_CODE_MSG = ('{method} expected status code {expected}'
' received status code {received}')
| 34.273591
| 79
| 0.605051
|
538f0d9adeec1b1a9f1d17d56827c035463ad1c5
| 1,412
|
py
|
Python
|
ceph/tests/conftest.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | 1
|
2021-03-24T13:00:14.000Z
|
2021-03-24T13:00:14.000Z
|
ceph/tests/conftest.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
ceph/tests/conftest.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pytest
from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckDockerLogs
from datadog_checks.dev.subprocess import run_command
from .common import BASIC_CONFIG, HERE
E2E_METADATA = {
'start_commands': [
'apt-get update',
'apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y docker.io',
],
'docker_volumes': ['/var/run/docker.sock:/var/run/docker.sock'],
}
| 32.837209
| 115
| 0.659348
|
538f4e290b42893ff7be5c3f3a19a555501eb1e6
| 3,025
|
py
|
Python
|
federation/hostmeta/fetchers.py
|
weex/federation
|
01357aacb04b076442ce5f803a0fc65df5a74d09
|
[
"BSD-3-Clause"
] | 93
|
2016-11-26T10:52:13.000Z
|
2022-01-15T20:07:35.000Z
|
federation/hostmeta/fetchers.py
|
weex/federation
|
01357aacb04b076442ce5f803a0fc65df5a74d09
|
[
"BSD-3-Clause"
] | 75
|
2016-10-18T10:15:44.000Z
|
2019-10-05T22:16:32.000Z
|
federation/hostmeta/fetchers.py
|
weex/federation
|
01357aacb04b076442ce5f803a0fc65df5a74d09
|
[
"BSD-3-Clause"
] | 9
|
2017-04-08T08:03:45.000Z
|
2021-09-13T22:00:48.000Z
|
import json
from typing import Dict, Optional
import requests
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document,
parse_matrix_document, parse_misskey_document)
from federation.utils.network import fetch_document
HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1
| 28.809524
| 110
| 0.668099
|
538fd4b4cff424f1346a608bba50033518ef9ea5
| 2,582
|
py
|
Python
|
features/analysis_features.py
|
iag0g0mes/t2_fis_driving_style
|
7f62ac3e67e65e7bd1273a2f845eb05820e95b70
|
[
"Apache-2.0"
] | 5
|
2021-04-20T16:03:37.000Z
|
2022-03-11T00:13:11.000Z
|
features/analysis_features.py
|
iag0g0mes/t2_fis_driving_style
|
7f62ac3e67e65e7bd1273a2f845eb05820e95b70
|
[
"Apache-2.0"
] | 1
|
2021-04-21T02:35:38.000Z
|
2021-04-21T12:54:14.000Z
|
features/analysis_features.py
|
iag0g0mes/t2fis_driving_style
|
7f62ac3e67e65e7bd1273a2f845eb05820e95b70
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from typing import Any, Dict, List, Tuple, NoReturn
import argparse
import os
def parse_arguments() -> Any:
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
default="",
type=str,
help="Directory where the features (npy files) are saved",
)
parser.add_argument("--mode",
required=True,
type=str,
help="train/val/test/sample",
choices=['train', 'test', 'val','sample'])
parser.add_argument("--obs_len",
default=2,
type=int,
help="Observed length of the trajectory in seconds",
choices=[1,2,3,4,5])
parser.add_argument("--filter",
default='ekf',
type=str,
help="Filter to process the data noise. (ekf/none/ekf-savgol/savgol",
choices=['ekf', 'none', 'ekf-savgol', 'savgol'])
return parser.parse_args()
if __name__== '__main__':
#_filters = ['none', 'ekf', 'savgol', 'ekf-savgol']
#_modes = ['train', 'val', 'test', 'sample']
#_obs_len = [2,5]
#seg = _obs_len[0]
#mode = _modes[3]
#filter_name = _filters[0]
args = parse_arguments()
if args.mode == 'test':
args.obs_len = 2
assert os.path.exists(args.data_dir),\
f'[Analysis][main][ERROR] data_dir not found!({args.data_dir})'
data_file = 'features_{}_{}s_{}.npy'.format(args.mode,
args.obs_len,
args.filter)
assert os.path.exists(os.path.join(args.data_dir, data_file)),\
f'[Analysis][main][ERROR] data_file not found!({data_file})'
print ('[Analysis] loading dataset....')
# (m, 4)
# [mean_v, mean_acc, mean_deac, std_jy]
data = np.load(os.path.join(args.data_dir,data_file))
print ('[Analysis] mode:{} | filter:{} | obs_len:{}'.format(args.mode,
args.filter,
args.obs_len))
print ('[Analysis] data shape:{}'.format(data.shape))
print ('[Analysis] stats:')
stats(data)
| 23.907407
| 88
| 0.606119
|
538fed081c6f7c33b40d25f1c7cac9cd82761148
| 2,916
|
py
|
Python
|
python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import freezegun
import mock
import oslo_messaging as om
from watcher.common import rpc
from watcher import notifications
from watcher.objects import service as w_service
from watcher.tests.db import base
from watcher.tests.objects import utils
| 37.384615
| 79
| 0.607339
|
53901ad02fc361ceba4528f28baf2995acc82248
| 1,422
|
py
|
Python
|
leetcode/medium/best-time-to-buy-and-sell-stock-ii.py
|
rainzhop/cumulus-tank
|
09ebc7858ea53630e30606945adfea856a80faa3
|
[
"MIT"
] | null | null | null |
leetcode/medium/best-time-to-buy-and-sell-stock-ii.py
|
rainzhop/cumulus-tank
|
09ebc7858ea53630e30606945adfea856a80faa3
|
[
"MIT"
] | null | null | null |
leetcode/medium/best-time-to-buy-and-sell-stock-ii.py
|
rainzhop/cumulus-tank
|
09ebc7858ea53630e30606945adfea856a80faa3
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/
#
# Say you have an array for which the ith element is the price of a given stock on day i.
#
# Design an algorithm to find the maximum profit.
# You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times).
# However, you may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
if __name__ == '__main__':
prices = [8,9,2,5]
s = Solution()
print s.maxProfit(prices)
| 32.318182
| 123
| 0.524613
|
5391eb5d4685629e3d8228f4e55d8a98857010ab
| 7,787
|
py
|
Python
|
django_loci/tests/base/test_admin.py
|
yashikajotwani12/django-loci
|
2c0bcb33f4a56d559f798e37fd17b2143b912ce4
|
[
"BSD-3-Clause"
] | 205
|
2017-11-17T10:35:02.000Z
|
2022-03-29T18:50:32.000Z
|
django_loci/tests/base/test_admin.py
|
yashikajotwani12/django-loci
|
2c0bcb33f4a56d559f798e37fd17b2143b912ce4
|
[
"BSD-3-Clause"
] | 98
|
2017-11-20T16:03:27.000Z
|
2022-01-19T21:12:47.000Z
|
django_loci/tests/base/test_admin.py
|
yashikajotwani12/django-loci
|
2c0bcb33f4a56d559f798e37fd17b2143b912ce4
|
[
"BSD-3-Clause"
] | 46
|
2017-11-20T23:25:26.000Z
|
2022-02-10T05:06:16.000Z
|
import json
import os
import responses
from django.urls import reverse
from .. import TestAdminMixin, TestLociMixin
| 38.549505
| 99
| 0.617054
|
539267e2204960bd72eacaf1dd33c30f2edce8d2
| 1,270
|
py
|
Python
|
dca_models/deform_offsets_module.py
|
vatsalag99/Deformable-Channel-Attention
|
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
|
[
"MIT"
] | 1
|
2020-12-01T20:57:09.000Z
|
2020-12-01T20:57:09.000Z
|
dca_models/deform_offsets_module.py
|
vatsalag99/Deformable-Channel-Attention
|
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
|
[
"MIT"
] | null | null | null |
dca_models/deform_offsets_module.py
|
vatsalag99/Deformable-Channel-Attention
|
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch.nn.parameter import Parameter
from einops import rearrange, reduce, repeat
| 35.277778
| 82
| 0.640157
|
539324c139f4acda8b0dbb87e42e77a126f0fc1b
| 155
|
py
|
Python
|
tests/__init__.py
|
egor43/PyImageComparsion
|
5270f5646c40391cc5ac225305d7be9b0b7de140
|
[
"BSD-2-Clause"
] | null | null | null |
tests/__init__.py
|
egor43/PyImageComparsion
|
5270f5646c40391cc5ac225305d7be9b0b7de140
|
[
"BSD-2-Clause"
] | null | null | null |
tests/__init__.py
|
egor43/PyImageComparsion
|
5270f5646c40391cc5ac225305d7be9b0b7de140
|
[
"BSD-2-Clause"
] | null | null | null |
from . import test_helpers
from . import test_image_opener
from . import test_image_metrick
from . import test_compare_tools
from . import test_compare_api
| 31
| 32
| 0.845161
|
5395cbb4a78f713d4a2814a8d200c21fd6a061c3
| 485
|
py
|
Python
|
core/urls.py
|
donnellan0007/blog
|
02c8850688422e3b685ffac10c32bf3e7a7c2e7a
|
[
"MIT"
] | null | null | null |
core/urls.py
|
donnellan0007/blog
|
02c8850688422e3b685ffac10c32bf3e7a7c2e7a
|
[
"MIT"
] | null | null | null |
core/urls.py
|
donnellan0007/blog
|
02c8850688422e3b685ffac10c32bf3e7a7c2e7a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from .views import index, email, post_detail, posts, hot_takes, take_detail
from . import views
app_name = "core"
urlpatterns = [
path('',views.index,name="index"),
path('email/',views.email,name="email"),
path('post/<slug>/',views.post_detail,name='post'),
path('posts/',views.posts,name='posts'),
path('takes/',views.hot_takes,name='takes'),
path('take/<slug>/',views.take_detail,name='take'),
]
| 32.333333
| 75
| 0.68866
|
53971f3415b6410a3e353dbb14eb4ceab3a8c1a1
| 30
|
py
|
Python
|
griddy/__init__.py
|
pgolding/pandas-grid
|
0f80db1511097656496dee503d7bb281b97b8bdc
|
[
"BSD-2-Clause"
] | 1
|
2018-01-03T11:34:08.000Z
|
2018-01-03T11:34:08.000Z
|
griddy/__init__.py
|
pgolding/pandas-grid
|
0f80db1511097656496dee503d7bb281b97b8bdc
|
[
"BSD-2-Clause"
] | null | null | null |
griddy/__init__.py
|
pgolding/pandas-grid
|
0f80db1511097656496dee503d7bb281b97b8bdc
|
[
"BSD-2-Clause"
] | null | null | null |
from .grid import render_table
| 30
| 30
| 0.866667
|
5398b81471428ab8f27e820b3cfc198272b782d9
| 1,573
|
py
|
Python
|
utils/dbconn.py
|
iamvishnuks/Xmigrate
|
f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7
|
[
"MIT"
] | 4
|
2020-05-26T11:19:02.000Z
|
2020-08-06T11:12:34.000Z
|
utils/dbconn.py
|
iamvishnuks/Xmigrate
|
f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7
|
[
"MIT"
] | 46
|
2022-02-19T09:11:11.000Z
|
2022-03-31T15:42:50.000Z
|
utils/dbconn.py
|
iamvishnuks/Xmigrate
|
f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7
|
[
"MIT"
] | 2
|
2019-12-20T12:30:33.000Z
|
2020-01-02T22:01:25.000Z
|
from mongoengine import *
from dotenv import load_dotenv
from os import getenv
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from cassandra.query import ordered_dict_factory
from model.discover import *
from model.blueprint import *
from model.disk import *
from model.storage import *
from model.project import *
from model.network import *
from model.user import *
load_dotenv()
cass_db = getenv("CASS_DB")
cass_password = getenv("CASS_PASSWORD")
cass_user = getenv("CASS_USER")
| 33.468085
| 91
| 0.760966
|
53990709c9653095e01a4f58d04ac79451da6d42
| 3,921
|
py
|
Python
|
src/syft/lib/__init__.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
src/syft/lib/__init__.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
src/syft/lib/__init__.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
# stdlib
import importlib
import sys
from typing import Any
from typing import Any as TypeAny
from typing import Dict as TypeDict
from typing import Optional
# third party
from packaging import version
# syft relative
from ..ast.globals import Globals
from ..lib.python import create_python_ast
from ..lib.torch import create_torch_ast
from ..lib.torchvision import create_torchvision_ast
from ..logger import critical
from ..logger import traceback_and_raise
from .misc import create_union_ast
# now we need to load the relevant frameworks onto the node
lib_ast = create_lib_ast(None)
| 35.972477
| 88
| 0.665902
|
5399748c26ec62ec3b268e3e29283c1ccc28b398
| 8,742
|
py
|
Python
|
scripts/griffin_GC_counts.py
|
GavinHaLab/Griffin
|
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-09-08T05:43:15.000Z
|
2021-09-08T05:43:15.000Z
|
scripts/griffin_GC_counts.py
|
GavinHaLab/Griffin
|
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
scripts/griffin_GC_counts.py
|
GavinHaLab/Griffin
|
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
import pandas as pd
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool
# In[ ]:
# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# map_q = 20
# size_range = [15,500]
# CPU = 4
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed foramt', required=True)
parser.add_argument('--ref_seq',help='reference sequence (fasta format)',required=True)
parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--map_q',help='minimum mapping quality for reads to be considered',type=int,required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
parser.add_argument('--CPU',help='number of CPU for parallelizing', type=int, required=True)
args = parser.parse_args()
bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mapable_path=args.mapable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
map_q = args.map_q
size_range = args.size_range
CPU = args.CPU
# In[ ]:
print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_regions = "'+mapable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmap_q = '+str(map_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))
# In[ ]:
mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file',out_file)
# In[ ]:
#create a directory for the GC data
if not os.path.exists(out_dir +'/'+mapable_name):
os.mkdir(out_dir +'/'+mapable_name)
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/')
# In[ ]:
#import filter
mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None)
#remove non standard chromosomes and X and Y
chroms = ['chr'+str(m) for m in range(1,23)]
mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)]
print('chroms:', chroms)
print('number_of_intervals:',len(mapable_intervals))
sys.stdout.flush()
# In[ ]:
# In[ ]:
start_time = time.time()
p = Pool(processes=CPU) #use the available CPU
sublists = np.array_split(mapable_intervals,CPU) #split the list into sublists, one per CPU
GC_dict_list = p.map(collect_reads, sublists, 1)
# In[ ]:
all_GC_df = pd.DataFrame()
for i,GC_dict in enumerate(GC_dict_list):
GC_df = pd.DataFrame()
for length in GC_dict.keys():
current = pd.Series(GC_dict[length]).reset_index()
current = current.rename(columns={'index':'num_GC',0:'number_of_fragments'})
current['length']=length
current = current[['length','num_GC','number_of_fragments']]
GC_df = GC_df.append(current, ignore_index=True)
GC_df = GC_df.set_index(['length','num_GC'])
all_GC_df[i] = GC_df['number_of_fragments']
del(GC_df,GC_dict)
all_GC_df = all_GC_df.sum(axis=1)
all_GC_df = pd.DataFrame(all_GC_df).rename(columns = {0:'number_of_fragments'})
all_GC_df = all_GC_df.reset_index()
all_GC_df.to_csv(out_file,sep='\t',index=False)
# In[ ]:
print('done')
# In[ ]:
# In[ ]:
# In[ ]:
| 33.366412
| 241
| 0.636811
|
5399b6c7047b5726e42c8b72d0dc40c3dfb01acf
| 4,372
|
py
|
Python
|
task2/04-task2-upload-dim-tables.py
|
canovasjm/InterviewProject_JuanCanovas
|
6ff385c66664328cea0678454560e89e44851e24
|
[
"MIT"
] | null | null | null |
task2/04-task2-upload-dim-tables.py
|
canovasjm/InterviewProject_JuanCanovas
|
6ff385c66664328cea0678454560e89e44851e24
|
[
"MIT"
] | null | null | null |
task2/04-task2-upload-dim-tables.py
|
canovasjm/InterviewProject_JuanCanovas
|
6ff385c66664328cea0678454560e89e44851e24
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 18:17:07 2021
@author: jm
"""
# %% required libraries
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
# %% connect to DB
# create connection using pymssql
engine = create_engine('mssql+pymssql://sa:<YourStrong@Passw0rd>@localhost:1433/rga')
connection = engine.connect()
# %% read data sets from where I will build the dimension tables
# read employee roster data
employee_roster = pd.read_excel("datasources/Employee_Roster_Data.xlsx", sheet_name = 'Sheet1')
# read skills data
skills = pd.read_excel("datasources/skills.xlsx", sheet_name = "Sheet1")
# read hours data
hours = pd.read_excel("datasources/hours.xlsx", sheet_name = "Sheet1")
# %% dimensions created from source employee_roster
# %% create DIM_Currency
# get unique values
currencies = sorted(employee_roster['Currency'].unique())
# create a data frame
DIM_Currency = pd.DataFrame({'id_currency': (np.arange(len(currencies)) + 1), 'currency': currencies})
# send data frame to DB
DIM_Currency.to_sql('DIM_Currency', con = connection, if_exists = 'append', index = False)
# %% create DIM_Department
# get unique values
departments = sorted(pd.concat([employee_roster['Department'], skills['Department']], axis = 0).unique())
# create a data frame
DIM_Department = pd.DataFrame({'id_department': (np.arange(len(departments)) + 1), 'department': departments})
# send data frame to DB
DIM_Department.to_sql('DIM_Department', con = connection, if_exists = 'append', index = False)
# %% create DIM_Gender
# get unique values
genders = sorted(pd.concat([employee_roster['Gender'], skills['Gender']], axis = 0).unique())
# create a data frame
DIM_Gender = pd.DataFrame({'id_gender': (np.arange(len(genders)) + 1), 'gender': genders})
# send data frame to DB
DIM_Gender.to_sql('DIM_Gender', con = connection, if_exists = 'append', index = False)
# %% create DIM_User
# check if 'UserId' values in 'skills' are in 'User_ID' in 'employee_roster'
# we get 20134 'True' values, meaning that all 'UserId' in 'skills' are already
# in 'User_ID' in employee_roster
users_check_1 = np.isin(skills['UserId'], employee_roster['User_ID']).sum()
# check if 'UserId' values in 'hours' are in 'User_ID' in 'employee_roster'
# we get 7659 'True' values, meaning that NOT all 'UserId' in 'hours' are already
# in 'User_ID' in employee_roster
users_check_2 = np.isin(hours['UserId'], employee_roster['User_ID']).sum()
# get unique values
users = sorted(pd.concat([employee_roster['User_ID'], skills['UserId'], hours['UserId']], axis = 0).unique())
# create a data frame to use pd.merge()
df_users = pd.DataFrame({'User_ID': users})
# left join 'df_user' with 'employee_roster' on 'UserID'
users_final = pd.merge(df_users, employee_roster, on = 'User_ID', how ='left')
# select only columns I need
users_final = users_final[['User_ID', 'Email_ID', 'Fullname']]
# rename columns
users_final.rename(columns = {'User_ID': 'id_user', 'Email_ID': 'id_email', 'Fullname': 'fullname'}, inplace = True)
# send data frame to DB
users_final.to_sql('DIM_User', con = connection, if_exists = 'append', index = False)
# %% dimensions created from source skills
# %% create DIM_AttributeGroup
# get unique values
att_group = sorted(skills['Attribute Group'].unique())
# create a data frame
DIM_AttributeGroup = pd.DataFrame({'id_att_group': (np.arange(len(att_group)) + 1), 'attribute_group': att_group})
# send data frame to DB
DIM_AttributeGroup.to_sql('DIM_AttributeGroup', con = connection, if_exists = 'append', index = False)
# %% create DIM_AttributeSubGroup
# get unique values
att_sub_group = sorted(skills['Attribute Sub-Group'].unique())
# create a data frame
DIM_AttributeSubGroup = pd.DataFrame({'id_att_sub_group': (np.arange(len(att_sub_group)) + 1), 'attribute_sub_group': att_sub_group})
# send data frame to DB
DIM_AttributeSubGroup.to_sql('DIM_AttributeSubGroup', con = connection, if_exists = 'append', index = False)
# %% create DIM_AttributeName
# get unique values
att_name = sorted(skills['Attribute Name'].unique())
# create a data frame
DIM_AttributeName = pd.DataFrame({'id_att_name': (np.arange(len(att_name)) + 1), 'attribute_name': att_name})
# send data frame to DB
DIM_AttributeName.to_sql('DIM_AttributeName', con = connection, if_exists = 'append', index = False)
| 34.698413
| 133
| 0.730101
|
539a58166d003e0486119a3a4445a376e8149b19
| 6,897
|
py
|
Python
|
cogs/server.py
|
vikasbaghel1001/Kanna-Chan
|
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
|
[
"MIT"
] | 5
|
2021-10-17T07:29:42.000Z
|
2022-03-23T11:01:58.000Z
|
cogs/server.py
|
vikasbaghel1001/Kanna-Chan
|
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
|
[
"MIT"
] | 1
|
2021-10-17T08:14:09.000Z
|
2021-10-17T08:14:09.000Z
|
cogs/server.py
|
vikasbaghel1001/Kanna-Chan
|
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
|
[
"MIT"
] | 4
|
2021-07-12T04:20:22.000Z
|
2021-10-01T03:29:50.000Z
|
import discord
from discord.ext import commands
arrow = "<a:right:877425183839891496>"
kwee = "<:kannawee:877036162122924072>"
kdance = "<a:kanna_dance:877038778798207016>"
kbored = "<:kanna_bored:877036162827583538>"
ksmug = "<:kanna_smug:877038777896427560>"
heart = "<a:explosion_heart:877426228775227392>"
| 54.307087
| 636
| 0.641438
|
539b64bd9ed2668ae9a573fa432b5a05793c8032
| 109
|
py
|
Python
|
test/run/t344.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
test/run/t344.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
test/run/t344.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
for ch in "Hello world!":
d = ord(ch)
h = hex(d)
o = oct(d)
b = bin(d)
print ch, d, h, o, b
| 12.111111
| 25
| 0.449541
|
539b84ee2616f61a9bf370a8a3b1b21465720328
| 10,016
|
py
|
Python
|
paho/mqtt/subscribe.py
|
RandomGamer342/TTM4115-plantsensor
|
e63c34160d284bb6fd26563eeba949d54026348b
|
[
"MIT"
] | 8
|
2017-01-17T02:25:08.000Z
|
2019-07-24T13:39:55.000Z
|
python/lib/python3.4/site-packages/paho/mqtt/subscribe.py
|
nidiascampos/smartgreen
|
d574d90918702ac3bd383ed77d673f871576c5b0
|
[
"Apache-2.0"
] | 5
|
2018-11-20T16:57:21.000Z
|
2019-03-17T19:59:52.000Z
|
python/lib/python3.4/site-packages/paho/mqtt/subscribe.py
|
nidiascampos/smartgreen
|
d574d90918702ac3bd383ed77d673f871576c5b0
|
[
"Apache-2.0"
] | 9
|
2017-01-19T03:56:05.000Z
|
2020-03-10T04:03:20.000Z
|
# Copyright (c) 2016 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
# http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial API and implementation
"""
This module provides some helper functions to allow straightforward subscribing
to topics and retrieving messages. The two functions are simple(), which
returns one or messages matching a set of topics, and callback() which allows
you to pass a callback for processing of messages.
"""
import paho.mqtt.client as paho
import paho.mqtt as mqtt
import ssl
def _on_connect(c, userdata, flags, rc):
"""Internal callback"""
if rc != 0:
raise mqtt.MQTTException(paho.connack_string(rc))
if type(userdata['topics']) is list:
for t in userdata['topics']:
c.subscribe(t, userdata['qos'])
else:
c.subscribe(userdata['topics'], userdata['qos'])
def _on_message_callback(c, userdata, message):
"""Internal callback"""
userdata['callback'](c, userdata['userdata'], message)
def _on_message_simple(c, userdata, message):
"""Internal callback"""
if userdata['msg_count'] == 0:
return
# Don't process stale retained messages if 'retained' was false
if userdata['retained'] == False and message.retain == True:
return
userdata['msg_count'] = userdata['msg_count'] - 1
if userdata['messages'] is None and userdata['msg_count'] == 0:
userdata['messages'] = message
c.disconnect()
return
userdata['messages'].append(message)
if userdata['msg_count'] == 0:
c.disconnect()
def callback(callback, topics, qos=0, userdata=None, hostname="localhost",
port=1883, client_id="", keepalive=60, will=None, auth=None, tls=None,
protocol=paho.MQTTv311, transport="tcp"):
"""Subscribe to a list of topics and process them in a callback function.
This function creates an MQTT client, connects to a broker and subscribes
to a list of topics. Incoming messages are processed by the user provided
callback. This is a blocking function and will never return.
callback : function of the form "on_message(client, userdata, message)" for
processing the messages received.
topics : either a string containing a single topic to subscribe to, or a
list of topics to subscribe to.
qos : the qos to use when subscribing. This is applied to all topics.
userdata : passed to the callback
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
transport : set to "tcp" to use the default setting of transport which is
raw TCP. Set to "websockets" to use WebSockets as the transport.
"""
if qos < 0 or qos > 2:
raise ValueError('qos must be in the range 0-2')
callback_userdata = {
'callback':callback,
'topics':topics,
'qos':qos,
'userdata':userdata}
client = paho.Client(client_id=client_id,
userdata=callback_userdata, protocol=protocol, transport=transport)
client.on_message = _on_message_callback
client.on_connect = _on_connect
if auth is not None:
username = auth['username']
try:
password = auth['password']
except KeyError:
password = None
client.username_pw_set(username, password)
if will is not None:
will_topic = will['topic']
try:
will_payload = will['payload']
except KeyError:
will_payload = None
try:
will_qos = will['qos']
except KeyError:
will_qos = 0
try:
will_retain = will['retain']
except KeyError:
will_retain = False
client.will_set(will_topic, will_payload, will_qos, will_retain)
if tls is not None:
ca_certs = tls['ca_certs']
try:
certfile = tls['certfile']
except KeyError:
certfile = None
try:
keyfile = tls['keyfile']
except KeyError:
keyfile = None
try:
tls_version = tls['tls_version']
except KeyError:
tls_version = ssl.PROTOCOL_SSLv23;
try:
ciphers = tls['ciphers']
except KeyError:
ciphers = None
client.tls_set(ca_certs, certfile, keyfile, tls_version=tls_version,
ciphers=ciphers)
client.connect(hostname, port, keepalive)
client.loop_forever()
def simple(topics, qos=0, msg_count=1, retained=True, hostname="localhost", port=1883,
client_id="", keepalive=60, will=None, auth=None, tls=None,
protocol=paho.MQTTv311, transport="tcp"):
"""Subscribe to a list of topics and return msg_count messages.
This function creates an MQTT client, connects to a broker and subscribes
to a list of topics. Once "msg_count" messages have been received, it
disconnects cleanly from the broker and returns the messages.
topics : either a string containing a single topic to subscribe to, or a
list of topics to subscribe to.
qos : the qos to use when subscribing. This is applied to all topics.
msg_count : the number of messages to retrieve from the broker.
if msg_count == 1 then a single MQTTMessage will be returned.
if msg_count > 1 then a list of MQTTMessages will be returned.
retained : If set to True, retained messages will be processed the same as
non-retained messages. If set to False, retained messages will
be ignored. This means that with retained=False and msg_count=1,
the function will return the first message received that does
not have the retained flag set.
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
transport : set to "tcp" to use the default setting of transport which is
raw TCP. Set to "websockets" to use WebSockets as the transport.
"""
if msg_count < 1:
raise ValueError('msg_count must be > 0')
# Set ourselves up to return a single message if msg_count == 1, or a list
# if > 1.
if msg_count == 1:
messages = None
else:
messages = []
userdata = {'retained':retained, 'msg_count':msg_count, 'messages':messages}
callback(_on_message_simple, topics, qos, userdata, hostname, port,
client_id, keepalive, will, auth, tls, protocol, transport)
return userdata['messages']
| 38.523077
| 92
| 0.648862
|
539b8675dc9b20bffab7e413aa5943d934069113
| 1,561
|
py
|
Python
|
py/2017/day24/aoc_day_24.py
|
cs-cordero/advent-of-code
|
614b8f78b43c54ef180a7dc411a0d1366a62944f
|
[
"MIT"
] | null | null | null |
py/2017/day24/aoc_day_24.py
|
cs-cordero/advent-of-code
|
614b8f78b43c54ef180a7dc411a0d1366a62944f
|
[
"MIT"
] | null | null | null |
py/2017/day24/aoc_day_24.py
|
cs-cordero/advent-of-code
|
614b8f78b43c54ef180a7dc411a0d1366a62944f
|
[
"MIT"
] | 2
|
2019-12-01T15:33:27.000Z
|
2020-12-14T05:37:23.000Z
|
from collections import defaultdict
if __name__ == "__main__":
d = defaultdict(set)
# with open('aoc_day_24_sample.txt') as f:
with open("aoc_day_24_input.txt") as f:
sample = f.readlines()
# sample = [
# '0/1',
# '1/2',
# '1/3',
# '1/4',
# '5/0',
# '2/5',
# '3/6',
# '4/500'
# ]
for component in sample:
a, b = map(int, component.split("/"))
d[a].add(component)
d[b].add(component)
solution()
| 27.875
| 88
| 0.59385
|
539ea2a319db010bc0f4b82dc9bd72f7d9cbdfe7
| 175
|
py
|
Python
|
scratchnet/scratchnet.py
|
Gr1m3y/scratchnet
|
5fce471b6e12dc05b3a92fd8581445f7d598d1c3
|
[
"MIT"
] | null | null | null |
scratchnet/scratchnet.py
|
Gr1m3y/scratchnet
|
5fce471b6e12dc05b3a92fd8581445f7d598d1c3
|
[
"MIT"
] | null | null | null |
scratchnet/scratchnet.py
|
Gr1m3y/scratchnet
|
5fce471b6e12dc05b3a92fd8581445f7d598d1c3
|
[
"MIT"
] | null | null | null |
import numpy as np
import network
if __name__ == "__main__":
main()
| 13.461538
| 32
| 0.617143
|
539eb7f2ba00a494348f5e2c2412e8b083606e64
| 1,048
|
py
|
Python
|
live-plotting.py
|
rmhsawyer/EC601-Final-Project-Mapping_User_Face_To_Emoji
|
05a61dca25ef6dc6827e3389a753eb65a09c1813
|
[
"Apache-2.0"
] | null | null | null |
live-plotting.py
|
rmhsawyer/EC601-Final-Project-Mapping_User_Face_To_Emoji
|
05a61dca25ef6dc6827e3389a753eb65a09c1813
|
[
"Apache-2.0"
] | 22
|
2017-11-10T21:37:20.000Z
|
2017-12-05T22:36:50.000Z
|
live-plotting.py
|
rmhsawyer/EC601-Final-Project
|
05a61dca25ef6dc6827e3389a753eb65a09c1813
|
[
"Apache-2.0"
] | 3
|
2017-10-30T20:07:18.000Z
|
2017-12-03T00:47:18.000Z
|
#draw the predictions from real-time.py
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
| 24.952381
| 78
| 0.605916
|
539f08b39f8bed483a13e19cdf11f4b9e2b776e6
| 1,850
|
py
|
Python
|
code/run_policy.py
|
kirk86/ARS
|
a4ac03e06bce5f183f7b18ea74b81c6c45c4426b
|
[
"BSD-2-Clause"
] | null | null | null |
code/run_policy.py
|
kirk86/ARS
|
a4ac03e06bce5f183f7b18ea74b81c6c45c4426b
|
[
"BSD-2-Clause"
] | null | null | null |
code/run_policy.py
|
kirk86/ARS
|
a4ac03e06bce5f183f7b18ea74b81c6c45c4426b
|
[
"BSD-2-Clause"
] | 1
|
2019-03-27T14:11:16.000Z
|
2019-03-27T14:11:16.000Z
|
"""
Code to load a policy and generate rollout data. Adapted from https://github.com/berkeleydeeprlcourse.
Example usage:
python run_policy.py ../trained_policies/Humanoid-v1/policy_reward_11600/lin_policy_plus.npz Humanoid-v1 --render \
--num_rollouts 20
"""
import numpy as np
import gym
if __name__ == '__main__':
main()
| 28.90625
| 119
| 0.605946
|
539f836eb4814996e6e8dcea4c9325a8edccf36d
| 6,048
|
py
|
Python
|
src/poliastro/plotting/tisserand.py
|
TreshUp/poliastro
|
602eb3c39d315be6dc1edaa12d72ab0e361334f6
|
[
"MIT"
] | null | null | null |
src/poliastro/plotting/tisserand.py
|
TreshUp/poliastro
|
602eb3c39d315be6dc1edaa12d72ab0e361334f6
|
[
"MIT"
] | null | null | null |
src/poliastro/plotting/tisserand.py
|
TreshUp/poliastro
|
602eb3c39d315be6dc1edaa12d72ab0e361334f6
|
[
"MIT"
] | null | null | null |
""" Generates Tisserand plots """
from enum import Enum
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.plotting._base import BODY_COLORS
from poliastro.twobody.mean_elements import get_mean_elements
from poliastro.util import norm
| 30.24
| 81
| 0.586475
|
53a13df64d25ae2c757b6265afa2baab533adc4f
| 3,122
|
py
|
Python
|
libs/Rack.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 22
|
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
libs/Rack.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 8
|
2015-12-28T18:56:19.000Z
|
2019-04-01T17:33:48.000Z
|
libs/Rack.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 13
|
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json
factory = RequestFactory()
| 34.688889
| 122
| 0.575593
|
53a26f62743c91c61bf312038531a22cbbef6701
| 151
|
py
|
Python
|
r2c_isg/functions/__init__.py
|
returntocorp/inputset-generator
|
c33952cc5683e9e70b24f76936c42ec8e354d121
|
[
"MIT"
] | 3
|
2019-11-02T20:14:34.000Z
|
2020-01-23T21:47:20.000Z
|
r2c_isg/functions/__init__.py
|
returntocorp/inputset-generator
|
c33952cc5683e9e70b24f76936c42ec8e354d121
|
[
"MIT"
] | 19
|
2019-09-18T01:48:07.000Z
|
2021-11-04T11:20:48.000Z
|
r2c_isg/functions/__init__.py
|
returntocorp/inputset-generator
|
c33952cc5683e9e70b24f76936c42ec8e354d121
|
[
"MIT"
] | 3
|
2019-11-15T22:31:13.000Z
|
2020-03-10T10:19:39.000Z
|
from .trim import trim
from .sample import sample
from .sort import sort
function_map = {
'trim': trim,
'sample': sample,
'sort': sort
}
| 13.727273
| 26
| 0.649007
|
53a287190d58a2db9d8427aaa2bd973ac3e2cd59
| 59
|
py
|
Python
|
__init__.py
|
csalyk/nirspec
|
58661371871d29103afe42bfccc0bff9ff773914
|
[
"MIT-0"
] | null | null | null |
__init__.py
|
csalyk/nirspec
|
58661371871d29103afe42bfccc0bff9ff773914
|
[
"MIT-0"
] | null | null | null |
__init__.py
|
csalyk/nirspec
|
58661371871d29103afe42bfccc0bff9ff773914
|
[
"MIT-0"
] | null | null | null |
from .nirspec import divspec
from .nirspec import gluespec
| 19.666667
| 29
| 0.830508
|
53a2e756b6afda167f3e4ff4e520ec037aac6965
| 9,526
|
py
|
Python
|
poem.py
|
xcollantes/poetry-generator
|
456c9702f0105b49b8c3edbb55043a10efbf359b
|
[
"MIT"
] | null | null | null |
poem.py
|
xcollantes/poetry-generator
|
456c9702f0105b49b8c3edbb55043a10efbf359b
|
[
"MIT"
] | null | null | null |
poem.py
|
xcollantes/poetry-generator
|
456c9702f0105b49b8c3edbb55043a10efbf359b
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import datetime
import os
import random
import sys
import uuid
import base64
import yaml
import re
try:
import en
except:
print("DOWNLOD NODECUBE")
print("""wget https://www.nodebox.net/code/data/media/linguistics.zip
unzip linguistics.zip""")
VERSION = "1.1"
THEME_PROB = 0
bnf = bnfDictionary('brain.yaml')
if __name__ == '__main__':
poemtype = 'poem'
if 'mushy' in sys.argv[1:]:
poemtype = 'mushypoem'
p,seed_str=generate_poem(poemtype)
print(("*"*30 + "\n"*5))
filtered = []
for line in re.sub("<.*?>", " ", p).split("\n"):
if len(line.strip()) > 0:
filtered.append(line.strip())
else:
filtered.append("pause")
print(p)
| 39.526971
| 97
| 0.43607
|
53a46773e97ade0a733cbe735e77d4be70d5d02d
| 3,927
|
py
|
Python
|
openstack/tests/unit/block_storage/v2/test_proxy.py
|
infonova/openstacksdk
|
3cf6730a71d8fb448f24af8a5b4e82f2af749cea
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/block_storage/v2/test_proxy.py
|
infonova/openstacksdk
|
3cf6730a71d8fb448f24af8a5b4e82f2af749cea
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/block_storage/v2/test_proxy.py
|
infonova/openstacksdk
|
3cf6730a71d8fb448f24af8a5b4e82f2af749cea
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.block_storage.v2 import _proxy
from openstack.block_storage.v2 import snapshot
from openstack.block_storage.v2 import stats
from openstack.block_storage.v2 import type
from openstack.block_storage.v2 import volume
from openstack.tests.unit import test_proxy_base
| 39.27
| 75
| 0.663102
|
53a4815531cf8a3d91a379873dd45b934995baa1
| 20,346
|
py
|
Python
|
src/ncstyler/console.py
|
starofrainnight/ncstyler
|
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
|
[
"MIT"
] | null | null | null |
src/ncstyler/console.py
|
starofrainnight/ncstyler
|
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
|
[
"MIT"
] | null | null | null |
src/ncstyler/console.py
|
starofrainnight/ncstyler
|
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import CppHeaderParser
import re
import sys
import yaml
import copy
import six
import os.path
import traceback
def main():
a = Application()
sys.exit(a.exec_())
if __name__ == "__main__":
# Execute only if run as a script
main()
| 38.172608
| 97
| 0.524182
|
53a4ae1a747ba84b0abf192cd72d5b27b2b5e891
| 1,527
|
py
|
Python
|
theone/wsgi/server.py
|
laozijiaojiangnan/TheOne
|
73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824
|
[
"Apache-2.0"
] | null | null | null |
theone/wsgi/server.py
|
laozijiaojiangnan/TheOne
|
73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824
|
[
"Apache-2.0"
] | null | null | null |
theone/wsgi/server.py
|
laozijiaojiangnan/TheOne
|
73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824
|
[
"Apache-2.0"
] | null | null | null |
import typing as t
from http.server import HTTPServer, BaseHTTPRequestHandler
from . import response as resp
| 28.277778
| 105
| 0.612967
|
53a59bcf9df24d2abf9133b0c94be6aa674beda0
| 4,462
|
py
|
Python
|
pytorch_translate/attention/multihead_attention.py
|
dzhulgakov/translate
|
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
|
[
"BSD-3-Clause"
] | 1
|
2019-06-14T20:20:39.000Z
|
2019-06-14T20:20:39.000Z
|
pytorch_translate/attention/multihead_attention.py
|
dzhulgakov/translate
|
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch_translate/attention/multihead_attention.py
|
dzhulgakov/translate
|
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from fairseq.modules import multihead_attention as fair_multihead
from pytorch_translate.attention import (
BaseAttention,
attention_utils,
register_attention,
)
| 35.412698
| 85
| 0.62528
|
53a74fabccfed340e02d074e5c163a36783d5463
| 1,102
|
py
|
Python
|
custom_components/purrsong/__init__.py
|
RobertD502/home-assistant-lavviebot
|
5c69f474786f043773cba42b7806fb77d4f89672
|
[
"MIT"
] | 3
|
2021-04-15T21:23:26.000Z
|
2021-12-18T07:45:40.000Z
|
custom_components/purrsong/__init__.py
|
RobertD502/home-assistant-lavviebot
|
5c69f474786f043773cba42b7806fb77d4f89672
|
[
"MIT"
] | 2
|
2021-10-21T12:08:32.000Z
|
2021-11-12T19:13:11.000Z
|
custom_components/purrsong/__init__.py
|
RobertD502/home-assistant-lavviebot
|
5c69f474786f043773cba42b7806fb77d4f89672
|
[
"MIT"
] | null | null | null |
"""Support for Purrsong LavvieBot S"""
import asyncio
import logging
import voluptuous as vol
from lavviebot import LavvieBotApi
import homeassistant.helpers.config_validation as cv
from homeassistant import config_entries
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME
)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup of the component"""
return True
| 26.238095
| 83
| 0.772232
|
53a892c5198d37c345b5950774654f861533af79
| 2,904
|
py
|
Python
|
problems/Kelvin_Helmholtz/problem.py
|
sddyates/mars
|
a56735bd344b7337151fb419b1c832b0c702ea69
|
[
"MIT"
] | 1
|
2019-12-20T20:29:14.000Z
|
2019-12-20T20:29:14.000Z
|
problems/Kelvin_Helmholtz/problem.py
|
sddyates/mars
|
a56735bd344b7337151fb419b1c832b0c702ea69
|
[
"MIT"
] | 3
|
2019-08-30T08:12:16.000Z
|
2020-05-15T16:19:53.000Z
|
problems/Kelvin_Helmholtz/problem.py
|
sddyates/mars
|
a56735bd344b7337151fb419b1c832b0c702ea69
|
[
"MIT"
] | 1
|
2019-12-21T03:51:30.000Z
|
2019-12-21T03:51:30.000Z
|
from mars import main_loop
import numpy as np
from mars.settings import *
if __name__ == "__main__":
main_loop(Problem())
| 24.2
| 68
| 0.490358
|
53a8f467665d04dfb54d9331579d408e1a611989
| 1,461
|
py
|
Python
|
pythainlp/util/thai.py
|
korkeatw/pythainlp
|
6fc7c3434d5e58c8e8e2bf13470445cbab0866bd
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/util/thai.py
|
korkeatw/pythainlp
|
6fc7c3434d5e58c8e8e2bf13470445cbab0866bd
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/util/thai.py
|
korkeatw/pythainlp
|
6fc7c3434d5e58c8e8e2bf13470445cbab0866bd
|
[
"Apache-2.0"
] | 1
|
2020-05-27T09:53:09.000Z
|
2020-05-27T09:53:09.000Z
|
# -*- coding: utf-8 -*-
"""
Check if it is Thai text
"""
import string
_DEFAULT_IGNORE_CHARS = string.whitespace + string.digits + string.punctuation
def isthaichar(ch: str) -> bool:
"""
Check if a character is Thai
:param str ch: input character
:return: True or False
"""
ch_val = ord(ch)
if ch_val >= 3584 and ch_val <= 3711:
return True
return False
def isthai(word: str, ignore_chars: str = ".") -> bool:
"""
Check if all character is Thai
:param str word: input text
:param str ignore_chars: characters to be ignored (i.e. will be considered as Thai)
:return: True or False
"""
if not ignore_chars:
ignore_chars = ""
for ch in word:
if ch not in ignore_chars and not isthaichar(ch):
return False
return True
def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
"""
:param str text: input text
:return: float, proportion of characters in the text that is Thai character
"""
if not text or not isinstance(text, str):
return 0
if not ignore_chars:
ignore_chars = ""
num_thai = 0
num_ignore = 0
for ch in text:
if ch in ignore_chars:
num_ignore += 1
elif isthaichar(ch):
num_thai += 1
num_count = len(text) - num_ignore
return (num_thai / num_count) * 100
| 22.476923
| 87
| 0.612594
|
53a95c744ad18d63a19b3fc856fe6442690ea1c8
| 54
|
py
|
Python
|
Numpy/tempCodeRunnerFile.py
|
zharmedia386/Data-Science-Stuff
|
40183c329e3b30c582c545c260ca7916f29e2f09
|
[
"MIT"
] | null | null | null |
Numpy/tempCodeRunnerFile.py
|
zharmedia386/Data-Science-Stuff
|
40183c329e3b30c582c545c260ca7916f29e2f09
|
[
"MIT"
] | null | null | null |
Numpy/tempCodeRunnerFile.py
|
zharmedia386/Data-Science-Stuff
|
40183c329e3b30c582c545c260ca7916f29e2f09
|
[
"MIT"
] | null | null | null |
print(b)
print(c)
print(d)
print(e)
print(f)
print(g)
| 7.714286
| 8
| 0.666667
|
53a96c42fcec2518a3a26c0e6dece5934119cc53
| 1,941
|
py
|
Python
|
Python/Filter.py
|
KilroyWasHere-cs-j/savitzky-golay
|
2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f
|
[
"MIT"
] | null | null | null |
Python/Filter.py
|
KilroyWasHere-cs-j/savitzky-golay
|
2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f
|
[
"MIT"
] | null | null | null |
Python/Filter.py
|
KilroyWasHere-cs-j/savitzky-golay
|
2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog
x = []
y = []
# Generating the noisy signal
x, y = fill_data()
print(len(y))
# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)
print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)
show(x_filtered, y_filtered, x, y)
| 26.589041
| 107
| 0.640907
|
53aa536c76b41bd1afbf13c8b634be33ef9462e1
| 8,087
|
py
|
Python
|
examples/adwords/v201406/advanced_operations/add_ad_customizer.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | null | null | null |
examples/adwords/v201406/advanced_operations/add_ad_customizer.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | null | null | null |
examples/adwords/v201406/advanced_operations/add_ad_customizer.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that
uses the feed to populate dynamic data.
Tags: CustomerFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Tags: AdGroupAdService.mutate
"""
__author__ = ('api.msaniscalchi@gmail.com (Mark Saniscalchi)',
'yufeng.dev@gmail.com (Yufeng Guo)')
# Import appropriate classes from the client library.
from googleads import adwords
# See the Placeholder reference page for a list of all the placeholder types
# and fields:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_AD_CUSTOMIZER = '10'
PLACEHOLDER_FIELD_INTEGER = '1'
PLACEHOLDER_FIELD_FLOAT = '2'
PLACEHOLDER_FIELD_PRICE = '3'
PLACEHOLDER_FIELD_DATE = '4'
PLACEHOLDER_FIELD_STRING = '5'
ADGROUPS = [
'INSERT_ADGROUP_ID_HERE',
'INSERT_ADGROUP_ID_HERE'
]
FEEDNAME = 'INSERT_FEED_NAME_HERE'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
| 32.09127
| 80
| 0.614072
|
53aaad486aeb5cf94c98b45787e68241bed70175
| 2,001
|
py
|
Python
|
tests/test_minhash.py
|
azachar/pyminhash
|
8a595fb25fe7172ea31d604fe8a40b8c11f1b8af
|
[
"MIT"
] | null | null | null |
tests/test_minhash.py
|
azachar/pyminhash
|
8a595fb25fe7172ea31d604fe8a40b8c11f1b8af
|
[
"MIT"
] | null | null | null |
tests/test_minhash.py
|
azachar/pyminhash
|
8a595fb25fe7172ea31d604fe8a40b8c11f1b8af
|
[
"MIT"
] | null | null | null |
import pytest
from pyminhash import MinHash
from pyminhash.datasets import load_data
| 30.318182
| 102
| 0.667166
|
53ab5b39a644e03ecaaf97048f3ae768e29b5a48
| 503
|
py
|
Python
|
settings.py
|
danylo-dudok/youtube-rss
|
c4478605274cdeac33f909d7fcb7d265898e80bc
|
[
"MIT"
] | null | null | null |
settings.py
|
danylo-dudok/youtube-rss
|
c4478605274cdeac33f909d7fcb7d265898e80bc
|
[
"MIT"
] | null | null | null |
settings.py
|
danylo-dudok/youtube-rss
|
c4478605274cdeac33f909d7fcb7d265898e80bc
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from typing import final
from tools import localize_time
RSS_URL_PREFIX: final = 'https://www.youtube.com/feeds/videos.xml?channel_id={0}'
LOCATION_ARGUMENT_PREFIX: final = '--location='
CHANNEL_ARGUMENT_PREFIX: final = '--channels='
LAST_CHECK_ARGUMENT_PREFIX: final = '--last-check='
TWO_WEEKS_IN_DAYS: final = 14
DEFAULT_LAST_CHECK: final = localize_time(datetime.now() - timedelta(days=TWO_WEEKS_IN_DAYS))
EMPTY: final = ''
CHANNEL_POSTS_LIMIT: final = 20
| 35.928571
| 93
| 0.787276
|
53ac58babeeeae8a59ad21aa748c5f201e132f9d
| 1,325
|
py
|
Python
|
openpicle/caravel.py
|
DX-MON/OpenPICle
|
c036333f807b1b4959af22bde8c4cac553ef162f
|
[
"BSD-3-Clause"
] | null | null | null |
openpicle/caravel.py
|
DX-MON/OpenPICle
|
c036333f807b1b4959af22bde8c4cac553ef162f
|
[
"BSD-3-Clause"
] | null | null | null |
openpicle/caravel.py
|
DX-MON/OpenPICle
|
c036333f807b1b4959af22bde8c4cac553ef162f
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter
__all__ = (
'PIC16Caravel',
)
| 25
| 82
| 0.682264
|
53ad1ae14a311f840335b9dec9f60aa2cc4425a1
| 2,615
|
py
|
Python
|
cogs/stats.py
|
est73/raid-shack
|
727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e
|
[
"MIT"
] | null | null | null |
cogs/stats.py
|
est73/raid-shack
|
727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e
|
[
"MIT"
] | null | null | null |
cogs/stats.py
|
est73/raid-shack
|
727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
import discord
| 35.337838
| 84
| 0.507457
|
53b0797fa1d2b73bd60c7d0448335bb8ff3970e6
| 2,995
|
py
|
Python
|
tests/bucket/test_bucket.py
|
WillChilds-Klein/mistress-mapreduce
|
c991a502545bd0d3ec4f914cdc63faf6a40e77ae
|
[
"Apache-2.0"
] | 2
|
2018-12-02T11:10:15.000Z
|
2019-02-21T22:24:00.000Z
|
tests/bucket/test_bucket.py
|
WillChilds-Klein/mistress-mapreduce
|
c991a502545bd0d3ec4f914cdc63faf6a40e77ae
|
[
"Apache-2.0"
] | 1
|
2019-02-21T22:23:36.000Z
|
2019-02-21T22:23:36.000Z
|
tests/bucket/test_bucket.py
|
WillChilds-Klein/mistress-mapreduce
|
c991a502545bd0d3ec4f914cdc63faf6a40e77ae
|
[
"Apache-2.0"
] | 3
|
2018-04-26T16:02:10.000Z
|
2018-12-02T11:10:16.000Z
|
from mrs.bucket import WriteBucket
from mrs import BinWriter, HexWriter
# vim: et sw=4 sts=4
| 26.741071
| 69
| 0.621035
|
53b14303d9879fe4fc46ca016bb6d34bfedbf48e
| 35,783
|
py
|
Python
|
inquire/agents/dempref.py
|
HARPLab/inquire
|
fa74eb10e5391a0f226753668a31527c68fc6962
|
[
"BSD-3-Clause"
] | null | null | null |
inquire/agents/dempref.py
|
HARPLab/inquire
|
fa74eb10e5391a0f226753668a31527c68fc6962
|
[
"BSD-3-Clause"
] | null | null | null |
inquire/agents/dempref.py
|
HARPLab/inquire
|
fa74eb10e5391a0f226753668a31527c68fc6962
|
[
"BSD-3-Clause"
] | null | null | null |
"""
An agent which uses demonstrations and preferences.
Code adapted from Learning Reward Functions
by Integrating Human Demonstrations and Preferences.
"""
import itertools
import os
import time
from pathlib import Path
from typing import Dict, List
import arviz as az
from inquire.agents.agent import Agent
from inquire.environments.environment import Environment
from inquire.interactions.feedback import Query, Trajectory
from inquire.interactions.modalities import Preference
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import pymc3.distributions.transforms as tr
import scipy.optimize as opt
import theano.tensor as tt
| 40.570295
| 123
| 0.506386
|
53b25c7fce6d985ae97109a316a32f1fdb359f32
| 1,049
|
py
|
Python
|
coba/learners/__init__.py
|
mrucker/banditbenchmark
|
0365291b3a0cf1d862d294e0386d0ccad3f360f1
|
[
"BSD-3-Clause"
] | 1
|
2020-07-22T13:43:14.000Z
|
2020-07-22T13:43:14.000Z
|
coba/learners/__init__.py
|
mrucker/coba
|
4f679fb5c6e39e2d0bf3e609c77a2a6865168795
|
[
"BSD-3-Clause"
] | null | null | null |
coba/learners/__init__.py
|
mrucker/coba
|
4f679fb5c6e39e2d0bf3e609c77a2a6865168795
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module contains all public learners and learner interfaces."""
from coba.learners.primitives import Learner, SafeLearner
from coba.learners.bandit import EpsilonBanditLearner, UcbBanditLearner, FixedLearner, RandomLearner
from coba.learners.corral import CorralLearner
from coba.learners.vowpal import VowpalMediator
from coba.learners.vowpal import VowpalArgsLearner, VowpalEpsilonLearner, VowpalSoftmaxLearner, VowpalBagLearner
from coba.learners.vowpal import VowpalCoverLearner, VowpalRegcbLearner, VowpalSquarecbLearner, VowpalOffPolicyLearner
from coba.learners.linucb import LinUCBLearner
__all__ = [
'Learner',
'SafeLearner',
'RandomLearner',
'FixedLearner',
'EpsilonBanditLearner',
'UcbBanditLearner',
'CorralLearner',
'LinUCBLearner',
'VowpalArgsLearner',
'VowpalEpsilonLearner',
'VowpalSoftmaxLearner',
'VowpalBagLearner',
'VowpalCoverLearner',
'VowpalRegcbLearner',
'VowpalSquarecbLearner',
'VowpalOffPolicyLearner',
'VowpalMediator'
]
| 36.172414
| 122
| 0.766444
|
53b40880bc916c9f0a3ace8c04060a57ded76e7b
| 24,347
|
py
|
Python
|
virtual/lib/python3.8/site-packages/dns/zonefile.py
|
Lenus254/personal_blog
|
aac38e4b5372c86efa8e24db2e051fef8e5feef8
|
[
"Unlicense"
] | 1
|
2022-01-27T05:54:14.000Z
|
2022-01-27T05:54:14.000Z
|
virtual/lib/python3.8/site-packages/dns/zonefile.py
|
Lenus254/personal_blog
|
aac38e4b5372c86efa8e24db2e051fef8e5feef8
|
[
"Unlicense"
] | null | null | null |
virtual/lib/python3.8/site-packages/dns/zonefile.py
|
Lenus254/personal_blog
|
aac38e4b5372c86efa8e24db2e051fef8e5feef8
|
[
"Unlicense"
] | null | null | null |
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
import re
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rdtypes.ANY.SOA
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl
import dns.grange
def _check_cname_and_other_data(txn, name, rdataset):
rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
node = txn.get_node(name)
if node is None:
# empty nodes are neutral.
return
node_kind = node.classify()
if node_kind == dns.node.NodeKind.CNAME and \
rdataset_kind == dns.node.NodeKind.REGULAR:
raise CNAMEAndOtherData('rdataset type is not compatible with a '
'CNAME node')
elif node_kind == dns.node.NodeKind.REGULAR and \
rdataset_kind == dns.node.NodeKind.CNAME:
raise CNAMEAndOtherData('CNAME rdataset is not compatible with a '
'regular data node')
# Otherwise at least one of the node and the rdataset is neutral, so
# adding the rdataset is ok
def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN,
default_rdclass=dns.rdataclass.IN,
rdtype=None, default_ttl=None, idna_codec=None,
origin=dns.name.root, relativize=False):
"""Read one or more rrsets from the specified text, possibly subject
to restrictions.
*text*, a file object or a string, is the input to process.
*name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
the rrset. If not ``None``, then the owner name is "forced", and the
input must not specify an owner name. If ``None``, then any owner names
are allowed and must be present in the input.
*ttl*, an ``int``, string, or None. If not ``None``, the the TTL is
forced to be the specified value and the input must not specify a TTL.
If ``None``, then a TTL may be specified in the input. If it is not
specified, then the *default_ttl* will be used.
*rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If
not ``None``, then the class is forced to the specified value, and the
input must not specify a class. If ``None``, then the input may specify
a class that matches *default_rdclass*. Note that it is not possible to
return rrsets with differing classes; specifying ``None`` for the class
simply allows the user to optionally type a class as that may be convenient
when cutting and pasting.
*default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class
of the returned rrsets.
*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not
``None``, then the type is forced to the specified value, and the
input must not specify a type. If ``None``, then a type must be present
for each RR.
*default_ttl*, an ``int``, string, or ``None``. If not ``None``, then if
the TTL is not forced and is not specified, then this value will be used.
if ``None``, then if the TTL is not forced an error will occur if the TTL
is not specified.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used. Note that codecs only apply to the owner name; dnspython does
not do IDNA for names in rdata, as there is no IDNA zonefile format.
*origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
relative names in the input, and also the origin to relativize to if
*relativize* is ``True``.
*relativize*, a bool. If ``True``, names are relativized to the *origin*;
if ``False`` then any relative names in the input are made absolute by
appending the *origin*.
"""
if isinstance(origin, str):
origin = dns.name.from_text(origin, dns.name.root, idna_codec)
if isinstance(name, str):
name = dns.name.from_text(name, origin, idna_codec)
if isinstance(ttl, str):
ttl = dns.ttl.from_text(ttl)
if isinstance(default_ttl, str):
default_ttl = dns.ttl.from_text(default_ttl)
if rdclass is not None:
rdclass = dns.rdataclass.RdataClass.make(rdclass)
default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
if rdtype is not None:
rdtype = dns.rdatatype.RdataType.make(rdtype)
manager = RRSetsReaderManager(origin, relativize, default_rdclass)
with manager.writer(True) as txn:
tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec)
reader = Reader(tok, default_rdclass, txn, allow_directives=False,
force_name=name, force_ttl=ttl, force_rdclass=rdclass,
force_rdtype=rdtype, default_ttl=default_ttl)
reader.read()
return manager.rrsets
| 38.9552
| 83
| 0.548897
|
53b4099090d815c2fccdfff9285d6d8c4361e95f
| 11,719
|
py
|
Python
|
swift/common/daemon.py
|
fossabot/swift-1
|
63fc013b8b96484cede0e9901ad54676b8c93298
|
[
"Apache-2.0"
] | null | null | null |
swift/common/daemon.py
|
fossabot/swift-1
|
63fc013b8b96484cede0e9901ad54676b8c93298
|
[
"Apache-2.0"
] | null | null | null |
swift/common/daemon.py
|
fossabot/swift-1
|
63fc013b8b96484cede0e9901ad54676b8c93298
|
[
"Apache-2.0"
] | 1
|
2020-03-09T19:58:52.000Z
|
2020-03-09T19:58:52.000Z
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import sys
import time
import signal
from re import sub
import eventlet.debug
from eventlet.hubs import use_hub
from swift.common import utils
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
"""
Loads settings from conf, then instantiates daemon ``klass`` and runs the
daemon with the specified ``once`` kwarg. The section_name will be derived
from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
object-replicator).
:param klass: Class to instantiate, subclass of :class:`Daemon`
:param conf_file: Path to configuration file
:param section_name: Section name from conf file to load config from
:param once: Passed to daemon :meth:`Daemon.run` method
"""
# very often the config section_name is based on the class name
# the None singleton will be passed through to readconf as is
if section_name == '':
section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
klass.__name__).lower()
try:
conf = utils.readconf(conf_file, section_name,
log_name=kwargs.get('log_name'))
except (ValueError, IOError) as e:
# The message will be printed to stderr
# and results in an exit code of 1.
sys.exit(e)
use_hub(utils.get_hub())
# once on command line (i.e. daemonize=false) will over-ride config
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
# pre-configure logger
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = utils.get_logger(conf, conf.get('log_name', section_name),
log_to_console=kwargs.pop('verbose', False),
log_route=section_name)
# optional nice/ionice priority scheduling
utils.modify_priority(conf, logger)
# disable fallocate if desired
if utils.config_true_value(conf.get('disable_fallocate', 'no')):
utils.disable_fallocate()
# set utils.FALLOCATE_RESERVE if desired
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
# By default, disable eventlet printing stacktraces
eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to UTC.
os.environ['TZ'] = 'UTC+0'
time.tzset()
logger.notice('Starting %s', os.getpid())
try:
DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
except KeyboardInterrupt:
logger.info('User quit')
logger.notice('Exited %s', os.getpid())
| 36.621875
| 79
| 0.63572
|
53b4d42745fdda68cc9c6626c17825d3356f7324
| 474
|
py
|
Python
|
backend/resource_files_sample.py
|
Bhaskers-Blu-Org1/multicloud-incident-response-navigator
|
e6ba6322fdcc533b6ed14abb4681470a6bb6bd85
|
[
"Apache-2.0"
] | null | null | null |
backend/resource_files_sample.py
|
Bhaskers-Blu-Org1/multicloud-incident-response-navigator
|
e6ba6322fdcc533b6ed14abb4681470a6bb6bd85
|
[
"Apache-2.0"
] | null | null | null |
backend/resource_files_sample.py
|
Bhaskers-Blu-Org1/multicloud-incident-response-navigator
|
e6ba6322fdcc533b6ed14abb4681470a6bb6bd85
|
[
"Apache-2.0"
] | 1
|
2020-07-30T10:07:19.000Z
|
2020-07-30T10:07:19.000Z
|
import resource_files
resources = resource_files.ResourceFiles()
# sample use case of getting yamls
print(resources.get_yaml("Pod", "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf", "default", "mycluster"))
# sample use case of getting events
print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))
# sample use case of getting describe info
print(resources.get_logs('mycluster', 'default', "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf"))
| 36.461538
| 103
| 0.78481
|
53b5ca21f061bcccc9e7720c97265d2e56f05552
| 1,305
|
py
|
Python
|
backend/api/v1/auth_module/auth_api.py
|
aroraenterprise/projecteos
|
e1fb0438af8cb59b77792523c6616c480b23a6f8
|
[
"MIT"
] | null | null | null |
backend/api/v1/auth_module/auth_api.py
|
aroraenterprise/projecteos
|
e1fb0438af8cb59b77792523c6616c480b23a6f8
|
[
"MIT"
] | null | null | null |
backend/api/v1/auth_module/auth_api.py
|
aroraenterprise/projecteos
|
e1fb0438af8cb59b77792523c6616c480b23a6f8
|
[
"MIT"
] | null | null | null |
"""
Project: flask-rest
Author: Saj Arora
Description: Handle auth endpoints such as auth/signup, auth/login
"""
from api.v1 import make_json_ok_response, SageController, SageMethod
from api.v1.fundamentals import helper
from .auth_controller import AuthController
auth_controller = {
'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False),
'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False)
}
| 36.25
| 104
| 0.744828
|
53b66284f62a337ba9819ca33a9acfe617722619
| 1,785
|
py
|
Python
|
tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py
|
AngsarM/QuanGuru
|
5db6105f843bbc78c2d5b1547e32d494fbe10b8d
|
[
"BSD-3-Clause"
] | 9
|
2021-05-23T06:30:45.000Z
|
2021-12-27T13:33:54.000Z
|
tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py
|
cahitkargi/QuanGuru
|
9b5c94465cd58bc32f6ff845f29dfdec7e0f9075
|
[
"BSD-3-Clause"
] | 26
|
2022-03-18T02:40:54.000Z
|
2022-03-25T07:00:25.000Z
|
tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py
|
cahitkargi/QuanGuru
|
9b5c94465cd58bc32f6ff845f29dfdec7e0f9075
|
[
"BSD-3-Clause"
] | 5
|
2021-05-23T06:30:24.000Z
|
2022-02-04T02:40:08.000Z
|
import random as rn
import numpy as np
# open system dynamics of a qubit and compare numerical results with the analytical calculations
# NOTE these are also TUTORIALS of the library, so see the Tutorials for what these are doing and analytical
# calculations.
# currently includes 2 cases: (i) decay only, and (ii) unitary evolution by calling Liouville method without giving
# any collapse operators. For now, only looks at excited state populations
# TODO this is an unfinished test. below two tests are the same and it actually is not testing open system dynamics.
decayRateSM = rn.random()
excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)
populations = {'excitedAnalytical':[], 'excitedNumerical':[]}
# this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every
# step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above.
| 45.769231
| 117
| 0.773109
|
53b6650eb89817fbb23a4d021878f43cb942eb48
| 538
|
py
|
Python
|
QuGraphy/state.py
|
Mohamed-ShehabEldin/QuGraphy
|
c43fe7128f91e7bd383393f5ff16ff613077e8d7
|
[
"Apache-2.0"
] | null | null | null |
QuGraphy/state.py
|
Mohamed-ShehabEldin/QuGraphy
|
c43fe7128f91e7bd383393f5ff16ff613077e8d7
|
[
"Apache-2.0"
] | null | null | null |
QuGraphy/state.py
|
Mohamed-ShehabEldin/QuGraphy
|
c43fe7128f91e7bd383393f5ff16ff613077e8d7
|
[
"Apache-2.0"
] | null | null | null |
#this file will contain function that related to vector state
from .density import * #we may use some functions from them and dependencies
| 25.619048
| 79
| 0.633829
|
53b6dc5235fed6c6481fdc6dfb8b105b1f554689
| 4,480
|
py
|
Python
|
uncoverml/metadata_profiler.py
|
GeoscienceAustralia/uncoverml
|
672914377afa4ad1c069fcd4845bc45f80132e36
|
[
"Apache-2.0"
] | 34
|
2017-03-14T23:59:58.000Z
|
2022-03-03T18:04:25.000Z
|
uncoverml/metadata_profiler.py
|
GeoscienceAustralia/uncoverml
|
672914377afa4ad1c069fcd4845bc45f80132e36
|
[
"Apache-2.0"
] | 106
|
2017-03-22T00:26:10.000Z
|
2022-03-12T00:19:08.000Z
|
uncoverml/metadata_profiler.py
|
GeoscienceAustralia/uncoverml
|
672914377afa4ad1c069fcd4845bc45f80132e36
|
[
"Apache-2.0"
] | 21
|
2017-05-04T04:02:39.000Z
|
2022-02-04T00:55:18.000Z
|
#! /usr/bin/env python
"""
Description:
Gather Metadata for the uncover-ml prediction output results:
Reference: email 2019-05-24
Overview
Creator: (person who generated the model)
Model;
Name:
Type and date:
Algorithm:
Extent: Lat/long - location on Australia map?
SB Notes: None of the above is required as this information will be captured in the yaml file.
Model inputs:
1. Covariates - list (in full)
2. Targets: path to shapefile: csv file
SB Notes: Only covaraite list file. Targets and path to shapefile is not required as this is available in the yaml file. May be the full path to the shapefile has some merit as one can specify partial path.
Model performance
JSON file (in full)
SB Notes: Yes
Model outputs
1. Prediction grid including path
2. Quantiles Q5; Q95
3. Variance:
4. Entropy:
5. Feature rank file
6. Raw covariates file (target value - covariate value)
7. Optimisation output
8. Others ??
SB Notes: Not required as these are model dependent, and the metadata will be contained in each of the output geotif file.
Model parameters:
1. YAML file (in full)
2. .SH file (in full)
SB Notes: The .sh file is not required. YAML file is read as a python dictionary in uncoverml which can be dumped in the metadata.
CreationDate: 31/05/19
Developer: fei.zhang@ga.gov.au
Revision History:
LastUpdate: 31/05/19 FZ
LastUpdate: dd/mm/yyyy Who Optional description
"""
# import section
import os
import sys
import json
import pickle
import datetime
import getpass
import socket
from ppretty import ppretty
import uncoverml
| 32.941176
| 206
| 0.620536
|
53b7cf475edf549606a00bf10c8b39ab817c0d94
| 72
|
py
|
Python
|
testjpkg/jsonify/hij.py
|
thisisishara/test_pypi_cli
|
15b22ed8943a18a6d9de9ee4ba6a84249a633e2e
|
[
"MIT"
] | null | null | null |
testjpkg/jsonify/hij.py
|
thisisishara/test_pypi_cli
|
15b22ed8943a18a6d9de9ee4ba6a84249a633e2e
|
[
"MIT"
] | null | null | null |
testjpkg/jsonify/hij.py
|
thisisishara/test_pypi_cli
|
15b22ed8943a18a6d9de9ee4ba6a84249a633e2e
|
[
"MIT"
] | null | null | null |
print("hiiiiiiiiiiiiiiiix")
| 12
| 28
| 0.666667
|
53b7d55368f6a08688dd3db11b258ac91759ec48
| 2,447
|
py
|
Python
|
asv_bench/benchmarks/algorithms.py
|
raspbian-packages/pandas
|
fb33806b5286deb327b2e0fa96aedf25a6ed563f
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
asv_bench/benchmarks/algorithms.py
|
raspbian-packages/pandas
|
fb33806b5286deb327b2e0fa96aedf25a6ed563f
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
asv_bench/benchmarks/algorithms.py
|
raspbian-packages/pandas
|
fb33806b5286deb327b2e0fa96aedf25a6ed563f
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pandas as pd
from pandas.util import testing as tm
| 26.89011
| 67
| 0.612178
|
53b8d7ac852024e1d3318cbf747bac9b0ef35d8a
| 28,857
|
py
|
Python
|
RMtools_1D/do_RMsynth_1D.py
|
lh-astro/RM-Tools
|
ac64cc41b2f696f21ee7dd001303cbad1ff71114
|
[
"MIT"
] | null | null | null |
RMtools_1D/do_RMsynth_1D.py
|
lh-astro/RM-Tools
|
ac64cc41b2f696f21ee7dd001303cbad1ff71114
|
[
"MIT"
] | null | null | null |
RMtools_1D/do_RMsynth_1D.py
|
lh-astro/RM-Tools
|
ac64cc41b2f696f21ee7dd001303cbad1ff71114
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#=============================================================================#
# #
# NAME: do_RMsynth_1D.py #
# #
# PURPOSE: API for runnning RM-synthesis on an ASCII Stokes I, Q & U spectrum.#
# #
# MODIFIED: 16-Nov-2018 by J. West #
# MODIFIED: 23-October-2019 by A. Thomson #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 - 2018 Cormac R. Purcell #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import sys
import os
import time
import traceback
import json
import math as m
import numpy as np
import matplotlib.pyplot as plt
from RMutils.util_RM import do_rmsynth
from RMutils.util_RM import do_rmsynth_planes
from RMutils.util_RM import get_rmsf_planes
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_RM import measure_qu_complexity
from RMutils.util_RM import measure_fdf_complexity
from RMutils.util_misc import nanmedian
from RMutils.util_misc import toscalar
from RMutils.util_misc import create_frac_spectra
from RMutils.util_misc import poly5
from RMutils.util_misc import MAD
from RMutils.util_plotTk import plot_Ipqu_spectra_fig
from RMutils.util_plotTk import plot_rmsf_fdf_fig
from RMutils.util_plotTk import plot_complexity_fig
from RMutils.util_plotTk import CustomNavbar
from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax
if sys.version_info.major == 2:
print('RM-tools will no longer run with Python 2! Please use Python 3.')
exit()
C = 2.997924538e8 # Speed of light [m/s]
#-----------------------------------------------------------------------------#
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,
                nSamples=10.0, weightType="variance", fitRMSF=False,
                noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,
                debug=False, verbose=False, log=print, units='Jy/beam',
                prefixOut="prefixOut", args=None, saveOutput=False):
    """Run RM synthesis on 1D data.

    Args:
        data (list): Contains frequency and polarization data as either:
            [freq_Hz, I, Q, U, dI, dQ, dU]
                freq_Hz (array_like): Frequency of each channel in Hz.
                I (array_like): Stokes I intensity in each channel.
                Q (array_like): Stokes Q intensity in each channel.
                U (array_like): Stokes U intensity in each channel.
                dI (array_like): Error in Stokes I intensity in each channel.
                dQ (array_like): Error in Stokes Q intensity in each channel.
                dU (array_like): Error in Stokes U intensity in each channel.
            or
            [freq_Hz, q, u, dq, du]
                freq_Hz (array_like): Frequency of each channel in Hz.
                q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
                u (array_like): Fractional Stokes U intensity (U/I) in each channel.
                dq (array_like): Error in fractional Stokes Q intensity in each channel.
                du (array_like): Error in fractional Stokes U intensity in each channel.

    Kwargs:
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
        phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
        dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
        nSamples (float): Number of samples across the RMSF.
        weightType (str): Can be "variance" or "uniform"
            "variance" -- Weight by uncertainty in Q and U.
            "uniform" -- Weight uniformly (i.e. with 1s)
        fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): Is Stokes I data provided?
        phiNoise_radm2 (float): ????
        nBits (int): Precision of floating point numbers.
        showPlots (bool): Show plots?
        debug (bool): Turn on debugging messages & plots?
        verbose (bool): Verbosity.
        log (function): Which logging function to use.
        units (str): Units of data.
        prefixOut (str): Prefix for output plot file names.
            NOTE: currently overwritten below by the name of args.dataFile.
        args: Parsed command-line arguments; must expose a ``dataFile`` list.
        saveOutput (bool): Save debug/FDF plots to PDF files instead of only
            displaying them. (This name was previously referenced in the
            function body without ever being defined, which raised a
            NameError on the plotting/debug code paths; it is now a proper
            keyword argument defaulting to False, which preserves the
            behaviour of all existing calls that reached those paths only
            with plotting disabled.)

    Returns:
        mDict (dict): Summary of RM synthesis results.
        aDict (dict): Data output by RM synthesis.
    """

    # Sanity checks
    # NOTE(review): this requires `args` with a `dataFile` attribute; calling
    # with the default args=None raises AttributeError here — confirm this
    # function is only ever invoked from the CLI wrapper.
    if not os.path.exists(args.dataFile[0]):
        print("File does not exist: '%s'." % args.dataFile[0])
        sys.exit()
    prefixOut, ext = os.path.splitext(args.dataFile[0])

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2*nBits)

    # Unpack the data: first assume full Stokes [freq_Hz, I, Q, U, dI, dQ, dU]
    try:
        if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
        if verbose: log("... success.")
    except Exception:
        if verbose: log("...failed.")
        # Fall back to fractional Stokes [freq_Hz, q, u, dq, du]
        try:
            if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ')
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
            if verbose: log("... success.")
            noStokesI = True
        except Exception:
            if verbose: log("...failed.")
            if debug:
                log(traceback.format_exc())
            sys.exit()
    if verbose: log("Successfully read in the Stokes spectra.")

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if verbose: log("Warn: no Stokes I data in use.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    dQUArr = (dQArr + dUArr)/2.0

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
             create_frac_spectra(freqArr  = freqArr_GHz,
                                 IArr     = IArr,
                                 QArr     = QArr,
                                 UArr     = UArr,
                                 dIArr    = dIArr,
                                 dQArr    = dQArr,
                                 dUArr    = dUArr,
                                 polyOrd  = polyOrd,
                                 verbose  = True,
                                 debug    = debug)

    # Plot the data and the Stokes I model fit
    if verbose: log("Plotting the input data and spectral index fit.")
    freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
    IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9)
    specFig = plt.figure(figsize=(12.0, 8))
    plot_Ipqu_spectra_fig(freqArr_Hz     = freqArr_Hz,
                          IArr           = IArr,
                          qArr           = qArr,
                          uArr           = uArr,
                          dIArr          = dIArr,
                          dqArr          = dqArr,
                          duArr          = duArr,
                          freqHirArr_Hz  = freqHirArr_Hz,
                          IModArr        = IModHirArr,
                          fig            = specFig,
                          units          = units)

    # Use the custom navigation toolbar (does not work on Mac OS X)
#    try:
#        specFig.canvas.toolbar.pack_forget()
#        CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
#    except Exception:
#        pass

    # Display the figure
#    if not plt.isinteractive():
#        specFig.show()

    # DEBUG (plot the Q, U and average RMS spectrum)
    if debug:
        rmsFig = plt.figure(figsize=(12.0, 8))
        ax = rmsFig.add_subplot(111)
        ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,
                label='rms <QU>')
        ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,
                label='rms Q')
        ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,
                label='rms U')
        xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
        ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
                     np.max(freqArr_Hz)/1e9 + xRange*0.05)
        ax.set_xlabel('$\\nu$ (GHz)')
        ax.set_ylabel('RMS '+units)
        ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
#        rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -
                         np.nanmin(lambdaSqArr_m2) )
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.)    # Force the minimum phiMax to 10 FWHM

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0],
                                                             phiArr_radm2[-1],
                                                             float(dPhi_radm2),
                                                             nChanRM))

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType=="variance":
        weightArr = 1.0 / np.power(dQUArr, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose: log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ           = qArr,
                                            dataU           = uArr,
                                            lambdaSqArr_m2  = lambdaSqArr_m2,
                                            phiArr_radm2    = phiArr_radm2,
                                            weightArr       = weightArr,
                                            nBits           = nBits,
                                            verbose         = verbose,
                                            log             = log)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2  = lambdaSqArr_m2,
                        phiArr_radm2    = phiArr_radm2,
                        weightArr       = weightArr,
                        mskArr          = ~np.isfinite(qArr),
                        lam0Sq_m2       = lam0Sq_m2,
                        double          = True,
                        fitRMSF         = fitRMSF,
                        fitRMSFreal     = False,
                        nBits           = nBits,
                        verbose         = verbose,
                        log             = log)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9)
    dirtyFDF *= (Ifreq0)    # FDF is in fracpol units initially, convert back to flux

    # Calculate the theoretical noise in the FDF !!Old formula only works for wariance weights!
    weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
    dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 )

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF         = dirtyFDF,
                              phiArr      = phiArr_radm2,
                              fwhmRMSF    = fwhmRMSF,
                              dFDF        = dFDFth,
                              lamSqArr_m2 = lambdaSqArr_m2,
                              lam0Sq      = lam0Sq_m2)
    mDict["Ifreq0"] = toscalar(Ifreq0)
    mDict["polyCoeffs"] =  ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU"] = toscalar(nanmedian(dQUArr))
    mDict["dFDFth"] = toscalar(dFDFth)
    mDict["units"] = units

    if fitDict["fitStatus"] >= 128:
        log("WARNING: Stokes I model contains negative values!")
    elif fitDict["fitStatus"] >= 64:
        log("Caution: Stokes I model has low signal-to-noise.")

    # Add information on nature of channels:
    good_channels = np.where(np.logical_and(weightArr != 0, np.isfinite(qArr)))[0]
    mDict["min_freq"] = float(np.min(freqArr_Hz[good_channels]))
    mDict["max_freq"] = float(np.max(freqArr_Hz[good_channels]))
    mDict["N_channels"] = good_channels.size
    mDict["median_channel_width"] = float(np.median(np.diff(freqArr_Hz)))

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0)
    mD, pD = measure_qu_complexity(freqArr_Hz  = freqArr_Hz,
                                   qArr        = qArr,
                                   uArr        = uArr,
                                   dqArr       = dqArr,
                                   duArr       = duArr,
                                   fracPol     = mDict["fracPol"],
                                   psi0_deg    = mDict["polAngle0Fit_deg"],
                                   RM_radm2    = mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        if saveOutput:
            if verbose: print("Saving debug plots:")
            outFilePlot = prefixOut + ".debug-plots.pdf"
            if verbose: print("> " + outFilePlot)
            tmpFig.savefig(outFilePlot, bbox_inches = 'tight')
        else:
            tmpFig.show()

    # Add the per-channel arrays to a second, array-valued dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF

    if verbose:
       # Print the results to the screen
       log()
       log('-'*80)
       log('RESULTS:\n')
       log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))

       log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
                                              mDict["dPolAngleFit_deg"]))
       log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
                                                mDict["dPolAngle0Fit_deg"]))
       log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
                                                mDict["dPhiPeakPIfit_rm2"]))
       log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9))
       log('I freq0 = %.4g %s' % (mDict["Ifreq0"],units))
       log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"],
                                                mDict["dAmpPeakPIfit"],units))
       log('QU Noise = %.4g %s' % (mDict["dQU"],units))
       log('FDF Noise (theory)   = %.4g %s' % (mDict["dFDFth"],units))
       log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"],units))
       log('FDF Noise (rms)   = %.4g %s' % (mDict["dFDFrms"],units))
       log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
       log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"],
                                                  mDict["dSigmaAddPlusQ"],
                                                  mDict["dSigmaAddMinusQ"]))
       log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"],
                                                  mDict["dSigmaAddPlusU"],
                                                  mDict["dSigmaAddMinusU"]))
       log()
       log('-'*80)

    # Plot the RM Spread Function and dirty FDF
    if showPlots or saveOutput:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr     = phiArr_radm2,
                          FDF        = dirtyFDF,
                          phi2Arr    = phi2Arr_radm2,
                          RMSFArr    = RMSFArr,
                          fwhmRMSF   = fwhmRMSF,
                          vLine      = mDict["phiPeakPIfit_rm2"],
                          fig        = fdfFig,
                          units      = units)

        # Use the custom navigation toolbar
#        try:
#            fdfFig.canvas.toolbar.pack_forget()
#            CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
#        except Exception:
#            pass

        # Display the figure
#        fdfFig.show()

    # Pause if plotting enabled
    if showPlots:
        plt.show()
    elif saveOutput or debug:
        if verbose: print("Saving RMSF and dirty FDF plot:")
        outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf"
        if verbose: print("> " + outFilePlot)
        fdfFig.savefig(outFilePlot, bbox_inches = 'tight')
        #        #if verbose: print "Press <RETURN> to exit ...",
#        input()

    return mDict, aDict
def readFile(dataFile, nBits, verbose=True, debug=False):
    """
    Read the I, Q & U data from the ASCII file.

    Inputs:
        dataFile (str): relative or absolute path to file.
        nBits (int): number of bits to store the data as.
        verbose (bool): Print verbose messages to terminal?
        debug (bool): Print full traceback in case of failure?

    Returns:
        data (list of arrays): List containing the columns found in the file.
        If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU],
        else [freq_Hz, q, u, dq, du].
    """
    # Precision used when parsing the columns from disk.
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    # Read the data-file. Format=space-delimited, comments="#".
    if verbose:
        print("Reading the data file '%s':" % dataFile)

    # First interpretation: full Stokes columns with uncertainties.
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        if verbose:
            print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
        columns = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = columns
        if verbose:
            print("... success.")
        data = [freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]
    except Exception:
        if verbose:
            print("...failed.")
        # Second interpretation: fractional polarisation columns only.
        # freq_Hz, q, u, dq, du
        try:
            if verbose:
                print("> Trying [freq_Hz, q, u, dq, du]", end=' ')
            columns = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = columns
            if verbose:
                print("... success.")
            data = [freqArr_Hz, QArr, UArr, dQArr, dUArr]
            noStokesI = True
        except Exception:
            if verbose:
                print("...failed.")
            if debug:
                print(traceback.format_exc())
            sys.exit()
    if verbose:
        print("Successfully read in the Stokes spectra.")
    return data
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# Script entry point.
# NOTE(review): `main` is not defined anywhere in this module as shown —
# running the file directly will raise NameError unless `main` is defined
# in a part of the file not visible here. Confirm and fix upstream.
if __name__ == "__main__":
    main()
| 45.159624
| 111
| 0.524517
|
53b911e92af8c5251a19a68b93418217d94e2790
| 310
|
py
|
Python
|
cogdl/modules/conv/__init__.py
|
awesome-archive/cogdl
|
0a354eaaaf851e7218197508e7e85a81d3fb5753
|
[
"MIT"
] | 8
|
2020-06-03T00:55:09.000Z
|
2022-01-23T16:06:56.000Z
|
cogdl/modules/conv/__init__.py
|
awesome-archive/cogdl
|
0a354eaaaf851e7218197508e7e85a81d3fb5753
|
[
"MIT"
] | null | null | null |
cogdl/modules/conv/__init__.py
|
awesome-archive/cogdl
|
0a354eaaaf851e7218197508e7e85a81d3fb5753
|
[
"MIT"
] | 6
|
2020-06-03T00:55:11.000Z
|
2022-03-16T01:14:36.000Z
|
from .message_passing import MessagePassing
from .gcn_conv import GCNConv
from .gat_conv import GATConv
from .se_layer import SELayer
from .aggregator import Meanaggregator
from .maggregator import meanaggr
# Public API of the conv subpackage (what `from ... import *` exposes).
# NOTE(review): `meanaggr` is imported above but omitted here — confirm
# whether it should be part of the public API.
__all__ = [
    'MessagePassing',
    'GCNConv',
    'GATConv',
    'SELayer',
    'Meanaggregator'
]
| 20.666667
| 43
| 0.751613
|
53b93c021c611ea7b35c2a4e8768e23aee0fabe0
| 1,449
|
py
|
Python
|
netket/utils/jax.py
|
gpescia/MyNetKet
|
958510966a5870d9d491de0628903cf1fc210921
|
[
"Apache-2.0"
] | 1
|
2022-01-31T15:19:09.000Z
|
2022-01-31T15:19:09.000Z
|
netket/utils/jax.py
|
gpescia/MyNetKet
|
958510966a5870d9d491de0628903cf1fc210921
|
[
"Apache-2.0"
] | 26
|
2021-08-06T15:27:57.000Z
|
2022-03-30T16:55:18.000Z
|
netket/utils/jax.py
|
gpescia/MyNetKet
|
958510966a5870d9d491de0628903cf1fc210921
|
[
"Apache-2.0"
] | 1
|
2021-04-25T15:47:32.000Z
|
2021-04-25T15:47:32.000Z
|
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from . import struct
def get_afun_if_module(mod_or_fun) -> Callable:
    """Return the ``apply`` attribute of a module-like object.

    Anything without an ``apply`` attribute (e.g. a bare callable) is
    passed through unchanged.
    """
    _missing = object()
    apply_fn = getattr(mod_or_fun, "apply", _missing)
    return mod_or_fun if apply_fn is _missing else apply_fn
def wrap_afun(mod_or_fun):
    """Ensure the argument behaves like a module with an ``apply`` method.

    Objects that already expose ``apply`` are returned as-is; bare
    callables are wrapped in ``WrappedApplyFun``.
    """
    already_module = hasattr(mod_or_fun, "apply")
    return mod_or_fun if already_module else WrappedApplyFun(mod_or_fun)
| 30.829787
| 78
| 0.712215
|
53b95578f3b9aa9d904006c7f7edb3a1fb45bd48
| 10,933
|
py
|
Python
|
geetools/batch/featurecollection.py
|
Kungreye/gee_tools
|
d0712ac78410250c41503ca08075f536d58d2ef3
|
[
"MIT"
] | null | null | null |
geetools/batch/featurecollection.py
|
Kungreye/gee_tools
|
d0712ac78410250c41503ca08075f536d58d2ef3
|
[
"MIT"
] | null | null | null |
geetools/batch/featurecollection.py
|
Kungreye/gee_tools
|
d0712ac78410250c41503ca08075f536d58d2ef3
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import ee
from . import utils
import json
import csv
from .. import tools
def fromShapefile(filename, crs=None, start=None, end=None):
    """ Convert an ESRI file (.shp and .dbf must be present) to a
    ee.FeatureCollection

    At the moment only works for shapes with less than 1000 records and doesn't
    handle complex shapes.

    :param filename: the name of the filename. If the shape is not in the
        same path than the script, specify a path instead.
    :type filename: str
    :param crs: coordinate reference system; if None it is read from the
        shapefile's companion .prj file via ``utils.getProjection``.
    :param start: index of the first record to convert (defaults to 0).
    :param end: index of the last record to convert (defaults to the last
        record in the file).
    :return: the FeatureCollection
    :rtype: ee.FeatureCollection
    """
    import shapefile
    wgs84 = ee.Projection('EPSG:4326')
    # read the filename
    reader = shapefile.Reader(filename)
    # reader.fields[0] is the DeletionFlag pseudo-field, hence [1:]
    fields = reader.fields[1:]
    field_names = [field[0] for field in fields]
    field_types = [field[1] for field in fields]
    types = dict(zip(field_names, field_types))
    features = []

    projection = utils.getProjection(filename) if not crs else crs
    # catch a string with format "EPSG:XXX"
    if isinstance(projection, str):
        if 'EPSG:' in projection:
            projection = projection.split(':')[1]
    projection = 'EPSG:{}'.format(projection)

    # filter records with start and end
    start = start if start else 0
    if not end:
        records = reader.shapeRecords()
        end = len(records)
    else:
        end = end + 1

    # Earth Engine rejects overly large client-side FeatureCollections.
    if (end-start)>1000:
        msg = "Can't process more than 1000 records at a time. Found {}"
        raise ValueError(msg.format(end-start))

    for i in range(start, end):
        # atr = dict(zip(field_names, sr.record))
        sr = reader.shapeRecord(i)
        atr = {}
        for fld, rec in zip(field_names, sr.record):
            fld_type = types[fld]
            # DBF type codes: D=date, C=character, N/F=numeric.
            # Dates are converted to epoch milliseconds (one getInfo()
            # round-trip per date field — slow for many records).
            if fld_type == 'D':
                value = ee.Date(rec.isoformat()).millis().getInfo()
            elif fld_type in ['C', 'N', 'F']:
                value = rec
            else:
                # Other DBF types (logical, memo, ...) are skipped.
                continue
            atr[fld] = value
        geom = sr.shape.__geo_interface__
        if projection is not None:
            # Reproject the native CRS geometry into WGS84 (1 m error margin).
            geometry = ee.Geometry(geom, projection) \
                .transform(wgs84, 1)
        else:
            geometry = ee.Geometry(geom)
        feat = ee.Feature(geometry, atr)
        features.append(feat)

    return ee.FeatureCollection(features)
def fromGeoJSON(filename=None, data=None, crs=None):
    """ Create a list of Features from a GeoJSON file. Return a python tuple
    with ee.Feature inside. This is due to failing when attempting to create a
    FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
    it yourself casting the result of this function to a ee.List or using it
    directly as a FeatureCollection argument.

    :param filename: the name of the file to load
    :type filename: str
    :param data: alternatively, an already-parsed GeoJSON dict (used when
        `filename` is None).
    :param crs: a coordinate reference system in EPSG format. If not specified
        it will try to get it from the geoJSON, and if not there it will rise
        an error
    :type: crs: str
    :return: a tuple of features.
    """
    # Load the GeoJSON either from disk or from an in-memory dict.
    if filename:
        with open(filename, 'r') as geoj:
            content = geoj.read()
        geodict = json.loads(content)
    else:
        geodict = data
    features = []

    # Get crs from GeoJSON
    if not crs:
        filecrs = geodict.get('crs')
        if filecrs:
            # CRS names look like "urn:ogc:def:crs:OGC:1.3:CRS84" or
            # "urn:ogc:def:crs:EPSG::4326"; split on ':' and drop empties.
            name = filecrs.get('properties').get('name')
            splitcrs = name.split(':')
            cleancrs = [part for part in splitcrs if part]
            try:
                if cleancrs[-1] == 'CRS84':
                    # CRS84 is WGS84 with lon/lat axis order.
                    crs = 'EPSG:4326'
                elif cleancrs[-2] == 'EPSG':
                    crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1])
                else:
                    raise ValueError('{} not recognized'.format(name))
            except IndexError:
                raise ValueError('{} not recognized'.format(name))
        else:
            # GeoJSON spec default when no crs member is present.
            crs = 'EPSG:4326'

    for n, feat in enumerate(geodict.get('features')):
        properties = feat.get('properties')
        geom = feat.get('geometry')
        ty = geom.get('type')
        coords = geom.get('coordinates')
        if ty == 'GeometryCollection':
            # GeometryCollection constructors take the whole geometry dict.
            ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs)
        else:
            if ty == 'Polygon':
                # Earth Engine rejects 3D (x, y, z) polygon coordinates.
                coords = utils.removeZ(coords) if utils.hasZ(coords) else coords
            ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs))
        ee_feat = ee.feature.Feature(ee_geom, properties)
        features.append(ee_feat)

    return tuple(features)
def fromKML(filename=None, data=None, crs=None, encoding=None):
    """ Create a list of Features from a KML file. Return a python tuple
    with ee.Feature inside. This is due to failing when attempting to create a
    FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
    it yourself casting the result of this function to a ee.List or using it
    directly as a FeatureCollection argument.

    :param filename: the name of the file to load
    :type filename: str
    :param data: alternatively, raw KML content (used when `filename` is None).
    :param crs: a coordinate reference system in EPSG format. If not specified
        it will try to get it from the geoJSON, and if not there it will rise
        an error
    :type: crs: str
    :param encoding: text encoding of the KML file, passed through to the
        KML-to-GeoJSON converter.
    :return: a tuple of features.
    """
    # Convert the KML into a GeoJSON dict, then sanitize it in place
    # before delegating to fromGeoJSON.
    geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding)
    features = geojsondict['features']

    for feat in features:
        # remove styleUrl (KML styling has no GeoJSON/EE equivalent)
        prop = feat['properties']
        if 'styleUrl' in prop:
            prop.pop('styleUrl')

        # remove Z value if needed (KML coordinates are often 3D)
        geom = feat['geometry']
        ty = geom['type']
        if ty == 'GeometryCollection':
            geometries = geom['geometries']
            for g in geometries:
                c = g['coordinates']
                utils.removeZ(c)
        else:
            coords = geom['coordinates']
            utils.removeZ(coords)

    return fromGeoJSON(data=geojsondict, crs=crs)
def toDict(collection, split_at=4000):
    """ Get the FeatureCollection as a dict object.

    :param collection: the collection to convert
    :type collection: ee.FeatureCollection
    :param split_at: chunk size used when the collection is too large to be
        fetched in a single getInfo() call.
    :type split_at: int
    :return: a dict in GeoJSON FeatureCollection layout ('features',
        'columns', ...).
    """
    size = collection.size()
    condition = size.gte(4999)
    # NOTE(review): `greater()` is not defined in this module — the
    # large-collection branch raises NameError; `split_at` is also unused
    # in this condition. Confirm the intended chunking helper upstream.
    collections = ee.List(
        ee.Algorithms.If(condition,
                         greater(),
                         ee.List([collection])))
    collections_size = collections.size().getInfo()

    # Fetch the first chunk: its getInfo() result carries the collection
    # metadata ('columns', type, ...) that we reuse for the merged output.
    col = ee.FeatureCollection(collections.get(0))
    content = col.getInfo()
    feats = content['features']

    # Start at 1: chunk 0's features are already in `feats`. (The original
    # loop started at 0, which duplicated every feature of the first chunk.)
    for i in range(1, collections_size):
        c = ee.FeatureCollection(collections.get(i))
        content_c = c.getInfo()
        feats_c = content_c['features']
        feats = feats + feats_c

    content['features'] = feats

    return content
def toGeoJSON(collection, name, path=None, split_at=4000):
    """ Export a FeatureCollection to a GeoJSON file

    :param collection: The collection to export
    :type collection: ee.FeatureCollection
    :param name: name of the resulting file
    :type name: str
    :param path: The path where to save the file. If None, will be saved
        in the current folder
    :type path: str
    :param split_at: limit to avoid an EE Exception
    :type split_at: int
    :return: A GeoJSON (.geojson) file.
    :rtype: file
    """
    import json
    import os

    if not path:
        path = os.getcwd()

    # Append the extension only if it is not already there.
    # (The previous check, `name[-8:-1] != '.geojson'`, compared a 7-char
    # slice against an 8-char literal, so it was always true and produced
    # files named "<name>.geojson.geojson".)
    if name.endswith('.geojson'):
        fname = name
    else:
        fname = name + '.geojson'

    content = toDict(collection, split_at)

    with open(os.path.join(path, fname), 'w') as thefile:
        thefile.write(json.dumps(content))

    return thefile
def toCSV(collection, filename, split_at=4000):
    """ Alternative to download a FeatureCollection as a CSV.

    Fetches the collection client-side via `toDict` and writes one row per
    feature; the 'geometry' column holds only the geometry *type*.
    """
    content = toDict(collection, split_at)

    # Header: the collection's columns plus a synthetic 'geometry' column.
    header = list(content['columns'].keys())
    header.append('geometry')

    if filename[-4:] != '.csv':
        filename += '.csv'

    with open(filename, 'w') as out_file:
        writer = csv.DictWriter(out_file, header)
        writer.writeheader()
        # One row per feature: properties + feature id + geometry type.
        for feat in content['features']:
            row = feat['properties']
            row['system:index'] = feat['id']
            row['geometry'] = feat['geometry']['type']
            writer.writerow(row)

    return out_file
def toLocal(collection, filename, filetype=None, selectors=None, path=None):
    """ Download a FeatureCollection to a local file a CSV or geoJSON file.
    This uses a different method than `toGeoJSON` and `toCSV`: it asks the
    server for a download URL and fetches the rendered file directly.

    :param filetype: The filetype of download, either CSV or JSON.
        Defaults to CSV.
    :param selectors: The selectors that should be used to determine which
        attributes will be downloaded.
    :param filename: The name of the file to be downloaded
    """
    chosen_type = filetype if filetype else 'CSV'
    url = collection.getDownloadURL(chosen_type, selectors, filename)
    return utils.downloadFile(url, filename, chosen_type, path)
def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):
    """ This function can create folders and ImageCollections on the fly.
    The rest is the same to Export.image.toAsset. You can pass the same
    params as the original function

    :param table: the feature collection to upload
    :type table: ee.FeatureCollection
    :param assetPath: path to upload the image (only PATH, without
        filename)
    :type assetPath: str
    :param name: filename for the image (AssetID will be assetPath + name)
        NOTE(review): the default None would raise a TypeError in the
        '/'.join below — confirm callers always pass a name.
    :type name: str
    :return: the tasks
    :rtype: ee.batch.Task
    """
    # Check if the user is specified in the asset path
    is_user = (assetPath.split('/')[0] == 'users')
    if not is_user:
        # NOTE(review): getAssetRoots() is a legacy EE API — confirm it is
        # still supported by the installed earthengine-api version.
        user = ee.batch.data.getAssetRoots()[0]['id']
        assetPath = "{}/{}".format(user, assetPath)
    if create:
        # Recursively create the destination folder path if missing.
        path2create = assetPath  # '/'.join(assetPath.split('/')[:-1])
        utils.createAssets([path2create], 'Folder', True)

    # Asset ID (Path + name)
    assetId = '/'.join([assetPath, name])
    # Description
    description = utils.matchDescription(name)
    # Init task
    task = ee.batch.Export.table.toAsset(table, assetId=assetId,
                                         description=description, **kwargs)
    task.start()
    if verbose:
        print('Exporting {} to {}'.format(name, assetPath))
    return task
| 32.346154
| 83
| 0.611269
|
53bae4caf0f5e1b3ae61fd16a27c99803d8b7c2e
| 1,357
|
py
|
Python
|
index.py
|
extwiii/Rock-paper-scissors-lizard-Spock
|
7a8eda9f168636a9878c91620e625997ba0994a8
|
[
"Apache-2.0"
] | 1
|
2018-08-02T00:52:33.000Z
|
2018-08-02T00:52:33.000Z
|
index.py
|
extwiii/Rock-paper-scissors-lizard-Spock
|
7a8eda9f168636a9878c91620e625997ba0994a8
|
[
"Apache-2.0"
] | null | null | null |
index.py
|
extwiii/Rock-paper-scissors-lizard-Spock
|
7a8eda9f168636a9878c91620e625997ba0994a8
|
[
"Apache-2.0"
] | null | null | null |
# Rock-paper-scissors-lizard-Spock template
# The key idea of this program is to equate the strings
# "rock", "paper", "scissors", "lizard", "Spock" to numbers
# as follows:
#
# 0 - rock
# 1 - Spock
# 2 - paper
# 3 - lizard
# 4 - scissors
#
# With this ordering, (player - computer) % 5 decides the round:
# 0 is a tie, 1-2 means the player wins, 3-4 means the computer wins.

import random

# Ordered so that each name's list index is its game number (0-4).
_NAMES = ["rock", "Spock", "paper", "lizard", "scissors"]


def name_to_number(name):
    """Return the 0-4 code for a choice name; raise ValueError if unknown."""
    try:
        return _NAMES.index(name)
    except ValueError:
        raise ValueError("Unknown choice: %s" % name)


def number_to_name(number):
    """Return the choice name for a 0-4 code."""
    return _NAMES[number]


def rpsls(player_choice):
    """Play one round against a random computer choice, printing the result.

    (This function was called below but never defined, so the script
    raised NameError at import; this is the standard implementation for
    the documented 0-4 encoding.)
    """
    print("")
    print("Player chooses " + player_choice)
    player_number = name_to_number(player_choice)
    comp_number = random.randrange(0, 5)
    comp_choice = number_to_name(comp_number)
    print("Computer chooses " + comp_choice)
    # Modular difference decides the winner (see encoding comment above).
    difference = (player_number - comp_number) % 5
    if difference == 0:
        print("Player and computer tie!")
    elif difference <= 2:
        print("Player wins!")
    else:
        print("Computer wins!")


rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
| 21.539683
| 59
| 0.590273
|
53bd7ca2bf66bb072074f8694f4fa68fad92a150
| 9,067
|
py
|
Python
|
libs/clustering/ensembles/utils.py
|
greenelab/phenoplier
|
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
|
[
"BSD-2-Clause-Patent"
] | 3
|
2021-08-17T21:59:19.000Z
|
2022-03-08T15:46:24.000Z
|
libs/clustering/ensembles/utils.py
|
greenelab/phenoplier
|
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
|
[
"BSD-2-Clause-Patent"
] | 4
|
2021-08-04T13:57:24.000Z
|
2021-10-11T14:57:15.000Z
|
libs/clustering/ensembles/utils.py
|
greenelab/phenoplier
|
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
"""
Contains functions to generate and combine a clustering ensemble.
"""
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.metrics import adjusted_mutual_info_score as ami
from sklearn.metrics import normalized_mutual_info_score as nmi
from tqdm import tqdm
from clustering.utils import reset_estimator, compare_arrays
def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):
    """
    It generates an ensemble from the data given a set of clusterers (a
    clusterer is an instance of a clustering algorithm with a fixed set of
    parameters).

    Args:
        data:
            A numpy array, pandas dataframe, or any other structure supported
            by the clusterers as data input.
        clusterers:
            A dictionary with clusterers specified in this format: { 'k-means
            #1': KMeans(n_clusters=2), ... }
        attributes:
            A list of attributes to save in the final dataframe; for example,
            including "n_clusters" will extract this attribute from the
            estimator and include it in the final dataframe returned.
        affinity_matrix:
            If the clustering algorithm is AgglomerativeClustering (from
            sklearn) and the linkage method is different than ward (which only
            support euclidean distance), the affinity_matrix is given as data
            input to the estimator instead of data.

    Returns:
        A pandas DataFrame with all the partitions generated by the clusterers.
        Columns include the clusterer name/id, the partition, the estimator
        parameters (obtained with the get_params() method) and any other
        attribute specified. Partitions that collapsed to a single cluster
        (or none) are skipped.
    """
    ensemble = []

    for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)):
        # get partition
        #
        # for agglomerative clustering both data and affinity_matrix should be
        # given; for ward linkage, data is used, and for the other linkage
        # methods the affinity_matrix is used
        if (type(clus_obj).__name__ == "AgglomerativeClustering") and (
            clus_obj.linkage != "ward"
        ):
            partition = clus_obj.fit_predict(affinity_matrix).astype(float)
        else:
            partition = clus_obj.fit_predict(data).astype(float)

        # remove from partition noisy points (for example, if using DBSCAN);
        # negative labels mean "noise" and are mapped to NaN
        partition[partition < 0] = np.nan

        # get number of clusters, counting only non-NaN labels
        partition_no_nan = partition[~np.isnan(partition)]
        n_clusters = np.unique(partition_no_nan).shape[0]

        # stop if n_clusters <= 1 (a degenerate partition adds no
        # information to the ensemble); reset the estimator before skipping
        if n_clusters <= 1:
            reset_estimator(clus_obj)
            continue

        res = pd.Series(
            {
                "clusterer_id": clus_name,
                "clusterer_params": str(clus_obj.get_params()),
                "partition": partition,
            }
        )

        for attr in attributes:
            # "n_clusters" is special-cased: if the estimator doesn't expose
            # it (e.g. DBSCAN), use the count derived from the partition
            if attr == "n_clusters" and not hasattr(clus_obj, attr):
                res[attr] = n_clusters
            else:
                res[attr] = getattr(clus_obj, attr)

        ensemble.append(res)

        # for some estimators such as DBSCAN this is needed, because otherwise
        # the estimator saves references of huge data structures not needed in
        # this context
        reset_estimator(clus_obj)

    return pd.DataFrame(ensemble).set_index("clusterer_id")
def get_ensemble_distance_matrix(ensemble, n_jobs=1):
    """
    Compute the coassociation matrix of a clustering ensemble.

    For every pair of objects it measures (via the `_compare` metric) how
    often the pair was clustered together across the ensemble's partitions,
    yielding a square object-by-object distance matrix.

    Args:
        ensemble:
            A numpy array with one clustering solution (partition) per row
            and one object per column.
        n_jobs:
            Number of parallel jobs for sklearn's pairwise_distances.

    Returns:
        A square numpy array with the coassociation distance for every
        object pair.
    """
    # pairwise_distances works row-wise, so transpose to objects-as-rows;
    # NaNs (noise labels) are tolerated by the metric.
    objects_as_rows = ensemble.T
    return pairwise_distances(
        objects_as_rows,
        metric=_compare,
        n_jobs=n_jobs,
        force_all_finite="allow-nan",
    )
def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
    """
    Combine a clustering ensemble with several consensus methods and keep
    the best result.

    Each method consolidates the ensemble into a single partition with `k`
    clusters; the partition maximizing `selection_criterion` wins.

    Args:
        ensemble:
            a clustering ensemble (rows are partitions, columns are objects).
        k:
            the final number of clusters for the combined partition.
        methods:
            a list of methods to apply on the ensemble; each returns a combined
            partition.
        selection_criterion:
            a function that represents the selection criterion; this function
            has to accept an ensemble as the first argument, and a partition as
            the second one.
        n_jobs:
            number of jobs.
        use_tqdm:
            ensembles/disables the use of tqdm to show a progress bar.

    Returns:
        Returns a tuple: (partition, best method name, best criterion value)
    """
    from concurrent.futures import ProcessPoolExecutor, as_completed

    results_by_method = {}

    # Run every consensus method in its own process; collect results as
    # they finish so the progress bar advances in real time.
    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        futures = {
            executor.submit(method, ensemble, k): method.__name__
            for method in methods
        }

        progress = tqdm(
            as_completed(futures),
            total=len(futures),
            disable=(not use_tqdm),
            ncols=100,
        )
        for future in progress:
            method_name = futures[future]
            partition = future.result()
            results_by_method[method_name] = {
                "partition": partition,
                "criterion_value": selection_criterion(ensemble, partition),
            }

    # select the best performing method according to the selection criterion
    winner = max(
        results_by_method,
        key=lambda name: results_by_method[name]["criterion_value"],
    )
    winner_results = results_by_method[winner]

    return (
        winner_results["partition"],
        winner,
        winner_results["criterion_value"],
    )
def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
    """
    Run a consensus clustering method and measure how well the resulting
    partition agrees with the ensemble members.

    The consolidated partition is compared against every ensemble member
    with ARI, AMI and NMI; mean, median and standard deviation of each
    measure are reported.

    Args:
        method_func:
            Consensus function whose first argument is either the ensemble
            or the coassociation matrix derived from it.
        ensemble_data:
            Numpy array handed to `method_func`. For evidence accumulation
            methods this is the coassociation matrix (square matrix of
            object-pair distances derived from the ensemble).
        ensemble:
            Numpy array with the ensemble (partitions in rows, objects in
            columns).
        k:
            Number of clusters to obtain from the ensemble data.
        kwargs:
            Extra keyword arguments forwarded to `method_func`.

    Returns:
        Tuple with the derived data partition and a dict of performance
        measures for it.
    """
    part = method_func(ensemble_data, k, **kwargs)

    def _agreement_values(measure):
        # compare the consolidated partition against every ensemble member
        return np.array(
            [
                compare_arrays(member, part, measure, use_weighting=True)
                for member in ensemble
            ]
        )

    performance_values = {}
    for prefix, measure in (("ari", ari), ("ami", ami), ("nmi", nmi)):
        values = _agreement_values(measure)
        performance_values[f"{prefix}_mean"] = np.mean(values)
        performance_values[f"{prefix}_median"] = np.median(values)
        performance_values[f"{prefix}_std"] = np.std(values)

    return part, performance_values
| 35.837945
| 88
| 0.644645
|
53bdcb0790280882aedd07e5cb2cef0159140f96
| 7,236
|
py
|
Python
|
backend/chart/application/service/employees.py
|
toshi-click/chart_app
|
10577d7835554a93688ae0c58ecb25fbe2925bec
|
[
"BSD-3-Clause"
] | null | null | null |
backend/chart/application/service/employees.py
|
toshi-click/chart_app
|
10577d7835554a93688ae0c58ecb25fbe2925bec
|
[
"BSD-3-Clause"
] | 7
|
2020-10-25T05:34:54.000Z
|
2020-12-02T11:31:44.000Z
|
backend/chart/application/service/employees.py
|
toshi-click/chart_app
|
10577d7835554a93688ae0c58ecb25fbe2925bec
|
[
"BSD-3-Clause"
] | 1
|
2021-04-30T16:51:43.000Z
|
2021-04-30T16:51:43.000Z
|
import logging
from django.db import transaction, connection
from django.utils import timezone
from django.utils.timezone import localtime
from chart.application.enums.department_type import DepartmentType
from chart.application.enums.gender_type import GenderType
from chart.application.service.app_logic_base import AppLogicBaseService
from chart.models import Employees, Departments
"""
employees
"""
def _regist_departments(self, department_no, department_name):
    """
    Insert a new Departments row.

    Args:
        department_no: department number to store.
        department_name: display name of the department.
    """
    # keep the model on the service instance, as the sibling methods do
    self.regist_model = department = Departments()
    department.department_no = department_no
    department.department_name = department_name
    # 0 marks the row as active (not logically deleted)
    department.delete_flag = 0
    department.regist_dt = localtime(timezone.now())
    department.update_dt = localtime(timezone.now())
    department.save()
def _update_employees_department(self, employees_id, department_id, department_date_from):
    """
    Update an employee's department assignment.

    Only department_id, department_date_from and update_dt are written.

    Args:
        employees_id: primary key of the Employees row to update.
        department_id: id of the newly assigned department.
        department_date_from: date the assignment takes effect.
    """
    # keep the model on the service instance, as the sibling methods do
    self.update_model = employee = Employees()
    employee.pk = employees_id
    employee.department_id = department_id
    employee.department_date_from = department_date_from
    employee.update_dt = localtime(timezone.now())
    # restrict the UPDATE statement to the touched columns
    employee.save(update_fields=['department_id', 'department_date_from', 'update_dt'])
| 43.590361
| 116
| 0.674268
|
53bf55da72ae86acb1c699435bc12016f38e84ea
| 146
|
py
|
Python
|
DataQualityTester/views/pages.py
|
pwyf/data-quality-tester
|
d7674849c64d4d41ff4e4b6b12631994c7ce0a92
|
[
"MIT"
] | null | null | null |
DataQualityTester/views/pages.py
|
pwyf/data-quality-tester
|
d7674849c64d4d41ff4e4b6b12631994c7ce0a92
|
[
"MIT"
] | 53
|
2017-04-07T09:41:38.000Z
|
2022-02-11T14:26:46.000Z
|
DataQualityTester/views/pages.py
|
pwyf/iati-simple-tester
|
ef7f06ebbd4dd45e6ca76d93a3f624abc33d961c
|
[
"MIT"
] | 3
|
2017-07-19T13:43:14.000Z
|
2019-10-29T15:25:49.000Z
|
from flask import render_template
| 14.6
| 41
| 0.726027
|
53bfb5244dff3d80fd05051eac4247280b733cea
| 5,761
|
py
|
Python
|
hastakayit_gui.py
|
roselight/Image-Recognition-with-OpenCv
|
4d0607f37bc80ee0b00790cdcbb0a22c76852ac4
|
[
"MIT"
] | 2
|
2020-04-10T21:53:52.000Z
|
2020-04-11T12:24:35.000Z
|
hastakayit_gui.py
|
roselight/Image-Recognition-with-OpenCv
|
4d0607f37bc80ee0b00790cdcbb0a22c76852ac4
|
[
"MIT"
] | null | null | null |
hastakayit_gui.py
|
roselight/Image-Recognition-with-OpenCv
|
4d0607f37bc80ee0b00790cdcbb0a22c76852ac4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\hastakayit_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector
from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow
from PyQt5.QtCore import Qt, QDate, QDateTime
# MySQL connection for the patient-registration GUI (original comment was
# mojibake Turkish: "SQL statement handler created for the database
# connection").
# NOTE(review): credentials are hard-coded and the connection is opened at
# import time — move to configuration and lazy initialization before use.
db = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="12345",
    database="cilth_vt"
)
cursor = db.cursor()
if __name__ == "__main__":
    import sys
    # Standard PyQt5 bootstrap: create the application, attach the
    # generated UI to a main window, and enter the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    # NOTE(review): Ui_MainWindow2 is not defined in this excerpt —
    # presumably generated by pyuic elsewhere in the file; confirm.
    ui = Ui_MainWindow2()
    ui.setupUi2(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 44.658915
| 117
| 0.69849
|
53c0dd2b4f081d4c8d070b26922f68bf139eaa76
| 4,138
|
py
|
Python
|
.travis/manage_daily_builds.py
|
loonwerks/AGREE
|
58640ab89aaa3c72ccca0b8c80cf96d1815981da
|
[
"BSD-3-Clause"
] | 5
|
2020-12-28T15:41:04.000Z
|
2021-07-31T09:07:28.000Z
|
.travis/manage_daily_builds.py
|
loonwerks/AGREE
|
58640ab89aaa3c72ccca0b8c80cf96d1815981da
|
[
"BSD-3-Clause"
] | 89
|
2020-01-27T17:16:00.000Z
|
2022-03-31T09:57:25.000Z
|
.travis/manage_daily_builds.py
|
loonwerks/AGREE
|
58640ab89aaa3c72ccca0b8c80cf96d1815981da
|
[
"BSD-3-Clause"
] | 5
|
2020-02-25T00:33:21.000Z
|
2021-01-02T07:23:11.000Z
|
#!/usr/bin/env python3
'''
Copyright (c) 2021, Collins Aerospace.
Developed with the sponsorship of Defense Advanced Research Projects Agency (DARPA).
Permission is hereby granted, free of charge, to any person obtaining a copy of this data,
including any software or models in source or binary form, as well as any drawings, specifications,
and documentation (collectively "the Data"), to deal in the Data without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Data, and to permit persons to whom the Data is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Data.
THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
'''
import os
import re
import sys
from github3 import GitHub
from pprint import pformat
GITHUB_API = 'https://api.github.com/repos'
GITHUB_RELEASES = 'releases'
AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None
REPOSITORY_OWNER = 'loonwerks'
REPOSITORY_REPO = 'AGREE'
PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\d+\.\d+\.\d+(-(\d{12}))?-.*')
if __name__ == '__main__':
    # Entry point: the first CLI argument selects the build-management
    # action. NOTE(review): manage_daily_builds is defined elsewhere in
    # the full file; not visible in this excerpt.
    manage_daily_builds(sys.argv[1])
| 48.682353
| 137
| 0.678347
|
53c1b1b92893f74554831ae30476aefdb5464370
| 5,743
|
py
|
Python
|
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
|
KaihuiLiang/ParlAI
|
fb5c92741243756516fa50073d34e94ba0b6981e
|
[
"MIT"
] | null | null | null |
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
|
KaihuiLiang/ParlAI
|
fb5c92741243756516fa50073d34e94ba0b6981e
|
[
"MIT"
] | 1
|
2020-11-12T02:20:02.000Z
|
2020-11-12T02:20:02.000Z
|
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
|
MoPei/ParlAI
|
321bc857f2765cd76d5134531a802442ac4c9f5c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test components of specific crowdsourcing tasks.
"""
import json
import os
import unittest
import pandas as pd
import parlai.utils.testing as testing_utils
try:
    # These imports need a full ParlAI crowdsourcing install; when the
    # dependencies are missing the ImportError is swallowed so the test
    # module can still be collected.
    from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import (
        TurnAnnotationsStaticResultsCompiler,
    )
    from parlai.crowdsourcing.utils.tests import check_stdout
except ImportError:
    pass

if __name__ == "__main__":
    unittest.main()
| 37.292208
| 93
| 0.482675
|
53c38f978d506f03ad72b1b6b50a34e76cbf6a7b
| 3,937
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | 1
|
2020-10-14T00:06:54.000Z
|
2020-10-14T00:06:54.000Z
|
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | 2
|
2015-08-06T07:45:48.000Z
|
2017-01-04T17:47:16.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rabbitmq_plugin
short_description: Adds or removes plugins to RabbitMQ
description:
- Enables or disables RabbitMQ plugins
version_added: "1.1"
author: Chris Hoffman
options:
names:
description:
- Comma-separated list of plugin names
required: true
default: null
aliases: [name]
new_only:
description:
- Only enable missing plugins
- Does not disable plugins that are not in the names list
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if plugins are to be enabled or disabled
required: false
default: enabled
choices: [enabled, disabled]
prefix:
description:
- Specify a custom install prefix to a Rabbit
required: false
version_added: "1.3"
default: null
'''
EXAMPLES = '''
# Enables the rabbitmq_management plugin
- rabbitmq_plugin: names=rabbitmq_management state=enabled
'''
# import module snippets
from ansible.module_utils.basic import *
main()
| 30.053435
| 88
| 0.654559
|
53c4401601b96a14bafd9a44d9c96d488de53fcf
| 7,279
|
py
|
Python
|
vitrage/datasources/static/driver.py
|
HoonMinJeongUm/Hunmin-vitrage
|
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
|
[
"Apache-2.0"
] | null | null | null |
vitrage/datasources/static/driver.py
|
HoonMinJeongUm/Hunmin-vitrage
|
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
|
[
"Apache-2.0"
] | null | null | null |
vitrage/datasources/static/driver.py
|
HoonMinJeongUm/Hunmin-vitrage
|
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 - Nokia, ZTE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import chain
from six.moves import reduce
from oslo_log import log
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import GraphAction
from vitrage.datasources.driver_base import DriverBase
from vitrage.datasources.static import STATIC_DATASOURCE
from vitrage.datasources.static import StaticFields
from vitrage.utils import file as file_utils
LOG = log.getLogger(__name__)
| 37.911458
| 79
| 0.637588
|
53c47f75ab180de02752f1ea49f9b87157a860e1
| 2,406
|
py
|
Python
|
napari/layers/shapes/mesh.py
|
marshuang80/napari
|
10f1d0f39fe9ccd42456c95458e2f23b59450f02
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/shapes/mesh.py
|
marshuang80/napari
|
10f1d0f39fe9ccd42456c95458e2f23b59450f02
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/shapes/mesh.py
|
marshuang80/napari
|
10f1d0f39fe9ccd42456c95458e2f23b59450f02
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
| 38.806452
| 79
| 0.646301
|
53c5781ea07cd092d5d5320da909512506460ef4
| 184
|
py
|
Python
|
python/helpers.py
|
cdacos/astrophysics_with_a_pc
|
b0017856005a4771fbd89c8137fb320b72b1b633
|
[
"FSFAP"
] | null | null | null |
python/helpers.py
|
cdacos/astrophysics_with_a_pc
|
b0017856005a4771fbd89c8137fb320b72b1b633
|
[
"FSFAP"
] | null | null | null |
python/helpers.py
|
cdacos/astrophysics_with_a_pc
|
b0017856005a4771fbd89c8137fb320b72b1b633
|
[
"FSFAP"
] | 1
|
2021-03-14T23:13:28.000Z
|
2021-03-14T23:13:28.000Z
|
import sys
| 20.444444
| 45
| 0.63587
|
53c5eb302f7f03de564020dfecea1ce909aa994c
| 12,916
|
py
|
Python
|
configs/docker-ubuntu-img/para.py
|
MarioCarrilloA/stx-packaging
|
56cf32c4d65ba20f9317102d922ce946a800527d
|
[
"Apache-2.0"
] | 1
|
2019-06-02T00:28:03.000Z
|
2019-06-02T00:28:03.000Z
|
configs/docker-ubuntu-img/para.py
|
MarioCarrilloA/stx-packaging
|
56cf32c4d65ba20f9317102d922ce946a800527d
|
[
"Apache-2.0"
] | 11
|
2019-04-05T16:04:54.000Z
|
2019-08-23T19:24:49.000Z
|
configs/docker-ubuntu-img/para.py
|
MarioCarrilloA/stx-packaging
|
56cf32c4d65ba20f9317102d922ce946a800527d
|
[
"Apache-2.0"
] | 5
|
2019-02-18T23:11:30.000Z
|
2019-04-29T07:42:31.000Z
|
#!/usr/bin/python3
# vim:se tw=0 sts=4 ts=4 et ai:
"""
Copyright 2014 Osamu Aoki
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import pwd
import sys
import time
import debmake.read
###########################################################################
# undefined environment variable -> ''
#######################################################################
# Initialize parameters
#######################################################################
#######################################################################
# Test code
#######################################################################
if __name__ == '__main__':
    # Smoke test: print every parameter produced by para().
    # NOTE(review): para() is defined earlier in the full file; it is not
    # visible in this excerpt.
    for p, v in para().items():
        print("para['{}'] = \"{}\"".format(p,v))
| 38.440476
| 554
| 0.477083
|
53c6b101ead41851286a75be3bcca965a4128b2f
| 6,164
|
py
|
Python
|
build/lib/jet_django/views/model.py
|
lukejamison/jet-dasboard
|
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
|
[
"MIT"
] | 193
|
2018-08-27T06:10:48.000Z
|
2022-03-08T13:04:55.000Z
|
build/lib/jet_django/views/model.py
|
lukejamison/jet-dasboard
|
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
|
[
"MIT"
] | 23
|
2018-10-21T15:05:41.000Z
|
2020-12-20T15:18:58.000Z
|
build/lib/jet_django/views/model.py
|
lukejamison/jet-dasboard
|
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
|
[
"MIT"
] | 38
|
2018-10-31T16:19:25.000Z
|
2022-02-10T05:08:24.000Z
|
from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory
def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):
    # Factory that builds a DRF viewset for a dynamically registered model
    # and attaches one POST list-route per admin-defined action.
    # NOTE(review): this excerpt is incomplete — the `Viewset` class body
    # and the `route` function referenced below are missing from the
    # visible source; as shown, the loop would raise NameError. Recover
    # the dropped lines before relying on this code.
    ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)
    for action in build_actions:
        decorator = list_route(methods=['post'])
        route = decorator(route)
        setattr(Viewset, action._meta.name, route)
    return Viewset
| 36.91018
| 161
| 0.638384
|