hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
49dd8d278f7b65614e35a417984b39bb22fa9ca9 | 5,621 | py | Python | geoalchemy2/tests/test_functional.py | fredj/geoalchemy2 | 9f26714e8d181440ac03d7295d34d615cac11d02 | [
"MIT"
] | null | null | null | geoalchemy2/tests/test_functional.py | fredj/geoalchemy2 | 9f26714e8d181440ac03d7295d34d615cac11d02 | [
"MIT"
] | null | null | null | geoalchemy2/tests/test_functional.py | fredj/geoalchemy2 | 9f26714e8d181440ac03d7295d34d615cac11d02 | [
"MIT"
] | null | null | null | import unittest
from nose.tools import eq_, ok_, raises
from sqlalchemy import create_engine, MetaData, Column, Integer, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import Geometry
from sqlalchemy.exc import DataError, IntegrityError, InternalError
engine = create_engine('postgresql://gis:gis@localhost/gis', echo=True)
metadata = MetaData(engine)
Base = declarative_base(metadata=metadata)
session = sessionmaker(bind=engine)()
postgis_version = session.execute(func.postgis_version()).scalar()
if not postgis_version.startswith('2.'):
# With PostGIS 1.x the AddGeometryColumn and DropGeometryColumn
# management functions should be used.
Lake.__table__.c.geom.type.management = True
| 29.898936 | 79 | 0.637609 |
49de3b66d3ba8d7b390aa4f38533368a7826b8e9 | 689 | py | Python | WebEmpresarial/social/models.py | MarcosKlender/Web_Empresarial | 79b481488a74415e88898cff029233f339dc1e97 | [
"BSD-3-Clause"
] | null | null | null | WebEmpresarial/social/models.py | MarcosKlender/Web_Empresarial | 79b481488a74415e88898cff029233f339dc1e97 | [
"BSD-3-Clause"
] | null | null | null | WebEmpresarial/social/models.py | MarcosKlender/Web_Empresarial | 79b481488a74415e88898cff029233f339dc1e97 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
# Create your models here. | 40.529412 | 95 | 0.685051 |
49df0f9be98b9b2a452a359335aef6fd2e914b5c | 940 | py | Python | p138_copy_list_with_random_pointer.py | moonfruit/leetcode | 796b736d9b7b31f8052df6a0a140e34904b8230c | [
"MIT"
] | null | null | null | p138_copy_list_with_random_pointer.py | moonfruit/leetcode | 796b736d9b7b31f8052df6a0a140e34904b8230c | [
"MIT"
] | null | null | null | p138_copy_list_with_random_pointer.py | moonfruit/leetcode | 796b736d9b7b31f8052df6a0a140e34904b8230c | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- utf-8 -*-
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
if __name__ == '__main__':
from leetcode import RandomListNode
head = RandomListNode.new(1,2,3,4,5,6)
head.print()
Solution().copyRandomList(head).print()
| 22.926829 | 58 | 0.554255 |
49e017e3c7994fdc80e6366ccb4d6c457be60b26 | 1,904 | py | Python | train.py | jmnybl/finnish-srl | aa53bc5e27e8c9e82bc9827602e448d805b4a960 | [
"Apache-2.0"
] | null | null | null | train.py | jmnybl/finnish-srl | aa53bc5e27e8c9e82bc9827602e448d805b4a960 | [
"Apache-2.0"
] | null | null | null | train.py | jmnybl/finnish-srl | aa53bc5e27e8c9e82bc9827602e448d805b4a960 | [
"Apache-2.0"
] | null | null | null | from data_reader import Vocabulary, transform_data, save_vocabularies, Corpus
from model import build_model
from keras.callbacks import ModelCheckpoint
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='')
g=parser.add_argument_group("Reguired arguments")
g.add_argument('-d', '--data', type=str, required=True, help='Training file')
g.add_argument('-m', '--model_name', type=str, required=True, help='Name of the saved model')
g.add_argument('--min_count_word', type=int, default=2, help='Frequency threshold, how many times a word must occur to be included in the vocabulary? (default %(default)d)')
g.add_argument('--min_count_sense', type=int, default=2, help='Frequency threshold, how many times a verb sense must occur to be included in the vocabulary? (default %(default)d)')
g.add_argument('--epochs', type=int, default=10, help='Number of training epochs')
args = parser.parse_args()
train(args)
| 33.403509 | 184 | 0.719538 |
49e1f87f1d26cc79c1491809b52a416913e40d98 | 6,198 | py | Python | peekingduck/utils/requirement_checker.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | 1 | 2021-12-02T05:15:58.000Z | 2021-12-02T05:15:58.000Z | peekingduck/utils/requirement_checker.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | null | null | null | peekingduck/utils/requirement_checker.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python package requirements checker."""
import collections
import importlib
import logging
import subprocess
import sys
from pathlib import Path
from typing import Any, Iterator, TextIO, Tuple, Union
import pkg_resources as pkg
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PKD_NODE_PREFIX = "peekingduck.pipeline.nodes."
PKD_REQ_TYPE_LEN = 6 # string length of either PYTHON or SYSTEM
PKD_REQ_TYPE_PYTHON = "PYTHON" # type specifier for Python packages
ROOT = Path(__file__).resolve().parents[1]
OptionalRequirement = collections.namedtuple("OptionalRequirement", "name type")
def check_requirements(
    identifier: str, requirements_path: Path = ROOT / "optional_requirements.txt"
) -> int:
    """Checks if the packages specified by the ``identifier`` in the
    requirements file at ``requirements_path`` are present on the system.
    Missing Python packages are installed automatically; system (non-pip)
    packages only trigger a warning because they have to be installed
    manually by the user.

    Args:
        identifier (:obj:`str`): A unique identifier, typically a pipeline node
            name, used to specify which packages to check for.
        requirements_path (Path): Path to the requirements file

    Returns:
        (:obj:`int`): The number of packages updated.

    Raises:
        subprocess.CalledProcessError: If the automatic ``pip install`` of a
            missing Python requirement fails.
    """
    with open(requirements_path, encoding="utf-8") as infile:
        requirements = list(_parse_requirements(infile, identifier))

    n_update = 0
    for req in requirements:
        if req.type == PKD_REQ_TYPE_PYTHON:
            try:
                pkg.require(req.name)
            except (pkg.DistributionNotFound, pkg.VersionConflict):
                logger.info(
                    f"{req.name} not found and is required, attempting auto-update..."
                )
                try:
                    # Install with the interpreter currently running so the
                    # package lands in this (possibly virtualenv) environment;
                    # a bare ``pip`` on PATH could belong to a different Python.
                    logger.info(
                        subprocess.check_output(
                            [sys.executable, "-m", "pip", "install", req.name]
                        ).decode()
                    )
                    n_update += 1
                except subprocess.CalledProcessError as exception:
                    logger.error(exception)
                    raise
        else:
            # SYSTEM requirements cannot be pip-installed; leave it to the user.
            logger.warning(
                f"The {identifier} node requires {req.name.strip()} which needs to be "
                "manually installed. Please follow the instructions at "
                "https://peekingduck.readthedocs.io/en/stable/master.html#api-documentation "
                "and rerun. Ignore this warning if the package is already installed"
            )

    return n_update
def _split_type_and_name(string: str) -> Tuple[str, str]:
    """Return the (type, name) pair encoded in an optional-requirement line.

    The first ``PKD_REQ_TYPE_LEN`` characters carry the requirement type
    (e.g. ``PYTHON`` or ``SYSTEM``); everything after them is the name.
    """
    req_type = string[:PKD_REQ_TYPE_LEN]
    req_name = string[PKD_REQ_TYPE_LEN:]
    return req_type, req_name
| 37.113772 | 93 | 0.649564 |
49e20e1482e371d2d36af2e6abc0d413c62cd098 | 1,119 | py | Python | src/myproject/settings/admin_mail_console_handler.py | thinkAmi/DjangoCongress_JP_2019_talk | 0b746f62808d979c1570de80084686f709996e1d | [
"Unlicense"
] | 1 | 2019-05-18T04:34:59.000Z | 2019-05-18T04:34:59.000Z | src/myproject/settings/admin_mail_console_handler.py | thinkAmi/DjangoCongress_JP_2019_talk | 0b746f62808d979c1570de80084686f709996e1d | [
"Unlicense"
] | null | null | null | src/myproject/settings/admin_mail_console_handler.py | thinkAmi/DjangoCongress_JP_2019_talk | 0b746f62808d979c1570de80084686f709996e1d | [
"Unlicense"
] | null | null | null | from .base import *
# Settings profile: outgoing application mail is written to files on disk,
# while logging-triggered admin error mails are printed to the console.
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
# Directory where the file-based email backend stores outgoing messages.
# A relative path also works, e.g.:
# EMAIL_FILE_PATH = '.'
EMAIL_FILE_PATH = str(pathlib.Path(BASE_DIR).joinpath('logs'))
# Recipients of the mail_admins logging handler configured below.
ADMINS = [('Admin1', 'admin1@example.com')]
# The console log handler below is gated on require_debug_true, so DEBUG
# stays enabled in this profile.
DEBUG = True
# Django logging: INFO and above goes to the console (only while DEBUG is
# True); ERROR and above is mailed to ADMINS. The mail_admins handler
# overrides the project-wide EMAIL_BACKEND and emits the mail on the
# console instead of writing it to a file.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'email_backend':
                'django.core.mail.backends.console.EmailBackend',
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
        },
    }
} | 25.431818 | 66 | 0.570152 |
49e262c808a1276127a4a16b770e3d39b6997140 | 7,494 | py | Python | core/tests/machine_request.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | core/tests/machine_request.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | core/tests/machine_request.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | from dateutil.relativedelta import relativedelta
from uuid import uuid4
import unittest
import pytz
from django.test import TestCase
from django.utils.timezone import datetime
from core.tests.helpers import CoreProviderMachineHelper, CoreMachineRequestHelper, CoreInstanceHelper
from service.machine import process_machine_request
| 40.508108 | 102 | 0.665999 |
49e3a9de96ed6a59b374bbb066c35f87db59257f | 3,455 | py | Python | infoblox_netmri/api/remote/models/device_zone_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/device_zone_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/device_zone_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
| 31.697248 | 186 | 0.600868 |
49e3eb2356e1b76dcdb5820a9f4030de1c698ff6 | 1,510 | py | Python | nitmis_admin/controllers/register.py | kalesh13/nitmis | b8c73f74411bbad441557c6553cbbd35acc5a5ee | [
"MIT"
] | null | null | null | nitmis_admin/controllers/register.py | kalesh13/nitmis | b8c73f74411bbad441557c6553cbbd35acc5a5ee | [
"MIT"
] | 5 | 2020-07-19T10:28:57.000Z | 2021-08-19T18:25:28.000Z | nitmis_admin/controllers/register.py | kalesh13/nitmis | b8c73f74411bbad441557c6553cbbd35acc5a5ee | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import render
from django.http.response import JsonResponse
from nitmis_admin.serializers.UserSerializer import UserSerializer
def create_user(role="Guest"):
"""
"""
return fun_wrapper
| 26.491228 | 74 | 0.625828 |
49e46e9b59b725cb283f9125430ec7a34bd75825 | 9,521 | py | Python | 3_0_pgo_icp/solution/pose_graph_optimization/assignment_I_2/pgo_2D.py | karanchawla/ai_for_robotics | 03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8 | [
"BSD-3-Clause"
] | 65 | 2017-03-03T07:30:28.000Z | 2021-08-19T01:12:47.000Z | 3_0_pgo_icp/solution/pose_graph_optimization/assignment_I_2/pgo_2D.py | karanchawla/ai_for_robotics | 03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8 | [
"BSD-3-Clause"
] | 4 | 2017-03-02T13:51:40.000Z | 2017-11-01T16:49:22.000Z | 3_0_pgo_icp/solution/pose_graph_optimization/assignment_I_2/pgo_2D.py | ethz-asl/ai_for_robotics | 03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8 | [
"BSD-3-Clause"
] | 43 | 2017-03-02T11:31:21.000Z | 2020-10-30T07:10:59.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 2 10:00 2017
@author: Timo Hinzmann (hitimo@ethz.ch)
"""
import math
from math import floor, ceil
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import linalg as sla
from scipy import array, linalg, dot
from enum import Enum
import copy
import pylab
# References:
# [1] Grisetti, Kuemmerle, Stachniss et al. "A Tutorial on Graph-Based SLAM"
# Pose-graph optimization closely following Algorithm 1, 2D from [1].
if __name__ == "__main__":
main()
| 36.619231 | 119 | 0.553513 |
49e502e61c40a6d5ce473d9fc363be1b2927b0ab | 1,019 | py | Python | simple_history/management/commands/_populate_utils.py | SummitESP/django-simple-history | 78c929159263dd583bd4d8d5a28a274a9ad6be0b | [
"BSD-3-Clause"
] | null | null | null | simple_history/management/commands/_populate_utils.py | SummitESP/django-simple-history | 78c929159263dd583bd4d8d5a28a274a9ad6be0b | [
"BSD-3-Clause"
] | 1 | 2018-01-09T20:51:34.000Z | 2018-01-09T20:51:34.000Z | simple_history/management/commands/_populate_utils.py | SummitESP/django-simple-history | 78c929159263dd583bd4d8d5a28a274a9ad6be0b | [
"BSD-3-Clause"
] | null | null | null | from django.utils.timezone import now
def get_history_model_for_model(model):
    """Return the historical model that tracks the given app model.

    Raises NotHistorical when *model* was never registered with
    simple-history (i.e. its _meta lacks the manager attribute).
    """
    try:
        history_attr = model._meta.simple_history_manager_attribute
    except AttributeError:
        message = "Cannot find a historical model for {model}.".format(model=model)
        raise NotHistorical(message)
    history_manager = getattr(model, history_attr)
    return history_manager.model
def bulk_history_create(model, history_model):
    """Save a copy of all instances to the historical model."""
    records = []
    for instance in model.objects.all():
        # Snapshot every concrete field of the instance by attribute name.
        field_values = {
            field.attname: getattr(instance, field.attname)
            for field in instance._meta.fields
        }
        records.append(
            history_model(
                history_date=getattr(instance, '_history_date', now()),
                history_user=getattr(instance, '_history_user', None),
                **field_values
            )
        )
    history_model.objects.bulk_create(records)
| 36.392857 | 68 | 0.677134 |
49e6d32460dd6b58678d99ca2b2edccf4f319501 | 1,174 | py | Python | anagrams.py | zubrik13/coding_intrv_prer | 853a7c8357ad43601313daadcc1c494d403a9aa0 | [
"MIT"
] | null | null | null | anagrams.py | zubrik13/coding_intrv_prer | 853a7c8357ad43601313daadcc1c494d403a9aa0 | [
"MIT"
] | null | null | null | anagrams.py | zubrik13/coding_intrv_prer | 853a7c8357ad43601313daadcc1c494d403a9aa0 | [
"MIT"
] | null | null | null | """
A student is taking a cryptography class and has found anagrams to be very useful.
Two strings are anagrams of each other if the first string's letters can be
rearranged to form the second string. In other words, both strings must
contain the same exact letters in the same exact frequency. For example,
bacdc and dcbac are anagrams, but bacdc and dcbad are not.
The student decides on an encryption scheme that involves two large strings.
The encryption is dependent on the minimum number of character deletions
required to make the two strings anagrams. Determine this number.
Given two strings, a and b, that may or may not be of the same length,
determine the minimum number of character deletions required to make
a and b anagrams. Any characters can be deleted from either of the strings.
"""
from collections import Counter

a = 'ceed'
b = 'acbeef'

total_len = len(a) + len(b)

# Letters shared by both strings (counted with multiplicity) can be kept;
# every other letter must be deleted from one of the strings. The Counter
# intersection yields the shared multiset in O(len(a) + len(b)) time,
# replacing the original quadratic scan-and-remove over lists.
match_counter = 2 * sum((Counter(a) & Counter(b)).values())

# Minimum number of character deletions to make a and b anagrams.
min_num = total_len - match_counter

print(min_num)
49e795b64e0b3cac7465448efafac4bd98ed4236 | 267 | py | Python | PhoenixNow/login.py | ECGHelloWorld/PhoenixNow | 88f98f59a91a4e99763ae4432df7945d811b63bf | [
"MIT"
] | 2 | 2017-02-06T15:42:27.000Z | 2021-01-14T15:13:08.000Z | PhoenixNow/login.py | ECGHelloWorld/PhoenixNow | 88f98f59a91a4e99763ae4432df7945d811b63bf | [
"MIT"
] | null | null | null | PhoenixNow/login.py | ECGHelloWorld/PhoenixNow | 88f98f59a91a4e99763ae4432df7945d811b63bf | [
"MIT"
] | 6 | 2016-07-30T19:57:19.000Z | 2019-08-06T03:44:54.000Z | from flask_login import LoginManager
from PhoenixNow.model import User

# Application-wide Flask-Login manager. Unauthenticated requests to
# login-protected views are redirected to the "regular" blueprint's
# signin endpoint.
login_manager = LoginManager()
login_manager.login_view = "regular.signin"
| 24.272727 | 51 | 0.794007 |
49e8bc016b4a92e63bbff49dadf2d0f5ff48a5c0 | 7,673 | py | Python | mobi_parse_data.py | josting/CS538_Project | b503de4f8e632166f715bb28b621d21770e3142e | [
"MIT"
] | null | null | null | mobi_parse_data.py | josting/CS538_Project | b503de4f8e632166f715bb28b621d21770e3142e | [
"MIT"
] | null | null | null | mobi_parse_data.py | josting/CS538_Project | b503de4f8e632166f715bb28b621d21770e3142e | [
"MIT"
] | null | null | null | import os
import datetime as dt
import random
import networkx
# import matplotlib as mpl
import matplotlib.pyplot as plt
from const import *
activity = {}
with open(os.path.join(DATA_DIR, "mobiclique", "activity.csv")) as activity_fd:
for line in activity_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
user_id, start_ts, end_ts = line.split(';')
if user_id not in activity:
activity[user_id] = []
activity[user_id].append( (int(start_ts), int(end_ts)) )
transmission = {}
with open(os.path.join(DATA_DIR, "mobiclique", "transmission.csv")) as transmission_fd:
for line in transmission_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
msg_type, msg_id, bytes, src_user_id, dst_user_id, ts, status = line.split(';')
#if status != '0':
# continue
if src_user_id not in transmission:
transmission[src_user_id] = {}
if dst_user_id not in transmission[src_user_id]:
transmission[src_user_id][dst_user_id] = []
ts = int(ts)
transmission[src_user_id][dst_user_id].append(ts)
reception = {}
with open(os.path.join(DATA_DIR, "mobiclique", "reception.csv")) as reception_fd:
for line in reception_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
msg_type, msg_id, src_user_id, dst_user_id, ts = line.split(';')
if src_user_id not in reception:
reception[src_user_id] = {}
if dst_user_id not in reception[src_user_id]:
reception[src_user_id][dst_user_id] = []
ts = int(ts)
reception[src_user_id][dst_user_id].append(ts)
drift_dict = {}
for src_user_id in sorted(reception):
for dst_user_id in sorted(reception[src_user_id]):
for rcp_ts in reception[src_user_id][dst_user_id]:
if src_user_id not in transmission:
continue
transmissions = transmission[src_user_id].get(dst_user_id, None)
if transmissions is None:
continue
if (src_user_id, dst_user_id) not in drift_dict:
drift_dict[(src_user_id, dst_user_id)] = []
diff = [abs(rcp_ts - trn_ts) for trn_ts in transmissions]
idx = diff.index(min(diff))
trn_ts = transmission[src_user_id][dst_user_id][idx]
drift = trn_ts - rcp_ts
drift_dict[(src_user_id, dst_user_id)].append((trn_ts, drift))
for (src_user_id, dst_user_id) in sorted(drift_dict):
print src_user_id, dst_user_id, drift_dict[(src_user_id, dst_user_id)]
break
proximity = {}
with open(os.path.join(DATA_DIR, "mobiclique", "proximity.csv")) as proximity_fd:
for line in proximity_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
ts, user_id, seen_user_id, major_code, minor_code = line.split(';')
ts = int(ts)
if ts not in proximity:
proximity[ts] = []
proximity[ts].append((user_id, seen_user_id))
MAX_RNG = 75
timestamps = sorted(proximity)
#write traces to user.dat files
if 0:
user_fds = {}
for ts in timestamps:
for (user_id, seen_id) in proximity[ts]:
if user_id not in user_fds:
fd = open(r"mobiclique\%s.dat" % user_id, 'w')
last_ts = -1
user_fds[user_id] = [fd, last_ts]
else:
[fd, last_ts] = user_fds[user_id]
if last_ts != ts:
if last_ts > 0:
fd.write('\n')
fd.write("{} {} {}".format(ts, user_id, seen_id))
else:
fd.write(",{}".format(seen_id))
user_fds[user_id][1] = ts
for (fd, last_ts) in user_fds.values():
fd.close()
# Graph using networkx
if 1:
idx = random.sample(xrange(len(timestamps)), 25)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
G = networkx.Graph(edges)
networkx.draw(G)
fig_fname = os.path.join(r"C:\Users\Jon\Google Drive\Grad_School\CS 538\project\scripts\figures", "%s.png" % ts)
plt.savefig(fig_fname)
plt.close()
networks = []
n_networks = []
max_size = []
idx = random.sample(xrange(len(timestamps)), 1500)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
nodes = sorted(set(reduce(list.__add__, map(list, edges))))
new_networks = get_networks(nodes, edges)
networks.append(new_networks)
n_networks.append(len(new_networks))
max_size.append(max(map(len,new_networks)))
fd = open("output2.csv", 'w')
for vals in zip(sample_dts, n_networks, max_size):
fd.write(','.join(map(str,(vals))))
fd.write('\n')
fd.close()
# Get networks
if 0:
networks = []
n_networks = []
max_size = []
idx = random.sample(xrange(len(timestamps)), 1500)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
nodes = sorted(set(reduce(list.__add__, map(list, edges))))
new_networks = get_networks(nodes, edges)
networks.append(new_networks)
n_networks.append(len(new_networks))
max_size.append(max(map(len,new_networks)))
fd = open("output2.csv", 'w')
for vals in zip(sample_dts, n_networks, max_size):
fd.write(','.join(map(str,(vals))))
fd.write('\n')
fd.close()
| 37.247573 | 121 | 0.59377 |
49ea4f99ec3b4e468aba9dc32ef313173fda7ba3 | 6,995 | py | Python | tests/tests_geomstats/test_general_linear.py | tfunatomi/geomstats | a5651680f98dea95c1f82a48af1a6dccf3e26bd1 | [
"MIT"
] | 2 | 2020-01-23T04:01:02.000Z | 2020-08-18T19:20:27.000Z | tests/tests_geomstats/test_general_linear.py | tfunatomi/geomstats | a5651680f98dea95c1f82a48af1a6dccf3e26bd1 | [
"MIT"
] | null | null | null | tests/tests_geomstats/test_general_linear.py | tfunatomi/geomstats | a5651680f98dea95c1f82a48af1a6dccf3e26bd1 | [
"MIT"
] | null | null | null | """Unit tests for the General Linear group."""
import warnings
import tests.helper as helper
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.general_linear import GeneralLinear
RTOL = 1e-5
| 32.840376 | 74 | 0.501358 |
49eb7a1d598ee46402c3d994c365fef1314082ef | 454 | py | Python | teszt/test_feladat03-06.py | python-feladatok-tesztekkel/05-01-10-dolgozat | ce1c8568daf83dc86bba1fb325cb7b8d1cf0dd3f | [
"CC0-1.0"
] | null | null | null | teszt/test_feladat03-06.py | python-feladatok-tesztekkel/05-01-10-dolgozat | ce1c8568daf83dc86bba1fb325cb7b8d1cf0dd3f | [
"CC0-1.0"
] | null | null | null | teszt/test_feladat03-06.py | python-feladatok-tesztekkel/05-01-10-dolgozat | ce1c8568daf83dc86bba1fb325cb7b8d1cf0dd3f | [
"CC0-1.0"
] | null | null | null | from unittest import TestCase
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import feladatok
| 32.428571 | 92 | 0.746696 |
49ebafbb9bb001087a55a6aebb22dbde9671371c | 271 | py | Python | {{cookiecutter.project_name}}/pages/main_page.py | victory-sokolov/selenium-boilerplate | 43ac0e37c93c6379186f06050ab29e8521ac3ad1 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/pages/main_page.py | victory-sokolov/selenium-boilerplate | 43ac0e37c93c6379186f06050ab29e8521ac3ad1 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/pages/main_page.py | victory-sokolov/selenium-boilerplate | 43ac0e37c93c6379186f06050ab29e8521ac3ad1 | [
"MIT"
] | null | null | null | from utils.Driver import Driver
from pages.base_page import BasePage
from pages.locators import MainPageLocators
| 27.1 | 44 | 0.734317 |
49edf4b8c87add119d94e632341ab23299a577d3 | 1,726 | py | Python | boardgames/main/migrations/0001_initial.py | diophung/django-sample | 4916f4aa70506f6f40b736f68a0bbe398ea1ea8e | [
"Apache-2.0"
] | null | null | null | boardgames/main/migrations/0001_initial.py | diophung/django-sample | 4916f4aa70506f6f40b736f68a0bbe398ea1ea8e | [
"Apache-2.0"
] | null | null | null | boardgames/main/migrations/0001_initial.py | diophung/django-sample | 4916f4aa70506f6f40b736f68a0bbe398ea1ea8e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-16 07:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 42.097561 | 162 | 0.636153 |
49ee2e293e1b411f588dd752ab4762901a62db20 | 7,801 | py | Python | src/tools/nuscenes-devkit/prediction/tests/test_mtp_loss.py | jie311/TraDeS | 896491a159abe65f61c6ad05662cda6e28d137a6 | [
"MIT"
] | 475 | 2021-03-13T16:33:36.000Z | 2022-03-30T06:00:39.000Z | src/tools/nuscenes-devkit/prediction/tests/test_mtp_loss.py | jie311/TraDeS | 896491a159abe65f61c6ad05662cda6e28d137a6 | [
"MIT"
] | 50 | 2021-03-17T04:48:20.000Z | 2022-03-08T13:55:32.000Z | src/tools/nuscenes-devkit/prediction/tests/test_mtp_loss.py | jie311/TraDeS | 896491a159abe65f61c6ad05662cda6e28d137a6 | [
"MIT"
] | 98 | 2021-03-14T12:12:49.000Z | 2022-03-19T16:19:13.000Z |
import math
import unittest
import torch
from nuscenes.prediction.models import mtp
| 42.396739 | 106 | 0.58313 |
49ef52693c6a396a1581cc399d1886b8613380b6 | 1,067 | py | Python | json2graph.py | daveshah1/hypergraph_part | cea56e615eec01cb536ed23206ed101c213864a5 | [
"0BSD"
] | null | null | null | json2graph.py | daveshah1/hypergraph_part | cea56e615eec01cb536ed23206ed101c213864a5 | [
"0BSD"
] | null | null | null | json2graph.py | daveshah1/hypergraph_part | cea56e615eec01cb536ed23206ed101c213864a5 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
# Convert Yosys JSON to simple text hypergraph for performance testing
import sys, json
node_count = 0
edge2node = {}
netlist = None
with open(sys.argv[1]) as jf:
netlist = json.load(jf)
top_module = None
for name, module in sorted(netlist["modules"].items()):
if "attributes" not in module:
continue
if "top" not in module["attributes"]:
continue
if int(module["attributes"]["top"]) == 0:
continue
top_module = module
break
for cname, cell in sorted(top_module["cells"].items()):
if "connections" not in cell:
continue
for pname, bits in sorted(cell["connections"].items()):
for bit in bits:
if bit in ("0", "1", "x", "z"):
continue
if bit not in edge2node:
edge2node[bit] = set()
edge2node[bit].add(node_count)
node_count += 1
with open(sys.argv[2], "w") as hf:
print("{} {}".format(node_count, len(edge2node)), file=hf)
for n in range(node_count):
print("N 0 0", file=hf)
for e, nodes in sorted(edge2node.items()):
print("E 1 {}".format(" ".join([str(x) for x in sorted(nodes)])), file=hf)
| 24.25 | 76 | 0.665417 |
49f13b101323835947d8e0f19cb369eece2aefcf | 3,966 | py | Python | tools/lstm_dql_6.py | alexis-jacq/signals | 3c960e125ed5265dfc9cd3278df948f3c846a5dd | [
"0BSD"
] | 1 | 2020-02-18T12:52:02.000Z | 2020-02-18T12:52:02.000Z | tools/lstm_dql_6.py | alexis-jacq/signals | 3c960e125ed5265dfc9cd3278df948f3c846a5dd | [
"0BSD"
] | null | null | null | tools/lstm_dql_6.py | alexis-jacq/signals | 3c960e125ed5265dfc9cd3278df948f3c846a5dd | [
"0BSD"
] | null | null | null | from Tkinter import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
master = Tk()
goal = 0
var_goal = StringVar()
GAMMA = 0.9
last_state = Variable(torch.Tensor([0,0,0,0,0,0])).unsqueeze(0)
last_action = 0
last_reward = 0
model = Policy()
model.initHidden()
last_hidden = model.hidden_state
last_cell = model.cell_state
optimizer = optim.Adam(model.parameters(), lr=0.01)
Button(master, text='S1', height = 10, width = 30, command=lambda:update(0)).grid(row=0, column=0, sticky=W, pady=4)
Button(master, text='S2', height = 10, width = 30, command=lambda:update(1)).grid(row=0, column=1, sticky=W, pady=4)
Button(master, text='goal 0', height = 10, width = 30, command=lambda:set_goal(0)).grid(row=1, column=0, sticky=W, pady=4)
Button(master, text='goal 1', height = 10, width = 30, command=lambda:set_goal(1)).grid(row=1, column=1, sticky=W, pady=4)
Label(master, height = 10, textvariable = var_goal).grid(row=2, sticky=EW, pady=4)
mainloop( )
| 28.73913 | 122 | 0.660111 |
49f1fc4e36bfb8c6234c3e939d335df6e0c3dae5 | 500 | py | Python | Engine/Shaders/compile_all_shader.py | ValtoGameEngines/Fish-Engine | a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9 | [
"MIT"
] | 240 | 2017-02-17T10:08:19.000Z | 2022-03-25T14:45:29.000Z | Engine/Shaders/compile_all_shader.py | ValtoGameEngines/Fish-Engine | a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9 | [
"MIT"
] | 2 | 2016-10-12T07:08:38.000Z | 2017-04-05T01:56:30.000Z | Engine/Shaders/compile_all_shader.py | yushroom/FishEngine | a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9 | [
"MIT"
] | 39 | 2017-03-02T09:40:07.000Z | 2021-12-04T07:28:53.000Z | import os
import sys
compiler = r'../Binary/RelWithDebInfo/ShaderCompiler'
#compiler = r'../Binary/Debug/ShaderCompiler'
shader_dirs = ['.', './Editor']
count = 0
for d in shader_dirs:
for fn in os.listdir(d):
print(fn)
ext = fn.split('.')[-1]
if ext in ['surf', 'shader']:
cmd = compiler + ' ' + os.path.abspath(os.path.join(d, fn))
print(cmd)
if os.system(cmd) != 0:
print("Compile ERROR: ", fn)
sys.exit()
count += 1
print("Done. {} shaders compiled.".format(count)) | 22.727273 | 62 | 0.624 |
49f2ebec4bd34c27d749eb184a9d941a3fa4ea04 | 14,058 | py | Python | rbb_server/src/rbb_swagger_server/models/simulation_detailed.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 55 | 2019-05-09T06:43:05.000Z | 2021-12-08T05:56:43.000Z | rbb_server/src/rbb_swagger_server/models/simulation_detailed.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 5 | 2019-09-08T15:33:28.000Z | 2021-04-17T17:30:53.000Z | rbb_server/src/rbb_swagger_server/models/simulation_detailed.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 16 | 2019-08-08T07:15:35.000Z | 2021-12-07T15:34:41.000Z | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from rbb_swagger_server.models.base_model_ import Model
from rbb_swagger_server.models.simulation_environment_detailed import SimulationEnvironmentDetailed # noqa: F401,E501
from rbb_swagger_server.models.simulation_run_detailed import SimulationRunDetailed # noqa: F401,E501
from rbb_swagger_server.models.simulation_summary import SimulationSummary # noqa: F401,E501
from rbb_swagger_server.models.task_detailed import TaskDetailed # noqa: F401,E501
from rbb_swagger_server import util
| 34.20438 | 413 | 0.660905 |
49f3758e9d44d3107ec62931b01722d7ad937589 | 5,857 | py | Python | src/pathlinker.py | melliott432/spras | ba914f6a55a51c3e3b55b56844a533ff2936fae5 | [
"MIT"
] | 3 | 2021-05-05T23:40:39.000Z | 2021-05-13T03:35:22.000Z | src/pathlinker.py | melliott432/spras | ba914f6a55a51c3e3b55b56844a533ff2936fae5 | [
"MIT"
] | 41 | 2021-04-27T01:48:28.000Z | 2022-03-14T20:11:24.000Z | src/pathlinker.py | melliott432/spras | ba914f6a55a51c3e3b55b56844a533ff2936fae5 | [
"MIT"
] | 2 | 2021-07-06T18:27:19.000Z | 2022-01-25T03:56:49.000Z | import docker
import os
import sys
import pandas as pd
import warnings
from src.PRM import PRM
from pathlib import Path
from src.util import prepare_path_docker
__all__ = ['PathLinker']
| 42.751825 | 177 | 0.626771 |
49f40691673fa4f67fa8dd4ced6c7bd474270052 | 4,978 | py | Python | stonesoup/hypothesiser/gaussianmixture.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | 1 | 2019-12-26T14:55:03.000Z | 2019-12-26T14:55:03.000Z | stonesoup/hypothesiser/gaussianmixture.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | null | null | null | stonesoup/hypothesiser/gaussianmixture.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .base import Hypothesiser
from ..base import Property
from ..types.multihypothesis import MultipleHypothesis
from ..types.prediction import (TaggedWeightedGaussianStatePrediction,
WeightedGaussianStatePrediction)
from ..types.state import TaggedWeightedGaussianState
| 45.669725 | 96 | 0.606268 |
49f45e903b240c04c0489fac65ede708075df463 | 1,458 | py | Python | apps/approval/api/serializers.py | emilps/onlineweb4 | 6f4aca2a4522698366ecdc6ab63c807ce5df2a96 | [
"MIT"
] | null | null | null | apps/approval/api/serializers.py | emilps/onlineweb4 | 6f4aca2a4522698366ecdc6ab63c807ce5df2a96 | [
"MIT"
] | null | null | null | apps/approval/api/serializers.py | emilps/onlineweb4 | 6f4aca2a4522698366ecdc6ab63c807ce5df2a96 | [
"MIT"
] | null | null | null | from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework import serializers
from apps.approval.models import CommitteeApplication, CommitteePriority
from apps.authentication.serializers import UserSerializer
| 36.45 | 96 | 0.742798 |
49f6c77afc94bafb870dc3b17a265d3485c6c64e | 2,909 | py | Python | visualization/histogram.py | SalikLP/classification-of-encrypted-traffic | 3c86e098aab58941f9339bb64945c1112ab556ef | [
"MIT"
] | 35 | 2018-05-25T16:48:23.000Z | 2022-03-15T14:35:07.000Z | visualization/histogram.py | SalikLP/classification-of-encrypted-traffic | 3c86e098aab58941f9339bb64945c1112ab556ef | [
"MIT"
] | 3 | 2018-03-18T13:03:09.000Z | 2020-01-17T12:09:12.000Z | visualization/histogram.py | SalikLP/classification-of-encrypted-traffic | 3c86e098aab58941f9339bb64945c1112ab556ef | [
"MIT"
] | 14 | 2018-05-25T16:48:24.000Z | 2022-01-04T12:56:31.000Z | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
import utils
import glob, os
import pca.dataanalyzer as da, pca.pca as pca
from sklearn.metrics import accuracy_score
# visulaize the important characteristics of the dataset
import matplotlib.pyplot as plt
seed = 0
num_headers = 16
data_len = 54*num_headers #1460
dirs = ["C:/Users/salik/Documents/Data/LinuxChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsFirefox/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsSalik/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsAndreas/{}/".format(num_headers)]
# dirs = ["E:/Data/h5/https/", "E:/Data/h5/netflix/"]
# step 1: get the data
dataframes = []
num_examples = 0
for dir in dirs:
for fullname in glob.iglob(dir + '*.h5'):
filename = os.path.basename(fullname)
df = utils.load_h5(dir, filename)
dataframes.append(df)
num_examples = len(df.values)
# create one large dataframe
data = pd.concat(dataframes)
data.sample(frac=1, random_state=seed).reset_index(drop=True)
num_rows = data.shape[0]
columns = data.columns
print(columns)
# step 2: get features (x) and convert it to numpy array
x = da.getbytes(data, data_len)
# step 3: get class labels y and then encode it into number
# get class label data
y = data['label'].values
# encode the class label
class_labels = np.unique(y)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
# step 4: split the data into training set and test set
test_percentage = 0.5
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_percentage, random_state=seed)
plot_savename = "histogram_payload"
from matplotlib import rcParams
# Make room for xlabel which is otherwise cut off
rcParams.update({'figure.autolayout': True})
# scatter plot the sample points among 5 classes
# markers = ('s', 'd', 'o', '^', 'v', ".", ",", "<", ">", "8", "p", "P", "*", "h", "H", "+", "x", "X", "D", "|", "_")
color_map = {0: '#487fff', 1: '#d342ff', 2: '#4eff4e', 3: '#2ee3ff', 4: '#ffca43', 5:'#ff365e', 6:'#626663'}
plt.figure()
for idx, cl in enumerate(np.unique(y_test)):
# Get count of unique values
values, counts = np.unique(x_test[y_test == cl], return_counts=True)
# Maybe remove zero as there is a lot of zeros in the header
# values = values[1:]
# counts = counts[1:]
n, bins, patches = plt.hist(values, weights=counts, bins=256, facecolor=color_map[idx], label=class_labels[cl], alpha=0.8)
plt.legend(loc='upper right')
plt.title('Histogram of : {}'.format(class_labels))
plt.tight_layout()
# plt.savefig('{0}{1}.png'.format(plot_savename, int(perplexity)), dpi=300)
plt.show() | 36.822785 | 127 | 0.700928 |
49f6f1f5b6e7113a385ba89e9bd8fb4c985968b5 | 421 | py | Python | examples/board_toolkit_simpletest.py | Neradoc/Adafruit_Board_Toolkit | c1602192f015924ce4ffd4e90dcd44769e565780 | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 10 | 2021-03-16T18:05:53.000Z | 2022-03-20T20:40:38.000Z | examples/board_toolkit_simpletest.py | Neradoc/Adafruit_Board_Toolkit | c1602192f015924ce4ffd4e90dcd44769e565780 | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 8 | 2021-03-17T18:32:54.000Z | 2021-12-31T19:58:01.000Z | examples/board_toolkit_simpletest.py | Neradoc/Adafruit_Board_Toolkit | c1602192f015924ce4ffd4e90dcd44769e565780 | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 4 | 2021-04-21T13:48:18.000Z | 2022-03-13T15:07:01.000Z | # SPDX-FileCopyrightText: Copyright (c) 2021 Dan Halbert for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
import adafruit_board_toolkit.circuitpython_serial
comports = adafruit_board_toolkit.circuitpython_serial.repl_comports()
if not comports:
raise Exception("No CircuitPython boards found")
# Print the device paths or names that connect to a REPL.
print([comport.device for comport in comports])
| 32.384615 | 80 | 0.812352 |
49f718d53ef81854c33add0bb6b608250490d75e | 798 | py | Python | saq/utils.py | cofin/saq | be81f383a0904e9084e24ccb5334e07fc5b61e00 | [
"MIT"
] | 29 | 2022-01-05T07:07:18.000Z | 2022-03-29T20:09:03.000Z | saq/utils.py | cofin/saq | be81f383a0904e9084e24ccb5334e07fc5b61e00 | [
"MIT"
] | 3 | 2022-01-23T17:33:40.000Z | 2022-03-10T03:36:21.000Z | saq/utils.py | cofin/saq | be81f383a0904e9084e24ccb5334e07fc5b61e00 | [
"MIT"
] | 4 | 2022-01-06T18:33:33.000Z | 2022-03-23T18:44:19.000Z | import time
import uuid
from random import random
def exponential_backoff(
attempts,
base_delay,
max_delay=None,
jitter=True,
):
"""
Get the next delay for retries in exponential backoff.
attempts: Number of attempts so far
base_delay: Base delay, in seconds
max_delay: Max delay, in seconds. If None (default), there is no max.
jitter: If True, add a random jitter to the delay
"""
if max_delay is None:
max_delay = float("inf")
backoff = min(max_delay, base_delay * 2 ** max(attempts - 1, 0))
if jitter:
backoff = backoff * random()
return backoff
| 19 | 73 | 0.641604 |
49f8927dba9de24eccfdfa6bd46fde3e6e325f82 | 221 | py | Python | pipeline.py | sanidhya-singh/dagster-pipelines | 671c4869dca14f96902981e2e8c84df1319ca89e | [
"MIT"
] | null | null | null | pipeline.py | sanidhya-singh/dagster-pipelines | 671c4869dca14f96902981e2e8c84df1319ca89e | [
"MIT"
] | null | null | null | pipeline.py | sanidhya-singh/dagster-pipelines | 671c4869dca14f96902981e2e8c84df1319ca89e | [
"MIT"
] | null | null | null | from dagster import job, op
| 13.8125 | 48 | 0.669683 |
49fab0bf939b0f4cf2782196c0ddc5090bf9b5e5 | 4,508 | py | Python | qulab/drivers/AFG3102.py | liuqichun3809/quantum-lab | 05bea707b314ea1687866f56ee439079336cfbbc | [
"MIT"
] | 3 | 2020-08-30T16:11:49.000Z | 2021-03-05T12:09:30.000Z | qulab/drivers/AFG3102.py | liuqichun3809/quantum-lab | 05bea707b314ea1687866f56ee439079336cfbbc | [
"MIT"
] | null | null | null | qulab/drivers/AFG3102.py | liuqichun3809/quantum-lab | 05bea707b314ea1687866f56ee439079336cfbbc | [
"MIT"
] | 2 | 2019-07-24T15:12:31.000Z | 2019-09-20T02:17:28.000Z | # -*- coding: utf-8 -*-
import time
import numpy as np
from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector
| 53.035294 | 132 | 0.618234 |
49fc564845398fd6fccf8887fc72513069095963 | 1,357 | py | Python | 02-intermediate/lstm_network/main.py | kevin2018pg/pytorch-notes | 4ba3827fccbf17ec446b2538186dd78dea3ecb50 | [
"MIT"
] | 1 | 2020-12-03T02:41:07.000Z | 2020-12-03T02:41:07.000Z | 02-intermediate/lstm_network/main.py | kevin2018pg/pytorch-notes | 4ba3827fccbf17ec446b2538186dd78dea3ecb50 | [
"MIT"
] | null | null | null | 02-intermediate/lstm_network/main.py | kevin2018pg/pytorch-notes | 4ba3827fccbf17ec446b2538186dd78dea3ecb50 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
"""
3510
batch=3, seq_len=5, Embedding=10
"""
# LSTM1020,2LSTMLSTM
bilstm = nn.LSTM(input_size=10, hidden_size=20, num_layers=2, bidirectional=True)
#
input = torch.randn(5, 3, 10)
#
h0 = torch.randn(4, 3, 20) # [bidirection*num_layers, batch_size, hidden_size]
c0 = torch.randn(4, 3, 20) # [bidirection*num_layers, batch_size, hidden_size]
# 2lstmoutputlstm
output, (hn, cn) = bilstm(input, (h0, c0))
print("output shape", output.shape) # shapetorch.Size([5,3,40]),[seq_len,batch_size,2*hidden_size]
print("hn shape", hn.shape) # shapetorch.Size([4,3,20]),[bidirection*num_layers,batch_size,hidden_size]
print("cn shape", cn.shape) # shapetorch.Size([4,3,20]),[bidirection*num_layers,batch_size,hidden_size]
#
output = output.permute(1, 0, 2) # torch.Size([3,5,40]),[batch_size,seq_len,2*hidden_size]
output = output.contiguous() # torch.view()permutecontiguousviewtensor
batch_size = output.size(0)
output = output.view(batch_size, -1) # torch.Size([3,200]),[batch_size,seq_len*2*hidden_size]
fully_connected = nn.Linear(200, 2)
output = fully_connected(output)
print(output.shape) # torch.Size([3,2]),[batch_size,class]
print(output)
| 41.121212 | 106 | 0.740604 |
49fd04fd3ec6534f06e8ff42c0869a4f70bf3dd5 | 1,484 | py | Python | meiduo_mall/apps/meiduo_admin/views/order.py | zzZaida/meiduo_backend | c4f94ea7f9c47a08d3e37fb0ac2c1ec1dcf2c18b | [
"MIT"
] | null | null | null | meiduo_mall/apps/meiduo_admin/views/order.py | zzZaida/meiduo_backend | c4f94ea7f9c47a08d3e37fb0ac2c1ec1dcf2c18b | [
"MIT"
] | null | null | null | meiduo_mall/apps/meiduo_admin/views/order.py | zzZaida/meiduo_backend | c4f94ea7f9c47a08d3e37fb0ac2c1ec1dcf2c18b | [
"MIT"
] | null | null | null | from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from apps.meiduo_admin.serializers.order import OrderInfoSerializer
from apps.meiduo_admin.utils import PageNum
from apps.orders.models import OrderInfo
| 26.035088 | 67 | 0.623989 |
49fd9dcc627b703550931ebd10aa32549f023644 | 29,587 | py | Python | QA/pycopia/remote/windows_server.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | [
"Apache-2.0"
] | 3 | 2018-11-26T15:00:20.000Z | 2022-01-28T23:17:58.000Z | QA/pycopia/remote/windows_server.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | [
"Apache-2.0"
] | null | null | null | QA/pycopia/remote/windows_server.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | [
"Apache-2.0"
] | 1 | 2018-11-26T15:00:21.000Z | 2018-11-26T15:00:21.000Z | #!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Implements a Windows version of a client responder. This should run with the
native Python for Windows.
Install on a Windows server:
Place the following lines in c:\autoexec.bat::
PATH=%PATH%;C:\Python26;C:\Python26\Scripts
Now run (all on one line)::
C:\Python26>python.exe %PYTHONLIB%\site-packages\pycopia\remote\WindowsServer.py
--username DOMAIN\Administrator --password xxxxxxxx install
OR, for system process that can interact with console::
C:\Python26>python.exe %PYTHONLIB%\site-packages\pycopia\remote\WindowsServer.py
--interactive install
Note: if you get an error about an account not existing, you may need
to supply the username like this:
.\Administrator
If a username was supplied to run as, go to the Service Manger from the
Windows control panel, and perform the following.
- Select "Remote Agent Server" from the list. Right-clieck and select "properties".
- Select the "Log On" tab.
- Click the "This account:" radio button.
- Enter DOMAIN\Administrator in the account box (or something else appropriate).
- Enter the proper password (twice).
- Click "Apply". You should confirm a message saying user is
enabled to log in as a service.
- Click "General" tab.
- You may now start the service.
You may also need to disable the Windows firewall for this to function
properly. This service is a massive security hole, so only run it on
a throw-away test machine on an isolated network.
"""
import os, sys, shutil, errno
import threading
# Pycopia imports
from pycopia.aid import IF
from pycopia.anypath import cygwin2nt, nt2cygwin
from pycopia import shparser
# returnable objects
from pycopia.remote.WindowsObjects import ExitStatus
# Windows stuff
import msvcrt
import win32api
import win32file
import win32net
import win32process
import win32event
# constants
import pywintypes
import win32con
import win32netcon
# some constants that the API forgot...
USE_WILDCARD = -1
USE_DISKDEV = 0
USE_SPOOLDEV = 1
USE_CHARDEV = 2
USE_IPC = 3
import Pyro
import Pyro.util
setConfig()
Log=Pyro.util.Log
import Pyro.core
import Pyro.naming
from Pyro.ext.BasicNTService import BasicNTService, getRegistryParameters
_EXIT = False
UserLog = Pyro.util.UserLogger()
# msg, warn, or error methods
split_command_line = shparser.get_command_splitter()
# quick hack ... Windows sucks. No signal handling or anything useful, so it has to be faked.
# A server that performs filer client operations. This mostly delegates to the
# os module. But some special methods are provided for common functions.
# md5sums callback for counting files
######## main program #####
def run_server():
os.chdir(r"C:\tmp")
Pyro.core.initServer(banner=0, storageCheck=0)
ns=Pyro.naming.NameServerLocator().getNS()
daemon=Pyro.core.Daemon()
daemon.useNameServer(ns)
uri=daemon.connectPersistent(Win32Agent(),
"Agents.%s" % (win32api.GetComputerName().lower(),))
daemon.requestLoop(_checkexit)
daemon.shutdown()
def _checkexit():
global _EXIT
return not _EXIT
if __name__ == '__main__':
RemoteAgentService.HandleCommandLine()
| 31.475532 | 119 | 0.590935 |
49fe4d073489d2871ae5e7fb65b3eed92cc792a4 | 372 | py | Python | benchmark/info_locust.py | dmitryhd/avio | 4e99c123de12a682f1ac1141899d670fbab81de6 | [
"MIT"
] | 2 | 2018-05-28T14:15:00.000Z | 2018-10-15T09:33:38.000Z | benchmark/info_locust.py | dmitryhd/avio | 4e99c123de12a682f1ac1141899d670fbab81de6 | [
"MIT"
] | null | null | null | benchmark/info_locust.py | dmitryhd/avio | 4e99c123de12a682f1ac1141899d670fbab81de6 | [
"MIT"
] | null | null | null | from locust import HttpLocust, TaskSet
| 15.5 | 79 | 0.647849 |
b700ba706143879736399c02f81312c27b36379e | 513 | py | Python | userbot/plugins/funtxts.py | kumar451/CatUserbot | 44fab853232fad163fee63565cc4f3e645596527 | [
"MIT"
] | null | null | null | userbot/plugins/funtxts.py | kumar451/CatUserbot | 44fab853232fad163fee63565cc4f3e645596527 | [
"MIT"
] | null | null | null | userbot/plugins/funtxts.py | kumar451/CatUserbot | 44fab853232fad163fee63565cc4f3e645596527 | [
"MIT"
] | null | null | null | import nekos
from ..utils import admin_cmd
| 21.375 | 38 | 0.619883 |
b700c7a198400a2306ffbc65c60b311d50ca469c | 2,518 | py | Python | tests/test_thread_python_exit.py | justengel/continuous_threading | 33e109df22eee202774975a3a940fb15164e6a78 | [
"MIT"
] | 7 | 2020-05-30T05:57:39.000Z | 2022-03-05T06:09:26.000Z | tests/test_thread_python_exit.py | justengel/continuous_threading | 33e109df22eee202774975a3a940fb15164e6a78 | [
"MIT"
] | 2 | 2020-05-30T15:12:44.000Z | 2020-10-06T12:54:41.000Z | tests/test_thread_python_exit.py | justengel/continuous_threading | 33e109df22eee202774975a3a940fb15164e6a78 | [
"MIT"
] | 1 | 2020-03-03T19:37:44.000Z | 2020-03-03T19:37:44.000Z | import time
import continuous_threading
if __name__ == '__main__':
# Run one option at a time
import sys
# Default test run
# run_test = test_thread
# run_test = test_continuous
# run_test = test_pausable
run_test = test_operation
if len(sys.argv) > 1:
value = str(sys.argv[1]).lower()
if value == '0' or value == 'thread':
run_test = test_thread
elif value == '1' or 'continuous' in value:
run_test = test_continuous
elif value == '2' or 'paus' in value:
run_test = test_pausable
elif value == '3' or 'op' in value:
run_test = test_operation
run_test()
# You should observe that python.exe is no longer a running process when the program finishes.
# exit code should be 0
| 25.18 | 106 | 0.608817 |
b701550eed98d3100b7b0a2a4ed10c335a6dc06a | 2,587 | py | Python | src/models/transformer_encoder.py | tsumita/implicit_emotion | dae2d5a8162a2665b8e76812716068650feae710 | [
"MIT"
] | 6 | 2018-09-03T00:55:35.000Z | 2020-01-09T11:53:31.000Z | src/models/transformer_encoder.py | tsumita/implicit_emotion | dae2d5a8162a2665b8e76812716068650feae710 | [
"MIT"
] | null | null | null | src/models/transformer_encoder.py | tsumita/implicit_emotion | dae2d5a8162a2665b8e76812716068650feae710 | [
"MIT"
] | 2 | 2019-06-23T11:32:27.000Z | 2019-07-04T22:15:33.000Z | import copy
import torch.nn as nn
from .transformer import (Encoder,
EncoderLayer,
MultiHeadedAttention,
PositionwiseFeedforward,
PositionalEncoding)
| 32.3375 | 81 | 0.609586 |
b70252d52453ebc8294d55dbd8e4ca2fdec3a045 | 338 | py | Python | bluebottle/members/migrations/0028_merge_20190215_1441.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/members/migrations/0028_merge_20190215_1441.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/members/migrations/0028_merge_20190215_1441.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-15 13:41
from __future__ import unicode_literals
from django.db import migrations
| 19.882353 | 48 | 0.662722 |
b7065834b7518e12325dc5b9284ed2b6d23d7a2b | 5,221 | py | Python | src/globus_cli/login_manager/tokenstore.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 47 | 2016-04-21T19:51:17.000Z | 2022-02-25T14:13:30.000Z | src/globus_cli/login_manager/tokenstore.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 421 | 2016-04-20T18:45:24.000Z | 2022-03-14T14:50:41.000Z | src/globus_cli/login_manager/tokenstore.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 20 | 2016-09-10T20:25:27.000Z | 2021-10-06T16:02:47.000Z | import os
import sys
import globus_sdk
from globus_sdk.tokenstorage import SQLiteAdapter
from ._old_config import invalidate_old_config
# internal constants
_CLIENT_DATA_CONFIG_KEY = "auth_client_data"
# env vars used throughout this module
GLOBUS_ENV = os.environ.get("GLOBUS_SDK_ENVIRONMENT")
GLOBUS_PROFILE = os.environ.get("GLOBUS_PROFILE")
def internal_native_client():
"""
This is the client that represents the CLI itself (prior to templating)
"""
template_id = _template_client_id()
return globus_sdk.NativeAppAuthClient(
template_id, app_name="Globus CLI (native client)"
)
def internal_auth_client():
"""
Pull template client credentials from storage and use them to create a
ConfidentialAppAuthClient.
In the event that credentials are not found, template a new client via the Auth API,
save the credentials for that client, and then build and return the
ConfidentialAppAuthClient.
"""
adapter = token_storage_adapter()
client_data = adapter.read_config(_CLIENT_DATA_CONFIG_KEY)
if client_data is not None:
client_id = client_data["client_id"]
client_secret = client_data["client_secret"]
else:
# register a new instance client with auth
nc = internal_native_client()
res = nc.post(
"/v2/api/clients",
data={"client": {"template_id": nc.client_id, "name": "Globus CLI"}},
)
# get values and write to config
credential_data = res["included"]["client_credential"]
client_id = credential_data["client"]
client_secret = credential_data["secret"]
adapter.store_config(
_CLIENT_DATA_CONFIG_KEY,
{"client_id": client_id, "client_secret": client_secret},
)
return globus_sdk.ConfidentialAppAuthClient(
client_id, client_secret, app_name="Globus CLI"
)
| 34.348684 | 88 | 0.679755 |
b706818aa45f72b58b9687e3a435833411cd0110 | 5,325 | py | Python | launchMinecraft.py | Timurinyo/tchrHlprStudent | 598f0e1321b11555d327393ab78723e1e286703e | [
"MIT"
] | null | null | null | launchMinecraft.py | Timurinyo/tchrHlprStudent | 598f0e1321b11555d327393ab78723e1e286703e | [
"MIT"
] | null | null | null | launchMinecraft.py | Timurinyo/tchrHlprStudent | 598f0e1321b11555d327393ab78723e1e286703e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding:utf-8
__author__ = 'CoderZh and Tymur'
import sys
from time import sleep
# Important for multithreading
sys.coinit_flags = 0 # pythoncom.COINIT_MULTITHREADED
import win32com
import win32com.client
import win32gui
import win32con
import pythoncom
#import keyboard
from pathlib import Path
import os
import re
import subprocess
import psutil
#def connectToIEServer():
| 28.475936 | 159 | 0.689202 |
b7076a862b13e824331a204380735697e0b6b508 | 4,158 | py | Python | trabajo/Plugins/test_plugin_MontoEscrito.py | f2scali/siendo | 5e3c20143317e365cfecb5b56a0f2388acc46949 | [
"Apache-2.0"
] | null | null | null | trabajo/Plugins/test_plugin_MontoEscrito.py | f2scali/siendo | 5e3c20143317e365cfecb5b56a0f2388acc46949 | [
"Apache-2.0"
] | null | null | null | trabajo/Plugins/test_plugin_MontoEscrito.py | f2scali/siendo | 5e3c20143317e365cfecb5b56a0f2388acc46949 | [
"Apache-2.0"
] | 1 | 2021-10-01T22:22:09.000Z | 2021-10-01T22:22:09.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'efrenfuentes'
import unittest
from plugin_MontoEscrito import numero_a_letras, numero_a_moneda
if __name__ == '__main__':
unittest.main()
| 39.980769 | 199 | 0.685907 |
b708d72fd35c4c8f3891e434790ce2fd08903cc3 | 2,238 | py | Python | setup.py | hivesolutions/pconvert | ff4d09400dc1542080d86f3f99c702ab0ef1405d | [
"Apache-1.1"
] | 4 | 2020-04-18T08:38:42.000Z | 2020-12-10T01:54:57.000Z | setup.py | hivesolutions/pconvert | ff4d09400dc1542080d86f3f99c702ab0ef1405d | [
"Apache-1.1"
] | 3 | 2020-09-09T16:40:47.000Z | 2020-11-11T13:21:58.000Z | setup.py | hivesolutions/pconvert | ff4d09400dc1542080d86f3f99c702ab0ef1405d | [
"Apache-1.1"
] | 4 | 2016-09-28T10:32:42.000Z | 2020-11-11T12:39:02.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import setuptools
setuptools.setup(
name = "pconvert-python",
version = "0.4.1",
author = "Hive Solutions Lda.",
author_email = "development@hive.pt",
description = "PNG Convert",
license = "Apache License, Version 2.0",
keywords = "pconvert converted compositor",
url = "http://pconvert.hive.pt",
packages = [
"pconvert_py",
"pconvert_py.test"
],
test_suite = "pconvert_py.test",
package_dir = {
"" : os.path.normpath("src/python")
},
ext_modules = [
setuptools.Extension(
"pconvert",
include_dirs = ["src/pconvert", "/usr/local/include"],
libraries = [] if os.name in ("nt",) else ["m", "png"],
library_dirs = ["/usr/local/lib"],
extra_compile_args = [] if os.name in ("nt",) else [
"-O3",
"-finline-functions",
"-Winline"
],
sources = [
"src/pconvert/extension.c",
"src/pconvert/opencl.c",
"src/pconvert/pconvert.c",
"src/pconvert/stdafx.c",
"src/pconvert/structs.c",
"src/pconvert/util.c"
],
define_macros = [
("PCONVERT_EXTENSION", None),
("PASS_ERROR", None)
]
)
],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
]
)
| 33.402985 | 68 | 0.498213 |
b70b18ff12c786f422768e26c9e8b6e9b54e1407 | 2,281 | py | Python | tf2onnx/optimizer/optimizer_base.py | gcunhase/tensorflow-onnx | 8a61c99fbc39c36d70781f95e2c7c582f46ba2db | [
"Apache-2.0"
] | 1,473 | 2018-03-16T02:47:33.000Z | 2022-03-31T03:43:52.000Z | tf2onnx/optimizer/optimizer_base.py | gcunhase/tensorflow-onnx | 8a61c99fbc39c36d70781f95e2c7c582f46ba2db | [
"Apache-2.0"
] | 1,208 | 2018-03-14T09:58:49.000Z | 2022-03-31T17:56:20.000Z | tf2onnx/optimizer/optimizer_base.py | gcunhase/tensorflow-onnx | 8a61c99fbc39c36d70781f95e2c7c582f46ba2db | [
"Apache-2.0"
] | 350 | 2018-04-03T03:48:40.000Z | 2022-03-30T11:23:55.000Z | # SPDX-License-Identifier: Apache-2.0
"""Graph Optimizer Base"""
import copy
from .. import logging, utils
def _optimize(self, graph):
""" Derived class should override this function. """
raise NotImplementedError
| 29.623377 | 112 | 0.615081 |
b70b7e3ec23c6100b7d22c2fc18a52d85615b5ef | 3,299 | py | Python | train.py | anishjain18/Trigger-Word-Detector | 85d635cabc553c612db414853b4569ec869d9bf7 | [
"MIT"
] | 15 | 2021-11-03T04:33:22.000Z | 2022-03-30T18:24:57.000Z | train.py | anishjain18/Trigger-Word-Detector | 85d635cabc553c612db414853b4569ec869d9bf7 | [
"MIT"
] | null | null | null | train.py | anishjain18/Trigger-Word-Detector | 85d635cabc553c612db414853b4569ec869d9bf7 | [
"MIT"
] | 21 | 2021-11-03T04:34:11.000Z | 2022-03-22T10:17:06.000Z | import numpy as np
from pydub import AudioSegment
import random
import sys
import io
import os
import glob
import IPython
from td_utils import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from tensorflow.keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from tensorflow.keras.optimizers import Adam
Tx = 5511 # The number of time steps input to the model from the spectrogram
n_freq = 101 # Number of frequencies input to the model at each time step of the spectrogram
Ty = 1375 # The number of time steps in the output of our model
X = np.load("./XY_train/X0.npy")
Y = np.load("./XY_train/Y0.npy")
X = np.concatenate((X, np.load("./XY_train/X1.npy")), axis=0)
Y = np.concatenate((Y, np.load("./XY_train/Y1.npy")), axis=0)
Y = np.swapaxes(Y, 1, 2)
# Load preprocessed dev set examples
X_dev = np.load("./XY_dev/X_dev.npy")
Y_dev = np.load("./XY_dev/Y_dev.npy")
# GRADED FUNCTION: model
def modelf(input_shape):
"""
Function creating the model's graph in Keras.
Argument:
input_shape -- shape of the model's input data (using Keras conventions)
Returns:
model -- Keras model instance
"""
X_input = Input(shape = input_shape)
### START CODE HERE ###
# Step 1: CONV layer (4 lines)
X = Conv1D(196, kernel_size = 15, strides = 4)(X_input) # CONV1D
X = BatchNormalization()(X) # Batch normalization
X = Activation("relu")(X) # ReLu activation
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 2: First GRU Layer (4 lines)
X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
# Step 3: Second GRU Layer (4 lines)
X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 4: Time-distributed dense layer (1 line)
X = TimeDistributed(Dense(1, activation = "sigmoid"))(X) # time distributed (sigmoid)
### END CODE HERE ###
model = Model(inputs = X_input, outputs = X)
return model
model = modelf(input_shape = (Tx, n_freq))
model.summary()
opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
model.fit(X, Y, batch_size=20, epochs=100)
loss, acc = model.evaluate(X_dev, Y_dev)
print("Dev set accuracy = ", acc)
from tensorflow.keras.models import model_from_json
json_file = open('./models/model_new3.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights('./models/model_new3.h5') | 34.726316 | 109 | 0.632313 |
b70ca02982a56be8fc00bc20da13192c0eb44f5a | 1,753 | py | Python | armchem/workspace.py | mmgalushka/armchem | 00bd0d2085b47b03724af422b75e2801619b8c03 | [
"MIT"
] | 3 | 2020-05-13T21:48:26.000Z | 2020-10-18T14:42:00.000Z | armchem/workspace.py | mmgalushka/armchem | 00bd0d2085b47b03724af422b75e2801619b8c03 | [
"MIT"
] | null | null | null | armchem/workspace.py | mmgalushka/armchem | 00bd0d2085b47b03724af422b75e2801619b8c03 | [
"MIT"
] | 1 | 2022-02-27T01:04:38.000Z | 2022-02-27T01:04:38.000Z | # =====================================================
# Copyright (c) 2017-present, AUROMIND Ltd.
# =====================================================
import os
from network import NeuralNetwork
from experiments import Experiment
from utils import save_object, load_object
# -----------------------------------------------------
# Exporting Method
# -----------------------------------------------------
# -----------------------------------------------------
# Workspace Handler
# -----------------------------------------------------
| 31.872727 | 84 | 0.497433 |
b70d32e911f44e99f8dbffab2918320edced91af | 4,353 | py | Python | uge/objects/cluster_queue_v1_0.py | gridengine/config-api | 694f9667bb6569170356336283a18351456e8b82 | [
"Apache-2.0"
] | 6 | 2017-01-18T00:11:19.000Z | 2022-02-10T08:18:00.000Z | uge/objects/cluster_queue_v1_0.py | gridengine/config-api | 694f9667bb6569170356336283a18351456e8b82 | [
"Apache-2.0"
] | 3 | 2017-05-11T13:54:42.000Z | 2020-08-12T06:15:43.000Z | uge/objects/cluster_queue_v1_0.py | gridengine/config-api | 694f9667bb6569170356336283a18351456e8b82 | [
"Apache-2.0"
] | 4 | 2017-05-11T13:27:33.000Z | 2019-10-29T02:02:24.000Z | #!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
from .qconf_object import QconfObject
| 33.484615 | 172 | 0.574546 |
b70d6fb35471bf494fd9d6d6d9ddbd3c3ad564bf | 6,789 | py | Python | decomplz4l_prep/__init__.py | Tiempogithub/decomplz4l | 1043daf70072bb4dc229c7503bce3b212156921b | [
"BSD-3-Clause"
] | 1 | 2020-02-28T16:21:00.000Z | 2020-02-28T16:21:00.000Z | decomplz4l_prep/__init__.py | Tiempogithub/decomplz4l | 1043daf70072bb4dc229c7503bce3b212156921b | [
"BSD-3-Clause"
] | null | null | null | decomplz4l_prep/__init__.py | Tiempogithub/decomplz4l | 1043daf70072bb4dc229c7503bce3b212156921b | [
"BSD-3-Clause"
] | 2 | 2019-06-28T21:36:01.000Z | 2019-08-19T07:55:51.000Z | #!/usr/bin/env python3
import os
import sys
import runpy
from intelhex import IntelHex
#import lz4.frame
import subprocess
import shutil
# Entry point: compress selected sections of an Intel HEX image with the
# external 'lz4' tool and emit a new <input>.lz4l.ihex image containing a
# map LUT plus the compressed payloads, laid out per the meta-info script.
if __name__ == "__main__":
    # Prefer a bundled 'lz4' binary next to this script; fall back to PATH.
    script_directory = os.path.dirname(os.path.realpath(__file__))
    lz4 = os.path.join(script_directory,'lz4')
    if not os.path.isfile(lz4):
        lz4 = shutil.which('lz4')
        assert(lz4 is not None)
    # Exactly two positional arguments are required.
    if (len(sys.argv) > 3) | (len(sys.argv) < 3) :
        print("ERROR: incorrect arguments")
        print("Usage:")
        print("prep.py <ihex> <metainfo>")
        exit()
    ihexf = sys.argv[1]
    metainfof = sys.argv[2]
    # ih: input image; ihgu: staging image for the "grow up" layout
    # (map LUT first, compressed data appended after it).
    ih = IntelHex()
    ihgu = IntelHex()
    ih.loadhex(ihexf)
    all_sections = ih.segments()
    print("input hex file sections:")
    for sec in all_sections:
        print("0x%08X 0x%08X"%(sec[0],sec[1]-1))
    # Execute the meta-info script; it defines the compressed-storage window
    # and the list of sections to compress (plus optional output flags).
    file_globals = runpy.run_path(metainfof,init_globals={'prep_path':os.path.dirname(script_directory)})
    comp_storage_start=file_globals["comp_storage"]['start']
    comp_storage_end=file_globals["comp_storage"]['end']
    map_load_size=file_globals["map_load_size"]
    map_run_size=file_globals["map_run_size"]
    grow_up=file_globals["grow_up"]
    comp_sections=file_globals["comp_sections"]
    # Optional flags with defaults (get_file_global is defined elsewhere
    # in this module).
    linear_mode=get_file_global("linear_mode",True)
    start_at_end=get_file_global("start_at_end",False)
    use_seg_as_linear=get_file_global("use_seg_as_linear",False)
    print("%d sections to compress"%len(comp_sections))
    for sec in comp_sections:
        print("load: 0x%08X -> 0x%08X, run: 0x%08X -> 0x%08X, size: 0x%X"%(sec['load'],sec['load']+sec['size']-1,sec['run'],sec['run']+sec['size']-1,sec['size']))
    # The map LUT occupies the start of the storage window; compressed
    # payloads are written right after it.
    mapsize = (map_load_size+map_run_size)*len(comp_sections)
    map_storage=comp_storage_start
    comp_storage=comp_storage_start+mapsize
    #compress the sections
    for sec in comp_sections:
        #write the start address in the map LUT
        # Offsets/addresses are serialized little-endian, truncated to the
        # map entry widths declared by the meta-info script.
        start_offset_bytes = (comp_storage-comp_storage_start).to_bytes(8,byteorder='little')
        for i in range(0,map_load_size):
            ihgu[map_storage] = start_offset_bytes[i]
            map_storage+=1
        run_bytes = sec['run'].to_bytes(8,byteorder='little')
        for i in range(0,map_run_size):
            ihgu[map_storage] = run_bytes[i]
            map_storage+=1
        data = ih[sec['load']:sec['load']+sec['size']]
        # Dump the section bytes to a temp file and compress with the lz4
        # CLI: -9 max compression, -l legacy frame, -f overwrite output.
        ba = bytearray()
        for bi in range(sec['load'],sec['load']+sec['size']):
            ba.append(ih[bi])
        newfile=open('lz4_input.bin','wb')
        newfile.write(ba)
        newfile.close()
        cmd = [ lz4,'-9','-l','-f','lz4_input.bin','lz4_output.bin']
        subprocess.run(cmd,check=True)
        size=0
        with open('lz4_output.bin', "rb") as f:
            #skip the frame descriptor
            frame_descriptor = f.read(4)
            byte = f.read(1)
            while byte:
                ihgu[comp_storage] = int.from_bytes( byte, byteorder='little', signed=False )
                comp_storage+=1
                size+=1
                byte = f.read(1)
        sec['comp_size']=size
    # Abort if the compressed data does not fit in the storage window.
    if comp_storage>comp_storage_end:
        print("ERROR: compressed storage overflow by %d"%(comp_storage - comp_storage_end))
        exit(1)
    else:
        used = comp_storage - comp_storage_start
        free = comp_storage_end+1-comp_storage
        print("0x%08x bytes used in compressed storage"%(used))
        print("0x%08x bytes free in compressed storage"%(free))
    comp_storage_pad=0
    if grow_up:
        #just rename ihex object
        iho = ihgu
    else:
        #reverse compressed area storage
        # Repack everything against the top of the window: payloads are
        # shifted up by 'free' bytes and the map LUT is rebuilt from the
        # end of the window downwards, one entry per section.
        iho = IntelHex()
        map_storage=comp_storage_end+1
        #if 0!=(free%16):
        #    comp_storage_pad = free%16
        #    free-=comp_storage_pad
        comp_storage=comp_storage_start+free
        if 0!=(comp_storage%16):
            #add padding data
            # 0x55 filler aligns the relocated data start to 16 bytes.
            for i in range(comp_storage-(comp_storage%16),comp_storage):
                iho[i]=0x55
        #move the compressed data up
        print("copy 0x%X bytes from 0x%08X to 0x%08X"%(used,comp_storage_start+mapsize,comp_storage_start+free))
        for i in range(0,used):
            iho[comp_storage_start+free+i] = ihgu[comp_storage_start+mapsize+i]
        #rebuild map
        for sec in comp_sections:
            sec['load']=comp_storage
            #write the start offset in the map LUT
            map_storage-=map_load_size+map_run_size
            start_offset_bytes = (comp_storage-comp_storage_start).to_bytes(8,byteorder='little')
            for i in range(0,map_load_size):
                iho[map_storage] = start_offset_bytes[i]
                map_storage+=1
            run_bytes = sec['run'].to_bytes(8,byteorder='little')
            for i in range(0,map_run_size):
                iho[map_storage] = run_bytes[i]
                map_storage+=1
            map_storage-=map_load_size+map_run_size
            comp_storage+=sec['comp_size']
            #print("0x%x"%comp_storage)
            #print("0x%x"%map_storage)
        assert(map_storage==comp_storage+comp_storage_pad)
    #create a list of start address of the sections which have been compressed
    print("compressed sections load addresses:")
    comp_sections_start=[]
    for sec in comp_sections:
        print("0x%08X"%sec['load'])
        comp_sections_start.append(sec['load'])
    #copy all regular sections
    # Every byte of the original image outside the compressed-storage window
    # is copied into the output image unchanged.
    for sec in all_sections:
        print("copy section from %x to %x"%(sec[0],sec[1]))
        for i in range(sec[0],sec[1]):
            if (i<comp_storage_start) or (i>=comp_storage_end):
                iho[i]=ih[i]
    #copy start address
    #print("start address: ",ih.start_addr)
    iho.start_addr = ih.start_addr
    if not linear_mode or start_at_end or use_seg_as_linear:
        #need custom version of intelhex, get it here: https://github.com/sebastien-riou/intelhex
        iho.write_hex_file(ihexf+".lz4l.ihex",linear_mode=linear_mode,start_at_end=start_at_end,use_seg_as_linear=use_seg_as_linear)
    else:
        iho.write_hex_file(ihexf+".lz4l.ihex")
| 37.098361 | 162 | 0.627338 |
b70e5693b800019c043a966f67e793acac17d9e5 | 3,819 | py | Python | application/views/service/service.py | celerysoft/scholar-tool-manager | 6188d981266eeec391ba646b9c7dc426ddec37e8 | [
"Apache-2.0"
] | null | null | null | application/views/service/service.py | celerysoft/scholar-tool-manager | 6188d981266eeec391ba646b9c7dc426ddec37e8 | [
"Apache-2.0"
] | 3 | 2019-04-29T22:55:49.000Z | 2020-05-14T14:35:42.000Z | application/views/service/service.py | celerysoft/ScholarToolManager | 6188d981266eeec391ba646b9c7dc426ddec37e8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from flask import make_response, Blueprint
from app import derive_import_root, add_url_rules_for_blueprint
from application import exception
from application.model.service import Service
from application.model.service_template import ServiceTemplate
from application.util.database import session_scope
from application.views.base_api import BaseNeedLoginAPI, ApiResult
# Expose the API class under the generic name the URL-rule helper expects.
# NOTE(review): ServiceAPI appears to be defined elsewhere in this module.
view = ServiceAPI
# Blueprint named after the last component of this module's dotted path.
bp = Blueprint(__name__.split('.')[-1], __name__)
root = derive_import_root(__name__)
# Auto-register this package's view URL rules on the blueprint.
add_url_rules_for_blueprint(root, bp)
| 38.19 | 101 | 0.625556 |
b713985ca32368cb00dff148dea34d4486a5b5ad | 1,293 | py | Python | trello/searchs.py | fif911/trello3_little_bit_updated | baf0275c5a89b3bcf9c1544897cbe25fafbc53d0 | [
"BSD-2-Clause"
] | 16 | 2016-01-19T17:02:24.000Z | 2020-02-20T19:23:32.000Z | trello/searchs.py | fif911/trello3_little_bit_updated | baf0275c5a89b3bcf9c1544897cbe25fafbc53d0 | [
"BSD-2-Clause"
] | 3 | 2016-02-10T14:17:58.000Z | 2016-07-26T01:31:54.000Z | trello/searchs.py | fif911/trello3_little_bit_updated | baf0275c5a89b3bcf9c1544897cbe25fafbc53d0 | [
"BSD-2-Clause"
] | 7 | 2016-02-09T23:47:00.000Z | 2021-06-05T17:03:22.000Z | import json
import requests
| 76.058824 | 649 | 0.784996 |
b713b15110cb69ba0de9387f17cdd3a78231774b | 3,736 | py | Python | architect/design/bounded_exogenous_parameters.py | MIT-REALM/architect | 1b5bbf6ddf08146cd3b8ad5c058539ac140e9ebb | [
"BSD-2-Clause"
] | 2 | 2022-03-30T03:07:26.000Z | 2022-03-30T17:35:21.000Z | architect/design/bounded_exogenous_parameters.py | MIT-REALM/architect | 1b5bbf6ddf08146cd3b8ad5c058539ac140e9ebb | [
"BSD-2-Clause"
] | null | null | null | architect/design/bounded_exogenous_parameters.py | MIT-REALM/architect | 1b5bbf6ddf08146cd3b8ad5c058539ac140e9ebb | [
"BSD-2-Clause"
] | null | null | null | """Exogenous parameters are anything "uncontrollable" that affect the design; these are
what we consider robustness against and are typically drawn from some distribution
"""
from typing import Optional, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
from jax._src.prng import PRNGKeyArray
import numpy as np
from .exogenous_parameters import ExogenousParameters
| 35.580952 | 87 | 0.608405 |
b7176f2e0dac30c4e5404b6399ccd7f4159c21b1 | 4,955 | py | Python | courspider/department_calendar.py | Zylphrex/courspider | bbcd8f71afa3958405f7017890d5fc9ec8d0d4cd | [
"MIT"
] | null | null | null | courspider/department_calendar.py | Zylphrex/courspider | bbcd8f71afa3958405f7017890d5fc9ec8d0d4cd | [
"MIT"
] | null | null | null | courspider/department_calendar.py | Zylphrex/courspider | bbcd8f71afa3958405f7017890d5fc9ec8d0d4cd | [
"MIT"
] | null | null | null | import re
from courspider.faculty_calendar_resources.department import Department
from courspider.faculty_calendar_resources.url import URL
from courspider.course import Course
| 40.284553 | 795 | 0.618365 |
b7187d387790af8d5795d75e9899699ce907f9df | 6,366 | py | Python | chrome/test/chromedriver/run_buildbot_steps.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-02-03T05:19:48.000Z | 2021-11-15T15:07:21.000Z | chrome/test/chromedriver/run_buildbot_steps.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/test/chromedriver/run_buildbot_steps.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the buildbot steps for ChromeDriver except for update/compile."""
import optparse
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import urllib2
import zipfile
# Directory containing this script; used to locate sibling tool directories.
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
# Make the chromedriver 'pylib' helpers importable before importing them.
sys.path.insert(0, os.path.join(_THIS_DIR, os.pardir, 'pylib'))
from common import chrome_paths
from common import util
import archive
# Google Storage bucket/prefix for the ChromeDriver prebuilt binaries.
GS_BUCKET = 'gs://chromedriver-prebuilts'
GS_ZIP_PREFIX = 'chromedriver2_prebuilts'
# Buildbot slave scripts checkout (seven directory levels up from this file).
SLAVE_SCRIPT_DIR = os.path.join(_THIS_DIR, os.pardir, os.pardir, os.pardir,
                                os.pardir, os.pardir, os.pardir, os.pardir,
                                'scripts', 'slave')
UPLOAD_SCRIPT = os.path.join(SLAVE_SCRIPT_DIR, 'skia', 'upload_to_bucket.py')
DOWNLOAD_SCRIPT = os.path.join(SLAVE_SCRIPT_DIR, 'gsutil_download.py')
if __name__ == '__main__':
main()
| 29.887324 | 80 | 0.678291 |
b718b1cb323c068a0fab21f464ad842ab3d200e7 | 2,924 | py | Python | src/TheLanguage/Parser/Expressions/UnitTests/CastExpressionParserInfo_UnitTest.py | davidbrownell/DavidBrownell_TheLanguage | 07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98 | [
"BSL-1.0"
] | null | null | null | src/TheLanguage/Parser/Expressions/UnitTests/CastExpressionParserInfo_UnitTest.py | davidbrownell/DavidBrownell_TheLanguage | 07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98 | [
"BSL-1.0"
] | null | null | null | src/TheLanguage/Parser/Expressions/UnitTests/CastExpressionParserInfo_UnitTest.py | davidbrownell/DavidBrownell_TheLanguage | 07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98 | [
"BSL-1.0"
] | 1 | 2021-06-18T18:58:57.000Z | 2021-06-18T18:58:57.000Z | # ----------------------------------------------------------------------
# |
# | CastExpressionParserInfo_UnitTest.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2021-10-04 09:14:16
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Unit test for CastExpressionParserInfo.py"""
import os
import pytest
import CommonEnvironment
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..CastExpressionParserInfo import *
from ...Common.AutomatedTests import RegionCreator
from ...Types.StandardTypeParserInfo import StandardTypeParserInfo
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
| 34.4 | 113 | 0.499316 |
b71a0544974b49622ebf65934372dde5c8e554ce | 363 | py | Python | backend/api/models.py | pranav2812/9th-inter-iit-traffic-sign | 27d3f14ce8235d5cdedb4bb2dbaa10e436f9b06b | [
"Apache-2.0"
] | 1 | 2021-04-13T07:56:17.000Z | 2021-04-13T07:56:17.000Z | backend/api/models.py | Tech-Meet-Solutions/Bosch-CV-9th-InterIIT | 57f4bd915c4a1e2146a77210e92d756e1cc3722a | [
"Apache-2.0"
] | 1 | 2021-04-09T11:52:01.000Z | 2021-04-09T12:21:36.000Z | backend/api/models.py | pranav2812/9th-inter-iit-traffic-sign | 27d3f14ce8235d5cdedb4bb2dbaa10e436f9b06b | [
"Apache-2.0"
] | 3 | 2021-04-15T10:43:06.000Z | 2021-05-16T00:36:39.000Z | from django.db import models
| 33 | 59 | 0.730028 |
b71ce22664f675b460275b3803f757d02f90c92c | 189 | py | Python | release/Server/__init__.py | cdfmlr/readquickly_WeChatSmallApp | e489c507bfbf81a9a43872919000b99b803a079c | [
"MIT"
] | 2 | 2019-04-03T13:19:32.000Z | 2019-04-03T23:20:27.000Z | release/Server/__init__.py | cdfmlr/readquickly_WeChatSmallApp | e489c507bfbf81a9a43872919000b99b803a079c | [
"MIT"
] | 1 | 2019-04-03T16:44:36.000Z | 2019-04-03T23:16:35.000Z | release/Server/__init__.py | A666AHL/readquickly_WeChatSmallApp | 7324b7bdd7cf6b7a77e127969077d1c84ada189d | [
"MIT"
] | 2 | 2019-04-04T08:38:08.000Z | 2019-04-04T09:01:42.000Z | '''
# ReadQuickly
```
|-- Server
|-- __init__.py
|-- server.py ()
|-- content.py ()
|-- spider ()
|-- weather ()
|-- notice ()
```
''' | 12.6 | 28 | 0.449735 |
b71d1eb99d842362c6523cbc96a06ae382953062 | 1,746 | py | Python | JB/5.py | boostjanbjorge/adventofcode | 5cdd540a553550b1000496dfa39cbf7cf431a85f | [
"MIT"
] | null | null | null | JB/5.py | boostjanbjorge/adventofcode | 5cdd540a553550b1000496dfa39cbf7cf431a85f | [
"MIT"
] | null | null | null | JB/5.py | boostjanbjorge/adventofcode | 5cdd540a553550b1000496dfa39cbf7cf431a85f | [
"MIT"
] | null | null | null | import collections
import dataclasses
import itertools
def segments():
    """Yield one Segment per line of inputs/5.txt ("x1,y1 -> x2,y2")."""
    with open("inputs/5.txt") as handle:
        raw_lines = handle.readlines()
    for raw in raw_lines:
        left, right = raw.strip().split("->")
        x1, y1 = left.split(",")
        x2, y2 = right.split(",")
        # int() tolerates the surrounding whitespace left by split("->").
        yield Segment(
            Point(int(x1), int(y1)),
            Point(int(x2), int(y2)),
        )
# 13319, to high
# Part 1: only horizontal/vertical segments count.
axis_aligned = (
    seg for seg in segments()
    if seg.start.x == seg.stop.x or seg.start.y == seg.stop.y
)
print(count_interception(axis_aligned))
# 19172
# Part 2: all segments, diagonals included.
print(count_interception(segments()))
| 23.594595 | 83 | 0.57331 |
b71e4d45d0dd84308fd2a62e675360e49475c3fc | 3,140 | py | Python | readthedocs/search/parse_json.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | null | null | null | readthedocs/search/parse_json.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | null | null | null | readthedocs/search/parse_json.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | null | null | null | """Functions related to converting content into dict/JSON structures."""
import codecs
import json
import logging
from pyquery import PyQuery
log = logging.getLogger(__name__)
def process_file(fjson_filename):
    """Read the fjson file from disk and parse it into a structured dict."""
    try:
        with codecs.open(fjson_filename, encoding='utf-8', mode='r') as fjson:
            raw = fjson.read()
    except IOError:
        log.info('Unable to read file: %s', fjson_filename)
        raise

    parsed = json.loads(raw)

    page_path = ''
    page_title = ''
    page_sections = []

    # Page path comes from Sphinx's current_page_name, when present.
    if 'current_page_name' not in parsed:
        log.info('Unable to index file due to no name %s', fjson_filename)
    else:
        page_path = parsed['current_page_name']

    # Body HTML is split into indexable sections via PyQuery.
    if not parsed.get('body'):
        log.info('Unable to index content for: %s', fjson_filename)
    else:
        page_sections.extend(
            generate_sections_from_pyquery(PyQuery(parsed['body']))
        )

    # Titles that arrive as HTML markup are reduced to their text content.
    if 'title' not in parsed:
        log.info('Unable to index title for: %s', fjson_filename)
    else:
        page_title = parsed['title']
        if page_title.startswith('<'):
            page_title = PyQuery(parsed['title']).text()

    return {
        'path': page_path,
        'title': page_title,
        'sections': page_sections,
    }
def parse_content(content):
    """
    Normalize a section's text content.

    When the text spans several lines, the first line is dropped because it
    repeats the section title; the remaining lines are trimmed, stripped of
    trailing periods and joined into one ". "-separated string.
    """
    cleaned = content.replace('', '').strip()
    lines = cleaned.split('\n')
    # Drop the leading (title) line only when newlines were present.
    if len(lines) > 1:
        lines = lines[1:]
    # Convert the remaining newlines into ". " separators.
    return '. '.join(text.strip().rstrip('.') for text in lines)
| 28.288288 | 76 | 0.576752 |
b71eb8d88a38241242bcf5b67fab2d3309817366 | 8,427 | py | Python | pottermore/pottermore.py | Ricotjhe/kennnyshiwa-cogs | 5a596f298a6f7fe7502634793384a747060fc6c7 | [
"MIT"
] | null | null | null | pottermore/pottermore.py | Ricotjhe/kennnyshiwa-cogs | 5a596f298a6f7fe7502634793384a747060fc6c7 | [
"MIT"
] | null | null | null | pottermore/pottermore.py | Ricotjhe/kennnyshiwa-cogs | 5a596f298a6f7fe7502634793384a747060fc6c7 | [
"MIT"
] | null | null | null | import contextlib
from redbot.core import commands, Config
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
import discord
import aiohttp
import random
# Image URLs for the Hogwarts houses and characters; referenced by the cog
# code elsewhere in this module (presumably for Discord embeds — confirm).
slytherin = "https://cdn.shopify.com/s/files/1/1325/3287/products/HP8040B_930f8033-607f-41ee-a8e4-fa90871ce7a7.png?v=1546231154"
gryffindor = "https://cdn10.bigcommerce.com/s-9p3fydit/products/370/images/1328/gryff1c__34591.1449620321.1280.1280.PNG?c=2"
ravenclaw = "https://cdn10.bigcommerce.com/s-9p3fydit/products/372/images/1332/raven1c__54237.1449620971.1200.1200.PNG?c=2"
hufflepuff = "https://cdn.shopify.com/s/files/1/0221/1146/products/Hufflepuff_Embroidered_Patch_Scaled_large.png?v=1553528874"
harry = "https://www.freepngimg.com/thumb/harry_potter/5-2-harry-potter-png-file.png"
hermione = "https://66.media.tumblr.com/3ce8453be755f31f93381918985b4918/tumblr_nn2lopIypj1rxkqbso1_1280.png"
voldemort = (
    "https://vignette.wikia.nocookie.net/harrypotter/images/6/6e/VoldemortHeadshot_DHP1.png"
)
snape = "https://vignette.wikia.nocookie.net/harrypotter/images/a/a3/Severus_Snape.jpg"
draco = "https://vignette.wikia.nocookie.net/harrypotter/images/7/7e/Draco_Malfoy_TDH.png"
dumbledore = "https://images.ctfassets.net/bxd3o8b291gf/5ocauY6zAsqGiIgeECw06e/8accc1c586d2be7d9de6a3d9aec37b90/AlbusDumbledore_WB_F1_DumbledoreSmiling_Still_080615_Port.jpg"
ron = "https://upload.wikimedia.org/wikipedia/en/thumb/5/5e/Ron_Weasley_poster.jpg/220px-Ron_Weasley_poster.jpg"
hagrid = "https://vignette.wikia.nocookie.net/harrypotter/images/e/ee/Rubeushagrid.PNG/revision/latest?cb=20161123044204"
ginny = "http://hp-intothefire.wdfiles.com/local--files/ginny/ginny.jpg"
sirius = "https://vignette.wikia.nocookie.net/harrypotter/images/7/75/Sirius_Black_profile.jpg/revision/latest?cb=20150918055024"
mcgonagall = "https://vignette.wikia.nocookie.net/harrypotter/images/6/65/ProfessorMcGonagall-HBP.jpg/revision/latest?cb=20100612114856"
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
| 48.154286 | 174 | 0.657411 |
b71f0ad71dba6e7fc8ad5d041a1cde7948bbc25f | 117 | py | Python | contacts/views/contact_views.py | Onlynfk/Freshdesk-CRM-Platform | 67137af09f7daf6fa2d19a9e70d573548137c9db | [
"MIT"
] | null | null | null | contacts/views/contact_views.py | Onlynfk/Freshdesk-CRM-Platform | 67137af09f7daf6fa2d19a9e70d573548137c9db | [
"MIT"
] | null | null | null | contacts/views/contact_views.py | Onlynfk/Freshdesk-CRM-Platform | 67137af09f7daf6fa2d19a9e70d573548137c9db | [
"MIT"
] | null | null | null | from django.shortcuts import render
| 19.5 | 52 | 0.735043 |
b721a7e3b07e02d858ee3ed05e23ae34bf1e9c54 | 781 | py | Python | algoritmos/PythonM2/desafio.py | MiguelTeixeiraUFPB/PythonM2 | 1ee07879b141eae4c4edd5f4ac43002b11167b2f | [
"MIT"
] | null | null | null | algoritmos/PythonM2/desafio.py | MiguelTeixeiraUFPB/PythonM2 | 1ee07879b141eae4c4edd5f4ac43002b11167b2f | [
"MIT"
] | null | null | null | algoritmos/PythonM2/desafio.py | MiguelTeixeiraUFPB/PythonM2 | 1ee07879b141eae4c4edd5f4ac43002b11167b2f | [
"MIT"
] | null | null | null | idadevelho=0
s = 0  # running sum of the two ages
f = 0  # count of women younger than 20
# Fix: guarantee 'nomevelho' exists even when no man is entered, so the
# final print cannot raise NameError ('idadevelho' is initialized above).
nomevelho = ''
for p in range(1, 3):
    print('---{} pessoa---'.format(p))
    nome = str(input('digite o {} nome: '.format(p))).strip()
    idade = int(input('digite a idade da {} pessoa: '.format(p)))
    peso = float(input('digite o peso da {} pessoa: '.format(p)))
    sexo = str(input('sexo[M/F]: ')).upper().strip()
    s += idade
    # Track the oldest man seen so far (the first man always sets the
    # baseline, matching the original p==1 special case).
    if sexo == 'M' and (p == 1 or idade > idadevelho):
        idadevelho = idade
        nomevelho = nome
    print()
    if sexo == 'F' and idade < 20:
        f += 1
        nomemulher = nome
media = s / 2  # average age of the two people
print('o nome do homem mais velho ', nomevelho)
print('a mdia de idade {}'.format(media))
print('a quantidade de mulheres com menos de 20 anos {}'.format(f))
| 27.892857 | 67 | 0.581306 |
b721cd3010a8637974b6ec065b10132ac28ed47b | 1,957 | py | Python | createVideo.py | Thefalas/disksMD | 1f3a0a1814baf1fd8905da2e88d2244de90d14ec | [
"MIT"
] | null | null | null | createVideo.py | Thefalas/disksMD | 1f3a0a1814baf1fd8905da2e88d2244de90d14ec | [
"MIT"
] | null | null | null | createVideo.py | Thefalas/disksMD | 1f3a0a1814baf1fd8905da2e88d2244de90d14ec | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu May 3 18:33:28 2018
@author: malopez
"""
import pandas as pd
import matplotlib.pyplot as plt
import cv2
# Input/output locations (absolute Windows paths from the original setup).
images_folder = "C:/Users/malopez/Desktop/disksMD/images"
data_folder = "C:/Users/malopez/Desktop/disksMD/data"
output_video = './video4.mp4'
# Simulation parameters; one frame is rendered per recorded collision.
particle_radius = 1.0
n_particles = 90 # TODO: Why 3 is the minimun number of particles?
desired_collisions_per_particle = 10
n_collisions = n_particles*desired_collisions_per_particle
size_X = 60 # System size X
size_Y = 30 # System size Y
# Figure size in inches preserves the X/Y aspect ratio of the system.
size_X_inches = 6*(size_X/size_Y)
size_Y_inches = 6
size_figure = (size_X_inches, size_Y_inches)
# Fenomenological constant ;p
circle_size = 11875*size_X_inches*size_Y_inches / (size_X*size_Y)
# circle_size = particle_radius*427500 / (size_X*size_Y)
# Render one PNG frame per recorded collision from the xy*.dat positions.
for frame_idx in range(n_collisions):
    tag = '{0:05d}'.format(frame_idx)
    positions = pd.read_table(data_folder + "/xy" + tag + ".dat",
                              sep='\s+', header=None, names=['x', 'y'])
    figure, axes = plt.subplots(figsize=size_figure, dpi=250)
    axes.set_xlim([0, size_X])
    axes.set_ylim([0, size_Y])
    plt.scatter(positions.x, positions.y, s=circle_size)
    figure.savefig(images_folder + '/img' + tag + ".png")
    print('Saving img n: ' + str(frame_idx))
    plt.close()
# Collect the frame filenames in render order.
images = [
    images_folder + '/img' + '{0:05d}'.format(k) + ".png"
    for k in range(n_collisions)
]
# Frame geometry is taken from the first rendered image.
first_frame = cv2.imread(images[0])
height, width, channels = first_frame.shape
# Define the codec and create the VideoWriter (codec name must be lower case).
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_video, fourcc, 30.0, (width, height))
print('Generating video, please wait')
for image_path in images:
    # Append each frame to the video stream.
    out.write(cv2.imread(image_path))
# Finalize the container once all frames are written.
out.release()
print("The output video is {}".format(output_video)) | 29.651515 | 68 | 0.701073 |
b7222728ba7e52a01bbb0861ac4236dbfa5ce453 | 1,003 | py | Python | utils/builder/register_builder/riscv/BootPriority.py | noahsherrill/force-riscv | 500cec3017f619dbf853a497bf02eaeecca927c9 | [
"Apache-2.0"
] | 111 | 2020-06-12T22:31:30.000Z | 2022-03-19T03:45:20.000Z | utils/builder/register_builder/riscv/BootPriority.py | noahsherrill/force-riscv | 500cec3017f619dbf853a497bf02eaeecca927c9 | [
"Apache-2.0"
] | 34 | 2020-06-12T20:23:40.000Z | 2022-03-15T20:04:31.000Z | utils/builder/register_builder/riscv/BootPriority.py | noahsherrill/force-riscv | 500cec3017f619dbf853a497bf02eaeecca927c9 | [
"Apache-2.0"
] | 32 | 2020-06-12T19:15:26.000Z | 2022-02-20T11:38:31.000Z | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BootPriority.py
#
# This file defines the BootPriority helper class.
# The boot priority class defines helper methods associated with boot priority
| 37.148148 | 78 | 0.755733 |
b72306e350f2a9f34586f4bdf8fb4a7f6ec9f932 | 4,141 | py | Python | truffe2/generic/templatetags/generic_extras.py | JonathanCollaud/truffe2 | 5cbb055ac1acf7e7dc697340618fcb56c67fbd91 | [
"BSD-2-Clause"
] | 9 | 2016-09-14T02:19:19.000Z | 2020-10-18T14:52:14.000Z | truffe2/generic/templatetags/generic_extras.py | JonathanCollaud/truffe2 | 5cbb055ac1acf7e7dc697340618fcb56c67fbd91 | [
"BSD-2-Clause"
] | 19 | 2016-11-09T21:28:51.000Z | 2021-02-10T22:37:31.000Z | truffe2/generic/templatetags/generic_extras.py | JonathanCollaud/truffe2 | 5cbb055ac1acf7e7dc697340618fcb56c67fbd91 | [
"BSD-2-Clause"
] | 13 | 2016-12-31T14:22:09.000Z | 2020-12-27T19:43:19.000Z | from django import template
from django.utils.safestring import mark_safe
import bleach
from bleach.sanitizer import BleachSanitizer
from bleach.encoding import force_unicode
from bootstrap3.renderers import FieldRenderer
from bootstrap3.text import text_value
import html5lib
import re
register = template.Library()
pos = [(0, 0), (1, 0), (0, 1), (2, 3), (1, 2), (2, 1), (2, 2)]
re_spaceless = re.compile("(\n|\r)+")
class CrlfNode(template.Node):
    """Template node that renders its children and strips all CR/LF runs."""

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        body = self.nodelist.render(context)
        # Collapse every run of newlines/carriage returns to nothing.
        return re_spaceless.sub("", body.strip())
| 26.375796 | 157 | 0.657088 |
b7243a1265f9290fb4007832856d3ae61b5b1b98 | 1,158 | py | Python | tests/conftest.py | inducer/courseflow | 0f9786e3616dbedf08365d81a731f672b97ba9f5 | [
"Unlicense"
] | 284 | 2015-01-09T12:02:28.000Z | 2022-03-27T14:30:46.000Z | tests/conftest.py | inducer/courseflow | 0f9786e3616dbedf08365d81a731f672b97ba9f5 | [
"Unlicense"
] | 799 | 2015-02-26T08:49:46.000Z | 2022-03-31T16:09:26.000Z | tests/conftest.py | davis68/relate | eb40c8c17d4a724a60de3caa3334521a833bad5c | [
"Unlicense"
] | 120 | 2015-01-30T18:00:56.000Z | 2022-03-28T06:24:43.000Z | import pytest
# from pytest_factoryboy import register
| 30.473684 | 78 | 0.636442 |
b72448ecc9aed4165b8b074fbdda6ef50c31088e | 2,210 | py | Python | main.py | Kallu609/mp3-to-mp4-converter | 780d4741b79a45c1e5541527a58313a36d665e47 | [
"MIT"
] | null | null | null | main.py | Kallu609/mp3-to-mp4-converter | 780d4741b79a45c1e5541527a58313a36d665e47 | [
"MIT"
] | null | null | null | main.py | Kallu609/mp3-to-mp4-converter | 780d4741b79a45c1e5541527a58313a36d665e47 | [
"MIT"
] | 1 | 2020-03-28T02:57:32.000Z | 2020-03-28T02:57:32.000Z | import subprocess
from mimetypes import MimeTypes
from os import devnull, getcwd, listdir, makedirs, walk
from os.path import basename, dirname, exists, isfile, join, splitext
from pprint import pprint
from urllib.request import pathname2url
# MIME types accepted for the audio track and the cover image.
ALLOWED_AUDIO_MIMETYPES = ['audio/mpeg']
ALLOWED_IMAGE_MIMETYPES = ['image/jpeg', 'image/png']
# Working directory and the folder holding the input mp3 files.
CWD = getcwd()
MP3_DIR = join(CWD, 'mp3')
# Setup necessary variables
mime = MimeTypes()
if __name__ == '__main__':
    main()
| 26.95122 | 89 | 0.687783 |
b7247af0becba5f41e0c2a4a41f7a5b86547cdbf | 24 | py | Python | lang/Python/terminal-control-cursor-positioning-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | 1 | 2018-11-09T22:08:38.000Z | 2018-11-09T22:08:38.000Z | lang/Python/terminal-control-cursor-positioning-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/terminal-control-cursor-positioning-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | print("\033[6;3HHello")
| 12 | 23 | 0.666667 |
b724c571207a17423525239296130cc889afe81a | 6,169 | py | Python | ambassador/tests/ambassador_test.py | tesserai/ambassador | 70fadc62872be9b041b90cba54d3920a21777548 | [
"Apache-2.0"
] | 1 | 2019-01-22T05:36:23.000Z | 2019-01-22T05:36:23.000Z | ambassador/tests/ambassador_test.py | tesserai/ambassador | 70fadc62872be9b041b90cba54d3920a21777548 | [
"Apache-2.0"
] | null | null | null | ambassador/tests/ambassador_test.py | tesserai/ambassador | 70fadc62872be9b041b90cba54d3920a21777548 | [
"Apache-2.0"
] | null | null | null | import sys
import difflib
import errno
import json
import logging
import functools
import os
import pytest
from shell import shell
from diag_paranoia import diag_paranoia, filtered_overview, sanitize_errors
# Envoy image used to validate generated configurations.
VALIDATOR_IMAGE = "datawire/ambassador-envoy-alpine:v1.5.0-116-g7ccb25882"
DIR = os.path.dirname(__file__)
EXCLUDES = [ "__pycache__" ]
# TESTDIR = os.path.join(DIR, "tests")
TESTDIR = DIR
DEFAULT_CONFIG = os.path.join(DIR, "..", "default-config")
# Test-case directories: subdirectories of TESTDIR whose names start with '0'.
MATCHES = [ n for n in os.listdir(TESTDIR)
            if (n.startswith('0') and os.path.isdir(os.path.join(TESTDIR, n)) and (n not in EXCLUDES)) ]
# Disable Scout phone-home for the duration of the tests.
os.environ['SCOUT_DISABLE'] = "1"
#### decorators
#### Utilities
#### Test functions
| 31.635897 | 120 | 0.595396 |
b7253bf44267f3981869514c6f90cf8cf83b6b75 | 538 | py | Python | geocode_missing.py | UoA-eResearch/billboards | 196a4931dc7ed21a5ff001e539254b0a93ddad2c | [
"MIT"
] | null | null | null | geocode_missing.py | UoA-eResearch/billboards | 196a4931dc7ed21a5ff001e539254b0a93ddad2c | [
"MIT"
] | null | null | null | geocode_missing.py | UoA-eResearch/billboards | 196a4931dc7ed21a5ff001e539254b0a93ddad2c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
import googlemaps
import sys
import os
gmaps = googlemaps.Client(key=os.environ["GOOGLE_API_KEY"])
print(gmaps)
filename = sys.argv[1]
with open(filename) as f:
data = json.load(f)
for d in data:
if d.get("address") and not d.get("latitude"):
result = gmaps.geocode(d["address"])
print(result)
result = result[0]["geometry"]["location"]
d["latitude"] = result["lat"]
d["longitude"] = result["lng"]
with open(filename, "w") as f:
json.dump(data, f) | 23.391304 | 59 | 0.633829 |
b7262426be3d901c9e4c8163e1aff5cbb71a7660 | 48,109 | py | Python | tardis/plasma/properties/continuum_processes.py | AlexHls/tardis | 5d6e2299f35953a65e2c974994c55fe4aa3caae9 | [
"BSD-3-Clause"
] | null | null | null | tardis/plasma/properties/continuum_processes.py | AlexHls/tardis | 5d6e2299f35953a65e2c974994c55fe4aa3caae9 | [
"BSD-3-Clause"
] | 1 | 2020-07-20T16:45:52.000Z | 2020-07-20T16:45:52.000Z | tardis/plasma/properties/continuum_processes.py | jordi5/tardis | 2e1cb75c91ea842526b0c7c80a13cc8646178813 | [
"BSD-3-Clause"
] | null | null | null | import logging
import numpy as np
import pandas as pd
from numba import prange, njit
from tardis import constants as const
from tardis.plasma.exceptions import PlasmaException
from tardis.plasma.properties.base import (
ProcessingPlasmaProperty,
Input,
TransitionProbabilitiesProperty,
)
from tardis.plasma.properties.j_blues import JBluesDiluteBlackBody
__all__ = [
"SpontRecombRateCoeff",
"StimRecombRateCoeff",
"PhotoIonRateCoeff",
"PhotoIonEstimatorsNormFactor",
"PhotoIonRateCoeffEstimator",
"StimRecombRateCoeffEstimator",
"CorrPhotoIonRateCoeff",
"BfHeatingRateCoeffEstimator",
"StimRecombCoolingRateCoeffEstimator",
"SpontRecombCoolingRateCoeff",
"RawRecombTransProbs",
"RawPhotoIonTransProbs",
"CollDeexcRateCoeff",
"CollExcRateCoeff",
"RawCollisionTransProbs",
"AdiabaticCoolingRate",
"FreeFreeCoolingRate",
"FreeBoundCoolingRate",
"BoundFreeOpacity",
"LevelNumberDensityLTE",
"PhotoIonBoltzmannFactor",
"FreeBoundEmissionCDF",
"RawTwoPhotonTransProbs",
"TwoPhotonEmissionCDF",
"TwoPhotonFrequencySampler",
"CollIonRateCoeffSeaton",
"CollRecombRateCoeff",
"RawCollIonTransProbs",
"BoundFreeOpacityInterpolator",
"FreeFreeOpacity",
"ContinuumOpacityCalculator",
"FreeFreeFrequencySampler",
"FreeBoundFrequencySampler",
]
N_A = const.N_A.cgs.value
K_B = const.k_B.cgs.value
C = const.c.cgs.value
H = const.h.cgs.value
A0 = const.a0.cgs.value
M_E = const.m_e.cgs.value
E = const.e.esu.value
BETA_COLL = (H ** 4 / (8 * K_B * M_E ** 3 * np.pi ** 3)) ** 0.5
F_K = (
16
/ (3.0 * np.sqrt(3))
* np.sqrt((2 * np.pi) ** 3 * K_B / (H ** 2 * M_E ** 3))
* (E ** 2 / C) ** 3
) # See Eq. 19 in Sutherland, R. S. 1998, MNRAS, 300, 321
FF_OPAC_CONST = (
(2 * np.pi / (3 * M_E * K_B)) ** 0.5 * 4 * E ** 6 / (3 * M_E * H * C)
) # See Eq. 6.1.8 in http://personal.psu.edu/rbc3/A534/lec6.pdf
logger = logging.getLogger(__name__)
njit_dict = {"fastmath": False, "parallel": False}
# It is currently not possible to use scipy.integrate.cumulative_trapezoid in
# numba. So here is my own implementation.
def get_ion_multi_index(multi_index_full, next_higher=True):
"""
Calculate the corresponding ion MultiIndex for a level MultiIndex.
Parameters
----------
multi_index_full : pandas.MultiIndex (atomic_number, ion_number,
level_number)
next_higher : bool, default True
If True use ion number of next higher ion, else use ion_number from
multi_index_full.
Returns
-------
pandas.MultiIndex (atomic_number, ion_number)
Ion MultiIndex for the given level MultiIndex.
"""
atomic_number = multi_index_full.get_level_values(0)
ion_number = multi_index_full.get_level_values(1)
if next_higher is True:
ion_number += 1
return pd.MultiIndex.from_arrays([atomic_number, ion_number])
def get_ground_state_multi_index(multi_index_full):
"""
Calculate the ground-state MultiIndex for the next higher ion.
Parameters
----------
multi_index_full : pandas.MultiIndex (atomic_number, ion_number,
level_number)
Returns
-------
pandas.MultiIndex (atomic_number, ion_number)
Ground-state MultiIndex for the next higher ion.
"""
atomic_number = multi_index_full.get_level_values(0)
ion_number = multi_index_full.get_level_values(1) + 1
level_number = np.zeros_like(ion_number)
return pd.MultiIndex.from_arrays([atomic_number, ion_number, level_number])
def cooling_rate_series2dataframe(cooling_rate_series, destination_level_idx):
"""
Transform cooling-rate Series to DataFrame.
This function transforms a Series with cooling rates into
an indexed DataFrame that can be used in MarkovChainTransProbs.
Parameters
----------
cooling_rate_series : pandas.Series, dtype float
Cooling rates for a process with a single destination idx.
Examples are adiabatic cooling or free-free cooling.
destination_level_idx : str
Destination idx of the cooling process; for example
'adiabatic' for adiabatic cooling.
Returns
-------
cooling_rate_frame : pandas.DataFrame, dtype float
Indexed by source_level_idx, destination_level_idx, transition_type
for the use in MarkovChainTransProbs.
"""
index_names = [
"source_level_idx",
"destination_level_idx",
"transition_type",
]
index = pd.MultiIndex.from_tuples(
[("k", destination_level_idx, -1)], names=index_names
)
cooling_rate_frame = pd.DataFrame(
cooling_rate_series.values[np.newaxis], index=index
)
return cooling_rate_frame
def bf_estimator_array2frame(bf_estimator_array, level2continuum_idx):
"""
Transform a bound-free estimator array to a DataFrame.
This function transforms a bound-free estimator array with entries
sorted by frequency to a multi-indexed DataFrame sorted by level.
Parameters
----------
bf_estimator_array : numpy.ndarray, dtype float
Array of bound-free estimators (e.g., for the stimulated recombination rate)
with entries sorted by the threshold frequency of the bound-free continuum.
level2continuum_idx : pandas.Series, dtype int
Maps a level MultiIndex (atomic_number, ion_number, level_number) to
the continuum_idx of the corresponding bound-free continuum (which are
sorted by decreasing frequency).
Returns
-------
pandas.DataFrame, dtype float
Bound-free estimators indexed by (atomic_number, ion_number, level_number).
"""
bf_estimator_frame = pd.DataFrame(
bf_estimator_array, index=level2continuum_idx.index
).sort_index()
bf_estimator_frame.columns.name = "Shell No."
return bf_estimator_frame
class IndexSetterMixin(object):
| 33.155755 | 127 | 0.642749 |
b727e8a96c1fbd46e661c2a5b89a290d333e2329 | 1,805 | py | Python | pull_related_videos.py | jgawrilo/youtube | 553bfe4cf303bc06abf8173f5ed0f4deb3ede57f | [
"Apache-2.0"
] | 1 | 2017-01-13T12:57:06.000Z | 2017-01-13T12:57:06.000Z | pull_related_videos.py | jgawrilo/youtube | 553bfe4cf303bc06abf8173f5ed0f4deb3ede57f | [
"Apache-2.0"
] | null | null | null | pull_related_videos.py | jgawrilo/youtube | 553bfe4cf303bc06abf8173f5ed0f4deb3ede57f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import json
import os
import codecs
from bs4 import BeautifulSoup
import argparse
import requests
import sys
import googleapiclient
# MAIN
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pull some youtube.')
parser.add_argument("--key", help="https://cloud.google.com/console")
args = parser.parse_args()
# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps
# tab of
# https://cloud.google.com/console
# Please ensure that you have enabled the YouTube Data API for your project.
DEVELOPER_KEY = args.key
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
for f in os.listdir("../flashpoint/videos/"):
get_video_suggestions(youtube,f) | 28.203125 | 113 | 0.67867 |
b7286793431599e58da81b53c698ba6999fd3f4e | 1,630 | py | Python | day02/ex04/ai42/logging/log.py | d-r-e/Machine-Learning-Bootcamp | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | null | null | null | day02/ex04/ai42/logging/log.py | d-r-e/Machine-Learning-Bootcamp | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | 6 | 2021-05-25T08:51:39.000Z | 2021-05-25T08:51:40.000Z | day02/ex04/ai42/logging/log.py | d-r-e/Python-Bootcamp-42AI | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | null | null | null | # **************************************************************************** #
# #
# ::: :::::::: #
# logger.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: darodrig <darodrig@42madrid.com> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/04/15 22:12:50 by darodrig #+# #+# #
# Updated: 2020/04/15 22:12:50 by darodrig ### ########.fr #
# #
# **************************************************************************** #
import time
import functools
from string import capwords
import getpass
import datetime
| 41.794872 | 81 | 0.282209 |
b72a583373d3c6cb8d5d4af07ff2faff1162f56a | 2,550 | py | Python | examples/avatar_example.py | ZSD-tim/dayu_widgets | 31c2530bdc4161d9311574d9850c2e9471e53072 | [
"MIT"
] | 157 | 2019-03-10T05:55:21.000Z | 2022-03-31T09:07:00.000Z | examples/avatar_example.py | kanbang/dayu_widgets | 6ff101e6c6f8fcf10e5cb578023a12ccdcef9164 | [
"MIT"
] | 16 | 2019-07-15T11:30:53.000Z | 2021-12-16T14:17:59.000Z | examples/avatar_example.py | kanbang/dayu_widgets | 6ff101e6c6f8fcf10e5cb578023a12ccdcef9164 | [
"MIT"
] | 56 | 2019-06-19T03:35:27.000Z | 2022-03-22T08:07:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
from dayu_widgets.avatar import MAvatar
from dayu_widgets.divider import MDivider
from dayu_widgets.field_mixin import MFieldMixin
from dayu_widgets.label import MLabel
from dayu_widgets.push_button import MPushButton
from dayu_widgets import dayu_theme
from dayu_widgets.qt import QWidget, QVBoxLayout, MPixmap, QFormLayout, Qt, QHBoxLayout
if __name__ == '__main__':
import sys
from dayu_widgets.qt import QApplication
app = QApplication(sys.argv)
test = AvatarExample()
dayu_theme.apply(test)
test.show()
sys.exit(app.exec_())
| 34.931507 | 87 | 0.59098 |
b72c5b0b453dbcbfe1f73085f19a64855959a7ec | 1,254 | py | Python | ch9/note9.4.py | YChanHuang/Python-for-every-body-notes | 771db11ceb00341b533b4b1bf5235fb988978f65 | [
"MIT"
] | null | null | null | ch9/note9.4.py | YChanHuang/Python-for-every-body-notes | 771db11ceb00341b533b4b1bf5235fb988978f65 | [
"MIT"
] | null | null | null | ch9/note9.4.py | YChanHuang/Python-for-every-body-notes | 771db11ceb00341b533b4b1bf5235fb988978f65 | [
"MIT"
] | null | null | null | #Assignment 9.4
#print('Hello World')
fname = input('Enter name: ')
if len(fname) < 1 : fname = 'mbox-short.txt'
handle = open(fname)
di = {} #create an empty dictionary
for line in handle:
line = line.rstrip()
wds = line.split()
#the second for-loop to print each word in the list
for w in wds :
#if the key is not in the dictionar the count starts with 0
##oldcount = di.get(w,0)
##print(w, 'old', oldcount)
##newcount = oldcount + 1
##di[w] = newcount
##print(w, 'new', newcount)
di[w] = di.get(w, 0) + 1
#print(w, 'new', di[w])
#workflow: retrieve/created/update counter
#if w in di:
# di[w] = di[w] + 1
#print('*Existing*') #To provide a hint of what the programmes is doing
# else:
# di[w] = 1
#print('**New**') #To provide a hint of what the programmes is doing
#print(di)
#print the Most commoncommon word programme.
#mulitiple for-loop
largest = -1
theword = None
for k, v in di.items() : # times looks for the elements in dictionary
print(k, v)
if v > largest :
largest = v
theword = k # catch/remember the word that was largest
print('Most common', theword,largest)
| 29.857143 | 83 | 0.585327 |
b72cde7853e529893263c2d30f32b8dad4c30116 | 7,467 | py | Python | ava/common/check.py | indeedsecurity/ava-ce | 4483b301034a096b716646a470a6642b3df8ce61 | [
"Apache-2.0"
] | 2 | 2019-03-26T15:37:48.000Z | 2020-01-03T03:47:30.000Z | ava/common/check.py | indeedsecurity/ava-ce | 4483b301034a096b716646a470a6642b3df8ce61 | [
"Apache-2.0"
] | 2 | 2021-03-25T21:27:09.000Z | 2021-06-01T21:20:04.000Z | ava/common/check.py | indeedsecurity/ava-ce | 4483b301034a096b716646a470a6642b3df8ce61 | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
from difflib import SequenceMatcher
| 34.892523 | 117 | 0.656489 |
b72dfc9dbad5e8ddb1f77fb1efb1c89913e0c025 | 2,863 | py | Python | backend/translator/admin.py | Gobaan/menu-translator-django | db2a922dce844cd60a66b0325d4c074696126ca4 | [
"MIT"
] | null | null | null | backend/translator/admin.py | Gobaan/menu-translator-django | db2a922dce844cd60a66b0325d4c074696126ca4 | [
"MIT"
] | 4 | 2020-06-05T23:09:23.000Z | 2021-06-10T18:51:26.000Z | backend/translator/admin.py | Gobaan/menu-translator-django | db2a922dce844cd60a66b0325d4c074696126ca4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django import forms
from .models import Annotation, Clip, Food, Image, Language, Website, UserProfile
admin.site.register(Annotation, AnnotationAdmin)
admin.site.register(Clip, ClipAdmin)
admin.site.register(Food, FoodAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Website, WebsiteAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
| 26.027273 | 126 | 0.689487 |
b72e4a239b1f333096c89a02d6bb509de560e64e | 252 | py | Python | colibris/docs/openapi/__init__.py | AMecea/colibris | 068b7cbc4ed328dd9f3b4c40c5227b026589b028 | [
"BSD-3-Clause"
] | 6 | 2019-06-22T19:36:10.000Z | 2021-11-16T08:07:21.000Z | colibris/docs/openapi/__init__.py | AMecea/colibris | 068b7cbc4ed328dd9f3b4c40c5227b026589b028 | [
"BSD-3-Clause"
] | 34 | 2019-07-07T18:01:41.000Z | 2020-11-01T16:14:58.000Z | colibris/docs/openapi/__init__.py | AMecea/colibris | 068b7cbc4ed328dd9f3b4c40c5227b026589b028 | [
"BSD-3-Clause"
] | 2 | 2020-09-01T13:07:17.000Z | 2021-07-29T12:16:29.000Z | from os.path import abspath, join, dirname
from colibris.conf import settings
STATIC_PATH = abspath(join(dirname(__file__), 'swagger'))
UI_URL = settings.API_DOCS_URL
STATIC_URL = '{}/static'.format(UI_URL)
APISPEC_URL = '{}/apispec'.format(UI_URL)
| 25.2 | 57 | 0.761905 |
b72e8bfc6d83afc48f73ce1ff4c1bc304fbf13db | 145 | py | Python | arc/__init__.py | jtguibas/arc | e9df473ce5051f2b9f3981ef219b6a02076bdb42 | [
"MIT"
] | 6 | 2020-06-06T03:29:52.000Z | 2022-03-05T08:28:30.000Z | arc/__init__.py | jtguibas/arc | e9df473ce5051f2b9f3981ef219b6a02076bdb42 | [
"MIT"
] | null | null | null | arc/__init__.py | jtguibas/arc | e9df473ce5051f2b9f3981ef219b6a02076bdb42 | [
"MIT"
] | 5 | 2020-08-02T05:07:33.000Z | 2022-02-03T17:46:55.000Z | #!/usr/bin/env python
from arc import models
from arc import methods
from arc import black_boxes
from arc import others
from arc import coverage
| 20.714286 | 27 | 0.813793 |
b72ea37d9a980314ac78bb261e8ab6314ebd4e84 | 961 | py | Python | qcloudsdkbatch/TerminateTaskInstanceRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkbatch/TerminateTaskInstanceRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkbatch/TerminateTaskInstanceRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
| 28.264706 | 84 | 0.681582 |
b72f78f385c7b5879e4c81e4664cd6e4111978d3 | 329 | py | Python | altair/vegalite/v2/examples/step_chart.py | hydrosquall/altair | ded897b0967a88a467828b1e2c133bd92862de23 | [
"BSD-3-Clause"
] | null | null | null | altair/vegalite/v2/examples/step_chart.py | hydrosquall/altair | ded897b0967a88a467828b1e2c133bd92862de23 | [
"BSD-3-Clause"
] | null | null | null | altair/vegalite/v2/examples/step_chart.py | hydrosquall/altair | ded897b0967a88a467828b1e2c133bd92862de23 | [
"BSD-3-Clause"
] | null | null | null | """
Step Chart
-----------------
This example shows Google's stock price over time.
"""
import altair as alt
from vega_datasets import data
source = data.stocks()
chart = alt.Chart(source).mark_line(interpolate = 'step-after').encode(
x = 'date',
y = 'price'
)
chart.transform = [{"filter": "datum.symbol==='GOOG'"}]
| 18.277778 | 71 | 0.638298 |
b72fded642f589b2e698b4de11aaec30fbbd5f3f | 5,429 | py | Python | graphit/graph_networkx.py | py-graphit/py-graphit | 533ef47e279fc07d9a88f86cc9d19f09d56176f9 | [
"Apache-2.0"
] | 1 | 2018-12-02T18:56:34.000Z | 2018-12-02T18:56:34.000Z | graphit/graph_networkx.py | py-graphit/py-graphit | 533ef47e279fc07d9a88f86cc9d19f09d56176f9 | [
"Apache-2.0"
] | null | null | null | graphit/graph_networkx.py | py-graphit/py-graphit | 533ef47e279fc07d9a88f86cc9d19f09d56176f9 | [
"Apache-2.0"
] | 1 | 2018-12-02T15:29:41.000Z | 2018-12-02T15:29:41.000Z | # -*- coding: utf-8 -*-
"""
file: graph_networkx.py
Provides a NetworkX compliant Graph class.
"""
from graphit.graph import GraphBase
from graphit.graph_exceptions import GraphitException, GraphitNodeNotFound
from graphit.graph_algorithms import degree, size
from graphit.graph_utils.graph_utilities import graph_undirectional_to_directional, graph_directional_to_undirectional
| 25.729858 | 118 | 0.596611 |
b72ff8b09a0b3b3377f4450ff20722334a0f1163 | 260 | py | Python | Chapter02/Shuffle.py | Tanishadel/Mastering-Machine-Learning-for-Penetration-Testing | 5aecc2603faaa97dafc1fa4c86f4e5f530bb4877 | [
"MIT"
] | 241 | 2018-06-14T15:58:34.000Z | 2022-03-27T08:47:40.000Z | Chapter02/Shuffle.py | Tanishadel/Mastering-Machine-Learning-for-Penetration-Testing | 5aecc2603faaa97dafc1fa4c86f4e5f530bb4877 | [
"MIT"
] | 5 | 2018-11-05T20:40:58.000Z | 2021-09-17T12:35:42.000Z | Chapter02/Shuffle.py | Tanishadel/Mastering-Machine-Learning-for-Penetration-Testing | 5aecc2603faaa97dafc1fa4c86f4e5f530bb4877 | [
"MIT"
] | 144 | 2018-06-21T12:50:25.000Z | 2022-03-21T13:47:51.000Z | import os
import random
#initiate a list called emails_list
emails_list = []
Directory = '/home/azureuser/spam_filter/enron1/emails/'
Dir_list = os.listdir(Directory)
for file in Dir_list:
f = open(Directory + file, 'r')
emails_list.append(f.read())
f.close()
| 23.636364 | 56 | 0.753846 |
b73291a3b8e47fc42f8a646649597c72205d287e | 1,135 | py | Python | computer vision/pose_module.py | Francois-Adham/OpenCV | ccbace109086d3f4fcccc1bd9261ac336cfc49ef | [
"MIT"
] | null | null | null | computer vision/pose_module.py | Francois-Adham/OpenCV | ccbace109086d3f4fcccc1bd9261ac336cfc49ef | [
"MIT"
] | null | null | null | computer vision/pose_module.py | Francois-Adham/OpenCV | ccbace109086d3f4fcccc1bd9261ac336cfc49ef | [
"MIT"
] | null | null | null | import cv2 as cv
import mediapipe as mp
if __name__ == "__main__":
video = cv.VideoCapture(0)
detector = PoseDetector()
while True:
success, img = video.read()
image = detector.find_pose(img)
cv.imshow("Image", image)
cv.waitKey(1) | 33.382353 | 121 | 0.661674 |
b732eb90f353db0b4fec7fc27c2742a5319bf251 | 3,012 | py | Python | Irene/base.py | mghasemi/Irene | 2ecf58329310a0e98d1e28d4853ddb49f0a23311 | [
"MIT"
] | 12 | 2018-10-25T19:59:22.000Z | 2022-03-08T03:27:29.000Z | Irene/base.py | mghasemi/Irene | 2ecf58329310a0e98d1e28d4853ddb49f0a23311 | [
"MIT"
] | 1 | 2022-03-08T03:27:36.000Z | 2022-03-08T05:42:26.000Z | Irene/base.py | mghasemi/Irene | 2ecf58329310a0e98d1e28d4853ddb49f0a23311 | [
"MIT"
] | null | null | null | r"""
This is the base module for all other objects of the package.
+ `LaTeX` returns a LaTeX string out of an `Irene` object.
+ `base` is the parent of all `Irene` objects.
"""
def LaTeX(obj):
r"""
Returns LaTeX representation of Irene's objects.
"""
from sympy.core.core import all_classes
from Irene import SDPRelaxations, SDRelaxSol, Mom
inst = isinstance(obj, SDPRelaxations) or isinstance(
obj, SDRelaxSol) or isinstance(obj, Mom)
if inst:
return obj.__latex__()
elif isinstance(obj, tuple(all_classes)):
from sympy import latex
return latex(obj)
| 30.734694 | 98 | 0.534197 |
b733a705c894e170baefe76d8fe393fd92a6fb35 | 4,346 | py | Python | demo/corenlp.py | Rvlis/stanza | 58237519740b27b83c00c6c23d53ba434132dcca | [
"Apache-2.0"
] | null | null | null | demo/corenlp.py | Rvlis/stanza | 58237519740b27b83c00c6c23d53ba434132dcca | [
"Apache-2.0"
] | null | null | null | demo/corenlp.py | Rvlis/stanza | 58237519740b27b83c00c6c23d53ba434132dcca | [
"Apache-2.0"
] | null | null | null | from stanza.server import CoreNLPClient
import os
# example text
print('---')
print('input text')
print('')
# text = "Chris Manning is a nice person. Chris wrote a simple sentence. He also gives oranges to people."
text = "PyTables is built on top of the HDF5 library, using the Python language and the NumPy package."
print(text)
# set up the client
print('---')
print('starting up Java Stanford CoreNLP Server...')
# set up the client
# with CoreNLPClient(annotators=['tokenize','ssplit','pos','lemma','ner','parse','depparse','coref'], timeout=60000, memory='4G', be_quiet=True) as client:
with CoreNLPClient(annotators=['tokenize','ssplit','pos','parse','depparse'], timeout=60000, memory='4G', be_quiet=True) as client:
# submit the request to the server
ann = client.annotate(text)
# print("ann is ", ann)
# os.system("pause")
# get the first sentence
sentence = ann.sentence[0]
print("sentence is ", sentence)
os.system("pause")
# get the dependency parse of the first sentence
# print('---')
# print('dependency parse of first sentence')
# dependency_parse = sentence.basicDependencies
# print(dependency_parse)
# os.system("pause")
# HDSKG's method
print('---')
print('enhanced++ dependency parse of first sentence')
enhanced_plus_plus_dependency_parse = sentence.enhancedPlusPlusDependencies
print(enhanced_plus_plus_dependency_parse)
os.system("pause")
# get the constituency parse of the first sentence
# print('---')
# print('constituency parse of first sentence')
# constituency_parse = sentence.parseTree
# print(constituency_parse)
# os.system("pause")
# get the first subtree of the constituency parse
# print('---')
# print('first subtree of constituency parse')
# print(constituency_parse.child[0])
# os.system("pause")
# get the value of the first subtree
# print('---')
# print('value of first subtree of constituency parse')
# print(constituency_parse.child[0].value)
# os.system("pause")
# get the first token of the first sentence
print('---')
print('first token of first sentence')
token = sentence.token[0]
print(token)
os.system("pause")
# get the part-of-speech tag
print('---')
print('part of speech tag of token')
token.pos
print(token.pos)
os.system("pause")
# get the named entity tag
print('---')
print('named entity tag of token')
print(token.ner)
os.system("pause")
# get an entity mention from the first sentence
# print('---')
# print('first entity mention in sentence')
# print(sentence.mentions[0])
# os.system("pause")
# access the coref chain
# print('---')
# print('coref chains for the example')
# print(ann.corefChain)
# os.system("pause")
# Use tokensregex patterns to find who wrote a sentence.
# pattern = '([ner: PERSON]+) /wrote/ /an?/ []{0,3} /sentence|article/'
pattern = "([tag: NNP]{1,}) ([ tag:/VB.*/ ]) /an?/ ([pos:JJ]{0,3}) /sentence|article/"
matches = client.tokensregex(text, pattern)
print("tokensregex matches is ", matches)
# sentences contains a list with matches for each sentence.
assert len(matches["sentences"]) == 3
# length tells you whether or not there are any matches in this
assert matches["sentences"][1]["length"] == 1
# You can access matches like most regex groups.
# print("sentence is ",["sentences"][1]["0"]["text"])
matches["sentences"][1]["0"]["text"] == "Chris wrote a simple sentence"
matches["sentences"][1]["0"]["1"]["text"] == "Chris"
# # Use semgrex patterns to directly find who wrote what.
# pattern = '{word:wrote} >nsubj {}=subject >dobj {}=object'
# matches = client.semgrex(text, pattern)
# # print("semgrex matches is", matches)
# # sentences contains a list with matches for each sentence.
# assert len(matches["sentences"]) == 3
# # length tells you whether or not there are any matches in this
# assert matches["sentences"][1]["length"] == 1
# # You can access matches like most regex groups.
# matches["sentences"][1]["0"]["text"] == "wrote"
# matches["sentences"][1]["0"]["$subject"]["text"] == "Chris"
# matches["sentences"][1]["0"]["$object"]["text"] == "sentence"
| 35.048387 | 155 | 0.645651 |
b7340fcc54385aec310fcaab55c9c4530b0dc98a | 1,617 | py | Python | basic/fasta.py | JinyuanSun/my_bio_script | ceb84e2e32c38b0889956f12c380354d23b28dc1 | [
"MIT"
] | null | null | null | basic/fasta.py | JinyuanSun/my_bio_script | ceb84e2e32c38b0889956f12c380354d23b28dc1 | [
"MIT"
] | null | null | null | basic/fasta.py | JinyuanSun/my_bio_script | ceb84e2e32c38b0889956f12c380354d23b28dc1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# By Jinyuan Sun
| 25.666667 | 82 | 0.568336 |
b73487d3a2f5ed600768a510984d20ecb9e2c8c6 | 10,248 | py | Python | tea_client/http.py | alefnula/tea-client | 976416fe00ed075394064c86c42665dcda341c17 | [
"Apache-2.0"
] | 1 | 2020-12-22T16:06:09.000Z | 2020-12-22T16:06:09.000Z | tea_client/http.py | alefnula/tea-client | 976416fe00ed075394064c86c42665dcda341c17 | [
"Apache-2.0"
] | null | null | null | tea_client/http.py | alefnula/tea-client | 976416fe00ed075394064c86c42665dcda341c17 | [
"Apache-2.0"
] | null | null | null | import enum
from typing import Optional, Dict
import httpx
from tea import serde
from tea_client import errors
from tea_client.models import TeaClientModel
| 32.741214 | 79 | 0.518443 |
b734d2e12cf5fc6c54fb0dd62f0ce018d2ee9551 | 3,809 | py | Python | PRPConnector/TestPRPConnector.py | manuelbieri/PRP-APIConnect | 78c4772dce22686da82cdd27e246724edd5bcf0a | [
"MIT"
] | null | null | null | PRPConnector/TestPRPConnector.py | manuelbieri/PRP-APIConnect | 78c4772dce22686da82cdd27e246724edd5bcf0a | [
"MIT"
] | null | null | null | PRPConnector/TestPRPConnector.py | manuelbieri/PRP-APIConnect | 78c4772dce22686da82cdd27e246724edd5bcf0a | [
"MIT"
] | null | null | null | import unittest
from typing import List
import Connector
| 56.850746 | 159 | 0.719349 |
b73585b3b1af0b7be3ca9e5f9af38dd53d4fa51d | 1,302 | py | Python | competitors/VAE.py | umarov90/DeepFake | e65c72f255817532e8a8a3afe2138ae270477601 | [
"Apache-2.0"
] | 3 | 2021-01-28T08:08:20.000Z | 2021-10-30T02:15:54.000Z | competitors/VAE.py | umarov90/DeepCellState | e65c72f255817532e8a8a3afe2138ae270477601 | [
"Apache-2.0"
] | null | null | null | competitors/VAE.py | umarov90/DeepCellState | e65c72f255817532e8a8a3afe2138ae270477601 | [
"Apache-2.0"
] | 1 | 2022-03-09T14:56:49.000Z | 2022-03-09T14:56:49.000Z | import tensorflow as tf
from tensorflow import keras
| 35.189189 | 101 | 0.612135 |
b7366ab4b541a6d00ae9df7ac65ca2ada6762d7d | 2,121 | py | Python | test/unit/database/test_work.py | matiasmorant/py2neo | 521c9799f386fcae262f9f1b379bca9e24184d16 | [
"Apache-2.0"
] | null | null | null | test/unit/database/test_work.py | matiasmorant/py2neo | 521c9799f386fcae262f9f1b379bca9e24184d16 | [
"Apache-2.0"
] | null | null | null | test/unit/database/test_work.py | matiasmorant/py2neo | 521c9799f386fcae262f9f1b379bca9e24184d16 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytest import raises
from py2neo import Transaction
def test_should_fail_on_tx_create_object():
tx = Transaction(FakeGraph())
with raises(TypeError):
tx.create(object())
def test_should_fail_on_tx_delete_object():
tx = Transaction(FakeGraph())
with raises(TypeError):
tx.delete(object())
def test_should_fail_on_tx_merge_object():
tx = Transaction(FakeGraph())
with raises(TypeError):
tx.merge(object())
def test_should_fail_on_tx_pull_object():
tx = Transaction(FakeGraph())
with raises(TypeError):
tx.pull(object())
def test_should_fail_on_tx_push_object():
tx = Transaction(FakeGraph())
with raises(TypeError):
tx.push(object())
def test_should_fail_on_tx_separate_object():
tx = Transaction(FakeGraph())
with raises(TypeError):
tx.separate(object())
| 22.806452 | 74 | 0.701556 |
b73713a84bd73916c488c2d607e11c9d22b75380 | 6,070 | py | Python | extract/read_acc.py | YutoNakayachi/jankenEstimate | 64ca3b6da78c1d84204d709c0c0a398277f4dafb | [
"MIT"
] | null | null | null | extract/read_acc.py | YutoNakayachi/jankenEstimate | 64ca3b6da78c1d84204d709c0c0a398277f4dafb | [
"MIT"
] | null | null | null | extract/read_acc.py | YutoNakayachi/jankenEstimate | 64ca3b6da78c1d84204d709c0c0a398277f4dafb | [
"MIT"
] | null | null | null | #from https://github.com/mfurukawa/imu_sensor/tree/master/src/Python
# September 03, 2020
# Yuto Nakayachi
from __future__ import unicode_literals ,print_function
import serial
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
import io
import csv
import time
import datetime
import struct
import tensorflow as tf
from tensorflow import keras
# Variable to get real value
MPU9250A_2g = 0.000061035156 # 0.000061035156 g/LSB
MPU9250A_4g = 0.000122070312 # 0.000122070312 g/LSB
MPU9250A_8g = 0.000244140625 # 0.000244140625 g/LSB
MPU9250A_16g = 0.000488281250 # 0.000488281250 g/LSB
MPU9250G_250dps = 0.007633587786 # 0.007633587786 dps/LSB
MPU9250G_500dps = 0.015267175572 # 0.015267175572 dps/LSB
MPU9250G_1000dps = 0.030487804878 # 0.030487804878 dps/LSB
MPU9250G_2000dps = 0.060975609756 # 0.060975609756 dps/LSB
MPU9250M_4800uT = 0.6 # 0.6 uT/LSB
MPU9250T_85degC = 0.002995177763 # 0.002995177763 degC/LSB
Magnetometer_Sensitivity_Scale_Factor = 0.15
# number of axis
numVariable = 24 # 4ch * 6acc
# Maximum time for measure
minuteLength = 25
# sampling rate
smplHz = 500
# Variable to count number of sampling
smpl_cnt = 0
# Variable to count number of fail
fail_cnt_byte = 0
fail_cnt_head = 0
# Array to store data
buf = [[0 for i in range(numVariable + 2)] for j in range(smplHz*60*minuteLength)]
# Array to store real value
buf_f = [[0 for i in range(numVariable + 2)] for j in range(smplHz*60*minuteLength)]
# define serial port
ser = serial.Serial("COM3",921600,timeout=1)
# Check serial connection
if ser.is_open:
print("Start Serial Connection")
else:
print("PORT ERROR")
ser.close()
exit()
# Function to create csv file
# Function to Measure
# Start
print("ready? --> press s key")
while(1):
ready_s = input()
if ready_s == "s":
break
if ready_s == "r":
print("over")
ser.close()
exit()
# Measure the start time
p_time = time.time()
# Function to measure
readByte()
# Measure the end time
e_time = time.time()
# The time it took
print("time: ",e_time - p_time)
# Function to create csv file
writeCSV()
# close serial port
ser.close()
print("number of data: ",smpl_cnt)
print("number of byte fail: ",fail_cnt_byte)
print("number of header fail: ",fail_cnt_head)
print("END") | 27.590909 | 257 | 0.549918 |
b739046fc9ac3dbe4de500836eb668496db9cf82 | 1,754 | py | Python | scripts/backup/eval.py | vafaei-ar/Ngene | 9735f9729bdde70624ec9af73196b418f2e1b2f2 | [
"MIT"
] | null | null | null | scripts/backup/eval.py | vafaei-ar/Ngene | 9735f9729bdde70624ec9af73196b418f2e1b2f2 | [
"MIT"
] | null | null | null | scripts/backup/eval.py | vafaei-ar/Ngene | 9735f9729bdde70624ec9af73196b418f2e1b2f2 | [
"MIT"
] | null | null | null | import matplotlib as mpl
mpl.use('agg')
import glob
import argparse
from time import time
import numpy as np
import pylab as plt
import rficnn as rfc
parser = argparse.ArgumentParser()
parser.add_argument('--arch', required=False, help='choose architecture', type=str, default='1')
parser.add_argument('--trsh', required=False, help='choose threshold', type=float, default=0.1)
args = parser.parse_args()
threshold = args.trsh
rfc.the_print('Chosen architecture is: '+args.arch+' and threshod is: '+str(threshold),bgc='green')
model_add = './models/model_'+args.arch+'_'+str(threshold)
conv = ss.ConvolutionalLayers(nx=276,ny=400,n_channel=1,restore=1,
model_add=model_add,arch_file_name='arch_'+args.arch)
sim_files = glob.glob('../data/hide_sims_test/calib_1year/*.fits'))
times = []
for fil in sim_files:
fname = fil.split('/')[-1]
print fname
data,mask = read_chunck_sdfits(fil,label_tag=RFI,threshold=0.1,verbose=0)
data = np.clip(np.fabs(data), 0, 200)
data -= data.min()
data /= data.max()
lnx,lny = data.shape
s = time()
pred = conv.conv_large_image(data.reshape(1,lnx,lny,1),pad=10,lx=276,ly=400)
e = time()
times.append(e-s)
mask = mask[10:-10,:]
pred = pred[10:-10,:]
fig, (ax1,ax2,ax3) = plt.subplots(3,1,figsize=(18,8))
ax1.imshow(data,aspect='auto')
ax2.imshow(mask,aspect='auto')
ax3.imshow(pred,aspect='auto')
np.save('../comparison/'+fname+'_mask_'+sys.argv[1],mask)
np.save('../comparison/'+fname+'_pred_'+sys.argv[1],pred)
plt.subplots_adjust(left=0.04, right=0.99, top=0.99, bottom=0.04)
plt.savefig('../comparison/'+fname+'_'+sys.argv[1]+'.jpg',dpi=30)
plt.close()
print np.mean(times)
| 29.728814 | 99 | 0.661345 |
b739279244be21a77769468b2146bbedb6b82ebb | 4,544 | py | Python | ctemail.py | dyike/CTEmail | d94416401198393df01f143047acb1fb7c227492 | [
"MIT"
] | 47 | 2017-10-15T08:23:55.000Z | 2021-03-21T04:05:25.000Z | ctemail.py | bobo18801737494/CTEmail | d94416401198393df01f143047acb1fb7c227492 | [
"MIT"
] | null | null | null | ctemail.py | bobo18801737494/CTEmail | d94416401198393df01f143047acb1fb7c227492 | [
"MIT"
] | 7 | 2017-10-16T02:23:12.000Z | 2020-07-08T13:32:28.000Z | from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.header import Header
from email.mime.base import MIMEBase
from email import encoders
import os
import uuid
import smtplib
import re | 34.953846 | 102 | 0.569762 |
b73959dcfef900a0482e8a66a74254dfbae2b5e0 | 3,782 | py | Python | alipay/aop/api/domain/RequestExtShopItem.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/RequestExtShopItem.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/RequestExtShopItem.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 28.870229 | 77 | 0.574299 |
b739c6c3b975479392baa34036d389264f7e0a34 | 9,644 | py | Python | tornado/s3server.py | suocean16/osu_study | c86da00312b16ade5d7ed4586ffbe7ffc78781a1 | [
"Apache-2.0"
] | 9 | 2015-02-04T08:45:56.000Z | 2017-06-25T02:01:30.000Z | tornado/s3server.py | suocean16/osu_study | c86da00312b16ade5d7ed4586ffbe7ffc78781a1 | [
"Apache-2.0"
] | null | null | null | tornado/s3server.py | suocean16/osu_study | c86da00312b16ade5d7ed4586ffbe7ffc78781a1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an S3-like storage server based on local files.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
We don't support all the features of S3, but it does work with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module:
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
"""
import bisect
import datetime
import hashlib
import os
import os.path
import urllib
from tornado import escape
from tornado import httpserver
from tornado import ioloop
from tornado import web
def start(port, root_directory="/tmp/s3", bucket_depth=0):
"""Starts the mock S3 server on the given port at the given path."""
application = S3Application(root_directory, bucket_depth)
http_server = httpserver.HTTPServer(application)
http_server.listen(port)
ioloop.IOLoop.instance().start()
| 37.671875 | 76 | 0.606595 |
b73c912fbef2f1e9dcedeede8d2e7747a1107d54 | 6,697 | py | Python | multitest_transport/api/test_result_api.py | maksonlee/multitest_transport | 9c20a48ac856307950a204854f52be7335705054 | [
"Apache-2.0"
] | null | null | null | multitest_transport/api/test_result_api.py | maksonlee/multitest_transport | 9c20a48ac856307950a204854f52be7335705054 | [
"Apache-2.0"
] | null | null | null | multitest_transport/api/test_result_api.py | maksonlee/multitest_transport | 9c20a48ac856307950a204854f52be7335705054 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test results APIs."""
import typing
# Non-standard docstrings are used to generate the API documentation.
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from multitest_transport.api import base
from multitest_transport.models import messages as mtt_messages
from multitest_transport.models import ndb_models
from multitest_transport.models import sql_models
from multitest_transport.util import tfc_client
from multitest_transport.util import xts_result
| 41.596273 | 80 | 0.721368 |
b740609fa3b2a0150f07fc35f914d2369bdcd573 | 260 | py | Python | visualize_loss/loss_verification.py | entn-at/RL-LightSpeech | c8ba120216c7c3117d2e7eae2eebe726bc02d6d3 | [
"MIT"
] | 1 | 2021-09-22T01:30:05.000Z | 2021-09-22T01:30:05.000Z | visualize_loss/loss_verification.py | entn-at/LightSpeech | 48250fbcede4b258ba13ab17e3e83afc5fe85a01 | [
"MIT"
] | null | null | null | visualize_loss/loss_verification.py | entn-at/LightSpeech | 48250fbcede4b258ba13ab17e3e83afc5fe85a01 | [
"MIT"
] | 1 | 2021-09-22T01:32:06.000Z | 2021-09-22T01:32:06.000Z | import numpy as np
loss_arr = np.array(list())
with open("total_loss.txt", "r") as f_loss:
cnt = 0
for loss in f_loss.readlines():
cnt += 1
# print(loss)
loss_arr = np.append(loss_arr, float(loss))
print(cnt)
| 23.636364 | 52 | 0.561538 |
b7407fa4aa1e99e15cc05919bc7ed96eeb510a89 | 443 | py | Python | road_roughness_prediction/tools/image_utils.py | mknz/dsr-road-roughness-prediction | 5f56b6ba5da70a09f2c967b7f32c740072e20ed1 | [
"MIT"
] | 7 | 2019-04-04T06:40:29.000Z | 2020-11-12T10:53:30.000Z | road_roughness_prediction/tools/image_utils.py | mknz/dsr-road-roughness-prediction | 5f56b6ba5da70a09f2c967b7f32c740072e20ed1 | [
"MIT"
] | 1 | 2021-09-28T07:11:05.000Z | 2021-09-28T07:11:05.000Z | road_roughness_prediction/tools/image_utils.py | mknz/dsr-road-roughness-prediction | 5f56b6ba5da70a09f2c967b7f32c740072e20ed1 | [
"MIT"
] | null | null | null | '''Image utils'''
from io import BytesIO
from PIL import Image
import matplotlib.pyplot as plt
def save_and_open(save_func):
'''Save to in-memory buffer and re-open '''
buf = BytesIO()
save_func(buf)
buf.seek(0)
bytes_ = buf.read()
buf_ = BytesIO(bytes_)
return buf_
def fig_to_pil(fig: plt.Figure):
'''Convert matplot figure to PIL Image'''
buf = save_and_open(fig.savefig)
return Image.open(buf)
| 20.136364 | 47 | 0.670429 |
b741d52a9805602a91b772fcb3382611b832a2b5 | 15,070 | py | Python | machida/lib/wallaroo/experimental/connectors.py | pvmsikrsna/wallaroo | a08ef579ec809e5bf4ffe10937b2be20059a0530 | [
"Apache-2.0"
] | null | null | null | machida/lib/wallaroo/experimental/connectors.py | pvmsikrsna/wallaroo | a08ef579ec809e5bf4ffe10937b2be20059a0530 | [
"Apache-2.0"
] | null | null | null | machida/lib/wallaroo/experimental/connectors.py | pvmsikrsna/wallaroo | a08ef579ec809e5bf4ffe10937b2be20059a0530 | [
"Apache-2.0"
] | null | null | null | import hashlib
import logging
from struct import unpack
import sys
import time
from . import (connector_wire_messages as cwm,
AtLeastOnceSourceConnector,
ProtocolError,
ConnectorError)
if sys.version_info.major == 2:
from .base_meta2 import BaseMeta, abstractmethod
else:
from .base_meta3 import BaseMeta, abstractmethod
def stream_added(self, stream):
logging.debug("MultiSourceConnector added {}".format(stream))
source, acked = self.sources.get(stream.id, (None, None))
if source:
if stream.point_of_ref != source.point_of_ref():
source.reset(stream.point_of_ref)
# probably got this as part of the _handle_ok logic. Store the ack
# and use when a source matching the stream id is added
else:
self.sources[stream.id] = [None, stream.point_of_ref]
def stream_removed(self, stream):
logging.debug("MultiSourceConnector removed {}".format(stream))
pass
def stream_opened(self, stream):
logging.debug("MultiSourceConnector stream_opened {}".format(stream))
source, acked = self.sources.get(stream.id, (None, None))
if source:
if stream.id in self.joining:
self.joining.remove(stream.id)
if stream.point_of_ref != source.point_of_ref():
source.reset(stream.point_of_ref)
self.open.add(stream.id)
else:
raise ConnectorError("Stream {} was opened for unknown source. "
"Please use the add_source interface."
.format(stream))
def stream_closed(self, stream):
logging.debug("MultiSourceConnector closed {}".format(stream))
source, acked = self.sources.get(stream.id, (None, None))
if source:
if stream.id in self.open:
# source was open so move it back to joining state
self.open.remove(stream.id)
self.joining.add(stream.id)
elif stream.id in self.pending_eos_ack:
# source was pending eos ack, but that was interrupted
# move it back to joining
del self.pending_eos_ack[stream.id]
self.joining.add(stream.id)
elif stream.id in self.closed:
logging.debug("tried to close an already closed source: {}"
.format(Source))
else:
pass
else:
pass
def stream_acked(self, stream):
logging.debug("MultiSourceConnector acked {}".format(stream))
source, acked = self.sources.get(stream.id, (None, None))
if source:
# check if there's an eos pending this ack
eos_point_of_ref = self.pending_eos_ack.get(stream.id, None)
if eos_point_of_ref:
logging.debug("Stream {} is awaiting EOS Ack for {}"
.format(stream, eos_point_of_ref))
# source was pending eos ack
# check ack's point of ref
if stream.point_of_ref == eos_point_of_ref:
# can finish closing it now
self._close_and_delete_source(source)
return
elif stream.point_of_ref < eos_point_of_ref:
pass
else:
raise ConnectorError("Got ack point of ref that is larger"
" than the ended stream's point of ref.\n"
"Expected: {}, Received: {}"
.format(eos_point_of_ref, stream))
elif isinstance(acked, int): # acked may be 0 & use this clause!
# regular ack (incremental ack of a live stream)
if stream.point_of_ref < acked:
logging.warning("got an ack for older point of reference"
" for stream {}".format(stream))
source.reset(stream.point_of_ref)
else:
# source was added before connect()\handle_ok => reset
source.reset(stream.point_of_ref)
# update acked point of ref for the source
self.sources[stream.id][1] = stream.point_of_ref
elif stream.id in self.closed:
pass
else:
raise ConnectorError("Stream {} was opened for unknown source. "
"Please use the add_source interface."
.format(stream))
| 37.394541 | 210 | 0.557996 |
b743ed3e1bbeb444d21c50b6157a6fabbd1aba1e | 3,090 | py | Python | pytorch_lightning/callbacks/device_stats_monitor.py | Code-Cornelius/pytorch-lightning | ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057 | [
"Apache-2.0"
] | 4 | 2021-12-10T01:30:35.000Z | 2022-02-12T17:25:36.000Z | pytorch_lightning/callbacks/device_stats_monitor.py | Code-Cornelius/pytorch-lightning | ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057 | [
"Apache-2.0"
] | 4 | 2021-11-07T02:22:34.000Z | 2021-11-15T12:58:43.000Z | pytorch_lightning/callbacks/device_stats_monitor.py | Code-Cornelius/pytorch-lightning | ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057 | [
"Apache-2.0"
] | 2 | 2021-12-08T22:29:39.000Z | 2022-03-26T04:46:09.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Device Stats Monitor
====================
Monitors and logs device stats during training.
"""
from typing import Any, Dict, Optional
import pytorch_lightning as pl
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import STEP_OUTPUT
| 37.228916 | 118 | 0.703883 |