hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4353fef35e15660683e78f01919ecd4744808d | 574 | py | Python | flask_monitoringdashboard/test/core/profiler/util/test_stringhash.py | timgates42/Flask-MonitoringDashboard | 0404b05b9a8f1917796e0f314a77a53a754a0b15 | [
"MIT"
] | 3 | 2020-07-17T05:37:41.000Z | 2021-09-06T19:46:15.000Z | flask_monitoringdashboard/test/core/profiler/util/test_stringhash.py | timgates42/Flask-MonitoringDashboard | 0404b05b9a8f1917796e0f314a77a53a754a0b15 | [
"MIT"
] | null | null | null | flask_monitoringdashboard/test/core/profiler/util/test_stringhash.py | timgates42/Flask-MonitoringDashboard | 0404b05b9a8f1917796e0f314a77a53a754a0b15 | [
"MIT"
] | 1 | 2020-11-21T01:25:51.000Z | 2020-11-21T01:25:51.000Z | import unittest
from flask_monitoringdashboard.core.profiler.util.stringHash import StringHash
| 31.888889 | 78 | 0.709059 |
1c45545157e97f9c4e1cc68b6cafb654b5d57282 | 439 | py | Python | news/views.py | valch85/newssite | ef612a7bde4ff1d6e1e35f5cc4ec9407f031270e | [
"Apache-2.0"
] | null | null | null | news/views.py | valch85/newssite | ef612a7bde4ff1d6e1e35f5cc4ec9407f031270e | [
"Apache-2.0"
] | 2 | 2020-02-12T00:16:37.000Z | 2020-06-05T20:42:49.000Z | news/views.py | valch85/newssite | ef612a7bde4ff1d6e1e35f5cc4ec9407f031270e | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from .models import News
# Create your views here.
| 27.4375 | 62 | 0.728929 |
1c4559619debbfab81b5667b6115f6d8185615c5 | 1,229 | py | Python | benchmark/generate_libs/jamplus.py | chadaustin/ibb | ea1e25cc53a1ad7c302a12d95fc704c443924dff | [
"MIT"
] | 4 | 2015-04-09T17:24:58.000Z | 2019-07-02T12:05:56.000Z | benchmark/generate_libs/jamplus.py | chadaustin/ibb | ea1e25cc53a1ad7c302a12d95fc704c443924dff | [
"MIT"
] | null | null | null | benchmark/generate_libs/jamplus.py | chadaustin/ibb | ea1e25cc53a1ad7c302a12d95fc704c443924dff | [
"MIT"
] | 1 | 2019-11-08T15:38:29.000Z | 2019-11-08T15:38:29.000Z | #!/usr/bin/python
import os.path
import cppcodebase
import random
| 29.261905 | 107 | 0.643613 |
1c45bee0b72f7290f98a152d2fd4047f74e16502 | 8,482 | py | Python | inbm/dispatcher-agent/dispatcher/fota/fota.py | intel/intel-inb-manageability | cdb17765120857fd41cacb838d6ee6e34e1f5047 | [
"Apache-2.0"
] | 5 | 2021-12-13T21:19:31.000Z | 2022-01-18T18:29:43.000Z | inbm/dispatcher-agent/dispatcher/fota/fota.py | intel/intel-inb-manageability | cdb17765120857fd41cacb838d6ee6e34e1f5047 | [
"Apache-2.0"
] | 45 | 2021-12-30T17:21:09.000Z | 2022-03-29T22:47:32.000Z | inbm/dispatcher-agent/dispatcher/fota/fota.py | intel/intel-inb-manageability | cdb17765120857fd41cacb838d6ee6e34e1f5047 | [
"Apache-2.0"
] | 4 | 2022-01-26T17:42:54.000Z | 2022-03-30T04:48:04.000Z | """
FOTA update tool which is called from the dispatcher during installation
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import os
import platform
from threading import Timer
from typing import Any, Optional, Mapping
from future.moves.urllib.parse import urlparse
from inbm_common_lib.exceptions import UrlSecurityException
from inbm_common_lib.utility import canonicalize_uri
from inbm_common_lib.constants import REMOTE_SOURCE
from .constants import *
from .fota_error import FotaError
from .manifest import parse_tool_options, parse_guid, parse_hold_reboot_flag
from .os_factory import OsFactory, OsType
from ..common import dispatcher_state
from ..common.result_constants import *
from ..constants import UMASK_OTA
from ..dispatcher_callbacks import DispatcherCallbacks
from ..dispatcher_exception import DispatcherException
from ..downloader import download
from ..packagemanager.local_repo import DirectoryRepo
logger = logging.getLogger(__name__)
| 42.838384 | 113 | 0.630512 |
1c46065a2d7cec80d32a5396991fd1b74b074e66 | 8,727 | py | Python | syncflux.py | nagylzs/syncflux | c070267065cad817708d0680e17bfe5f8942310f | [
"Apache-2.0"
] | null | null | null | syncflux.py | nagylzs/syncflux | c070267065cad817708d0680e17bfe5f8942310f | [
"Apache-2.0"
] | null | null | null | syncflux.py | nagylzs/syncflux | c070267065cad817708d0680e17bfe5f8942310f | [
"Apache-2.0"
] | null | null | null | import copy
import datetime
import sys
import os
import time
import argparse
import traceback
import pytz
import syncthing
from influxdb import InfluxDBClient
import yaml
from yaml2dataclass import Schema, SchemaPath
from typing import Optional, Dict, Type, List
from dataclasses import dataclass, asdict, field
def load_app_config(stream) -> AppConfiguration:
    """Deserialize an application configuration.

    *stream* is any file-like object containing YAML; the parsed mapping
    is handed to ``AppConfiguration.scm_load_from_dict`` to build the
    configuration object.
    """
    return AppConfiguration.scm_load_from_dict(yaml.safe_load(stream))
def error(message: str):
    """Report *message* on stderr and terminate the process with exit code -1."""
    sys.stderr.write("\nerror: %s\n" % message)
    sys.stderr.flush()
    raise SystemExit(-1)
def info(*values):
    """Forward *values* to print() unless the --silent flag was given."""
    if args.silent:
        return
    print(*values)
# --- Command line interface --------------------------------------------------
parser = argparse.ArgumentParser(description='Monitor your Syncthing instances with influxdb.')
parser.add_argument('-c', "--config", dest="config", default=None,
                    help="Configuration file for application. Default is syncflux.yml. "
                         "See syncflux_example.yml for an example.")
parser.add_argument("--config-dir", dest="config_dir", default=None,
                    help="Configuration directory. All config files with .yml extension will be processed one by one.")
parser.add_argument('-n', "--count", dest="count", default=1, type=int,
                    help="Number of test runs. Default is one. Use -1 to run indefinitely.")
parser.add_argument('-w', "--wait", dest="wait", default=60, type=float,
                    help="Number of seconds between test runs.")
parser.add_argument("-s", "--silent", dest='silent', action="store_true", default=False,
                    help="Supress all messages except errors.")
parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", default=False,
                    help="Be verbose.")
parser.add_argument("--halt-on-send-error", dest="halt_on_send_error", default=False, action="store_true",
                    help="Halt when cannot send data to influxdb. The default is to ignore the error.")
args = parser.parse_args()

# --- Argument validation -----------------------------------------------------
if args.silent and args.verbose:
    parser.error("Cannot use --silent and --verbose at the same time.")
# Check mutual exclusion *before* applying the default config name. The
# original code defaulted args.config to "syncflux.yml" first, so this
# check always fired when --config-dir was given and the option was
# effectively unusable on its own.
if (args.config is not None) and (args.config_dir is not None):
    parser.error("You must give either --config or --config-dir (exactly one of them)")
if args.config is None and args.config_dir is None:
    args.config = "syncflux.yml"
if args.count == 0:
    parser.error("Test run count cannot be zero.")
if args.wait <= 0:
    parser.error("Wait time must be positive.")

# --- Collect configuration files ---------------------------------------------
if args.config:
    config_files = [args.config]
else:
    config_files = []
    for file_name in sorted(os.listdir(args.config_dir)):
        ext = os.path.splitext(file_name)[1]
        if ext.lower() == ".yml":
            fpath = os.path.join(args.config_dir, file_name)
            config_files.append(fpath)

# --- Main measurement loop ---------------------------------------------------
index = 0
while args.count < 0 or index < args.count:  # a negative count means "run forever"
    if args.count != 1:
        info("Pass #%d started" % (index + 1))
    started = time.time()
    for config_file in config_files:
        if not os.path.isfile(config_file):
            parser.error("Cannot open %s" % config_file)
        # Context manager closes the handle; the original leaked the file
        # object returned by open().
        with open(config_file, "r") as stream:
            config = load_app_config(stream)
        main()
    elapsed = time.time() - started
    index += 1
    last_one = (args.count > 0) and (index == args.count)
    if not last_one:
        remaining = args.wait - elapsed
        if remaining > 0:
            if not args.silent:
                info("Pass #%d elapsed %.2f sec, waiting %.2f sec for next." % (index, elapsed, remaining))
            # NOTE(review): sleeps the full --wait interval even though
            # `remaining` was computed above — confirm whether a fixed
            # delay or a fixed cadence was intended.
            time.sleep(args.wait)
        else:
            info("Pass #%d elapsed %.2f sec" % (index, elapsed))
            info("")
| 33.694981 | 119 | 0.605936 |
1c48374373ae16db6dbcfd16316661e717dab9fc | 5,230 | py | Python | tests/input/pdf/test_pdf.py | asweeney86/preview-generator | 354cbac1c131ebbb81cd9cfd9b4bc0c184d10103 | [
"MIT"
] | null | null | null | tests/input/pdf/test_pdf.py | asweeney86/preview-generator | 354cbac1c131ebbb81cd9cfd9b4bc0c184d10103 | [
"MIT"
] | null | null | null | tests/input/pdf/test_pdf.py | asweeney86/preview-generator | 354cbac1c131ebbb81cd9cfd9b4bc0c184d10103 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import re
import shutil
import typing
from PIL import Image
from PyPDF2 import PdfFileReader
import PyPDF2.utils
import pytest
from preview_generator.exception import UnavailablePreviewType
from preview_generator.manager import PreviewManager
from tests import test_utils
# Directory containing this test module; fixture paths are resolved from it.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Scratch directory for generated previews (presumably passed to
# PreviewManager elsewhere in the file — confirm against the tests).
CACHE_DIR = "/tmp/preview-generator-tests/cache"
# Fixture PDFs: a plain document, an encrypted one, and an A4-sized one.
PDF_FILE_PATH = os.path.join(CURRENT_DIR, "the_pdf.pdf")
PDF_FILE_PATH__ENCRYPTED = os.path.join(CURRENT_DIR, "the_pdf.encrypted.pdf")
PDF_FILE_PATH__A4 = os.path.join(CURRENT_DIR, "qpdfconvert.pdf")
| 39.621212 | 100 | 0.759656 |
1c4921cfeca9e8e27f2d0b623dc27dabba9abc92 | 10,495 | py | Python | ipt/ipt_filter_contour_by_size.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | 1 | 2020-06-30T06:53:36.000Z | 2020-06-30T06:53:36.000Z | ipt/ipt_filter_contour_by_size.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | ipt/ipt_filter_contour_by_size.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.tools import regions
import numpy as np
import cv2
import logging
logger = logging.getLogger(__name__)
from ipso_phen.ipapi.base import ip_common as ipc
| 38.443223 | 107 | 0.429252 |
1c49c9837d339902372100015afa8dd09aa825df | 718 | py | Python | tests/main.py | deeso/json-search-replace | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | 1 | 2019-02-08T14:42:45.000Z | 2019-02-08T14:42:45.000Z | tests/main.py | deeso/manipin-json | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | null | null | null | tests/main.py | deeso/manipin-json | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | null | null | null | from wrapper_tests.upsert_test import *
from wrapper_tests.upsertvaluedict_test import *
import os
import logging
import sys
import argparse
import signal
# Route every log record at DEBUG and above to stdout with a timestamped,
# logger-name-prefixed format.
logging.getLogger().setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s - %(name)s] %(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
# Command-line interface; -config points at a TOML file with credentials.
parser = argparse.ArgumentParser(
    description='Unit testing for fiery snap.')
parser.add_argument('-config', type=str, default=None,
                    help='toml config for keys and such, see key.toml')
if __name__ == '__main__':
    unittest.main()
    # unittest.main() normally exits the process itself; the SIGKILL is a
    # hard stop in case control returns — presumably to kill lingering
    # non-daemon threads. TODO confirm intent.
    os.kill(os.getpid(), signal.SIGKILL)
| 26.592593 | 71 | 0.721448 |
1c4b4d3e7fde53ff67c2f5b9ffd3aee5b505137c | 598 | py | Python | Sending_email/email.py | Satyam-Bhalla/Python-Scripts | 39c46a362acd63cc5d1b9ab57ecb7250eaff35f7 | [
"MIT"
] | 8 | 2018-09-25T16:30:12.000Z | 2022-03-25T05:13:43.000Z | Sending_email/email.py | Satyam-Bhalla/Python-Scripts | 39c46a362acd63cc5d1b9ab57ecb7250eaff35f7 | [
"MIT"
] | 1 | 2021-03-31T18:43:43.000Z | 2021-03-31T18:43:43.000Z | Sending_email/email.py | Satyam-Bhalla/Python-Scripts | 39c46a362acd63cc5d1b9ab57ecb7250eaff35f7 | [
"MIT"
] | 6 | 2018-01-29T19:00:42.000Z | 2022-03-25T05:13:47.000Z | import smtplib
# Gmail account used to authenticate with the SMTP server.
# NOTE(review): placeholder credentials — fill in locally, never commit
# real credentials to source control.
gmail_user = 'your email'
gmail_password = 'your password'

sent_from = gmail_user
to = ['reciever email']  # Create a list for all the receivers
subject = 'OMG Super Important Message'
body = 'Hey, what\'s up?\n- You'

# RFC 5322 requires an empty line between the header block and the body.
# The original template ran "Subject:" straight into the body, which makes
# receiving servers treat the body as a malformed header continuation.
email_text = """\
From: %s
To: %s
Subject: %s

%s
""" % (sent_from, ", ".join(to), subject, body)

try:
    # Implicit-TLS SMTP endpoint for Gmail.
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    server.ehlo()
    server.login(gmail_user, gmail_password)
    server.sendmail(sent_from, to, email_text)
    server.close()
    print('Email sent!')
except Exception as e:
    # Best-effort script: report the failure instead of crashing.
    print(e)
| 21.357143 | 61 | 0.653846 |
1c4b532b4b156dd08dc3bcca54d167230e8c8b2a | 4,532 | py | Python | FlaskDaemon/load_test.py | caffeinate/test-pylot | 3380208ea0e7ee5fed4299f22ab592a3d3232b3a | [
"MIT"
] | null | null | null | FlaskDaemon/load_test.py | caffeinate/test-pylot | 3380208ea0e7ee5fed4299f22ab592a3d3232b3a | [
"MIT"
] | 1 | 2021-10-31T17:46:54.000Z | 2021-10-31T17:46:54.000Z | FlaskDaemon/load_test.py | caffeinate/test-pylot | 3380208ea0e7ee5fed4299f22ab592a3d3232b3a | [
"MIT"
] | 1 | 2020-07-20T04:10:40.000Z | 2020-07-20T04:10:40.000Z | '''
Created on 11 Sep 2015
@author: si
'''
import json
import random
import time
from threading import Thread
# import urllib
import urllib2
from Queue import Queue
import logging
logger = logging.getLogger(__name__)
# Base URL of the local Flask service under test.
API_URL = "http://127.0.0.1:5000/"
if __name__ == '__main__':
    # NOTE(review): LoadTest is defined elsewhere in this file; the two
    # positional arguments presumably size the run (e.g. workers/duration)
    # — confirm against the class definition.
    l = LoadTest(10,30)
    l.go()
| 29.23871 | 86 | 0.559797 |
1c4b641cf08d14aaba12ee7b055b0523dd40710b | 407 | py | Python | urls.py | jeylani99/Real-Estate | 5ccb4bf23c73b4acb77427faa202a15216ef58c3 | [
"Apache-2.0"
] | null | null | null | urls.py | jeylani99/Real-Estate | 5ccb4bf23c73b4acb77427faa202a15216ef58c3 | [
"Apache-2.0"
] | null | null | null | urls.py | jeylani99/Real-Estate | 5ccb4bf23c73b4acb77427faa202a15216ef58c3 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from django.conf.urls import include,url
from .import views
# URL routes for the news/property app; each pattern maps to a class-based
# view in views.py.
urlpatterns = [
    # Index page at the app root.
    url(r'^$', views.IndexView.as_view(),name='index'),
    # Detail view for a single location, keyed by primary key.
    url(r'^(?P<pk>[0-9]+)/$',views.LocationView.as_view(),name='property'),
    # Nested detail view: location id followed by the property's primary key.
    url(r'^([0-9]+)/(?P<pk>[0-9]+)/$',views.PropertyView.as_view(),name='propertyview'),
]
| 29.071429 | 88 | 0.668305 |
1c4bdbc2c162e12eac3d923f38fe6b53d36966ae | 541 | py | Python | main.py | ngh3053/auto_spacing_with_tensorflow | 0569b734c087d13cdf6cbb8e79dd8c579d7e66e4 | [
"MIT"
] | null | null | null | main.py | ngh3053/auto_spacing_with_tensorflow | 0569b734c087d13cdf6cbb8e79dd8c579d7e66e4 | [
"MIT"
] | null | null | null | main.py | ngh3053/auto_spacing_with_tensorflow | 0569b734c087d13cdf6cbb8e79dd8c579d7e66e4 | [
"MIT"
] | null | null | null | from utils import *
from model import Model2
if __name__ == '__main__':
train_data = DataLoader('../data/trainX.txt', '../data/trainY.txt')
test_data = DataLoader('../data/testX.txt', '../data/testY.txt')
train_data.set_batch(100)
test_data.set_batch(100)
char_dic = CharDic([train_data])
model = Model2(train_data=train_data,
test_data=test_data,
char_dic=char_dic,
model_name='bilstm_crf_n3_e300_h2002')
model.train()
model.test() | 28.473684 | 72 | 0.608133 |
1c4c3d7288804166b00482d9413cd64068adedd3 | 3,475 | py | Python | src/sardana/taurus/qt/qtgui/extra_macroexecutor/macrodescriptionviewer.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 43 | 2016-11-25T15:21:23.000Z | 2021-08-20T06:09:40.000Z | src/sardana/taurus/qt/qtgui/extra_macroexecutor/macrodescriptionviewer.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 1,263 | 2016-11-25T15:58:37.000Z | 2021-11-02T22:23:47.000Z | src/sardana/taurus/qt/qtgui/extra_macroexecutor/macrodescriptionviewer.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 58 | 2016-11-21T11:33:55.000Z | 2021-09-01T06:21:21.000Z | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""
macrodescriptionviewer.py:
"""
import taurus.core
from taurus.external.qt import Qt
from taurus.qt.qtgui.base import TaurusBaseWidget
if __name__ == "__main__":
test()
| 35.10101 | 90 | 0.649784 |
1c4d007b31f3f642fe520a7abaa4b88348fd22fe | 179 | py | Python | torch/metrics/accuracy_score.py | LilDataScientist/PyTorch-From-Scratch | ae3c0bffc5a36a9a7c123b98f52bdaa32fbedef6 | [
"MIT"
] | null | null | null | torch/metrics/accuracy_score.py | LilDataScientist/PyTorch-From-Scratch | ae3c0bffc5a36a9a7c123b98f52bdaa32fbedef6 | [
"MIT"
] | null | null | null | torch/metrics/accuracy_score.py | LilDataScientist/PyTorch-From-Scratch | ae3c0bffc5a36a9a7c123b98f52bdaa32fbedef6 | [
"MIT"
] | null | null | null | import numpy as np
| 22.375 | 53 | 0.664804 |
1c4e17f4910c6d5e94aabd5e46b41369a206e931 | 462 | py | Python | asaas/financial_transactions.py | marlonjsilva/asaas_sdk_python | 871a199e8156d9baa9f78972232feee38b0608bb | [
"MIT"
] | null | null | null | asaas/financial_transactions.py | marlonjsilva/asaas_sdk_python | 871a199e8156d9baa9f78972232feee38b0608bb | [
"MIT"
] | 4 | 2022-02-16T13:53:36.000Z | 2022-02-16T14:10:40.000Z | asaas/financial_transactions.py | marlonjsilva/asaas_sdk_python | 871a199e8156d9baa9f78972232feee38b0608bb | [
"MIT"
] | null | null | null | from asaas.typing import SyncAsync
from typing import Any, Optional, Dict
| 25.666667 | 66 | 0.588745 |
1c4fabe61f50bb8ab5d328236ac8daab3e74249e | 17,672 | py | Python | datahub/core/serializers.py | uktrade/data-hub-api | c698cba533ff002293b821d01916f6334549f778 | [
"MIT"
] | 6 | 2019-12-02T16:11:24.000Z | 2022-03-18T10:02:02.000Z | datahub/core/serializers.py | uktrade/data-hub-api | c698cba533ff002293b821d01916f6334549f778 | [
"MIT"
] | 1,696 | 2019-10-31T14:08:37.000Z | 2022-03-29T12:35:57.000Z | datahub/core/serializers.py | uktrade/data-hub-api | c698cba533ff002293b821d01916f6334549f778 | [
"MIT"
] | 9 | 2019-11-22T12:42:03.000Z | 2021-09-03T14:25:05.000Z | from functools import partial
from uuid import UUID
from dateutil.parser import parse as dateutil_parse
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import ReadOnlyField, UUIDField
from datahub.core.constants import Country as CountryEnum
from datahub.core.validate_utils import DataCombiner
from datahub.core.validators import InRule, OperatorRule, RulesBasedValidator, ValidationRule
from datahub.metadata.models import AdministrativeArea, Country
MAX_LENGTH = settings.CHAR_FIELD_MAX_LENGTH
RelaxedDateField = partial(serializers.DateField, input_formats=('iso-8601', '%Y/%m/%d'))
| 33.093633 | 99 | 0.587992 |
1c50b9af34c1306cdbc9fec048d28309381c28e4 | 4,763 | py | Python | samples/s07-rigid-objects/main.py | nomadsinteractive/ark | 52f84c6dbd5ca6bdd07d450b3911be1ffd995922 | [
"Apache-2.0"
] | 5 | 2018-03-28T09:14:55.000Z | 2018-04-02T11:54:33.000Z | samples/s07-rigid-objects/main.py | nomadsinteractive/ark | 52f84c6dbd5ca6bdd07d450b3911be1ffd995922 | [
"Apache-2.0"
] | null | null | null | samples/s07-rigid-objects/main.py | nomadsinteractive/ark | 52f84c6dbd5ca6bdd07d450b3911be1ffd995922 | [
"Apache-2.0"
] | null | null | null | import math
import random
from ark import dear_imgui, ApplicationFacade, Arena, Event, Integer, Collider, RenderObject, Size, Camera, Vec3, Numeric
if __name__ == '__main__':
main(Application(_application))
| 43.3 | 153 | 0.669116 |
1c51a22587be89037e69f604118ecdbeda84cab5 | 11,693 | py | Python | jamf/models/computer_extension_attribute.py | jensenbox/python-jamf | 85213085b1064a00375a7aa7df5e33c19f5178eb | [
"RSA-MD"
] | 1 | 2021-04-20T15:28:57.000Z | 2021-04-20T15:28:57.000Z | jamf/models/computer_extension_attribute.py | jensenbox/python-jamf | 85213085b1064a00375a7aa7df5e33c19f5178eb | [
"RSA-MD"
] | null | null | null | jamf/models/computer_extension_attribute.py | jensenbox/python-jamf | 85213085b1064a00375a7aa7df5e33c19f5178eb | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
def to_str(self):
    """Render the model as a pretty-printed string of its dict form."""
    model_as_dict = self.to_dict()
    return pprint.pformat(model_as_dict)
def __repr__(self):
    """For `print` and `pprint`"""
    # repr() of the model is the same pretty-printed dict as to_str().
    return self.to_str()
def __eq__(self, other):
    """Equal when *other* is also a ComputerExtensionAttribute and both
    serialize to identical attribute dictionaries."""
    if isinstance(other, ComputerExtensionAttribute):
        return self.to_dict() == other.to_dict()
    return False
def __ne__(self, other):
    """Returns true if both objects are not equal.

    Defined as the negation of __eq__ so the two comparisons can never
    drift apart (the original duplicated the isinstance/to_dict logic).
    """
    return not self.__eq__(other)
| 33.408571 | 342 | 0.625673 |
1c5289b76fb10d8b256a4000027a462353b8a389 | 1,342 | py | Python | SSOKeyGen/ssokeygendialog.py | chrcoe/sso-keygen | c149f6202fbecb38874c75bf82e0d4857d1249f9 | [
"MIT"
] | null | null | null | SSOKeyGen/ssokeygendialog.py | chrcoe/sso-keygen | c149f6202fbecb38874c75bf82e0d4857d1249f9 | [
"MIT"
] | null | null | null | SSOKeyGen/ssokeygendialog.py | chrcoe/sso-keygen | c149f6202fbecb38874c75bf82e0d4857d1249f9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ssokeygendialog.ui'
#
# Created: Sun Feb 1 12:33:36 2015
# by: PyQt5 UI code generator 5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 38.342857 | 106 | 0.708644 |
1c5370b938a0a9b556d9850b79dfef4883c667c0 | 4,138 | py | Python | util/n_download_util.py | TwrFyr/n-hen.py | 8d20639ee78cc34e4333fb247574ff10af81556c | [
"MIT"
] | null | null | null | util/n_download_util.py | TwrFyr/n-hen.py | 8d20639ee78cc34e4333fb247574ff10af81556c | [
"MIT"
] | 22 | 2020-12-04T15:16:36.000Z | 2021-04-29T12:20:04.000Z | util/n_download_util.py | TwrFyr/n-henpy | 8d20639ee78cc34e4333fb247574ff10af81556c | [
"MIT"
] | null | null | null | import urllib.request
import os
from typing import List
from util.n_util import NUser
from util.n_util import get_n_entry
import time
import threading
from util.array_util import slice_array
delay: float = 2.5
def save_files_to_dir(file_url_list: List[str], path: str, update=None, thread_count: int = 1) -> None:
    """Saves all files represented by a list of url resources to the folder specified.

    The files are being named after the last part of the url.
    The number of threads can be increased to use more threads for the
    downloading of the images.

    :param file_url_list: URLs of the files to download
    :param path: target directory for the downloaded files
    :param update: optional progress callback forwarded to ProgressWrapper
    :param thread_count: number of worker threads; must lie in [1, 16]
    """
    # pretend to be normal user
    # opener=urllib.request.build_opener()
    # opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
    # urllib.request.install_opener(opener)
    progress = ProgressWrapper(0, len(file_url_list), update)
    progress.update()
    if thread_count < 1 or thread_count > 16:
        print(f'invalid thread count: {thread_count} not in [1, 16]')
        return
    lock = threading.Lock()
    # Partition the work once, outside the loop: the original recomputed
    # slice_array() on every iteration even though its result never changes.
    slices = slice_array(file_url_list, thread_count)
    threads = []
    for i in range(thread_count):
        t = threading.Thread(target=download_images,
                             kwargs=dict(lock=lock, file_url_list=slices[i], path=path,
                                         progress=progress),
                             daemon=True)
        threads.append(t)
        t.start()
    # Block until every worker has drained its slice.
    for t in threads:
        t.join()
def download_all_favorites(n_user: NUser, base_dir: str, update_entry=None, update_page=None, thread_count=1) -> None:
    """Downloads all entries favorited by `n_user` using the number of `thread_count` threads.

    :param n_user: user whose favorite list is walked
    :param base_dir: existing directory that receives one sub-directory per entry
    :param update_entry: optional callback invoked around each entry's download
    :param update_page: optional per-page callback forwarded to save_files_to_dir
    :param thread_count: worker threads used for each entry's images
    """
    print('downloading {}\'s {} favorites...'.format(n_user.username, n_user.fav_count))
    current_entry = 1
    total_entries = n_user.fav_count
    for min_entry in n_user.favorite_list:
        if update_entry is not None:
            update_entry(current_entry=min_entry, current=current_entry, total=total_entries)
        # get entry data
        print('downloading entry with id {}'.format(min_entry.n_id))
        entry = get_n_entry(min_entry.n_id)
        if entry is None:
            # Fetch failed (e.g. no connection): skip this entry, keep going.
            print('no connection possible, skipping...')
            current_entry += 1
            continue
        # check directory is valid
        if not os.path.exists(base_dir):
            # Without a base directory nothing can be saved; stop entirely.
            print('base directory does not exist, aborting...')
            break
        save_dir = os.path.join(base_dir, entry.digits)
        if os.path.exists(save_dir):
            # A pre-existing per-entry directory is treated as "already downloaded".
            print('entry already exists, skipping...')
            current_entry += 1
            continue
        else:
            os.mkdir(save_dir)
        # download images
        save_files_to_dir(entry.image_url_list, save_dir, update=update_page, thread_count=thread_count)
        # Throttle between entries (module-level `delay`) to avoid hammering the server.
        print('waiting for {} seconds...'.format(delay))
        time.sleep(delay)
        current_entry += 1
    if update_entry is not None:
        # Final callback with current_entry=None signals completion.
        update_entry(current_entry=None, current=current_entry, total=total_entries)
    print('download finished')
1c542217eb772ffd5114bee20efa5d974df6a3d5 | 2,907 | py | Python | stable-baselines/tests/test_deterministic.py | princeton-vl/PackIt | 9894d252c5238d582cba7c3d19540f89d47e4166 | [
"BSD-3-Clause"
] | 49 | 2020-07-24T18:17:12.000Z | 2022-01-04T15:30:52.000Z | stable-baselines/tests/test_deterministic.py | princeton-vl/PackIt | 9894d252c5238d582cba7c3d19540f89d47e4166 | [
"BSD-3-Clause"
] | 14 | 2020-07-21T20:21:08.000Z | 2022-03-12T00:42:18.000Z | stable-baselines/tests/test_deterministic.py | princeton-vl/PackIt | 9894d252c5238d582cba7c3d19540f89d47e4166 | [
"BSD-3-Clause"
] | 5 | 2020-07-27T12:35:00.000Z | 2021-07-19T03:04:21.000Z | import pytest
from stable_baselines import A2C, ACER, ACKTR, DeepQ, DDPG, PPO1, PPO2, TRPO
from stable_baselines.ddpg import AdaptiveParamNoiseSpec
from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox
from stable_baselines.common.vec_env import DummyVecEnv
# Adaptive parameter-space noise passed to DDPG below — presumably for
# exploration, since it is the only algorithm here given a noise spec.
PARAM_NOISE_DDPG = AdaptiveParamNoiseSpec(initial_stddev=float(0.2), desired_action_stddev=float(0.2))
# Hyperparameters for learning identity for each RL model
# Maps an algorithm name to a callable that builds the model on env `e`
# with an MLP policy and trains it for 1000 timesteps.
LEARN_FUNC_DICT = {
    'a2c': lambda e: A2C(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
    'acer': lambda e: ACER(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
    'acktr': lambda e: ACKTR(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
    'deepq': lambda e: DeepQ(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
    'ddpg': lambda e: DDPG(policy="MlpPolicy", env=e, param_noise=PARAM_NOISE_DDPG).learn(total_timesteps=1000),
    'ppo1': lambda e: PPO1(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
    'ppo2': lambda e: PPO2(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
    'trpo': lambda e: TRPO(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
}
1c547eed055111ebe6fcfe3bbff16bf6a9eb3360 | 1,129 | py | Python | tests/models/tensorflow/convert_to_tensorflow_serving.py | filipecosta90/dlbench | 11dd2fb58050c38a4baa429b207aaecad9097ce3 | [
"MIT"
] | 14 | 2019-09-14T16:37:39.000Z | 2022-03-19T08:28:50.000Z | tests/models/tensorflow/convert_to_tensorflow_serving.py | filipecosta90/dlbench | 11dd2fb58050c38a4baa429b207aaecad9097ce3 | [
"MIT"
] | 40 | 2019-11-14T16:07:08.000Z | 2022-03-29T21:47:15.000Z | tests/models/tensorflow/convert_to_tensorflow_serving.py | filipecosta90/dlbench | 11dd2fb58050c38a4baa429b207aaecad9097ce3 | [
"MIT"
] | 2 | 2021-01-07T01:50:53.000Z | 2021-02-24T22:22:23.000Z | import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
# Convert a frozen TF1 GraphDef (.pb) into a SavedModel that TensorFlow
# Serving can load; 00000002 is the model version directory.
export_dir = './reference/00000002'
graph_pb = './creditcardfraud.pb'
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
# Read the serialized GraphDef out of the frozen .pb file.
with tf.gfile.GFile(graph_pb, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
sigs = {}
with tf.Session(graph=tf.Graph()) as sess:
    # name="" is important to ensure we don't get spurious prefixing
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    # Look up the graph's input/output tensors by name so they can be
    # bound into the serving signature below.
    inp1 = g.get_tensor_by_name("transaction:0")
    inp2 = g.get_tensor_by_name("reference:0")
    out = g.get_tensor_by_name("output:0")
    # Register the default "predict" signature mapping named inputs to the
    # output tensor.
    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"transaction": inp1, "reference": inp2}, {"output": out})
    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)
# Write the SavedModel to export_dir.
builder.save()
| 34.212121 | 70 | 0.689105 |
1c54af7d2bc1fc02891b6239b955a52d082c20b2 | 936 | py | Python | pycuda/characterize.py | grlee77/pycuda | cfb787ac73a523fe4b32eff31ecffac485388bbf | [
"Apache-2.0"
] | null | null | null | pycuda/characterize.py | grlee77/pycuda | cfb787ac73a523fe4b32eff31ecffac485388bbf | [
"Apache-2.0"
] | null | null | null | pycuda/characterize.py | grlee77/pycuda | cfb787ac73a523fe4b32eff31ecffac485388bbf | [
"Apache-2.0"
] | 1 | 2020-08-31T08:52:24.000Z | 2020-08-31T08:52:24.000Z | from __future__ import division
from __future__ import absolute_import
from pycuda.tools import context_dependent_memoize
import numpy as np
| 24 | 72 | 0.700855 |
1c5536cbf34d028ddd3a2b10367f5360508e1251 | 1,666 | py | Python | bopt/transforms.py | georgedeath/bomean | 0dad35e0d584cf7c46c9a8cb0445f225875cfa86 | [
"MIT"
] | 2 | 2020-05-19T15:48:37.000Z | 2021-08-16T10:41:49.000Z | bopt/transforms.py | georgedeath/bomean | 0dad35e0d584cf7c46c9a8cb0445f225875cfa86 | [
"MIT"
] | null | null | null | bopt/transforms.py | georgedeath/bomean | 0dad35e0d584cf7c46c9a8cb0445f225875cfa86 | [
"MIT"
] | null | null | null | import torch
from scipy.stats import median_absolute_deviation
| 25.630769 | 76 | 0.617047 |
1c556489b0d99f41db32e59ce5f01f383067703c | 2,797 | py | Python | pygs/graphserver/compiler/dedupe.py | abyrd/graphserver | 42edcad2618635310c57fa6ab4a13974025248ba | [
"BSD-3-Clause-Clear"
] | 2 | 2015-02-25T21:46:02.000Z | 2019-04-27T20:22:33.000Z | pygs/graphserver/compiler/dedupe.py | ninowalker/graphserver | dc08070bc6e295986633cf510ca46a2f8d451b92 | [
"BSD-3-Clause-Clear"
] | null | null | null | pygs/graphserver/compiler/dedupe.py | ninowalker/graphserver | dc08070bc6e295986633cf510ca46a2f8d451b92 | [
"BSD-3-Clause-Clear"
] | null | null | null | # eliminate duplicate service periods from a GTFS database
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase
import sys
from optparse import OptionParser
if __name__=='__main__':
| 39.957143 | 271 | 0.631748 |
1c5786ec0bae08a5ef1c18dbc1ab79a0a17bfc34 | 105 | py | Python | 10/01/03/2.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | 10/01/03/2.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | 46 | 2017-06-30T22:19:07.000Z | 2017-07-31T22:51:31.000Z | 10/01/03/2.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | print(MyClass().__repr__())
| 26.25 | 61 | 0.704762 |
1c57a86a468018b2042fa4b09d8dfca249bb7498 | 9,562 | py | Python | tests/tasks/core/test_core.py | andykawabata/prefect | a11061c19847beeea26616ccaf4b404ad939676b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-28T16:24:02.000Z | 2020-10-08T17:08:19.000Z | tests/tasks/core/test_core.py | andykawabata/prefect | a11061c19847beeea26616ccaf4b404ad939676b | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2021-06-28T20:52:27.000Z | 2022-02-27T13:04:42.000Z | tests/tasks/core/test_core.py | yalaudah/prefect | 2f7f92c39a4575119c3268b0415841c6aca5df60 | [
"Apache-2.0"
] | 1 | 2020-05-04T13:22:11.000Z | 2020-05-04T13:22:11.000Z | import pytest
from prefect.core import Edge, Flow, Parameter, Task
from prefect.tasks.core import collections
from prefect.tasks.core.constants import Constant
from prefect.tasks.core.function import FunctionTask
| 33.787986 | 87 | 0.587743 |
1c57c91d84e8ef886ecab5c688f26666500663aa | 536 | py | Python | Tree/node.py | philipwerner/python_data_structures | 554c38376b732f65c5c168d0e1bd30bea3d1ab6b | [
"MIT"
] | null | null | null | Tree/node.py | philipwerner/python_data_structures | 554c38376b732f65c5c168d0e1bd30bea3d1ab6b | [
"MIT"
] | null | null | null | Tree/node.py | philipwerner/python_data_structures | 554c38376b732f65c5c168d0e1bd30bea3d1ab6b | [
"MIT"
] | null | null | null | """Node class module for Binary Tree."""
| 26.8 | 88 | 0.585821 |
1c5842430ac7ddf81b0dae7e72f5e8595722304e | 26,713 | py | Python | qutip/operators.py | pschindler/qutip | dc399135b77a01077898e13bb7d30d60db9b6e67 | [
"BSD-3-Clause"
] | 1 | 2018-05-31T17:38:03.000Z | 2018-05-31T17:38:03.000Z | qutip/operators.py | pschindler/qutip | dc399135b77a01077898e13bb7d30d60db9b6e67 | [
"BSD-3-Clause"
] | 3 | 2021-08-23T19:00:52.000Z | 2021-08-24T21:38:04.000Z | qutip/operators.py | pschindler/qutip | dc399135b77a01077898e13bb7d30d60db9b6e67 | [
"BSD-3-Clause"
] | 2 | 2017-08-11T11:14:52.000Z | 2022-03-13T21:37:47.000Z | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains functions for generating Qobj representations of a variety
of commonly occurring quantum operators.
"""
__all__ = ['jmat', 'spin_Jx', 'spin_Jy', 'spin_Jz', 'spin_Jm', 'spin_Jp',
'spin_J_set', 'sigmap', 'sigmam', 'sigmax', 'sigmay', 'sigmaz',
'destroy', 'create', 'qeye', 'identity', 'position', 'momentum',
'num', 'squeeze', 'squeezing', 'displace', 'commutator',
'qutrit_ops', 'qdiags', 'phase', 'qzero', 'enr_destroy',
'enr_identity', 'charge', 'tunneling']
import numbers
import numpy as np
import scipy
import scipy.sparse as sp
from qutip.qobj import Qobj
from qutip.fastsparse import fast_csr_matrix, fast_identity
from qutip.dimensions import flatten
#
# Spin operators
#
def jmat(j, *args):
    """Higher-order spin operators.

    Parameters
    ----------
    j : float
        Spin quantum number; must be a non-negative integer or
        half-integer.
    args : str
        Which operator to return: 'x', 'y', 'z', '+' or '-'.  If no
        label is given, the triple (Jx, Jy, Jz) is returned.

    Returns
    -------
    jmat : Qobj or tuple of Qobj
        ``Qobj`` for the requested spin operator(s).

    Raises
    ------
    TypeError
        If ``j`` is not a valid spin or the operator label is unknown.
    """
    # 2j must be a non-negative whole number for a physical spin.
    if (np.fix(2 * j) != 2 * j) or (j < 0):
        raise TypeError('j must be a non-negative integer or half-integer')
    if not args:
        return jmat(j, 'x'), jmat(j, 'y'), jmat(j, 'z')
    label = args[0]
    if label == '+':
        mat = _jplus(j)
    elif label == '-':
        mat = _jplus(j).getH()
    elif label == 'x':
        # Jx = (J+ + J-) / 2
        raising = _jplus(j)
        mat = 0.5 * (raising + raising.getH())
    elif label == 'y':
        # Jy = (J+ - J-) / 2i
        raising = _jplus(j)
        mat = -0.5 * 1j * (raising - raising.getH())
    elif label == 'z':
        mat = _jz(j)
    else:
        raise TypeError('Invalid type')
    return Qobj(mat)
def _jplus(j):
    """
    Build the sparse (CSR) data for the spin-j raising operator J+,
    a (2j+1) x (2j+1) matrix whose only nonzeros sit on the first
    superdiagonal.
    """
    # Magnetic quantum numbers in descending order: j, j-1, ..., -j.
    m = np.arange(j, -j - 1, -1, dtype=complex)
    # Matrix elements sqrt(j(j+1) - m(m+1)); the first entry (m = j)
    # is dropped because J+ annihilates the top state |j, j>.
    data = (np.sqrt(j * (j + 1.0) - (m + 1.0) * m))[1:]
    N = m.shape[0]
    # Column indices 1..N-1: row r holds its single entry at column r+1,
    # and the last row is empty (duplicated final row pointer).
    ind = np.arange(1, N, dtype=np.int32)
    ptr = np.array(list(range(N-1))+[N-1]*2, dtype=np.int32)
    ptr[-1] = N-1
    return fast_csr_matrix((data,ind,ptr), shape=(N,N))
def _jz(j):
    """
    Build the sparse (CSR) data for the diagonal spin-j Jz operator,
    with diagonal entries j, j-1, ..., -j.  Any exact zero on the
    diagonal (present only for integer j) is left out of the sparse
    structure.
    """
    N = int(2*j+1)
    # Diagonal values with the zero entry (at k == j for integer j) skipped.
    data = np.array([j-k for k in range(N) if (j-k)!=0], dtype=complex)
    # Even shaped matrix: half-integer j, no zero on the diagonal,
    # so every row stores exactly one entry.
    if (N % 2 == 0):
        ind = np.arange(N, dtype=np.int32)
        ptr = np.arange(N+1,dtype=np.int32)
        ptr[-1] = N
    # Odd shaped matrix: integer j; row j (the m = 0 state) is empty,
    # so its column index is skipped and its row pointer repeats.
    else:
        j = int(j)
        ind = np.array(list(range(j))+list(range(j+1,N)), dtype=np.int32)
        ptr = np.array(list(range(j+1))+list(range(j,N)), dtype=np.int32)
        ptr[-1] = N-1
    return fast_csr_matrix((data,ind,ptr), shape=(N,N))
#
# Spin j operators:
#
def spin_Jx(j):
    """Spin-j x-component operator Jx.

    Parameters
    ----------
    j : float
        Spin quantum number.

    Returns
    -------
    Qobj
        The (2j+1)-dimensional Jx operator.
    """
    return jmat(j, 'x')
def spin_Jy(j):
    """Spin-j y-component operator Jy.

    Parameters
    ----------
    j : float
        Spin quantum number.

    Returns
    -------
    Qobj
        The (2j+1)-dimensional Jy operator.
    """
    return jmat(j, 'y')
def spin_Jz(j):
    """Spin-j z-component operator Jz.

    Parameters
    ----------
    j : float
        Spin quantum number.

    Returns
    -------
    Qobj
        The (2j+1)-dimensional Jz operator.
    """
    return jmat(j, 'z')
def spin_Jm(j):
    """Spin-j lowering (annihilation) operator J-.

    Parameters
    ----------
    j : float
        Spin quantum number.

    Returns
    -------
    Qobj
        The (2j+1)-dimensional J- operator.
    """
    return jmat(j, '-')
def spin_Jp(j):
    """Spin-j raising (creation) operator J+.

    Parameters
    ----------
    j : float
        Spin quantum number.

    Returns
    -------
    Qobj
        The (2j+1)-dimensional J+ operator.
    """
    return jmat(j, '+')
def spin_J_set(j):
    """Full Cartesian set of spin-j operators.

    Parameters
    ----------
    j : float
        Spin quantum number.

    Returns
    -------
    tuple of Qobj
        The triple (Jx, Jy, Jz).
    """
    return jmat(j)
#
# Pauli spin 1/2 operators:
#
def sigmap():
    """Pauli raising operator sigma+ = [[0, 1], [0, 0]].

    Returns
    -------
    Qobj
        The 2x2 spin-1/2 raising operator.
    """
    return jmat(1 / 2., '+')
def sigmam():
    """Pauli lowering operator sigma- = [[0, 0], [1, 0]].

    Returns
    -------
    Qobj
        The 2x2 spin-1/2 lowering operator.
    """
    return jmat(1 / 2., '-')
def sigmax():
    """Pauli sigma-x operator [[0, 1], [1, 0]].

    Returns
    -------
    Qobj
        The 2x2 sigma-x matrix, built as 2 * Jx for spin 1/2.
    """
    return 2.0 * jmat(1.0 / 2, 'x')
def sigmay():
    """Pauli sigma-y operator [[0, -i], [i, 0]].

    Returns
    -------
    Qobj
        The 2x2 sigma-y matrix, built as 2 * Jy for spin 1/2.
    """
    return 2.0 * jmat(1.0 / 2, 'y')
def sigmaz():
    """Pauli sigma-z operator [[1, 0], [0, -1]].

    Returns
    -------
    Qobj
        The 2x2 sigma-z matrix, built as 2 * Jz for spin 1/2.
    """
    return 2.0 * jmat(1.0 / 2, 'z')
#
# DESTROY returns annihilation operator for N dimensional Hilbert space
# out = destroy(N), N is integer value & N>0
#
def destroy(N, offset=0):
    '''Destruction (lowering) operator.

    Parameters
    ----------
    N : int
        Dimension of Hilbert space.
    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper : qobj
        Qobj for lowering operator.

    Raises
    ------
    ValueError
        If ``N`` is not an integer.

    Examples
    --------
    >>> destroy(4) # doctest: +SKIP
    Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
    Qobj data =
    [[ 0.00000000+0.j 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
    [ 0.00000000+0.j 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j]
    [ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j]
    [ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]]
    '''
    if not isinstance(N, (int, np.integer)): # raise error if N not integer
        raise ValueError("Hilbert space dimension must be integer value")
    # Superdiagonal elements sqrt(offset+1) ... sqrt(N+offset-1).
    data = np.sqrt(np.arange(offset+1, N+offset, dtype=complex))
    # CSR layout: row r (r < N-1) holds one entry at column r+1; the
    # last row is empty, hence the duplicated final row pointer.
    ind = np.arange(1,N, dtype=np.int32)
    ptr = np.arange(N+1, dtype=np.int32)
    ptr[-1] = N-1
    return Qobj(fast_csr_matrix((data,ind,ptr),shape=(N,N)), isherm=False)
#
# create returns creation operator for N dimensional Hilbert space
# out = create(N), N is integer value & N>0
#
def create(N, offset=0):
    '''Creation (raising) operator.

    Parameters
    ----------
    N : int
        Dimension of Hilbert space.
    offset : int (default 0)
        The lowest number state included in the truncated number-state
        representation of the operator.

    Returns
    -------
    oper : qobj
        Qobj for the raising operator, i.e. the adjoint of
        :func:`destroy`.

    Raises
    ------
    ValueError
        If ``N`` is not an integer.
    '''
    if not isinstance(N, (int, np.integer)):  # raise error if N not integer
        raise ValueError("Hilbert space dimension must be integer value")
    # The raising operator is simply the adjoint of the lowering operator.
    return destroy(N, offset=offset).dag()
def _implicit_tensor_dimensions(dimensions):
    """Normalize a dimension specification for implicit tensor products.

    Parameters
    ----------
    dimensions : int, list of int, or list of list of int
        A bare integer is promoted to ``[dimensions]``; lists follow the
        two-element ``dims`` convention of ``Qobj`` and may describe
        tensor products.

    Returns
    -------
    size : int
        Total size of the backing matrix (product of all entries).
    dimensions : list
        ``[dimensions, dimensions]`` in the form ``Qobj`` expects.

    Raises
    ------
    ValueError
        If any entry is not a non-negative integer.
    """
    if not isinstance(dimensions, list):
        dimensions = [dimensions]
    flat_dims = flatten(dimensions)
    if not all(isinstance(d, numbers.Integral) and d >= 0 for d in flat_dims):
        raise ValueError("All dimensions must be integers >= 0")
    return np.prod(flat_dims), [dimensions, dimensions]
def qzero(dimensions):
    """
    Zero operator.

    Parameters
    ----------
    dimensions : int, list of int, or list of list of int
        Hilbert-space dimension.  A list gives a tensor-product space
        whose total size is the product of the entries; nested lists can
        describe `super` dimensions.

    Returns
    -------
    qzero : qobj
        Zero operator Qobj with the requested ``dims``.
    """
    size, dims = _implicit_tensor_dimensions(dimensions)
    # A CSR matrix with no stored entries is exactly the zero matrix.
    empty = fast_csr_matrix(shape=(size, size), dtype=complex)
    return Qobj(empty, dims=dims, isherm=True)
#
# QEYE returns identity operator for a Hilbert space with dimensions dims.
# a = qeye(N), N is integer or list of integers & all elements >= 0
#
def qeye(dimensions):
    """
    Identity operator.

    Parameters
    ----------
    dimensions : int, list of int, or list of list of int
        Hilbert-space dimension.  A list gives a tensor-product space
        whose total size is the product of the entries; nested lists can
        describe `super` dimensions.

    Returns
    -------
    oper : qobj
        Identity operator Qobj with the requested ``dims``.

    Examples
    --------
    >>> qeye(3) # doctest: +SKIP
    Quantum object: dims = [[3], [3]], shape = (3, 3), type = oper, \
isherm = True
    Qobj data =
    [[ 1. 0. 0.]
    [ 0. 1. 0.]
    [ 0. 0. 1.]]
    """
    size, dims = _implicit_tensor_dimensions(dimensions)
    return Qobj(fast_identity(size), dims=dims, isherm=True, isunitary=True)
def identity(dims):
    """Identity operator; alternative name for :func:`qeye`.

    Parameters
    ----------
    dims : int, list of int, or list of list of int
        Hilbert-space dimension (see :func:`qeye`).

    Returns
    -------
    oper : qobj
        Identity operator Qobj.
    """
    return qeye(dims)
def position(N, offset=0):
    """
    Position operator x = (a + a.dag()) / sqrt(2).

    Parameters
    ----------
    N : int
        Number of Fock states in the Hilbert space.
    offset : int (default 0)
        Lowest number state included in the truncated representation.

    Returns
    -------
    oper : qobj
        Position operator as Qobj.
    """
    lower = destroy(N, offset=offset)
    return 1.0 / np.sqrt(2.0) * (lower + lower.dag())
def momentum(N, offset=0):
    """
    Momentum operator p = -1j * (a - a.dag()) / sqrt(2).

    Parameters
    ----------
    N : int
        Number of Fock states in the Hilbert space.
    offset : int (default 0)
        Lowest number state included in the truncated representation.

    Returns
    -------
    oper : qobj
        Momentum operator as Qobj.
    """
    lower = destroy(N, offset=offset)
    return -1j / np.sqrt(2.0) * (lower - lower.dag())
def num(N, offset=0):
    """Quantum object for the number operator (diagonal matrix of
    occupation numbers).

    Parameters
    ----------
    N : int
        The dimension of the Hilbert space.
    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper: qobj
        Qobj for number operator.

    Examples
    --------
    >>> num(4) # doctest: +SKIP
    Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = True
    Qobj data =
    [[0 0 0 0]
    [0 1 0 0]
    [0 0 2 0]
    [0 0 0 3]]
    """
    # With no offset the (0, 0) diagonal entry is exactly zero, so it is
    # omitted from the sparse structure: row 0 is empty.
    if offset == 0:
        data = np.arange(1,N, dtype=complex)
        ind = np.arange(1,N, dtype=np.int32)
        ptr = np.array([0]+list(range(0,N)), dtype=np.int32)
        ptr[-1] = N-1
    # With an offset every diagonal entry is stored.
    else:
        data = np.arange(offset, offset + N, dtype=complex)
        ind = np.arange(N, dtype=np.int32)
        ptr = np.arange(N+1,dtype=np.int32)
        ptr[-1] = N
    return Qobj(fast_csr_matrix((data,ind,ptr), shape=(N,N)), isherm=True)
def squeeze(N, z, offset=0):
    """Single-mode squeezing operator
    S(z) = exp((conj(z) * a**2 - z * a.dag()**2) / 2).

    Parameters
    ----------
    N : int
        Dimension of the Hilbert space.
    z : float/complex
        Squeezing parameter.
    offset : int (default 0)
        Lowest number state included in the truncated representation.

    Returns
    -------
    oper : :class:`qutip.qobj.Qobj`
        Squeezing operator.
    """
    a = destroy(N, offset=offset)
    # Anti-Hermitian generator of the squeezing transformation.
    generator = (1 / 2.0) * np.conj(z) * (a ** 2) - (1 / 2.0) * z * (a.dag()) ** 2
    return generator.expm()
def squeezing(a1, a2, z):
    """Generalized (two-operator) squeezing operator.

    .. math::

        S(z) = \\exp\\left(\\frac{1}{2}\\left(z^*a_1a_2
        - za_1^\\dagger a_2^\\dagger\\right)\\right)

    Parameters
    ----------
    a1 : :class:`qutip.qobj.Qobj`
        Operator 1.
    a2 : :class:`qutip.qobj.Qobj`
        Operator 2.
    z : float/complex
        Squeezing parameter.

    Returns
    -------
    oper : :class:`qutip.qobj.Qobj`
        Squeezing operator.
    """
    generator = 0.5 * (np.conj(z) * (a1 * a2) - z * (a1.dag() * a2.dag()))
    return generator.expm()
def displace(N, alpha, offset=0):
    """Single-mode displacement operator
    D(alpha) = exp(alpha * a.dag() - conj(alpha) * a).

    Parameters
    ----------
    N : int
        Dimension of the Hilbert space.
    alpha : float/complex
        Displacement amplitude.
    offset : int (default 0)
        Lowest number state included in the truncated representation.

    Returns
    -------
    oper : qobj
        Displacement operator.
    """
    a = destroy(N, offset=offset)
    return (alpha * a.dag() - np.conj(alpha) * a).expm()
def commutator(A, B, kind="normal"):
    """
    Commutator of two operators.

    Parameters
    ----------
    A, B : operator-like
        The two operands.
    kind : str
        'normal' for the commutator A*B - B*A, 'anti' for the
        anticommutator A*B + B*A.

    Returns
    -------
    The requested (anti)commutator.

    Raises
    ------
    TypeError
        If ``kind`` is neither 'normal' nor 'anti'.
    """
    if kind == 'normal':
        return A * B - B * A
    if kind == 'anti':
        return A * B + B * A
    raise TypeError("Unknown commutator kind '%s'" % kind)
def qutrit_ops():
    """
    Projection and transition operators for a three-level system (qutrit).

    Returns
    -------
    opers: array
        Object array of the six operators
        [sig11, sig22, sig33, sig12, sig23, sig31].
    """
    from qutip.states import qutrit_basis

    one, two, three = qutrit_basis()
    # (ket, bra) pairs: the three level projectors followed by the
    # three transition operators.
    pairs = [(one, one), (two, two), (three, three),
             (one, two), (two, three), (three, one)]
    ops = [ket * bra.dag() for ket, bra in pairs]
    return np.array(ops, dtype=object)
def qdiags(diagonals, offsets, dims=None, shape=None):
    """
    Construct an operator from one or more diagonals.

    Parameters
    ----------
    diagonals : sequence of array_like
        Elements to place along the selected diagonals.
    offsets : sequence of ints
        Diagonal selectors: 0 is the main diagonal, k > 0 the k-th
        upper diagonal, k < 0 the k-th lower diagonal.
    dims : list, optional
        Dimensions for the operator.
    shape : list, tuple, optional
        Shape of the operator.  If omitted, a square operator large
        enough to contain the diagonals is generated.

    See Also
    --------
    scipy.sparse.diags : for usage information.

    Examples
    --------
    >>> qdiags(sqrt(range(1, 4)), 1) # doctest: +SKIP
    Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isherm = False
    Qobj data =
    [[ 0. 1. 0. 0. ]
    [ 0. 0. 1.41421356 0. ]
    [ 0. 0. 0. 1.73205081]
    [ 0. 0. 0. 0. ]]
    """
    data = sp.diags(diagonals, offsets, shape, format='csr', dtype=complex)
    # Fall back to Qobj's defaults when dims/shape are not supplied.
    dims = dims or [[], []]
    shape = shape or []
    return Qobj(data, dims, list(shape))
def phase(N, phi0=0):
    """
    Single-mode Pegg-Barnett phase operator.

    Parameters
    ----------
    N : int
        Number of basis states in Hilbert space.
    phi0 : float
        Reference phase.

    Returns
    -------
    oper : qobj
        Phase operator with respect to reference phase.

    Notes
    -----
    The Pegg-Barnett phase operator is Hermitian on a truncated Hilbert space.
    """
    phim = phi0 + (2.0 * np.pi * np.arange(N)) / N  # discrete phase angles
    # Number-basis index as a column vector for broadcasting below.
    n = np.arange(N).reshape((N, 1))
    # sqrt(phi_m)-scaled phase states, so that each outer product below
    # carries the eigenvalue weight phi_m.
    states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1.0j * n * kk)
                       for kk in phim])
    # Operator = sum over m of phi_m |phi_m><phi_m|.
    ops = np.array([np.outer(st, st.conj()) for st in states])
    return Qobj(np.sum(ops, axis=0))
def enr_destroy(dims, excitations):
    """
    Generate annihilation operators for modes in an
    excitation-number-restricted state space. For example, consider a system
    consisting of 4 modes, each with 5 states. The total Hilbert space size
    is 5**4 = 625. If we are only interested in states that contain up to 2
    excitations, we only need to include states such as

        (0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 0, 2), (0, 0, 1, 0),
        (0, 0, 1, 1), (0, 0, 2, 0), ...

    This function creates annihilation operators for the 4 modes that act
    within this restricted state space:

        a1, a2, a3, a4 = enr_destroy([5, 5, 5, 5], excitations=2)

    From this point onwards, the annihilation operators a1, ..., a4 can be
    used to set up a Hamiltonian, collapse operators and expectation-value
    operators, etc., following the usual pattern.

    Parameters
    ----------
    dims : list
        A list of the dimensions of each subsystem of a composite quantum
        system.
    excitations : integer
        The maximum number of excitations that are to be included in the
        state space.

    Returns
    -------
    a_ops : list of qobj
        A list of annihilation operators, one for each mode in the composite
        quantum system described by dims.
    """
    from qutip.states import enr_state_dictionaries

    nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)

    # NOTE: ``np.complex`` (a deprecated alias of the builtin ``complex``)
    # was removed in NumPy 1.24; use the builtin instead.
    a_ops = [sp.lil_matrix((nstates, nstates), dtype=complex)
             for _ in range(len(dims))]

    # Fill in <state1| a_idx |state2> = sqrt(n_idx) wherever state1 equals
    # state2 with one excitation removed from mode idx.
    for n1, state1 in idx2state.items():
        for n2, state2 in idx2state.items():
            for idx, a in enumerate(a_ops):
                s1 = [s for idx2, s in enumerate(state1) if idx != idx2]
                s2 = [s for idx2, s in enumerate(state2) if idx != idx2]
                if (state1[idx] == state2[idx] - 1) and (s1 == s2):
                    a_ops[idx][n1, n2] = np.sqrt(state2[idx])

    return [Qobj(a, dims=[dims, dims]) for a in a_ops]
def enr_identity(dims, excitations):
    """
    Generate the identity operator for the excitation-number-restricted
    state space defined by the `dims` and `excitations` arguments. See the
    docstring for enr_fock for a more detailed description of these
    arguments.

    Parameters
    ----------
    dims : list
        A list of the dimensions of each subsystem of a composite quantum
        system.
    excitations : integer
        The maximum number of excitations that are to be included in the
        state space.

    Returns
    -------
    op : Qobj
        A Qobj instance that represents the identity operator in the
        excitation-number-restricted state space defined by `dims` and
        `excitations`.
    """
    from qutip.states import enr_state_dictionaries

    nstates, _, _ = enr_state_dictionaries(dims, excitations)
    # ``np.complex`` was removed in NumPy 1.24; the builtin ``complex`` is
    # the documented replacement.
    data = sp.eye(nstates, nstates, dtype=complex)
    return Qobj(data, dims=[dims, dims])
def charge(Nmax, Nmin=None, frac=1):
    """
    Diagonal charge operator over the charge states Nmin..Nmax.

    Parameters
    ----------
    Nmax : int
        Maximum charge state to consider.
    Nmin : int (default = -Nmax)
        Lowest charge state to consider.
    frac : float (default = 1)
        Fractional charge factor, if needed.

    Returns
    -------
    C : Qobj
        Hermitian charge operator over [Nmin, Nmax].

    Notes
    -----
    .. versionadded:: 3.2
    """
    if Nmin is None:
        Nmin = -Nmax
    diagonal = np.arange(Nmin, Nmax + 1, dtype=float)
    if frac != 1:
        diagonal *= frac
    return Qobj(sp.diags(diagonal, 0, format='csr', dtype=complex),
                isherm=True)
def tunneling(N, m=1):
    """
    Tunneling operator with elements of the form
    :math:`\\sum |N><N+m| + |N+m><N|`.

    Parameters
    ----------
    N : int
        Number of basis states in Hilbert space.
    m : int (default = 1)
        Number of excitations in each tunneling event.

    Returns
    -------
    T : Qobj
        Hermitian tunneling operator.

    Notes
    -----
    .. versionadded:: 3.2
    """
    # Unit couplings on the +m and -m diagonals.
    ones = np.ones(N - m, dtype=int)
    hop = sp.diags([ones, ones], [m, -m], format='csr', dtype=complex)
    return Qobj(hop, isherm=True)
# Break circular dependencies by a trailing import.
# Note that we use a relative import here to deal with that
# qutip.tensor is the *function* tensor, not the module.
from qutip.tensor import tensor
| 26.370188 | 79 | 0.573129 |
1c59215728acaff76dbcdca05ce20bf9c254f9f4 | 1,627 | py | Python | tests/test_deepsv.py | lsantuari/deepsv | debaa1442d1d97b8220be70e12321cf047d3e6a0 | [
"Apache-2.0"
] | null | null | null | tests/test_deepsv.py | lsantuari/deepsv | debaa1442d1d97b8220be70e12321cf047d3e6a0 | [
"Apache-2.0"
] | null | null | null | tests/test_deepsv.py | lsantuari/deepsv | debaa1442d1d97b8220be70e12321cf047d3e6a0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from deepsv import deepsv
from unittest.mock import patch
"""Tests for the deepsv module.
"""
# Fixture example
| 22.287671 | 71 | 0.695144 |
1c59d4c3c8cb7118c29dce871107ae825dc23c99 | 8,959 | py | Python | tcex/bin/dep.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tcex/bin/dep.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tcex/bin/dep.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""TcEx Dependencies Command"""
# standard library
import os
import platform
import shutil
import subprocess # nosec
import sys
from distutils.version import StrictVersion # pylint: disable=no-name-in-module
from pathlib import Path
from typing import List
from urllib.parse import quote
# third-party
import typer
# first-party
from tcex.app_config.models.tcex_json_model import LibVersionModel
from tcex.bin.bin_abc import BinABC
| 36.125 | 99 | 0.58444 |
1c5a7f175c98d892dc83db59726cb2f27a8bed94 | 2,198 | py | Python | parser/fase2/team20/execution/executeSentence2.py | LopDlMa/tytus | 0b43ee1c7300cb11ddbe593e08239321b71dc443 | [
"MIT"
] | null | null | null | parser/fase2/team20/execution/executeSentence2.py | LopDlMa/tytus | 0b43ee1c7300cb11ddbe593e08239321b71dc443 | [
"MIT"
] | null | null | null | parser/fase2/team20/execution/executeSentence2.py | LopDlMa/tytus | 0b43ee1c7300cb11ddbe593e08239321b71dc443 | [
"MIT"
] | null | null | null | from .AST.sentence import *
from .AST.expression import *
from .AST.error import *
import sys
sys.path.append("../")
from console import *
| 36.032787 | 135 | 0.641947 |
1c5b0f1bcbcc57bddee91b24e585d8faf96244eb | 5,592 | py | Python | src/test/cli/component.py | huseyinbolt/cord-tester | ed9b79916e6326a45bfaf3227b8ff922d76df4f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/test/cli/component.py | huseyinbolt/cord-tester | ed9b79916e6326a45bfaf3227b8ff922d76df4f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/test/cli/component.py | huseyinbolt/cord-tester | ed9b79916e6326a45bfaf3227b8ff922d76df4f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Created on 24-Oct-2012
author:s: Anil Kumar ( anilkumar.s@paxterrasolutions.com ),
Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
TestON is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
( at your option ) any later version.
TestON is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TestON. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
from clicommon import *
if __name__ != "__main__":
import sys
sys.modules[ __name__ ] = Component()
| 35.392405 | 76 | 0.619456 |
1c5cd63de747901926f8ddd0a4d149ca05999677 | 2,575 | py | Python | python-framework/handlers/base/auth.py | huangxingx/python-framework | a62618b0ee5ecff9de426327892cdd690d10510d | [
"MIT"
] | 7 | 2019-10-24T03:26:22.000Z | 2019-10-27T14:55:07.000Z | python-framework/handlers/base/auth.py | PJoemu/python-framework | a62618b0ee5ecff9de426327892cdd690d10510d | [
"MIT"
] | 3 | 2021-06-08T19:13:10.000Z | 2022-01-13T00:38:48.000Z | python-framework/handlers/base/auth.py | PJoemu/python-framework | a62618b0ee5ecff9de426327892cdd690d10510d | [
"MIT"
] | 2 | 2019-10-25T03:54:51.000Z | 2020-06-28T08:50:12.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: x.huang
# @date:17-8-4
import logging
from pony.orm import db_session
from handlers.base.base import BaseRequestHandler
| 32.1875 | 107 | 0.533204 |
1c5d3932d3d58eb3852f548752bb665e5c02d910 | 475 | py | Python | pysol/core/helpers.py | lotfio/pysol | 34fac6d1ec246a7a037d8237e00974a9a9548faa | [
"MIT"
] | 2 | 2019-10-09T21:58:20.000Z | 2020-01-08T07:29:28.000Z | pysol/core/helpers.py | lotfio/pysol | 34fac6d1ec246a7a037d8237e00974a9a9548faa | [
"MIT"
] | null | null | null | pysol/core/helpers.py | lotfio/pysol | 34fac6d1ec246a7a037d8237e00974a9a9548faa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#| This file is part of cony
#|
#| @package Pysol python cli application
#| @author <lotfio lakehal>
#| @license MIT
#| @version 0.1.0
#| @copyright 2019 lotfio lakehal
import sys
# load module function
# this function loads a module by string name | 23.75 | 53 | 0.673684 |
1c5dc3f5290f019cca3cade7daba3d9be28fa2da | 6,387 | py | Python | autograd_hacks/test_autograd_hacks.py | jusjusjus/autograd-hacks | c12556d03e40cccaa0e70e14b0120b723002ed9e | [
"Unlicense"
] | 1 | 2020-05-01T12:14:43.000Z | 2020-05-01T12:14:43.000Z | autograd_hacks/test_autograd_hacks.py | jusjusjus/autograd-hacks | c12556d03e40cccaa0e70e14b0120b723002ed9e | [
"Unlicense"
] | null | null | null | autograd_hacks/test_autograd_hacks.py | jusjusjus/autograd-hacks | c12556d03e40cccaa0e70e14b0120b723002ed9e | [
"Unlicense"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytest
from . import autograd_hacks
# Autograd helpers, from https://gist.github.com/apaszke/226abdf867c4e9d6698bd198f3b45fb7
def jacobian(y: torch.Tensor, x: torch.Tensor, create_graph=False):
    """Compute d(y)/d(x), one backward pass per scalar entry of ``y``.

    The result has shape ``y.shape + x.shape``.  Pass ``create_graph=True``
    to make the Jacobian itself differentiable.
    """
    flat_y = y.reshape(-1)
    rows = []
    # One-hot seed vector, re-used across iterations (set, backprop, reset).
    seed = torch.zeros_like(flat_y)
    for idx in range(flat_y.numel()):
        seed[idx] = 1.
        (row,) = torch.autograd.grad(
            flat_y, x, seed, retain_graph=True, create_graph=create_graph)
        rows.append(row.reshape(x.shape))
        seed[idx] = 0.
    return torch.stack(rows).reshape(y.shape + x.shape)
def hessian(y: torch.Tensor, x: torch.Tensor):
    """Second derivative of ``y`` w.r.t. ``x``; shape ``y.shape + x.shape + x.shape``.

    Computed as the Jacobian of a differentiable Jacobian
    (``create_graph=True`` keeps the first pass on the autograd graph).
    """
    return jacobian(jacobian(y, x, create_graph=True), x)
| 29.706977 | 102 | 0.614686 |
1c5e34faccefb41600dc36e2445e46683f4cb6c1 | 5,213 | py | Python | tests/test_command.py | paulfurley/Mailpile | f89611d916e41e74dd00997327a2c2d042a96399 | [
"Apache-2.0"
] | 1 | 2017-04-19T11:10:05.000Z | 2017-04-19T11:10:05.000Z | tests/test_command.py | paulfurley/Mailpile | f89611d916e41e74dd00997327a2c2d042a96399 | [
"Apache-2.0"
] | null | null | null | tests/test_command.py | paulfurley/Mailpile | f89611d916e41e74dd00997327a2c2d042a96399 | [
"Apache-2.0"
] | null | null | null | import unittest
import mailpile
from mock import patch
from mailpile.commands import Action as action
from tests import MailPileUnittest
if __name__ == '__main__':
unittest.main()
| 32.378882 | 87 | 0.619029 |
1c5fd36ae0b1a46a987890321b0748ee13ed63f6 | 7,739 | py | Python | navrep/envs/rosnavtrainencodedenv.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | null | null | null | navrep/envs/rosnavtrainencodedenv.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | null | null | null | navrep/envs/rosnavtrainencodedenv.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | null | null | null | from gym import spaces
import numpy as np
from scipy import interpolate
import yaml
from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.rosnav_models.utils.reward import RewardCalculator
from navrep.rosnav_models.utils.reward import RewardCalculator | 35.663594 | 127 | 0.580178 |
1c6030cb89b906c901110530b42acd2d1d95f2a5 | 9,789 | py | Python | pdm/models/repositories.py | gaojiuli/pdm | 9aedd12e864b57826e850a10eeea45900bb62aad | [
"MIT"
] | 1 | 2021-02-04T19:43:38.000Z | 2021-02-04T19:43:38.000Z | pdm/models/repositories.py | gaojiuli/pdm | 9aedd12e864b57826e850a10eeea45900bb62aad | [
"MIT"
] | null | null | null | pdm/models/repositories.py | gaojiuli/pdm | 9aedd12e864b57826e850a10eeea45900bb62aad | [
"MIT"
] | null | null | null | from __future__ import annotations
import sys
from functools import wraps
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
from pdm._types import CandidateInfo, Source
from pdm.context import context
from pdm.exceptions import CandidateInfoNotFound, CorruptedCacheError
from pdm.models.candidates import Candidate
from pdm.models.requirements import (
Requirement,
filter_requirements_with_extras,
parse_requirement,
)
from pdm.models.specifiers import PySpecSet, SpecifierSet
from pdm.utils import allow_all_wheels
if TYPE_CHECKING:
from pdm.models.environment import Environment
| 39.156 | 88 | 0.629073 |
1c6036ce4a4bea03f2bf60037b8ba69bf71a83e1 | 713 | py | Python | tests/backends/test_cookie.py | euri10/starsessions | 6bd258a0f94d30b6ec4a8da41910f97c5dabbe54 | [
"MIT"
] | 31 | 2021-07-15T13:00:06.000Z | 2022-03-17T08:25:52.000Z | tests/backends/test_cookie.py | euri10/starsessions | 6bd258a0f94d30b6ec4a8da41910f97c5dabbe54 | [
"MIT"
] | 6 | 2021-09-01T15:25:20.000Z | 2022-03-13T07:29:19.000Z | tests/backends/test_cookie.py | euri10/starsessions | 6bd258a0f94d30b6ec4a8da41910f97c5dabbe54 | [
"MIT"
] | 5 | 2021-08-19T04:46:35.000Z | 2022-03-09T15:27:22.000Z | import pytest
from starsessions import SessionBackend
| 27.423077 | 88 | 0.775596 |
1c60d6b7074a5670b3d1308323fd21a043a33869 | 4,888 | py | Python | sqlalchemy_dremio/db.py | thbeh/sqlalchemy_dremio | 180169a86200977a8087d39afe67d3594bd66523 | [
"Apache-2.0"
] | 14 | 2020-04-19T16:14:37.000Z | 2021-11-14T01:45:51.000Z | sqlalchemy_dremio/db.py | thbeh/sqlalchemy_dremio | 180169a86200977a8087d39afe67d3594bd66523 | [
"Apache-2.0"
] | 13 | 2020-04-18T14:44:49.000Z | 2022-03-14T13:45:22.000Z | sqlalchemy_dremio/db.py | thbeh/sqlalchemy_dremio | 180169a86200977a8087d39afe67d3594bd66523 | [
"Apache-2.0"
] | 6 | 2020-04-29T10:18:59.000Z | 2021-08-19T13:46:30.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from pyarrow import flight
from sqlalchemy_dremio.exceptions import Error, NotSupportedError
from sqlalchemy_dremio.flight_auth import HttpDremioClientAuthHandler
from sqlalchemy_dremio.query import execute
logger = logging.getLogger(__name__)
paramstyle = 'qmark'
def check_closed(f):
"""Decorator that checks if connection/cursor is closed."""
return g
def check_result(f):
"""Decorator that checks if the cursor has results from `execute`."""
return d
class Cursor(object):
"""Connection cursor."""
| 25.591623 | 115 | 0.618658 |
1c61e6c641ff5d3b13cd3eb58254039918bc75f6 | 2,081 | py | Python | docker-images/rasa2/snips_services/tts_server.py | sanyaade-machine-learning/opensnips_original | 3c7d4aa2ef7dec7b0b8c532a537b79c3ef9df7cc | [
"MIT"
] | 57 | 2017-12-28T22:50:20.000Z | 2022-01-25T16:05:36.000Z | docker-images/rasa2/snips_services/tts_server.py | sanyaade-machine-learning/opensnips_original | 3c7d4aa2ef7dec7b0b8c532a537b79c3ef9df7cc | [
"MIT"
] | 28 | 2018-04-18T06:45:20.000Z | 2022-03-08T22:50:50.000Z | docker-images/rasa2/snips_services/tts_server.py | sanyaade-machine-learning/opensnips_original | 3c7d4aa2ef7dec7b0b8c532a537b79c3ef9df7cc | [
"MIT"
] | 18 | 2017-12-27T01:57:14.000Z | 2021-03-02T14:13:06.000Z | #!/opt/rasa/anaconda/bin/python
# -*-: coding utf-8 -*-
""" Snips core and nlu server. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import time
import os
from socket import error as socket_error
from SnipsMqttServer import SnipsMqttServer
import paho.mqtt.client as mqtt
from thread_handler import ThreadHandler
import sys,warnings
# apt-get install sox libsox-fmt-all
import sox
server = SnipsTTSServer()
server.start()
| 29.728571 | 166 | 0.605478 |
1c625b305422a96fe496b35f015f87dde84dd1cd | 462 | py | Python | gtd/migrations/0018_context_color.py | jimbofreedman/naggingnelly-api | 510d801791dcce39560bac227c12e5f6d9e80dcc | [
"BSD-3-Clause"
] | null | null | null | gtd/migrations/0018_context_color.py | jimbofreedman/naggingnelly-api | 510d801791dcce39560bac227c12e5f6d9e80dcc | [
"BSD-3-Clause"
] | null | null | null | gtd/migrations/0018_context_color.py | jimbofreedman/naggingnelly-api | 510d801791dcce39560bac227c12e5f6d9e80dcc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-08-02 17:53
from __future__ import unicode_literals
from django.db import migrations, models
| 22 | 67 | 0.614719 |
1c627c266e817eb089303a3e29f35bf34a1b6c4c | 6,652 | py | Python | neuralintents/main.py | nitori/neuralintents | 7a63075fbdca24ec6a6e5281552f64325dd279ff | [
"MIT"
] | null | null | null | neuralintents/main.py | nitori/neuralintents | 7a63075fbdca24ec6a6e5281552f64325dd279ff | [
"MIT"
] | null | null | null | neuralintents/main.py | nitori/neuralintents | 7a63075fbdca24ec6a6e5281552f64325dd279ff | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
import random
import json
import pickle
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import load_model
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)
| 33.766497 | 109 | 0.599519 |
1c63f92718131c9edb3951c411929fc66600dca1 | 607 | py | Python | cogs/TieThePie.py | Engineer152/Engineer-Bot | 9654666776d5ba91b1c8afdb32c86a7aedad7143 | [
"MIT"
] | null | null | null | cogs/TieThePie.py | Engineer152/Engineer-Bot | 9654666776d5ba91b1c8afdb32c86a7aedad7143 | [
"MIT"
] | null | null | null | cogs/TieThePie.py | Engineer152/Engineer-Bot | 9654666776d5ba91b1c8afdb32c86a7aedad7143 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
client = commands.Bot(command_prefix='your prefix',owner_ids = {your user id},case_insensitive=True )
| 33.722222 | 224 | 0.742998 |
1c6537848455d77ed4e22e5c61b4d2a5153fa5e0 | 3,359 | py | Python | python/lsst/eotest/simulation/generate_Fe55_images.py | tguillemLSST/eotest | c6f150984fa5dff85b9805028645bf46fc846f11 | [
"BSD-3-Clause-LBNL"
] | 3 | 2016-04-21T07:05:45.000Z | 2020-08-05T08:37:37.000Z | python/lsst/eotest/simulation/generate_Fe55_images.py | tguillemLSST/eotest | c6f150984fa5dff85b9805028645bf46fc846f11 | [
"BSD-3-Clause-LBNL"
] | 70 | 2015-03-26T09:48:53.000Z | 2020-04-22T16:29:43.000Z | python/lsst/eotest/simulation/generate_Fe55_images.py | tguillemLSST/eotest | c6f150984fa5dff85b9805028645bf46fc846f11 | [
"BSD-3-Clause-LBNL"
] | 5 | 2017-08-15T20:52:44.000Z | 2022-03-25T12:54:07.000Z | """
@brief Generate Fe55 images and associated darks and bias images
according to section 5.4 of the E/O document (Dec 19, 2012 version).
@author J. Chiang <jchiang@slac.stanford.edu>
"""
import os
import numpy as np
from sim_inputs import *
from sim_tools import *
if __name__ == '__main__':
    # Ten exposures with times spread linearly from 1 s to 5 s.
    nexp = 10
    exptimes = np.linspace(1, 5, nexp)
    # X-ray count scales with exposure time at 1000 counts per second.
    nxrays = [int(x*1000) for x in exptimes]
    # Write the simulated images to the current directory with sensor id 'xxx-xx'.
    generate_Fe55_images(exptimes, nxrays, '.', 'xxx-xx')
| 39.988095 | 74 | 0.61953 |
1c6615aacc368931eb1fadc13190d4aad9dc4cda | 32,663 | py | Python | pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py | rayguan97/M3DETR | cb76890a28c1555f2c0138030e0a432df6ee731b | [
"Apache-2.0"
] | 21 | 2022-01-21T11:02:15.000Z | 2022-03-08T14:55:30.000Z | pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py | rayguan97/M3DETR | cb76890a28c1555f2c0138030e0a432df6ee731b | [
"Apache-2.0"
] | 2 | 2022-01-21T08:10:49.000Z | 2022-01-21T23:44:40.000Z | pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py | rayguan97/M3DETR | cb76890a28c1555f2c0138030e0a432df6ee731b | [
"Apache-2.0"
] | 3 | 2022-01-21T11:41:55.000Z | 2022-01-24T14:20:19.000Z | import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ...backbones_2d.transformer import TransformerEncoderLayer3D, TransformerEncoder
from ...roi_heads.target_assigner.proposal_target_layer import ProposalTargetLayer
from ...model_utils.model_nms_utils import class_agnostic_nms
def bilinear_interpolate_torch(im, x, y):
    """Bilinearly sample ``im`` at fractional (x, y) locations.

    Args:
        im: (H, W, C) tensor indexed as [y, x].
        x: (N) fractional column coordinates.
        y: (N) fractional row coordinates.

    Returns:
        (N, C) interpolated values.
    """
    # Integer corner indices; clamp AFTER the +1 so out-of-range queries
    # collapse onto the border pixel exactly like the naive formulation.
    x_lo = torch.floor(x).long()
    y_lo = torch.floor(y).long()
    x_hi = torch.clamp(x_lo + 1, 0, im.shape[1] - 1)
    y_hi = torch.clamp(y_lo + 1, 0, im.shape[0] - 1)
    x_lo = torch.clamp(x_lo, 0, im.shape[1] - 1)
    y_lo = torch.clamp(y_lo, 0, im.shape[0] - 1)

    # Corner features, each (N, C).
    f_a = im[y_lo, x_lo]
    f_b = im[y_hi, x_lo]
    f_c = im[y_lo, x_hi]
    f_d = im[y_hi, x_hi]

    # Interpolation weights from the clamped corner coordinates.
    w_a = (x_hi.type_as(x) - x) * (y_hi.type_as(y) - y)
    w_b = (x_hi.type_as(x) - x) * (y - y_lo.type_as(y))
    w_c = (x - x_lo.type_as(x)) * (y_hi.type_as(y) - y)
    w_d = (x - x_lo.type_as(x)) * (y - y_lo.type_as(y))

    # Weighted sum; unsqueeze broadcasts each (N,) weight over channels.
    return (f_a * w_a.unsqueeze(-1) + f_b * w_b.unsqueeze(-1)
            + f_c * w_c.unsqueeze(-1) + f_d * w_d.unsqueeze(-1))
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
    """Keep only points that lie near some ROI box.

    A point survives when its distance to the closest ROI center is below
    half that ROI's diagonal plus ``sample_radius_with_roi``.

    Args:
        rois: (M, 7 + C) boxes; columns 0:3 are centers, 3:6 are sizes.
        points: (N, 3) point coordinates.
        sample_radius_with_roi: extra margin added around each box.
        num_max_points_of_part: chunk size bounding the peak memory of the
            (N, M) distance matrix.

    Returns:
        sampled_points: (N_out, 3) surviving points (the first input point
            when nothing survives, so the result is never empty).
        point_mask: (N,) boolean mask over the input points.
    """
    def _near_roi_mask(chunk):
        # (n_chunk, M) center distances -> nearest ROI for every point.
        dist = (chunk[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
        nearest_dist, nearest_idx = dist.min(dim=-1)
        half_diag = (rois[nearest_idx, 3:6] / 2).norm(dim=-1)
        return nearest_dist < half_diag + sample_radius_with_roi

    if points.shape[0] < num_max_points_of_part:
        point_mask = _near_roi_mask(points)
    else:
        # Process chunk-wise so the full (N, M) matrix is never materialized.
        chunk_masks = [
            _near_roi_mask(points[start:start + num_max_points_of_part])
            for start in range(0, points.shape[0], num_max_points_of_part)
        ]
        point_mask = torch.cat(chunk_masks, dim=0)

    sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
    return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
    """
    Farthest-point sampling applied per angular sector around the origin.

    Points are bucketed into ``num_sectors`` wedges by their XY polar angle,
    then FPS is run on all non-empty buckets in one stacked call, with each
    sector's sampling quota proportional to its share of the points.

    Args:
        points: (N, 3)
        num_sampled_points: int
        num_sectors: int

    Returns:
        sampled_points: (N_out, 3)
    """
    sector_size = np.pi * 2 / num_sectors
    # Shift atan2's [-pi, pi) output into [0, 2*pi) before bucketing.
    point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
    # NOTE(review): the clamp upper bound is num_sectors (not num_sectors - 1),
    # so a point with angle exactly 2*pi could land in bucket num_sectors,
    # which the loop below never visits -- confirm this is intended.
    sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)
    xyz_points_list = []
    xyz_batch_cnt = []
    num_sampled_points_list = []
    for k in range(num_sectors):
        mask = (sector_idx == k)
        cur_num_points = mask.sum().item()
        if cur_num_points > 0:
            xyz_points_list.append(points[mask])
            xyz_batch_cnt.append(cur_num_points)
            # Per-sector quota proportional to the sector's population,
            # never exceeding the number of points actually in the sector.
            ratio = cur_num_points / points.shape[0]
            num_sampled_points_list.append(
                min(cur_num_points, math.ceil(ratio * num_sampled_points))
            )
    if len(xyz_batch_cnt) == 0:
        # Fallback: no sector claimed any point; sample from all points.
        xyz_points_list.append(points)
        xyz_batch_cnt.append(len(points))
        num_sampled_points_list.append(num_sampled_points)
        print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
    xyz = torch.cat(xyz_points_list, dim=0)
    xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
    sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
    # Single stacked FPS call over all sectors (custom pointnet2 op).
    sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
        xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
    ).long()
    sampled_points = xyz[sampled_pt_idxs]
    return sampled_points
| 42.585398 | 174 | 0.608732 |
1c66241d3877e47cd775f05edef325a5a8e7b8d8 | 451 | py | Python | metabot2txt/display.py | HeitorBoschirolli/metabot2txt | 845c6b1042f7e586cf80de56e78c976e3c919f0a | [
"MIT"
] | null | null | null | metabot2txt/display.py | HeitorBoschirolli/metabot2txt | 845c6b1042f7e586cf80de56e78c976e3c919f0a | [
"MIT"
] | null | null | null | metabot2txt/display.py | HeitorBoschirolli/metabot2txt | 845c6b1042f7e586cf80de56e78c976e3c919f0a | [
"MIT"
] | null | null | null | import os
| 22.55 | 64 | 0.536585 |
1c66f42291c5ccace3a10ed12cc3202e55caf594 | 47,394 | py | Python | cogs/errors.py | i1470s/IVRY | 922908b19b57881ad6fef2b45fabe6bc1ff7a298 | [
"MIT"
] | 3 | 2020-10-03T20:53:39.000Z | 2020-10-11T07:58:57.000Z | cogs/errors.py | i1470s/IVRY | 922908b19b57881ad6fef2b45fabe6bc1ff7a298 | [
"MIT"
] | 3 | 2020-10-11T22:23:30.000Z | 2020-10-14T16:54:37.000Z | cogs/errors.py | i1470s/IVRY | 922908b19b57881ad6fef2b45fabe6bc1ff7a298 | [
"MIT"
] | null | null | null | #PRIMARY IMPORTS
import discord, os, datetime, sys, json, traceback, logging
#SECONDARY IMPORTS
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord.ext import commands
from data import config
#LOGGING
logger = logging.getLogger("ivry")
logger.debug("errors.py Started")
| 65.551867 | 138 | 0.531502 |
1c672bcb64cc19b33318c71e9093a770db7e263e | 1,315 | py | Python | Using Python to Access Web Data/Problem 6_Extracting Data from JSON 5.py | Karoline0097/University-of-Michigan-Python-for-Everybody | 8b3999638c0c074ae3c1120de87cf8f31740ebb8 | [
"MIT"
] | null | null | null | Using Python to Access Web Data/Problem 6_Extracting Data from JSON 5.py | Karoline0097/University-of-Michigan-Python-for-Everybody | 8b3999638c0c074ae3c1120de87cf8f31740ebb8 | [
"MIT"
] | null | null | null | Using Python to Access Web Data/Problem 6_Extracting Data from JSON 5.py | Karoline0097/University-of-Michigan-Python-for-Everybody | 8b3999638c0c074ae3c1120de87cf8f31740ebb8 | [
"MIT"
] | null | null | null | ## Problem 5: Extracting Data from JSON
# Example: http://py4e-data.dr-chuck.net/comments_42.json
# data consists of a number of names and comment counts in JSON
# {
# comments: [
# {
# name: "Matthias"
# count: 97
# },
# {
# name: "Geomer"
# count: 97
# }
# ...
# ]
# }
import urllib.request, urllib.parse, urllib.error
import json
import ssl

# Disable SSL certificate verification (course servers use odd certs).
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE

# Ask the user where to fetch the JSON document from.
url = input('Enter URL: ')

# Fetch the document and decode the UTF-8 bytes into a str.
raw_text = urllib.request.urlopen(url, context=ssl_ctx).read().decode()

# Parse the JSON text into a Python dictionary.
document = json.loads(raw_text)

# Sum the per-user comment counts; document['comments'] is a list of
# {'name': ..., 'count': ...} dictionaries.
total = 0
for entry in document['comments']:
    print('Name:', entry['name'])
    print('Count:', entry['count'])
    total = total + entry['count']

# Example: Total count 2553
print('Total Count:', total)
1c67babe06797acaab8d0e9b738376ce3cb3ee88 | 376 | py | Python | lessons/day_05/python/app.py | jiaguilera/a-walk-in-graphql | ed4f44b4f4bf283cc7342141eb8127a2745ea2d7 | [
"MIT"
] | 16 | 2020-06-16T17:12:16.000Z | 2021-12-03T14:19:38.000Z | lessons/day_05/python/app.py | martinarnesi/a-walk-in-graphql | 56cd949cbeb4c4322882bd15398a867b16900ccd | [
"MIT"
] | 8 | 2020-06-11T21:53:03.000Z | 2020-07-26T01:47:10.000Z | lessons/day_05/python/app.py | martinarnesi/a-walk-in-graphql | 56cd949cbeb4c4322882bd15398a867b16900ccd | [
"MIT"
] | 9 | 2020-06-15T13:09:57.000Z | 2022-03-06T14:49:17.000Z | from ariadne import make_executable_schema, load_schema_from_path
from ariadne.asgi import GraphQL
from resolvers import query, skill, person, eye_color, mutation
# Load the SDL type definitions from the GraphQL schema file on disk.
type_defs = load_schema_from_path("./schema.gql")
# Bind the resolvers imported above to the schema types.
schema = make_executable_schema(
    type_defs, query, skill, person, eye_color, mutation
)
# ASGI application entry point; debug=True includes error details in responses.
app = GraphQL(schema, debug=True)
1c6bbe01f2a25c56bbd4e7b84c94d14c49d0cee9 | 1,127 | py | Python | src/__main__.py | andreaswatch/piTomation | 140bff77ad0b84ad17898106c7be7dc48a2d0783 | [
"MIT"
] | null | null | null | src/__main__.py | andreaswatch/piTomation | 140bff77ad0b84ad17898106c7be7dc48a2d0783 | [
"MIT"
] | null | null | null | src/__main__.py | andreaswatch/piTomation | 140bff77ad0b84ad17898106c7be7dc48a2d0783 | [
"MIT"
] | null | null | null | import importlib
import time
from pathlib import Path
import os
import sys
print("Import plugins ..")
import_plugins()
print("Import app ..")
import modules.app.App as piTomation
app: piTomation.App
print("Start app ..")
app = piTomation.App()
#try:
# app = piTomation.App()
#except Exception as ex:
# print(ex)
# exit()
try:
while not app.is_disposed:
time.sleep(1)
except Exception as ex:
print(ex)
| 21.673077 | 92 | 0.624667 |
1c6d48cdfb7c008c470c879e2a06b5ce0223008d | 1,208 | py | Python | src/decanter/core/extra/utils.py | MatthewK3023/decanter-ai-core-sdk | d09a0316d5c3f28d55fd0dd83ef7f3e141d421de | [
"MIT"
] | null | null | null | src/decanter/core/extra/utils.py | MatthewK3023/decanter-ai-core-sdk | d09a0316d5c3f28d55fd0dd83ef7f3e141d421de | [
"MIT"
] | null | null | null | src/decanter/core/extra/utils.py | MatthewK3023/decanter-ai-core-sdk | d09a0316d5c3f28d55fd0dd83ef7f3e141d421de | [
"MIT"
] | null | null | null | """
Functions support other modules.
"""
import uuid
def check_response(response, key=None):
    """Validate an API response.

    Ensures the HTTP status code is a 2xx success and, when ``key`` is
    given, that the JSON body contains it.

    Args:
        response: Response object exposing ``status_code`` and ``json()``.
        key: Optional key that must exist in the JSON payload.

    Returns:
        The ``response`` passed in, unchanged.

    Raises:
        Exception: When the status code is not in the 2xx range.
        KeyError: When ``key`` is missing from the JSON body.
    """
    status = response.status_code
    success = 200 <= status < 300
    if not success:
        raise Exception('[Decanter Core response Error] Request Error')
    if key is not None:
        if key not in response.json():
            raise KeyError('[Decanter Core response Error] No key value')
    return response
def gen_id(type_, name):
    """Return ``name``, or synthesize '<type_>_<8-char uuid>' when it is None.

    Returns:
        string
    """
    if name is not None:
        return name
    suffix = str(uuid.uuid4())[:8]
    return '%s_%s' % (type_, suffix)
def isnotebook():
    """Return True if SDK is running on Jupyter Notebook."""
    try:
        # get_ipython exists only inside IPython-based sessions; anywhere
        # else the lookup raises NameError.
        shell_name = type(get_ipython()).__name__
    except NameError:
        return False  # plain Python interpreter
    # Only the ZMQ shell (Jupyter notebook / qtconsole) counts; the
    # terminal IPython shell and anything unknown do not.
    return shell_name == 'ZMQInteractiveShell'
| 23.686275 | 79 | 0.626656 |
1c6e264ceb5ab2e61f2f2b6e3294aa8858b8f9fd | 1,064 | py | Python | 03/03.py | stevenpclark/aoc2021 | 726009e5a2a87025943a736e8676784ca7cdc8bd | [
"MIT"
] | 1 | 2021-11-30T05:25:58.000Z | 2021-11-30T05:25:58.000Z | 03/03.py | stevenpclark/aoc2021 | 726009e5a2a87025943a736e8676784ca7cdc8bd | [
"MIT"
] | null | null | null | 03/03.py | stevenpclark/aoc2021 | 726009e5a2a87025943a736e8676784ca7cdc8bd | [
"MIT"
] | null | null | null | import numpy as np
if __name__ == '__main__':
main()
| 20.461538 | 67 | 0.535714 |
1c6f742ff7bb6409fa5b1d806e2433034d2aa878 | 1,096 | py | Python | distillation/build_student.py | fengxiaoshuai/CNN_model_optimizer | 4c48420989ffe31a4075d36a5133fee0d999466a | [
"Apache-2.0"
] | null | null | null | distillation/build_student.py | fengxiaoshuai/CNN_model_optimizer | 4c48420989ffe31a4075d36a5133fee0d999466a | [
"Apache-2.0"
] | 1 | 2021-01-05T10:41:24.000Z | 2021-01-05T10:41:24.000Z | distillation/build_student.py | fengxiaoshuai/CNN_model_optimizer | 4c48420989ffe31a4075d36a5133fee0d999466a | [
"Apache-2.0"
] | 1 | 2020-08-07T02:56:20.000Z | 2020-08-07T02:56:20.000Z | import tensorflow as tf
import numpy as np
with tf.variable_scope("student"):
    # Fixed batch of 10: one-hot labels over 10 classes and 224x224 RGB images.
    input_label = tf.placeholder(dtype=tf.float32, shape=[10, 10], name="label")
    input_image = tf.placeholder(dtype=tf.float32, shape=[10, 224, 224, 3], name="input")
    # Three 3x3 'same'-padded conv layers with 64 filters each
    # (no activation argument given, so the layers are linear).
    conv1 = tf.layers.conv2d(inputs=input_image, filters=64, kernel_size=[3, 3], padding='same')
    conv2 = tf.layers.conv2d(conv1, filters=64, kernel_size=[3, 3], padding='same')
    conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=[3, 3], padding='same')
    # Flatten all non-batch dimensions for the dense head.
    shape = int(np.prod(conv3.get_shape()[1:]))
    flat = tf.reshape(conv3, [-1, shape])
    fc1 = tf.layers.dense(flat, units=100)
    fc2 = tf.layers.dense(fc1, units=10, name="logit")
    probability = tf.nn.softmax(fc2)
    # Cross-entropy against the one-hot labels (applies softmax internally,
    # so it takes the raw logits fc2).
    loss = tf.losses.softmax_cross_entropy(input_label, fc2)
    print(input_label)
# Dummy all-ones input batch for a smoke-test forward pass.
image = np.ones(shape=[10, 224, 224, 3])
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    # Save the freshly initialised weights, then print class probabilities.
    saver = tf.train.Saver()
    saver.save(sess, "./student/student")
    print(sess.run(probability, feed_dict={input_image: image}))
1c70409e75cdadbb3949d0d1cde6a6029abd620b | 5,365 | py | Python | code/statistical_tests.py | ChamiLamelas/Math36B_FinalProject | 0bdb5d17769553a4edb163534c21cc641860a07a | [
"MIT"
] | null | null | null | code/statistical_tests.py | ChamiLamelas/Math36B_FinalProject | 0bdb5d17769553a4edb163534c21cc641860a07a | [
"MIT"
] | null | null | null | code/statistical_tests.py | ChamiLamelas/Math36B_FinalProject | 0bdb5d17769553a4edb163534c21cc641860a07a | [
"MIT"
] | null | null | null | import scipy.stats
import numpy as np
def f_test(sample_x, sample_y, larger_varx_alt):
    """Classical one-sided F test comparing the variances of two samples.

    Args:
        sample_x: random sample x1,...,xnx with sample variance Sx^2 and
            underlying variance ox^2.
        sample_y: random sample y1,...,yny with sample variance Sy^2 and
            underlying variance oy^2.
        larger_varx_alt: True for the alternative ox^2 > oy^2,
            False for ox^2 < oy^2.

    Returns:
        (f_value, p_value): the statistic Sx^2 / Sy^2 and its one-sided
        p-value under an F distribution with nx-1, ny-1 degrees of freedom.
    """
    # Unbiased sample variances (n-1 denominator).
    var_x = np.var(sample_x, ddof=1)
    var_y = np.var(sample_y, ddof=1)
    statistic = var_x / var_y
    df_x = len(sample_x) - 1
    df_y = len(sample_y) - 1
    # P(F < statistic) under the null of equal variances.
    left_tail = scipy.stats.f.cdf(statistic, df_x, df_y)
    # Larger Sx^2/Sy^2 favors ox^2 > oy^2 (right tail); smaller favors
    # ox^2 < oy^2 (left tail).
    return (statistic, 1 - left_tail) if larger_varx_alt else (statistic, left_tail)
def f1_test(sample_x, sample_y, larger_varx_alt):
    """Shoemaker's F1 test from 'Fixing the F Test for Equal Variances'.

    Same statistic as the plain F test, but the degrees of freedom are
    corrected using the pooled kurtosis, which makes the test robust to
    departures from normality.

    Args:
        sample_x: random sample x1,...,xnx with sample variance Sx^2 and
            underlying variance ox^2.
        sample_y: random sample y1,...,yny with sample variance Sy^2 and
            underlying variance oy^2.
        larger_varx_alt: True for the alternative ox^2 > oy^2,
            False for ox^2 < oy^2.

    Returns:
        p_value: one-sided p-value under an F distribution with the
        kurtosis-corrected rx-1, ry-1 degrees of freedom.
    """
    nx, ny = len(sample_x), len(sample_y)
    # Unbiased sample variances (n-1 denominator).
    var_x = np.var(sample_x, ddof=1)
    var_y = np.var(sample_y, ddof=1)
    statistic = var_x / var_y

    mean_x = np.mean(sample_x)
    mean_y = np.mean(sample_y)
    # Pooled fourth moment and pooled variance, as defined below
    # equation (1) of the Shoemaker paper.
    fourth_moment = (np.sum((sample_x - mean_x) ** 4)
                     + np.sum((sample_y - mean_y) ** 4)) / (nx + ny)
    pooled_var = ((nx - 1) * var_x + (ny - 1) * var_y) / (nx + ny)
    kurtosis_ratio = fourth_moment / pooled_var ** 2

    # Kurtosis-corrected degrees of freedom, equation (1).
    rx = 2 * nx / (kurtosis_ratio - (nx - 3) / (nx - 1))
    ry = 2 * ny / (kurtosis_ratio - (ny - 3) / (ny - 1))

    left_tail = scipy.stats.f.cdf(statistic, rx - 1, ry - 1)
    # Right tail for ox^2 > oy^2, left tail for ox^2 < oy^2.
    return 1 - left_tail if larger_varx_alt else left_tail
def count_five(sample_x, sample_y, center):
    """Extreme counts C_x, C_y for the Count Five dispersion test.

    Args:
        sample_x: random sample x1,...,xn.
        sample_y: random sample y1,...,ym.
        center: 'mean' or 'median' -- the centering statistic mu.

    Returns:
        (extreme_count_x, extreme_count_y): the number of |x_i - mu_x|
        deviations exceeding every |y_j - mu_y| deviation, and vice versa,
        per equation (1) of 'A Quick, Compact, Two-Sample Dispersion Test:
        Count Five'.

    Raises:
        ValueError: If ``center`` is neither 'mean' nor 'median'.
    """
    centering_funcs = {'mean': np.mean, 'median': np.median}
    if center not in centering_funcs:
        raise ValueError('Invalid center %s' % (center))
    locate = centering_funcs[center]
    # Absolute deviations of each sample from its own center.
    dev_x = np.abs(np.array(sample_x) - locate(sample_x))
    dev_y = np.abs(np.array(sample_y) - locate(sample_y))
    # Count deviations in one sample strictly exceeding ALL deviations in
    # the other sample (equation (1) of the Count Five paper).
    extreme_count_x = np.sum(np.where(dev_x > np.max(dev_y), 1, 0))
    extreme_count_y = np.sum(np.where(dev_y > np.max(dev_x), 1, 0))
    return extreme_count_x, extreme_count_y
| 41.269231 | 261 | 0.654054 |
1c71ba0a22523d640266f7845ef799a8f73cbe39 | 243 | py | Python | pawpyseed/compiler.py | akashkumarsingh612/pawpyseed | 6f5aa0b8ca8c28a0221e5256afeb939c3344560b | [
"BSD-3-Clause"
] | null | null | null | pawpyseed/compiler.py | akashkumarsingh612/pawpyseed | 6f5aa0b8ca8c28a0221e5256afeb939c3344560b | [
"BSD-3-Clause"
] | null | null | null | pawpyseed/compiler.py | akashkumarsingh612/pawpyseed | 6f5aa0b8ca8c28a0221e5256afeb939c3344560b | [
"BSD-3-Clause"
] | null | null | null | import os, subprocess
def compile_core(comp, scilib):
    """
    ATTENTION, NOT FINISHED

    Runs ``make pawpy_<comp>`` in the current working directory.
    NOTE(review): ``scilib`` is accepted but never used here, and a second
    ``compile_core`` defined immediately below shadows this definition --
    confirm which implementation is intended before relying on either.
    """
    subprocess.call(("make pawpy_%s"%comp).split())
def compile_core(comp, scilib):
    """
    ATTENTION, NOT FINISHED

    Runs ``make hfc`` in the current working directory.
    NOTE(review): this redefinition shadows the earlier ``compile_core``
    above (same name), making the first unreachable, and neither parameter
    is used here -- likely one of the two functions should be renamed.
    """
    subprocess.call("make hfc".split())
1c720e3c45ed8efa4771cbbb3a3b55d0385c9d41 | 1,125 | py | Python | finnhub_python/socket.py | humdings/finnhub-python | ca98681e5a529598e9d17e3ebc2f6d49c64b54de | [
"MIT"
] | null | null | null | finnhub_python/socket.py | humdings/finnhub-python | ca98681e5a529598e9d17e3ebc2f6d49c64b54de | [
"MIT"
] | null | null | null | finnhub_python/socket.py | humdings/finnhub-python | ca98681e5a529598e9d17e3ebc2f6d49c64b54de | [
"MIT"
] | null | null | null | """
Example usage of Finnhub socket API.
"""
from __future__ import print_function # Py2 compat
import websocket
from finnhub_python.utils import get_finnhub_api_key
tick_file = 'raw_ticks.txt'
token = get_finnhub_api_key()
SYMBOLS = [
"AAPL",
"SPY",
"VXX",
"BINANCE:ETHUSDT",
"BINANCE:BTCUSDT"
]
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("wss://ws.finnhub.io?token=" + token,
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.on_open = on_open
ws.run_forever() | 20.089286 | 69 | 0.604444 |
1c722363623f21dde32f8eb4058f20a248ddb2fd | 2,570 | py | Python | pycovjson/cli/convert.py | RileyWilliams/pycovjson | 741737f53ef18ef1476eccb5e626866843c152bd | [
"BSD-3-Clause"
] | 10 | 2016-08-16T17:46:30.000Z | 2021-04-06T22:03:58.000Z | pycovjson/cli/convert.py | RileyWilliams/pycovjson | 741737f53ef18ef1476eccb5e626866843c152bd | [
"BSD-3-Clause"
] | 46 | 2016-07-21T13:14:14.000Z | 2020-07-02T09:16:29.000Z | pycovjson/cli/convert.py | RileyWilliams/pycovjson | 741737f53ef18ef1476eccb5e626866843c152bd | [
"BSD-3-Clause"
] | 6 | 2016-07-29T09:56:37.000Z | 2020-08-23T18:20:47.000Z | """
Pycovjson - Command line interface
Author: rileywilliams
Version: 0.1.0
"""
import argparse
from pycovjson.write import Writer
from pycovjson.read_netcdf import NetCDFReader as Reader
def main():
    """
    Command line interface for pycovjson - Converts Scientific Data Formats
    into CovJSON and saves to disk.

    :argument -i: Input file path.
    :argument -o: Output file name.
    :argument -t: Use Tiling.
    :argument -v: Which variable to populate coverage with.
    :argument -s: [tile shape]: Tile shape.
    :argument -n: Use interactive mode.
    :argument -u: MongoDB URL
    """
    parser = argparse.ArgumentParser(
        description='Convert Scientific Data Formats into CovJSON.')
    parser.add_argument('-i', '--input', dest='inputfile',
                        help='Name of input file', required=True)
    parser.add_argument('-o', '--output', dest='outputfile',
                        help='Name and location of output file',
                        default='coverage.covjson')
    parser.add_argument('-t', '--tiled', action='store_true', help='Apply tiling')
    parser.add_argument('-s', '--shape', nargs='+',
                        help='Tile shape, list', type=int)
    parser.add_argument('-v', dest='variable',
                        help='Variable to populate coverage with', required=True)
    parser.add_argument('-n', '--interactive', action='store_true',
                        help='Enter interactive mode')
    parser.add_argument('-u', '--endpoint_url', dest='endpoint_url', nargs=1,
                        help='MongoDB endpoint for CovJSON persistence')
    args = parser.parse_args()
    inputfile = args.inputfile
    outputfile = args.outputfile
    variable = args.variable
    tiled = args.tiled
    tile_shape = args.shape
    interactive = args.interactive
    endpoint_url = args.endpoint_url

    if interactive:
        # BUG FIX: input() accepts a single prompt argument; the old call
        # passed Reader.get_axis(variable) as a second positional argument,
        # which raises TypeError at runtime.
        # NOTE(review): Reader.get_axis is called on the class, not an
        # instance — confirm it is a class/static method.
        axis = input('Which Axis? %s ' % Reader.get_axis(variable))

    # BUG FIX: when -t is given without -s, args.shape is None and the old
    # `len(tile_shape) == 0` test raised TypeError. `not tile_shape` covers
    # both None and an empty list.
    if tiled and not tile_shape:
        reader = Reader(inputfile)
        shape_list = reader.get_shape(variable)
        dims = reader.get_dimensions(variable)
        print(list(zip(dims, shape_list)))
        tile_shape = input(
            'Enter the shape tile shape as a list of comma separated integers')
        tile_shape = list(map(int, tile_shape.split(',')))
        print(tile_shape)

    # BUG FIX: the old fallback `outputfile = outputfile.default` dereferenced
    # None (AttributeError). Fall back to the parser's documented default.
    if outputfile is None:
        outputfile = 'coverage.covjson'

    Writer(outputfile, inputfile, [variable],
           tiled=tiled, tile_shape=tile_shape, endpoint_url=endpoint_url).write()
if __name__ == '__main__':
main()
| 36.714286 | 107 | 0.649027 |
1c72ce0a57a6b20d9f3b0b840d03685a73126b0e | 22,727 | py | Python | duels/duels.py | ridinginstyle00/redcogs | 216869935f322f7e5927740da22fa36f728c48db | [
"MIT"
] | 8 | 2016-08-23T16:56:17.000Z | 2021-07-24T16:44:31.000Z | duels/duels.py | ridinginstyle00/redcogs | 216869935f322f7e5927740da22fa36f728c48db | [
"MIT"
] | 1 | 2018-04-25T14:20:06.000Z | 2018-04-25T14:20:06.000Z | duels/duels.py | ridinginstyle00/redcogs | 216869935f322f7e5927740da22fa36f728c48db | [
"MIT"
] | 8 | 2016-07-26T21:36:44.000Z | 2019-08-03T16:38:57.000Z | import discord
from discord.ext import commands
from .utils import checks
from .utils.dataIO import dataIO
from __main__ import send_cmd_help
from __main__ import settings
from datetime import datetime
from random import choice
from random import sample
from copy import deepcopy
from collections import namedtuple, defaultdict
import os
import logging
import aiohttp
import asyncio
import time
from time import sleep
client = discord.Client()
def check_folders():
    """Ensure the data/duels storage directory exists, creating it if needed."""
    folder = "data/duels"
    if os.path.exists(folder):
        return
    print("Creating data/duels folder...")
    os.mkdir(folder)
| 44.215953 | 195 | 0.6267 |
1c741e6bc69fc8671df5a15c26f40ce7a3bf09f3 | 2,839 | py | Python | paranuara/citizens/models/citizens.py | SPLAYER-HD/Paranuara | 5a42f23d761e16e3b486ba04d9185551614f06a5 | [
"MIT"
] | null | null | null | paranuara/citizens/models/citizens.py | SPLAYER-HD/Paranuara | 5a42f23d761e16e3b486ba04d9185551614f06a5 | [
"MIT"
] | 4 | 2021-06-08T20:53:43.000Z | 2022-03-12T00:13:51.000Z | paranuara/citizens/models/citizens.py | SPLAYER-HD/RestServiceDjango | 5a42f23d761e16e3b486ba04d9185551614f06a5 | [
"MIT"
] | null | null | null | """Citizens model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
# models
from paranuara.companies.models import Company
# PostgreSQL fields
from django.contrib.postgres.fields import JSONField
# Utilities
from paranuara.utils.models import ParanuaraModel
| 22.007752 | 98 | 0.62205 |
1c75ba48ae7018192a5f6740f29aabe6961aa8fd | 103 | py | Python | tests/utils.py | niwibe/cobrascript | 4c6a193d8745771e5fb0e277394f83e47cc7ede8 | [
"BSD-3-Clause"
] | 1 | 2015-05-03T00:25:17.000Z | 2015-05-03T00:25:17.000Z | tests/utils.py | niwibe/cobrascript | 4c6a193d8745771e5fb0e277394f83e47cc7ede8 | [
"BSD-3-Clause"
] | null | null | null | tests/utils.py | niwibe/cobrascript | 4c6a193d8745771e5fb0e277394f83e47cc7ede8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from cobra.utils import normalize
| 14.714286 | 33 | 0.669903 |
1c770de3012ff3f97ad6bf07fd17d96b765a28e4 | 2,442 | py | Python | chess/rules.py | DevStrikerTech/Chess-Engine | f0d2e0fc48b820325b1826e4379bf0520c8d3b52 | [
"MIT"
] | 18 | 2021-01-26T19:21:45.000Z | 2021-01-27T00:32:49.000Z | chess/rules.py | KingCobra2018/Chess-Engine | f0d2e0fc48b820325b1826e4379bf0520c8d3b52 | [
"MIT"
] | null | null | null | chess/rules.py | KingCobra2018/Chess-Engine | f0d2e0fc48b820325b1826e4379bf0520c8d3b52 | [
"MIT"
] | 9 | 2021-01-26T19:51:20.000Z | 2021-01-26T22:39:28.000Z | import pygame
from chess.board import Board
from .variable_declaration import black_piece, white_piece, position_piece, board_square_size
| 29.421687 | 97 | 0.620393 |
1c77f1e65b1460f3b0a09bd95f3c03183aa1bcf6 | 1,542 | py | Python | kivygames/games/noughtsandcrosses/__init__.py | jonathanjameswatson/kivygames | 7636580956562af0814c973f94afede926cfa4b9 | [
"MIT"
] | null | null | null | kivygames/games/noughtsandcrosses/__init__.py | jonathanjameswatson/kivygames | 7636580956562af0814c973f94afede926cfa4b9 | [
"MIT"
] | null | null | null | kivygames/games/noughtsandcrosses/__init__.py | jonathanjameswatson/kivygames | 7636580956562af0814c973f94afede926cfa4b9 | [
"MIT"
] | null | null | null | import numpy as np
from kivygames.games import Game
import kivygames.games.noughtsandcrosses.c as c
| 26.135593 | 74 | 0.586252 |
1c7894b14ef779955e6bd0f109d8986f10e8fa84 | 1,206 | py | Python | 03-Decouvrez-POO/download_agents.py | gruiick/openclassrooms-py | add4b28eab8b311dea7c1d3915a22061f54326a9 | [
"BSD-2-Clause"
] | null | null | null | 03-Decouvrez-POO/download_agents.py | gruiick/openclassrooms-py | add4b28eab8b311dea7c1d3915a22061f54326a9 | [
"BSD-2-Clause"
] | null | null | null | 03-Decouvrez-POO/download_agents.py | gruiick/openclassrooms-py | add4b28eab8b311dea7c1d3915a22061f54326a9 | [
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/env python
import argparse
import json
import time
import urllib.error
import urllib.request
if __name__ == "__main__":
main()
| 30.923077 | 109 | 0.630182 |
1c78aef6937bac0c47b2a7aeef06915d8ec4cebe | 3,681 | py | Python | Commands/images.py | Mariobob/Proton | 7c5eab0251266ca1da83591d396b357bab692399 | [
"MIT"
] | null | null | null | Commands/images.py | Mariobob/Proton | 7c5eab0251266ca1da83591d396b357bab692399 | [
"MIT"
] | null | null | null | Commands/images.py | Mariobob/Proton | 7c5eab0251266ca1da83591d396b357bab692399 | [
"MIT"
] | null | null | null | import functools
import re
import asyncio
from io import BytesIO
from discord.ext import commands
import discord
from Utils import canvas
import random
| 41.359551 | 125 | 0.620212 |
1c79747bf1ea18f4c3b8be4f42301cb16c8ee8f3 | 223 | py | Python | labJS/conf.py | lpomfrey/django-labjs | f35346ec7f3b87ae24b2d7a01c06001ceb4173bc | [
"MIT"
] | null | null | null | labJS/conf.py | lpomfrey/django-labjs | f35346ec7f3b87ae24b2d7a01c06001ceb4173bc | [
"MIT"
] | null | null | null | labJS/conf.py | lpomfrey/django-labjs | f35346ec7f3b87ae24b2d7a01c06001ceb4173bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from appconf import AppConf
from django.conf import settings # noqa
| 18.583333 | 40 | 0.730942 |
1c7b1135efb3bd7f94a1f1a7d47294ebfd74cbde | 10,416 | py | Python | tests/test_nanoevents_vector.py | danbarto/coffea | 2b28e28f602f8b81a1449ee85578187a7f52b602 | [
"BSD-3-Clause"
] | null | null | null | tests/test_nanoevents_vector.py | danbarto/coffea | 2b28e28f602f8b81a1449ee85578187a7f52b602 | [
"BSD-3-Clause"
] | null | null | null | tests/test_nanoevents_vector.py | danbarto/coffea | 2b28e28f602f8b81a1449ee85578187a7f52b602 | [
"BSD-3-Clause"
] | null | null | null | import awkward as ak
from coffea.nanoevents.methods import vector
import pytest
ATOL = 1e-8
| 28.075472 | 93 | 0.368856 |
1c7b1f4e4b7bfbf72b788463867c6a1ec1a46c6d | 900 | py | Python | testing/python/telBuggyScript2.py | sys-bio/rrplugins | 03af6ea70d73462ad88103f1e446dc0c5f3f971c | [
"Apache-2.0"
] | null | null | null | testing/python/telBuggyScript2.py | sys-bio/rrplugins | 03af6ea70d73462ad88103f1e446dc0c5f3f971c | [
"Apache-2.0"
] | 8 | 2015-12-02T18:20:43.000Z | 2021-08-20T17:13:34.000Z | testing/python/telBuggyScript2.py | sys-bio/telPlugins | 03af6ea70d73462ad88103f1e446dc0c5f3f971c | [
"Apache-2.0"
] | 3 | 2015-01-27T18:53:45.000Z | 2015-07-13T17:07:50.000Z | import roadrunner
import teplugins as tel
i = 0
#for i in range(100):
try:
noisePlugin = tel.Plugin ("tel_add_noise")
print noisePlugin.listOfProperties()
# Create a roadrunner instance
rr = roadrunner.RoadRunner()
rr.load("sbml_test_0001.xml")
# Generate data
data = rr.simulate(0, 10, 511) # Want 512 points
# Get the dataseries from roadrunner
d = tel.getDataSeries (data)
# Assign the dataseries to the plugin inputdata
noisePlugin.InputData = d
# Set parameter for the 'size' of the noise
noisePlugin.Sigma = 3.e-6
# Add the noise
noisePlugin.execute()
# Get the data to plot
noisePlugin.InputData.plot()
# tel.show()
d.writeDataSeries ("testData2.dat")
d.readDataSeries ("testData2.dat")
print "done"
print i
except Exception as e:
print 'Problem: ' + `e`
| 20 | 52 | 0.637778 |
1c7b885a3c4fad049ff2d1a6a859aa95838e0630 | 2,954 | py | Python | encyclopaedia/labels.py | tcyrus/renpy-encyclopaedia | 900517b34ab7b870f6ee03057f898fb5eb61313c | [
"MIT"
] | null | null | null | encyclopaedia/labels.py | tcyrus/renpy-encyclopaedia | 900517b34ab7b870f6ee03057f898fb5eb61313c | [
"MIT"
] | null | null | null | encyclopaedia/labels.py | tcyrus/renpy-encyclopaedia | 900517b34ab7b870f6ee03057f898fb5eb61313c | [
"MIT"
] | null | null | null | from renpy import store
| 32.461538 | 94 | 0.635748 |
1c7bed607992f89cbbe011d8fbb3d755bb77d244 | 1,816 | py | Python | ncservice/ncDeviceOps/threaded/get_configs.py | cunningr/yanccm | 2d8f891d704672f4d3a15472c7a13edf7832d53d | [
"MIT"
] | null | null | null | ncservice/ncDeviceOps/threaded/get_configs.py | cunningr/yanccm | 2d8f891d704672f4d3a15472c7a13edf7832d53d | [
"MIT"
] | null | null | null | ncservice/ncDeviceOps/threaded/get_configs.py | cunningr/yanccm | 2d8f891d704672f4d3a15472c7a13edf7832d53d | [
"MIT"
] | null | null | null | import logging
from ncservice.ncDeviceOps.nc_device_ops import NcDeviceOps
from ncservice.ncDeviceOps.task_report import TaskReport
from ncservice.ncDeviceOps.threaded.base_thread_class import BaseThreadClass
logger = logging.getLogger('main.{}'.format(__name__))
extra = {'signature': '---SIGNATURE-NOT-SET---'}
| 37.061224 | 108 | 0.643722 |
1c7de0aa67e4761191bbc1f1c380a31439d54c36 | 258 | py | Python | CodeHS/Looping/DoubleForLoop.py | Kev-in123/ICS2O7 | 425c59975d4ce6aa0937fd8715b51d04487e4fa9 | [
"MIT"
] | 2 | 2021-08-10T18:16:08.000Z | 2021-09-26T19:49:26.000Z | CodeHS/Looping/DoubleForLoop.py | Kev-in123/ICS2O7 | 425c59975d4ce6aa0937fd8715b51d04487e4fa9 | [
"MIT"
] | null | null | null | CodeHS/Looping/DoubleForLoop.py | Kev-in123/ICS2O7 | 425c59975d4ce6aa0937fd8715b51d04487e4fa9 | [
"MIT"
] | null | null | null | """
This program visualizes nested for loops by printing number 0 through 3
and then 0 through 3 for the nested loop.
"""
for i in range(4):
print("Outer for loop: " + str(i))
for j in range(4):
print(" Inner for loop: " + str(j)) | 28.666667 | 72 | 0.612403 |
1c7e58a1470bb1ce0a1146ec377bf0292d1e20e6 | 4,657 | py | Python | saleor/graphql/channel/tests/test_base_channel_listing.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z | saleor/graphql/channel/tests/test_base_channel_listing.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z | saleor/graphql/channel/tests/test_base_channel_listing.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | [
"CC-BY-4.0"
] | 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z | from collections import defaultdict
import graphene
import pytest
from django.core.exceptions import ValidationError
from ....shipping.error_codes import ShippingErrorCode
from ..mutations import BaseChannelListingMutation
| 30.84106 | 83 | 0.721065 |
1c7ea7eccdeaa85272171df846b591a0afd65d34 | 9,843 | py | Python | francoralite/apps/francoralite_front/tools.py | Francoralite/francoralite | f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c | [
"BSD-3-Clause"
] | 2 | 2021-07-26T08:29:26.000Z | 2021-07-26T08:29:27.000Z | francoralite/apps/francoralite_front/tools.py | lluc/telemeta-integration | c2fb116471235674eae597abac84a7113e0f7c82 | [
"BSD-3-Clause"
] | 167 | 2018-10-20T14:34:46.000Z | 2021-06-01T10:40:55.000Z | francoralite/apps/francoralite_front/tools.py | Francoralite/francoralite | f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c | [
"BSD-3-Clause"
] | 1 | 2021-06-06T12:16:49.000Z | 2021-06-06T12:16:49.000Z | # -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Cooprative ARTEFACTS <artefacts.lle@gmail.com>
import requests
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import gettext as _
from requests.exceptions import RequestException
from rest_framework import status
from francoralite.apps.francoralite_front.errors import APPLICATION_ERRORS
from .views.related import (
write_fond_related,
write_mission_related,
write_collection_related,
write_item_related)
# Map from DRF HTTP status codes to the application's user-facing error
# messages; consumed by check_status_code() for codes outside allowed_codes.
HTTP_ERRORS = {
    status.HTTP_400_BAD_REQUEST: APPLICATION_ERRORS['HTTP_API_400'],
    status.HTTP_401_UNAUTHORIZED: APPLICATION_ERRORS['HTTP_API_401'],
    status.HTTP_403_FORBIDDEN: APPLICATION_ERRORS['HTTP_API_403'],
    status.HTTP_404_NOT_FOUND: APPLICATION_ERRORS['HTTP_API_404'],
    status.HTTP_409_CONFLICT: APPLICATION_ERRORS['HTTP_API_409'],
}
# Entity names whose API route drops the underscore (e.g. "legal_rights"
# becomes "legalrights"); see post(), patch() and delete().
PROBLEM_NAMES = [
    "legal_rights",
    "recording_context",
    "location_gis",
]
def get_token_header(request):
    """Build the HTTP ``Authorization`` header from the session's OIDC token.

    Returns an empty dict when no ``oidc_access_token`` is stored in the
    session, so the result can always be passed as ``headers=``.
    """
    token = request.session.get('oidc_access_token')
    return {'Authorization': 'Bearer ' + token} if token else {}
def check_status_code(status_code, allowed_codes=(status.HTTP_200_OK,)):
    """Translate an API HTTP status code into the matching exception.

    Raises PermissionDenied (403), Http404 (404), UserMessageError (409),
    RequestException for any other 4xx, and a generic Exception when the
    code is not listed in ``allowed_codes``.  Returns None otherwise.
    """
    if status_code == status.HTTP_403_FORBIDDEN:
        raise PermissionDenied(_('Accs interdit.'))

    if status_code == status.HTTP_404_NOT_FOUND:
        raise Http404(_('Cette fiche nexiste pas.'))

    if status_code == status.HTTP_409_CONFLICT:
        raise UserMessageError(_('Une fiche avec ce code existe dj.'))

    # Any remaining client error (4xx) surfaces as a requests-level failure.
    other_client_error = (
        status.HTTP_400_BAD_REQUEST
        <= status_code
        < status.HTTP_500_INTERNAL_SERVER_ERROR
    )
    if other_client_error:
        raise RequestException()

    if status_code not in allowed_codes:
        raise Exception(HTTP_ERRORS[status_code])
def handle_message_from_exception(request, exception):
    """Queue a user-visible error message for ``exception``.

    A UserMessageError carries user-facing text and is shown as-is; any
    other non-None exception is reported with a generic error message.
    ``None`` is silently ignored.
    """
    if exception is None:
        return
    if isinstance(exception, UserMessageError):
        text = exception
    else:
        text = _('Une erreur indtermine est survenue.')
    messages.add_message(request, messages.ERROR, text)
def request_api(endpoint):
    """GET ``endpoint`` on the API backend and return the decoded JSON body.

    Raises through check_status_code() when the response is not 200 OK.
    """
    url = settings.FRONT_HOST_URL + endpoint
    response = requests.get(url)
    check_status_code(response.status_code)
    return response.json()
def post(entity, form_entity, request, *args, **kwargs):
    """Validate a submitted form and create one record of ``entity`` via the API.

    On success, redirects to the parent institution (for a fond) or to the
    last non-"add" page in the session's referer stack; on API failure or an
    invalid form, redirects back to the entity's add page.
    """
    form = form_entity(request.POST, request.FILES)
    entity_api = entity
    entity_url = entity
    # Processing the problem names entities (API route drops the underscore)
    if entity in PROBLEM_NAMES:
        entity_api = entity.replace('_', '')
    # Processing URL for Fond entity (nested under its institution)
    if entity == 'fond':
        entity_url = 'institution/' + kwargs['id_institution'] \
            + '/' + entity
    # Processing URL for Mission entity
    if entity == 'mission':
        entity_url = 'institution/' + kwargs['id_institution'] \
            + '/fond/' + kwargs['id_fond']\
            + '/' + entity
    # Processing URL for Collection entity
    if entity == 'collection':
        entity_url = 'institution/' + kwargs['id_institution'] \
            + '/fond/' + kwargs['id_fond']\
            + '/mission/' + kwargs['id_mission'] \
            + '/' + entity
    # Processing URL for Item entity
    if entity == 'item':
        entity_url = 'institution/' + kwargs['id_institution'] \
            + '/fond/' + kwargs['id_fond']\
            + '/mission/' + kwargs['id_mission'] \
            + '/collection/' + kwargs['id_collection'] \
            + '/' + entity
    # Problem with old Telemeta fields/entities
    if form.is_valid():
        if entity == 'item':
            # Concatenate domains (the API expects a single string)
            form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
            # Remove the 'file' entry : if not, there some bugs
            del form.cleaned_data['file']
        try:
            post_api(settings.FRONT_HOST_URL + '/api/' + entity_api,
                     data=form.cleaned_data,
                     request=request,
                     entity=entity)
            if entity == 'fond':
                return HttpResponseRedirect(
                    '/institution/' +
                    str(form.cleaned_data['institution']))
            # Previous page ( not an edit page ... )
            if len(request.session["referers"]) > 1:
                try:
                    for referer in request.session["referers"]:
                        if 'add' not in referer.split('/'):
                            return HttpResponseRedirect(referer)
                except Exception:
                    return HttpResponseRedirect('/' + entity)
            return HttpResponseRedirect('/' + entity)
        except RequestException as e:
            handle_message_from_exception(request, e)
            return HttpResponseRedirect('/' + entity_url + '/add')
    return HttpResponseRedirect('/' + entity_url + '/add')
def post_api(endpoint, data, request, entity):
    """POST ``data`` (and any uploaded files) to the API backend.

    After a successful creation (200/201), the related records of the new
    fond/mission/collection/item are written as well.  Returns the decoded
    JSON representation of the created entity.
    """
    headers = get_token_header(request=request)
    response = requests.post(
        endpoint,
        data=data,
        files=request.FILES,
        headers=headers,
    )
    check_status_code(
        response.status_code,
        allowed_codes=(status.HTTP_200_OK, status.HTTP_201_CREATED),
    )
    entity_json = response.json()

    # Write the entity-specific related records, if any.
    related_writers = {
        "fond": write_fond_related,
        "mission": write_mission_related,
        "collection": write_collection_related,
        "item": write_item_related,
    }
    writer = related_writers.get(entity)
    if writer is not None:
        writer(entity_json, request, headers)
    return entity_json
def patch(entity, form_entity, request, *args, **kwargs):
    """Validate a submitted form and update one record of ``entity`` via the API.

    The record id is read from ``kwargs['id']``.  On success, redirects to
    the last non-"edit" page in the session's referer stack; on API failure
    or an invalid form, redirects back to the entity's edit page.
    """
    form = form_entity(request.POST)
    if entity == 'item':
        # The file is not re-uploaded on edit, so it must not be required.
        form.fields['file'].required = False
    id = kwargs.get('id')
    entity_api = entity
    # Some entity names differ between URLs and the API (underscores dropped).
    if entity in PROBLEM_NAMES:
        entity_api = entity.replace('_', '')
    if form.is_valid():
        if entity == "collection":
            # Year fields are taken from the raw form data rather than the
            # cleaned data.  NOTE(review): presumably to bypass field
            # coercion/validation — confirm why.
            form.cleaned_data['recorded_from_year'] = \
                form.data['recorded_from_year']
            form.cleaned_data['recorded_to_year'] = \
                form.data['recorded_to_year']
            if form.cleaned_data['year_published'] is None:
                form.cleaned_data['year_published'] = ''
        if entity == "item":
            # Concatenate domains (the API expects a single string)
            form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
        try:
            response = patch_api(
                settings.FRONT_HOST_URL + '/api/' + entity_api + '/' + str(id),
                data=form.cleaned_data,
                request=request,
                entity=entity
            )
            if(response.status_code != status.HTTP_200_OK):
                return HttpResponseRedirect('/' + entity + '/edit/' +
                                            str(id))
            # Previous page ( not an edit page ... )
            if len(request.session["referers"]) > 1:
                for referer in request.session["referers"]:
                    if 'edit' not in referer.split('/'):
                        return HttpResponseRedirect(referer)
            return HttpResponseRedirect('/' + entity)
        except RequestException as e:
            handle_message_from_exception(request, e)
            return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
    return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
def patch_api(endpoint, data, request, entity):
    """PATCH ``data`` to the API backend and rewrite related records.

    After a successful update, the related records of the patched
    fond/mission/collection/item are rewritten as well.  Returns the raw
    ``requests`` response object (callers inspect ``status_code``),
    unlike post_api() which returns the decoded JSON.
    """
    # Compute the auth header once instead of once per call below
    # (the original recomputed get_token_header up to five times per
    # request; post_api already computes it once).
    headers = get_token_header(request=request)
    response = requests.patch(
        endpoint,
        data=data,
        headers=headers,
    )
    check_status_code(response.status_code)
    entity_json = response.json()

    # Rewrite the entity-specific related records, if any.
    related_writers = {
        "fond": write_fond_related,
        "mission": write_mission_related,
        "collection": write_collection_related,
        "item": write_item_related,
    }
    writer = related_writers.get(entity)
    if writer is not None:
        writer(entity_json, request, headers=headers)
    return response
def delete(entity, request, *args, **kwargs):
    """Delete one record of ``entity`` through the API backend.

    The record id is read from ``kwargs['id']``.  On success the user is
    redirected back to the referring page; on API failure an error message
    is queued and the user is redirected to the entity's list page.
    """
    record_id = kwargs.get('id')
    # Some entity names differ between URLs and the API (underscores dropped).
    api_name = entity.replace('_', '') if entity in PROBLEM_NAMES else entity
    endpoint = settings.FRONT_HOST_URL + '/api/' + api_name + '/' + str(record_id)
    try:
        delete_api(endpoint, request=request)
    except RequestException as exc:
        handle_message_from_exception(request, exc)
        return HttpResponseRedirect('/' + entity)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def delete_api(endpoint, request):
    """Issue an authenticated DELETE against ``endpoint``.

    Raises through check_status_code() when the backend does not answer
    200 OK; otherwise returns the raw ``requests`` response.
    """
    auth_headers = get_token_header(request=request)
    response = requests.delete(endpoint, headers=auth_headers)
    check_status_code(response.status_code)
    return response
| 30.286154 | 90 | 0.607843 |
1c7f5af1d319f74fdb488cde790b4cffce3502aa | 5,173 | py | Python | python2.7/site-packages/twisted/internet/iocpreactor/client.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | 4 | 2020-10-31T19:52:05.000Z | 2021-09-22T11:39:27.000Z | python2.7/site-packages/twisted/internet/iocpreactor/client.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | null | null | null | python2.7/site-packages/twisted/internet/iocpreactor/client.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | 2 | 2020-02-27T08:28:35.000Z | 2020-09-13T12:39:26.000Z | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import socket
from twisted.persisted import styles
from twisted.internet.base import BaseConnector
from twisted.internet import defer, interfaces, error
from twisted.python import failure
from abstract import ConnectedSocket
from ops import ConnectExOp
from util import StateEventMachineType
from zope.interface import implements
| 30.429412 | 127 | 0.64972 |
1c7f78b673d9e154cc86707fcd75f178c99f6089 | 2,678 | py | Python | pypika/tests/dialects/test_mssql.py | uhrm/pypika | b390aa33c980704555d75d27ade5bfa4d1d4bae7 | [
"Apache-2.0"
] | null | null | null | pypika/tests/dialects/test_mssql.py | uhrm/pypika | b390aa33c980704555d75d27ade5bfa4d1d4bae7 | [
"Apache-2.0"
] | null | null | null | pypika/tests/dialects/test_mssql.py | uhrm/pypika | b390aa33c980704555d75d27ade5bfa4d1d4bae7 | [
"Apache-2.0"
] | null | null | null | import unittest
from pypika import Table
from pypika.analytics import Count
from pypika.dialects import MSSQLQuery
from pypika.utils import QueryException
| 38.257143 | 118 | 0.647872 |
1c7fbcb14ea301bda84e83c0a6cddb4f13bae6fe | 14,860 | py | Python | Postprocessing/Hardt/Hardt.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | null | null | null | Postprocessing/Hardt/Hardt.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | null | null | null | Postprocessing/Hardt/Hardt.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | null | null | null | import cvxpy as cvx
import numpy as np
from collections import namedtuple
from metric import metric, cd
import pandas as pd
import sys
from helper import make_dataset
| 43.323615 | 113 | 0.611844 |
1c802e1801de4019c3b100aff72c042e2ff702ed | 1,632 | py | Python | tests/test_exceptions.py | nesnahnoj/py3-textract | 61290fb44c964cf78ce64593fdf0076143dbcd91 | [
"MIT"
] | 2 | 2015-03-03T12:40:17.000Z | 2015-03-03T13:05:14.000Z | tests/test_exceptions.py | anderser/textract | 8f7b32cadabcd13ad1eab1a56b9aa151901d0453 | [
"MIT"
] | null | null | null | tests/test_exceptions.py | anderser/textract | 8f7b32cadabcd13ad1eab1a56b9aa151901d0453 | [
"MIT"
] | null | null | null | import unittest
import os
import subprocess
import base
| 37.090909 | 72 | 0.692402 |
1c81071b5834983f0325a721292427a8ce6ce5f8 | 1,998 | py | Python | dloud_ads/circular_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
] | null | null | null | dloud_ads/circular_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
] | null | null | null | dloud_ads/circular_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
] | null | null | null | """Queue implementation using circularly linked list for storage."""
| 29.382353 | 76 | 0.578579 |
1c828f50eb6739af4655f73774016c25d4ee4ac9 | 1,389 | py | Python | suplemon/helpers.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
] | null | null | null | suplemon/helpers.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
] | null | null | null | suplemon/helpers.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
] | null | null | null | # -*- encoding: utf-8
"""
Various helper constants and functions.
"""
import os
import re
import sys
import time
import traceback
def curr_time():
    """Return the current local time formatted as HH:MM."""
    now = time.localtime()
    return time.strftime("%H:%M", now)
def curr_time_sec():
    """Return the current local time formatted as HH:MM:SS."""
    now = time.localtime()
    return time.strftime("%H:%M:%S", now)
def get_error_info():
"""Return info about last error."""
msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
return msg
def get_string_between(start, stop, s):
"""Search string for a substring between two delimeters. False if not found."""
i1 = s.find(start)
if i1 == -1:
return False
s = s[i1 + len(start):]
i2 = s.find(stop)
if i2 == -1:
return False
s = s[:i2]
return s
def whitespace(line):
    """Return the index of the first non-space character on ``line``.

    For an empty line or a line of only spaces this equals len(line).
    Note: only the space character counts; a tab terminates the scan.
    """
    index = 0
    length = len(line)
    while index < length and line[index] == " ":
        index += 1
    return index
def parse_path(path):
    """Expand, absolutize and split ``path`` into (directory, filename).

    A leading "~" + os.sep is expanded to the user's home directory before
    the path is made absolute.
    """
    home_prefix = "~" + os.sep
    if path.startswith(home_prefix):
        home = os.path.expanduser("~")
        path = os.path.join(home + os.sep, path[len(home_prefix):])
    absolute = os.path.abspath(path)
    return os.path.split(absolute)
1c850ddd900887b33d213aba43297d734592063b | 31,713 | py | Python | geofem/emg3d/meshes.py | iisadoramacedo/geofem-master | cc5cf4ae660480dd4dc3d805310f7207fb28230e | [
"MIT"
] | null | null | null | geofem/emg3d/meshes.py | iisadoramacedo/geofem-master | cc5cf4ae660480dd4dc3d805310f7207fb28230e | [
"MIT"
] | 1 | 2020-10-29T11:42:21.000Z | 2020-10-29T11:42:21.000Z | build/lib/geofem/emg3d/meshes.py | iisadoramacedo/geofem-master | cc5cf4ae660480dd4dc3d805310f7207fb28230e | [
"MIT"
] | 1 | 2020-07-09T18:15:10.000Z | 2020-07-09T18:15:10.000Z | """
:mod:`meshes` -- Discretization
===============================
Everything related to meshes appropriate for the multigrid solver.
"""
# Copyright 2018-2020 The emg3d Developers.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from copy import deepcopy
from scipy import optimize
__all__ = ['TensorMesh', 'get_hx_h0', 'get_cell_numbers', 'get_stretched_h',
'get_domain', 'get_hx']
def get_hx_h0(freq, res, domain, fixed=0., possible_nx=None, min_width=None,
              pps=3, alpha=None, max_domain=100000., raise_error=True, verb=1,
              return_info=False):
    r"""Return cell widths and origin for given parameters.
    Returns cell widths for the provided frequency, resistivity, domain extent,
    and other parameters using a flexible amount of cells. See input parameters
    for more details. A maximum of three hard/fixed boundaries can be provided
    (one of which is the grid center).
    The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where
    the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the
    parameter `pps` stands for 'points-per-skindepth'. The minimum cell width
    can be restricted with the parameter `min_width`.
    The actual calculation domain adds a buffer zone around the (survey)
    domain. The thickness of the buffer is six times the skin depth. The field
    is basically zero after two wavelengths. A wavelength is
    :math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6
    gives therefore almost two wavelengths, as the field travels to the
    boundary and back. The actual buffer thickness can be steered with the
    `res` parameter.
    One has to take into account that the air is very resistive, which has to
    be considered not just in the vertical direction, but also in the
    horizontal directions, as the airwave will bounce back from the sides
    otherwise. In the marine case this issue reduces with increasing water
    depth.
    See Also
    --------
    get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed
                      domain.
    Parameters
    ----------
    freq : float
        Frequency (Hz) to calculate the skin depth. The skin depth is a concept
        defined in the frequency domain. If a negative frequency is provided,
        it is assumed that the calculation is carried out in the Laplace
        domain. To calculate the skin depth, the value of `freq` is then
        multiplied by :math:`-2\pi`, to simulate the closest
        frequency-equivalent.
    res : float or list
        Resistivity (Ohm m) to calculate the skin depth. The skin depth is
        used to calculate the minimum cell width and the boundary thicknesses.
        Up to three resistivities can be provided:
        - float: Same resistivity for everything;
        - [min_width, boundaries];
        - [min_width, left boundary, right boundary].
    domain : list
        Contains the survey-domain limits [min, max]. The actual calculation
        domain consists of this domain plus a buffer zone around it, which
        depends on frequency and resistivity.
    fixed : list, optional
        Fixed boundaries, one, two, or maximum three values. The grid is
        centered around the first value. Hence it is the center location with
        the smallest cell. Two more fixed boundaries can be added, at most one
        on each side of the first one.
        Default is 0.
    possible_nx : list, optional
        List of possible numbers of cells. See :func:`get_cell_numbers`.
        Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to
        [16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384].
    min_width : float, list or None, optional
        Minimum cell width restriction:
        - None : No restriction;
        - float : Fixed to this value, ignoring skin depth and `pps`.
        - list [min, max] : Lower and upper bounds.
        Default is None.
    pps : int, optional
        Points per skindepth; minimum cell width is calculated via
        `dmin = skindepth/pps`.
        Default = 3.
    alpha : list, optional
        Maximum alpha and step size to find a good alpha. The first value is
        the maximum alpha of the survey domain, the second value is the maximum
        alpha for the buffer zone, and the third value is the step size.
        Default = [1, 1.5, .01], hence no stretching within the survey domain
        and a maximum stretching of 1.5 in the buffer zone; step size is 0.01.
    max_domain : float, optional
        Maximum calculation domain from fixed[0] (usually source position).
        Default is 100,000.
    raise_error : bool, optional
        If True, an error is raised if no suitable grid is found. Otherwise it
        just prints a message and returns None's.
        Default is True.
    verb : int, optional
        Verbosity, 0 or 1.
        Default = 1.
    return_info : bool
        If True, a dictionary is returned with some grid info (min and max
        cell width and alpha).
    Returns
    -------
    hx : ndarray
        Cell widths of mesh.
    x0 : float
        Origin of the mesh.
    info : dict
        Dictionary with mesh info; only if ``return_info=True``.
        Keys:
        - `dmin`: Minimum cell width;
        - `dmax`: Maximum cell width;
        - `amin`: Minimum alpha;
        - `amax`: Maximum alpha.
    """
    # Strategy: try increasing cell counts (possible_nx) and, for each count,
    # increasing stretching factors, until survey domain plus buffer zone fit
    # within nx cells. The first (smallest) combination that fits wins.
    # Get variables with default lists:
    if alpha is None:
        alpha = [1, 1.5, 0.01]
    if possible_nx is None:
        possible_nx = get_cell_numbers(500, 5, 3)
    # Cast resistivity value(s).
    # res_arr is always [min-width res, left-boundary res, right-boundary res].
    res = np.array(res, ndmin=1)
    if res.size == 1:
        res_arr = np.array([res[0], res[0], res[0]])
    elif res.size == 2:
        res_arr = np.array([res[0], res[1], res[1]])
    else:
        res_arr = np.array([res[0], res[1], res[2]])
    # Cast and check fixed.
    fixed = np.array(fixed, ndmin=1)
    if fixed.size > 2:
        # Check length.
        if fixed.size > 3:
            print("\n* ERROR :: Maximum three fixed boundaries permitted.\n"
                  f"           Provided: {fixed.size}.")
            raise ValueError("Wrong input for fixed")
        # Sort second and third, so it doesn't matter how it was provided.
        fixed = np.array([fixed[0], max(fixed[1:]), min(fixed[1:])])
        # Check side.
        if np.sign(np.diff(fixed[:2])) == np.sign(np.diff(fixed[::2])):
            print("\n* ERROR :: 2nd and 3rd fixed boundaries have to be "
                  "left and right of the first one.\n           "
                  f"Provided: [{fixed[0]}, {fixed[1]}, {fixed[2]}]")
            raise ValueError("Wrong input for fixed")
    # Calculate skin depth.
    skind = 503.3*np.sqrt(res_arr/abs(freq))
    if freq < 0:  # For Laplace-domain calculations.
        skind /= np.sqrt(2*np.pi)
    # Minimum cell width.
    dmin = skind[0]/pps
    if min_width is not None:  # Respect user input.
        min_width = np.array(min_width, ndmin=1)
        if min_width.size == 1:
            dmin = min_width
        else:
            dmin = np.clip(dmin, *min_width)
    # Survey domain; contains all sources and receivers.
    domain = np.array(domain, dtype=float)
    # Calculation domain; big enough to avoid boundary effects.
    # To avoid boundary effects we want the signal to travel two wavelengths
    # from the source to the boundary and back to the receiver.
    # => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %.
    # Two wavelengths we can safely assume it is zero.
    #
    # The air does not follow the concept of skin depth, as it is a wave rather
    # than diffusion. For this is the factor `max_domain`, which restricts
    # the domain in each direction to this value from the center.
    # (a) Source to edges of domain.
    dist_in_domain = abs(domain - fixed[0])
    # (b) Two wavelengths.
    two_lambda = skind[1:]*4*np.pi
    # (c) Required buffer, additional to domain.
    dist_buff = np.max([np.zeros(2), (two_lambda - dist_in_domain)/2], axis=0)
    # (d) Add buffer to domain.
    calc_domain = np.array([domain[0]-dist_buff[0], domain[1]+dist_buff[1]])
    # (e) Restrict total domain to max_domain.
    calc_domain[0] = max(calc_domain[0], fixed[0]-max_domain)
    calc_domain[1] = min(calc_domain[1], fixed[0]+max_domain)
    # Initiate flag if terminated.
    finished = False
    # Initiate alpha variables for survey and calculation domains.
    sa, ca = 1.0, 1.0
    # Loop over possible cell numbers from small to big.
    for nx in np.unique(possible_nx):
        # Loop over possible alphas for domain.
        for sa in np.arange(1.0, alpha[0]+alpha[2]/2, alpha[2]):
            # Get current stretched grid cell sizes.
            thxl = dmin*sa**np.arange(nx)  # Left of origin.
            thxr = dmin*sa**np.arange(nx)  # Right of origin.
            # 0. Adjust stretching for fixed boundaries.
            # Rescale the cell widths so a cell edge lands exactly on each
            # additional fixed boundary.
            if fixed.size > 1:  # Move mesh to first fixed boundary.
                t_nx = np.r_[fixed[0], fixed[0]+np.cumsum(thxr)]
                ii = np.argmin(abs(t_nx-fixed[1]))
                thxr *= abs(fixed[1]-fixed[0])/np.sum(thxr[:ii])
            if fixed.size > 2:  # Move mesh to second fixed boundary.
                t_nx = np.r_[fixed[0], fixed[0]-np.cumsum(thxl)]
                ii = np.argmin(abs(t_nx-fixed[2]))
                thxl *= abs(fixed[2]-fixed[0])/np.sum(thxl[:ii])
            # 1. Fill from center to left domain.
            nl = np.sum((fixed[0]-np.cumsum(thxl)) > domain[0])+1
            # 2. Fill from center to right domain.
            nr = np.sum((fixed[0]+np.cumsum(thxr)) < domain[1])+1
            # 3. Get remaining number of cells and check termination criteria.
            nsdc = nl+nr  # Number of domain cells.
            nx_remain = nx-nsdc
            # Not good, try next.
            if nx_remain <= 0:
                continue
            # Create the current hx-array.
            hx = np.r_[thxl[:nl][::-1], thxr[:nr]]
            # Copy of the survey-domain-only widths; kept for the verbosity
            # report of the max cell width before buffer cells are appended.
            hxo = np.r_[thxl[:nl][::-1], thxr[:nr]]
            # Get actual domain:
            asurv_domain = [fixed[0]-np.sum(thxl[:nl]),
                            fixed[0]+np.sum(thxr[:nr])]
            # Preliminary origin; recomputed below once buffer cells are known.
            x0 = float(fixed[0]-np.sum(thxl[:nl]))
            # Get actual stretching (differs in case of fixed layers).
            sa_adj = np.max([hx[1:]/hx[:-1], hx[:-1]/hx[1:]])
            # Loop over possible alphas for calc_domain.
            for ca in np.arange(sa, alpha[1]+alpha[2]/2, alpha[2]):
                # 4. Fill to left calc_domain.
                thxl = hx[0]*ca**np.arange(1, nx_remain+1)
                nl = np.sum((asurv_domain[0]-np.cumsum(thxl)) >
                            calc_domain[0])+1
                # 5. Fill to right calc_domain.
                thxr = hx[-1]*ca**np.arange(1, nx_remain+1)
                nr = np.sum((asurv_domain[1]+np.cumsum(thxr)) <
                            calc_domain[1])+1
                # 6. Get remaining number of cells and check termination
                # criteria.
                ncdc = nl+nr  # Number of calc_domain cells.
                nx_remain2 = nx-nsdc-ncdc
                if nx_remain2 < 0:  # Not good, try next.
                    continue
                # Create hx-array.
                nl += int(np.floor(nx_remain2/2))  # If uneven, add one cell
                nr += int(np.ceil(nx_remain2/2))   # more on the right.
                hx = np.r_[thxl[:nl][::-1], hx, thxr[:nr]]
                # Calculate origin.
                x0 = float(asurv_domain[0]-np.sum(thxl[:nl]))
                # Mark it as finished and break out of the loop.
                finished = True
                break
            if finished:
                break
        if finished:
            break
    # Check finished and print info about found grid.
    if not finished:
        # Throw message if no solution was found.
        print("\n* ERROR :: No suitable grid found; relax your criteria.\n")
        if raise_error:
            raise ArithmeticError("No grid found!")
        else:
            hx, x0 = None, None
    elif verb > 0:
        print(f"   Skin depth ", end="")
        if res.size == 1:
            print(f"         [m] : {skind[0]:.0f}")
        elif res.size == 2:
            print(f"(m/l-r)  [m] : {skind[0]:.0f} / {skind[1]:.0f}")
        else:
            print(f"(m/l/r)  [m] : {skind[0]:.0f} / {skind[1]:.0f} / "
                  f"{skind[2]:.0f}")
        print(f"   Survey domain       [m] : {domain[0]:.0f} - "
              f"{domain[1]:.0f}")
        print(f"   Calculation domain  [m] : {calc_domain[0]:.0f} - "
              f"{calc_domain[1]:.0f}")
        print(f"   Final extent        [m] : {x0:.0f} - "
              f"{x0+np.sum(hx):.0f}")
        extstr = f"   Min/max cell width  [m] : {min(hx):.0f} / "
        alstr = f"   Alpha survey"
        nrstr = "   Number of cells "
        # If fixed boundaries changed the stretching, report the adjusted
        # factor in brackets next to the nominal one.
        if not np.isclose(sa, sa_adj):
            sastr = f"{sa:.3f} ({sa_adj:.3f})"
        else:
            sastr = f"{sa:.3f}"
        print(extstr+f"{max(hxo):.0f} / {max(hx):.0f}")
        print(alstr+f"/calc       : {sastr} / {ca:.3f}")
        print(nrstr+f"(s/c/r) : {nx} ({nsdc}/{ncdc}/{nx_remain2})")
        print()
    if return_info:
        if not fixed.size > 1:
            sa_adj = sa
        info = {'dmin': dmin,
                'dmax': np.nanmax(hx),
                'amin': np.nanmin([ca, sa, sa_adj]),
                'amax': np.nanmax([ca, sa, sa_adj])}
        return hx, x0, info
    else:
        return hx, x0
def get_cell_numbers(max_nr, max_prime=5, min_div=3):
    r"""Compute 'good' cell numbers for the multigrid method.

    A 'good' number of cells has the form :math:`p 2^n` with a small prime
    :math:`p \leq p_\text{max}` and :math:`n \geq n_\text{min}`: it can be
    halved many times before ending at a small prime, which is what the
    multigrid coarsening requires. All such numbers up to `max_nr` are
    returned, where :math:`M, p_\text{max}, n_\text{min}` correspond to
    `max_nr`, `max_prime`, and `min_div`, respectively.

    Parameters
    ----------
    max_nr : int
        Maximum number of cells.
    max_prime : int
        Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good
        upper limits in order to avoid too big lowest grids in the multigrid
        method. Default is 5.
    min_div : int
        Minimum times the number can be divided by two. Default is 3.

    Returns
    -------
    numbers : array
        Array containing all possible cell numbers from lowest to highest.
    """
    # Primes up to 20; anything larger makes no sense for multigrid.
    small_primes = np.array([2, 3, 5, 7, 11, 13, 17, 19])
    # Sanity check; 19 is already ridiculously high.
    if max_prime > small_primes[-1]:
        print(f"* ERROR :: Highest prime is {max_prime}, "
              "please use a value < 20.")
        raise ValueError("Highest prime too high")
    # Keep only the primes the user permits.
    usable_primes = small_primes[small_primes <= max_prime]
    # All candidates p*2**n with n in [min_div, 30); the cap of 30 already
    # corresponds to over a billion cells for p=2.
    candidates = np.unique(usable_primes[:, None]*2**np.arange(min_div, 30))
    # Restrict to max_nr and return sorted unique values.
    return candidates[candidates <= max_nr]
def get_stretched_h(min_width, domain, nx, x0=0, x1=None, resp_domain=False):
    """Return cell widths for a stretched grid within the domain.
    Returns `nx` cell widths within `domain`, where the minimum cell width is
    `min_width`. The cells are not stretched within `x0` and `x1`, and outside
    uses a power-law stretching. The actual stretching factor and the number of
    cells left and right of `x0` and `x1` are found in a minimization process.
    The domain is not completely respected. The starting point of the domain
    is, but the endpoint of the domain might slightly shift (this is more
    likely the case for small `nx`, for big `nx` the shift should be small).
    The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want
    the domain to be respected absolutely, set ``resp_domain=True``. However,
    be aware that this will introduce one stretch-factor which is different
    from the other stretch factors, to accommodate the restriction. This
    one-off factor is between the left- and right-side of `x0`, or, if `x1` is
    provided, just after `x1`.
    See Also
    --------
    get_hx_x0 : Get `hx` and `x0` for a flexible number of `nx` with
                given bounds.
    Parameters
    ----------
    min_width : float
        Minimum cell width. If x1 is provided, the actual minimum cell width
        might be smaller than min_width.
    domain : list
        [start, end] of model domain.
    nx : int
        Number of cells.
    x0 : float
        Center of the grid. `x0` is restricted to `domain`.
        Default is 0.
    x1 : float
        If provided, then no stretching is applied between `x0` and `x1`. The
        non-stretched part starts at `x0` and stops at the first possible
        location at or after `x1`. `x1` is restricted to `domain`. This will
        adjust min_width so that an integer number of cells fit within x0 and
        x1.
    resp_domain : bool
        If False (default), then the domain-end might shift slightly to assure
        that the same stretching factor is applied throughout. If set to True,
        however, the domain is respected absolutely. This will introduce one
        stretch-factor which is different from the other stretch factors, to
        accommodate the restriction. This one-off factor is between the left-
        and right-side of `x0`, or, if `x1` is provided, just after `x1`.
    Returns
    -------
    hx : ndarray
        Cell widths of mesh.
    """
    # Cast to arrays
    domain = np.array(domain, dtype=float)
    x0 = np.array(x0, dtype=float)
    x0 = np.clip(x0, *domain)  # Restrict to model domain
    min_width = np.array(min_width, dtype=float)
    if x1 is not None:
        x1 = np.array(x1, dtype=float)
        x1 = np.clip(x1, *domain)  # Restrict to model domain
    # If x1 is provided (a part is not stretched)
    if x1 is not None:
        # Store original values
        xlim_orig = domain.copy()
        nx_orig = int(nx)
        x0_orig = x0.copy()
        h_min_orig = min_width.copy()
        # Get number of non-stretched cells
        n_nos = int(np.ceil((x1-x0)/min_width))
        # Re-calculate min_width to fit with x0-x1-limits:
        min_width = (x1-x0)/n_nos
        # Subtract one cell, because the standard scheme provides one
        # min_width-cell.
        n_nos -= 1
        # Reset x0, because the first min_width comes from normal scheme
        x0 += min_width
        # Reset xmax for normal scheme
        domain[1] -= n_nos*min_width
        # Reset nx for normal scheme
        nx -= n_nos
        # If there are not enough points reset to standard procedure. The limit
        # of five is arbitrary. However, nx should be much bigger than five
        # anyways, otherwise stretched grid doesn't make sense.
        if nx <= 5:
            print("Warning :: Not enough points for non-stretched part,"
                  "ignoring therefore `x1`.")
            domain = xlim_orig
            nx = nx_orig
            x0 = x0_orig
            x1 = None
            min_width = h_min_orig
    # Get stretching factor (a = 1+alpha).
    if min_width == 0 or min_width > np.diff(domain)/nx:
        # If min_width is bigger than the domain-extent divided by nx, no
        # stretching is required at all.
        alpha = 0
    else:
        # Wrap _get_dx into a minimization function to call with fsolve.
        def find_alpha(alpha, min_width, args):
            """Find alpha such that min(hx) = min_width."""
            return min(get_hx(alpha, *args))/min_width-1
        # Search for best alpha, must be at least 0
        # Note: fsolve returns a one-element ndarray; max(0, ...) guards
        # against a (spurious) negative solution.
        args = (domain, nx, x0)
        alpha = max(0, optimize.fsolve(find_alpha, 0.02, (min_width, args)))
    # With alpha get actual cell spacing with `resp_domain` to respect the
    # users decision.
    hx = get_hx(alpha, domain, nx, x0, resp_domain)
    # Add the non-stretched center if x1 is provided
    # (inserted at the position of the smallest cell, i.e. at x0).
    if x1 is not None:
        hx = np.r_[hx[: np.argmin(hx)], np.ones(n_nos)*min_width,
                   hx[np.argmin(hx):]]
    # Print warning min_width could not be respected.
    if abs(hx.min() - min_width) > 0.1:
        print(f"Warning :: Minimum cell width ({np.round(hx.min(), 2)} m) is "
              "below `min_width`, because `nx` is too big for `domain`.")
    return hx
def get_domain(x0=0, freq=1, res=0.3, limits=None, min_width=None,
               fact_min=0.2, fact_neg=5, fact_pos=None):
    r"""Estimate calculation-domain extent and minimum cell width.

    Both quantities are multiples of the skin depth,

    .. math::

        \delta &= 503.3 \sqrt{\frac{\rho}{f}} , \\
        x_\text{start} &= x_0-k_\text{neg}\delta , \\
        x_\text{end} &= x_0+k_\text{pos}\delta , \\
        h_\text{min} &= k_\text{min} \delta ,

    optionally restricted by the user inputs `limits` and `min_width`.

    Parameters
    ----------
    x0 : float
        Center of the calculation domain (normally the source location).
        Default is 0.
    freq : float
        Frequency (Hz) used to calculate the skin depth. A negative value
        signals a Laplace-domain calculation; the skin depth is then computed
        for the closest frequency-equivalent (`freq` times :math:`-2\pi`).
        Default is 1 Hz.
    res : float, optional
        Resistivity (Ohm m) used to calculate the skin depth.
        Default is 0.3 Ohm m (sea water).
    limits : None or list
        [start, end]; minimum extent the returned domain must cover. The
        domain is only adjusted if it would reach outside of [start, end].
        Default is None.
    min_width : None, float, or list of two floats
        If a float, it is taken directly as the minimum cell width. If a
        [min, max] pair, the skin-depth estimate fact_min*sd is clipped to
        this range. Default is None.
    fact_min, fact_neg, fact_pos : floats
        Skin-depth multipliers for the minimum cell width (`fact_min`,
        default 0.2), the domain-start (`fact_neg`, default 5), and the
        domain-end (`fact_pos`, defaults to `fact_neg`).

    Returns
    -------
    h_min : float
        Minimum cell width.
    domain : list
        Start- and end-points of calculation domain.
    """
    # `fact_pos` falls back to `fact_neg` if not provided.
    if fact_pos is None:
        fact_pos = fact_neg
    # Skin depth; adjusted for Laplace-domain (negative frequency) runs.
    skin_depth = 503.3*np.sqrt(res/abs(freq))
    if freq < 0:
        skin_depth /= np.sqrt(2*np.pi)
    # Minimum cell width as a fraction of the skin depth, honouring any
    # user-provided fixed value or [min, max] bounds.
    h_min = fact_min*skin_depth
    if min_width is not None:
        if np.array(min_width).size == 1:
            h_min = min_width
        else:
            h_min = np.clip(h_min, *min_width)
    # Domain around x0, extended (never shrunk) to cover `limits`.
    domain = [x0-fact_neg*skin_depth, x0+fact_pos*skin_depth]
    if limits is not None:
        domain = [min(limits[0], domain[0]), max(limits[1], domain[1])]
    return h_min, domain
def get_hx(alpha, domain, nx, x0, resp_domain=True):
    r"""Return cell widths for given input.
    Find the number of cells left and right of `x0`, `nl` and `nr`
    respectively, for the provided alpha. For this, we solve
    .. math::   \frac{x_\text{max}-x_0}{x_0-x_\text{min}} =
                \frac{a^{nr}-1}{a^{nl}-1}
    where :math:`a = 1+\alpha`.
    Parameters
    ----------
    alpha : float
        Stretching factor `a` is given by ``a=1+alpha``.
    domain : list
        [start, end] of model domain.
    nx : int
        Number of cells.
    x0 : float
        Center of the grid. `x0` is restricted to `domain`.
    resp_domain : bool
        If True (default), the domain is respected absolutely. This will
        introduce one stretch-factor which is different from the other
        stretch factors, to accommodate the restriction. This one-off factor
        is between the left- and right-side of `x0`. If set to False, the
        domain-end might shift slightly to assure that the same stretching
        factor is applied throughout.
    Returns
    -------
    hx : ndarray
        Cell widths of mesh.
    """
    if alpha <= 0.:  # If alpha <= 0: equal spacing (no stretching at all)
        hx = np.ones(nx)*np.diff(np.squeeze(domain))/nx
    else:            # Get stretched hx
        a = alpha+1
        # Get hx depending if x0 is on the domain boundary or not.
        if np.isclose(x0, domain[0]) or np.isclose(x0, domain[1]):
            # Get all a's
            # (one-sided geometric progression over the whole domain).
            alr = np.diff(domain)*alpha/(a**nx-1)*a**np.arange(nx)
            if x0 == domain[1]:
                alr = alr[::-1]
            # Calculate differences
            hx = alr*np.diff(domain)/sum(alr)
        else:
            # Find number of elements left and right by solving:
            #     (xmax-x0)/(x0-xmin) = a**nr-1/(a**nl-1)
            # `er` is the mismatch between required and achieved ratio for
            # each possible split; the split with smallest mismatch wins.
            nr = np.arange(2, nx+1)
            er = (domain[1]-x0)/(x0-domain[0]) - (a**nr[::-1]-1)/(a**nr-1)
            nl = np.argmin(abs(np.floor(er)))+1
            nr = nx-nl
            # Get all a's
            al = a**np.arange(nl-1, -1, -1)
            ar = a**np.arange(1, nr+1)
            # Calculate differences
            if resp_domain:
                # This version honours domain[0] and domain[1], but to achieve
                # this it introduces one stretch-factor which is different from
                # all the others between al to ar.
                hx = np.r_[al*(x0-domain[0])/sum(al),
                           ar*(domain[1]-x0)/sum(ar)]
            else:
                # This version moves domain[1], but each stretch-factor is
                # exactly the same.
                fact = (x0-domain[0])/sum(al)  # Take distance from al.
                hx = np.r_[al, ar]*fact
    # Note: this hx is equivalent as providing the following h
    # to TensorMesh:
    #     h = [(min_width, nl-1, -a), (min_width, n_nos+1),
    #          (min_width, nr, a)]
    return hx
| 35.833898 | 79 | 0.590578 |
1c855f3c4b60017317473eca05c6f77584434cbc | 2,464 | py | Python | QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | Hqss/DINK | 5fecaa65e2f9da48eb8ac38ef709aa555fca8766 | [
"BSD-3-Clause"
] | 189 | 2019-01-16T03:05:23.000Z | 2020-09-14T14:54:16.000Z | QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | jtpils/DINK | 5f6b3eaba279126f79ae6607f965311002d7451c | [
"BSD-3-Clause"
] | 3 | 2019-02-11T06:20:15.000Z | 2020-04-05T07:03:53.000Z | QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | jtpils/DINK | 5f6b3eaba279126f79ae6607f965311002d7451c | [
"BSD-3-Clause"
] | 25 | 2019-01-16T03:05:24.000Z | 2020-04-04T21:07:53.000Z | #! /usr/bin/python2
# -*- coding: utf-8 -*-
"""
Clock function to take running time following Segmatch.
"""
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
| 39.741935 | 114 | 0.745536 |
1c8628f13d65ff3439c3fbd013fa30e504e0ca89 | 918 | py | Python | office365/sharepoint/view_collection.py | andebor/Office365-REST-Python-Client | ffd0ab4cf742b2e5ae7d8c44e937495aece41e07 | [
"MIT"
] | null | null | null | office365/sharepoint/view_collection.py | andebor/Office365-REST-Python-Client | ffd0ab4cf742b2e5ae7d8c44e937495aece41e07 | [
"MIT"
] | null | null | null | office365/sharepoint/view_collection.py | andebor/Office365-REST-Python-Client | ffd0ab4cf742b2e5ae7d8c44e937495aece41e07 | [
"MIT"
] | null | null | null | from office365.runtime.client_object_collection import ClientObjectCollection
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.view import View
| 45.9 | 111 | 0.734205 |
1c88139e81ccf155fe77c897a8674f07ab2d5797 | 1,461 | py | Python | common-scrapers/common_src/scrapers/second_extinction.py | mrPaintMan/blog-scraper | 9b1ff3d398bd23d799d86c9a62ec76a6950555cc | [
"MIT"
] | null | null | null | common-scrapers/common_src/scrapers/second_extinction.py | mrPaintMan/blog-scraper | 9b1ff3d398bd23d799d86c9a62ec76a6950555cc | [
"MIT"
] | null | null | null | common-scrapers/common_src/scrapers/second_extinction.py | mrPaintMan/blog-scraper | 9b1ff3d398bd23d799d86c9a62ec76a6950555cc | [
"MIT"
] | 1 | 2020-03-11T14:49:00.000Z | 2020-03-11T14:49:00.000Z | from common_src.lib.model.post import Post
from common_src.lib.model.source import Source
from common_src.scrapers.abstract_scraper import make_soup, remove_dups, now
SOURCE_CODE = "second_extinction"
WEBSITE = "https://www.secondextinctiongame.com/news"
ALT_IMAGE = 'https://www.secondextinctiongame.com/static/242486b363d867dc483deb6d7038dde1/d8255/se_screenshot_5.jpg'
FILENAME = "../resources/data/second_extinction.txt"
| 39.486486 | 119 | 0.699521 |
1c88d1e1834d792edf9c14b13846bd1ee7d80360 | 3,860 | py | Python | systems/ILSVRC12/AlexNet/alexnet.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
] | 7 | 2021-07-01T17:02:50.000Z | 2022-03-29T10:54:41.000Z | systems/ILSVRC12/AlexNet/alexnet.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
] | null | null | null | systems/ILSVRC12/AlexNet/alexnet.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
] | 2 | 2021-07-10T20:57:06.000Z | 2022-01-02T10:10:25.000Z | #
# alexnet.py
#
# Author(s):
# Matteo Spallanzani <spmatteo@iis.ee.ethz.ch>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
| 31.900826 | 91 | 0.591192 |
1c8a214cb9301b78671cb8aa70f1cebef2a6167b | 448 | py | Python | e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 22.4 | 93 | 0.625 |
1c8bf817623bc83ae0e3cfb38c83d93d7647579a | 1,068 | py | Python | madlib/main.py | FredericIV/PythonPractice | 36b3a321eb8fefc38befe83b15a7596418250756 | [
"CC0-1.0"
] | null | null | null | madlib/main.py | FredericIV/PythonPractice | 36b3a321eb8fefc38befe83b15a7596418250756 | [
"CC0-1.0"
] | null | null | null | madlib/main.py | FredericIV/PythonPractice | 36b3a321eb8fefc38befe83b15a7596418250756 | [
"CC0-1.0"
] | null | null | null | #!/bin/python3
# Libraries
import sys
import array
import textwrap
# Variable Declaration
madlib_selection = "example.txt"
madlib_array = array.array('i')
copy_state = False
user_filler = ""
new_madlib = []
if len(sys.argv) != 1:
print(len(sys.argv))
if sys.argv[1] == "-":
print("This program takes the path to a madlib as an argument. Showing default now.")
## TODO: Add input validation, i.e. make sure the input is actully text.
else:
## TODO: Add pipe as input option.
madlib_selection = sys.argv[1]
with open(madlib_selection, 'r') as madlib:
read_madlib = madlib.read()
for i in range(read_madlib.count("#")//2):
first = read_madlib.index("#")
second = read_madlib.index("#", first+1)
replacement = input("Please give me " + read_madlib[first+1:second] + ":")
new_madlib = read_madlib[0:first] + replacement + read_madlib[second+1:]
read_madlib = new_madlib
print("\n\n\n")
print(textwrap.fill(read_madlib, drop_whitespace=False, replace_whitespace=False))
| 31.411765 | 93 | 0.659176 |
1c8d061b0e5a02933d936632c10e61f84e6418bb | 2,558 | py | Python | src/tests/control/test_devices.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/control/test_devices.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/control/test_devices.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import pytest
from django.utils.timezone import now
from pretix.base.models import Device, Event, Organizer, Team, User
from pretix.base.models.devices import generate_api_token
| 30.452381 | 106 | 0.713057 |
1c8e802ab7e5ab17bb7b662f2406ded9d3de6507 | 11,773 | py | Python | mcp/augmentation/album.py | j20232/moco_image_pipeline | 997ae76e795548e75f95e862284c1fc0a3c7541a | [
"BSD-3-Clause"
] | 5 | 2020-03-18T14:36:12.000Z | 2022-01-26T09:36:11.000Z | mcp/augmentation/album.py | j20232/moco_image_pipeline | 997ae76e795548e75f95e862284c1fc0a3c7541a | [
"BSD-3-Clause"
] | null | null | null | mcp/augmentation/album.py | j20232/moco_image_pipeline | 997ae76e795548e75f95e862284c1fc0a3c7541a | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from PIL import Image, ImageOps, ImageEnhance
import albumentations as A
# ndarray: H x W x C
# ----------------------------------- Blur -------------------------------------------
# ----------------------------------- Noise -------------------------------------------
# ---------------------------------- Distortion ---------------------------------------
# ----------------------------------- Histogram ----------------------------------------
# ------------------------------------- Removal ------------------------------------------
# ------------------------------------------- Augmix -------------------------------------------
# Reference: https://www.kaggle.com/haqishen/augmix-based-on-albumentations
def int_parameter(level, maxval):
    """Helper function to scale `val` between 0 and maxval .
    Args:
      level: Level of the operation that will be between [0, `PARAMETER_MAX`].
      maxval: Maximum value that the operation can have. This will be scaled to
        level/PARAMETER_MAX.
    Returns:
      An int that results from scaling `maxval` according to `level`.
    """
    scaled = level * maxval / 10
    return int(scaled)
def float_parameter(level, maxval):
    """Linearly rescale an augmentation level to a float magnitude.

    Args:
        level: Operation level, expected in [0, 10] (PARAMETER_MAX = 10).
        maxval: Magnitude corresponding to the maximum level.

    Returns:
        ``float(level) * maxval / 10``.
    """
    scaled = float(level) * maxval
    return scaled / 10.
def sample_level(n):
    """Draw a random augmentation magnitude uniformly from [0.1, n)."""
    return np.random.uniform(0.1, n)
def autocontrast(pil_img, _):
    """Maximize image contrast; the level argument is ignored (no magnitude)."""
    result = ImageOps.autocontrast(pil_img)
    return result
def equalize(pil_img, _):
    """Equalize the image histogram; the level argument is ignored."""
    result = ImageOps.equalize(pil_img)
    return result
def posterize(pil_img, level):
    """Reduce bits per channel; a higher level removes more bits."""
    bits_removed = int_parameter(sample_level(level), 4)
    return ImageOps.posterize(pil_img, 4 - bits_removed)
def rotate(pil_img, level):
    """Rotate by a randomly sampled angle up to 30 degrees, random sign."""
    degrees = int_parameter(sample_level(level), 30)
    sign = -1 if np.random.uniform() > 0.5 else 1
    return pil_img.rotate(sign * degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
    """Invert pixel values above a threshold sampled from the level."""
    magnitude = int_parameter(sample_level(level), 256)
    return ImageOps.solarize(pil_img, 256 - magnitude)
def shear_x(pil_img, level):
    """Shear horizontally by a sampled magnitude (up to 0.3), random sign."""
    magnitude = float_parameter(sample_level(level), 0.3)
    if np.random.uniform() > 0.5:
        magnitude = -magnitude
    # Affine coefficients (a, b, c, d, e, f): b is the x-shear term.
    coeffs = (1, magnitude, 0, 0, 1, 0)
    return pil_img.transform(pil_img.size, Image.AFFINE, coeffs,
                             resample=Image.BILINEAR)
def shear_y(pil_img, level):
    """Shear vertically by a sampled magnitude (up to 0.3), random sign."""
    magnitude = float_parameter(sample_level(level), 0.3)
    if np.random.uniform() > 0.5:
        magnitude = -magnitude
    # Affine coefficients (a, b, c, d, e, f): d is the y-shear term.
    coeffs = (1, 0, 0, magnitude, 1, 0)
    return pil_img.transform(pil_img.size, Image.AFFINE, coeffs,
                             resample=Image.BILINEAR)
def translate_x(pil_img, level):
    """Translate horizontally by up to a third of the width, random sign."""
    offset = int_parameter(sample_level(level), pil_img.size[0] / 3)
    if np.random.random() > 0.5:
        offset = -offset
    return pil_img.transform(pil_img.size, Image.AFFINE,
                             (1, 0, offset, 0, 1, 0),
                             resample=Image.BILINEAR)
def translate_y(pil_img, level):
    """Translate vertically by a sampled offset, random sign."""
    # NOTE(review): the bound uses size[0] (width) for the vertical axis too;
    # harmless for square images -- confirm before using non-square inputs.
    offset = int_parameter(sample_level(level), pil_img.size[0] / 3)
    if np.random.random() > 0.5:
        offset = -offset
    return pil_img.transform(pil_img.size, Image.AFFINE,
                             (1, 0, 0, 0, 1, offset),
                             resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
# operation that overlaps with ImageNet-C's test set
# operation that overlaps with ImageNet-C's test set
# operation that overlaps with ImageNet-C's test set
def normalize(image):
    """Shift pixel values so the nominal midpoint (127) maps to zero.

    Note: despite the name, this only recenters the values -- it does not
    scale to unit variance.
    """
    return image - 127
def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):
    """Perform AugMix augmentations and compute mixture.
    Args:
        image: Raw input image as float32 np.ndarray of shape (h, w, c)
        severity: Severity of underlying augmentation operators (between 1 to 10).
        width: Width of augmentation chain
        depth: Depth of augmentation chain. -1 enables stochastic depth uniformly
        from [1, 3]
        alpha: Probability coefficient for Beta and Dirichlet distributions.
    Returns:
        mixed: Augmented and mixed image.
    """
    # Pool of PIL-based operations defined above; each chain samples from it.
    augmentations = [
        autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
        translate_x, translate_y
    ]
    # ws: convex mixing weights over the `width` chains;
    # m: interpolation factor between the clean image and the mixed result.
    ws = np.float32(np.random.dirichlet([alpha] * width))
    m = np.float32(np.random.beta(alpha, alpha))
    mix = np.zeros_like(image).astype(np.float32)
    for i in range(width):
        image_aug = image.copy()
        # NOTE(review): with depth == -1 a random depth is drawn only once
        # (on the first iteration) and then reused for every chain, since
        # depth stays > 0 afterwards. The AugMix reference resamples per
        # chain -- confirm whether this reuse is intended.
        depth = depth if depth > 0 else np.random.randint(1, 4)
        for _ in range(depth):
            op = np.random.choice(augmentations)
            # apply_op is defined elsewhere in this module (not shown here).
            image_aug = apply_op(image_aug, op, severity)
        # Preprocessing commutes since all coefficients are convex
        mix += ws[i] * image_aug
        # mix += ws[i] * normalize(image_aug)
    mixed = (1 - m) * image + m * mix
    # mixed = (1 - m) * normalize(image) + m * mix
    return mixed
| 35.460843 | 99 | 0.599507 |
1c8ea0dcd3e4b0f8ab68d4a876a677661904e6f8 | 2,959 | py | Python | website/util/sanitize.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | website/util/sanitize.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | website/util/sanitize.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import bleach
import json
def strip_html(unclean):
    """Sanitize a string by removing (not escaping) all HTML tags.

    :param unclean: A string to be stripped of HTML tags
    :return: stripped string
    :rtype: str
    """
    stripped = bleach.clean(unclean, tags=[], attributes=[], styles=[], strip=True)
    return stripped
def clean_tag(data):
    """Format as a valid Tag

    :param data: A string to be cleaned
    :return: cleaned string
    :rtype: str

    Bug fix: the replacement entities had been HTML-decoded in the source
    (replacing '"' with '"' -- a no-op -- and a broken quote literal);
    quotes are now replaced with their HTML entities as intended.
    """
    # TODO: make this a method of Tag?
    return escape_html(data).replace('"', '&quot;').replace("'", '&#39;')
def is_iterable_but_not_string(obj):
    """Return True if ``obj`` is an iterable object that isn't a string.

    Duck-typed: iterability is detected via ``__iter__`` and string-likeness
    via the presence of a ``strip`` method.
    """
    has_iter = hasattr(obj, '__iter__')
    is_stringlike = hasattr(obj, 'strip')
    return has_iter and not is_stringlike
def escape_html(data):
    """Recursively escape HTML characters in data.

    Dict values are escaped (keys untouched), iterables are rebuilt as
    lists with each element escaped, strings are run through
    ``bleach.clean``, and any other value is returned unchanged.

    :param data: A string, dict, or list to clean of HTML characters
    :return: A cleaned object
    :rtype: str or list or dict
    """
    if isinstance(data, dict):
        return dict(
            (key, escape_html(val))
            for (key, val) in data.iteritems()
        )
    if is_iterable_but_not_string(data):
        return [escape_html(item) for item in data]
    if isinstance(data, basestring):
        return bleach.clean(data)
    return data
def assert_clean(data):
    """Ensure that data is cleaned

    :raise: AssertionError
    """
    # NOTE(review): despite the name and the ':raise:' docstring, this
    # performs no assertion -- it simply returns the escaped data.
    # Confirm whether an actual check was intended here.
    return escape_html(data)
# TODO: Remove safe_unescape_html when mako html safe comes in
def safe_unescape_html(value):
    """
    Return data without html escape characters.

    Recursively walks dicts and iterables; only escape sequences that are
    safe to restore (&amp;, &lt;, &gt;) are converted back to their
    literal characters.

    :param value: A string, dict, or list
    :return: A string or list or dict without html escape characters

    Bug fix: the safe_characters mapping had been HTML-decoded in the
    source (e.g. mapping '&' to '&'), turning every replacement into a
    no-op; the entity keys are restored here.
    """
    safe_characters = {
        '&amp;': '&',
        '&lt;': '<',
        '&gt;': '>',
    }
    if isinstance(value, dict):
        return {
            key: safe_unescape_html(val)
            for (key, val) in value.iteritems()
        }
    if is_iterable_but_not_string(value):
        return [
            safe_unescape_html(each)
            for each in value
        ]
    if isinstance(value, basestring):
        for escape_sequence, character in safe_characters.items():
            value = value.replace(escape_sequence, character)
        return value
    return value
def safe_json(value):
    """
    Dump a string to JSON in a manner that can be used for JS strings in mako templates.

    Provides additional forward-slash escaping to prevent injection of
    closing markup in strings. See:
    http://benalpert.com/2012/08/03/preventing-xss-json.html

    :param value: A string to be converted
    :return: A JSON-formatted string that explicitly escapes forward slashes when needed
    """
    encoded = json.dumps(value)
    # Escape '</' so the payload cannot close a surrounding <script> tag.
    return encoded.replace('</', '<\\/')
1c8eddd2bd80bb485d60b7d54110b5642d861af4 | 16,525 | py | Python | mainTrain.py | PolarizedLightFieldMicroscopy/LFMNet2 | c9b064d7625e018ef54b8dd8a0e53801c4565397 | [
"Apache-2.0"
] | null | null | null | mainTrain.py | PolarizedLightFieldMicroscopy/LFMNet2 | c9b064d7625e018ef54b8dd8a0e53801c4565397 | [
"Apache-2.0"
] | null | null | null | mainTrain.py | PolarizedLightFieldMicroscopy/LFMNet2 | c9b064d7625e018ef54b8dd8a0e53801c4565397 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils import data
from torch import optim
import torchvision.models as models
from torch.autograd import Variable
import torchvision as tv
import random
import math
import time
from datetime import datetime
import os
import argparse
import subprocess
from util.LFUtil import *
import numpy as np
from networks.LFMNet import LFMNet
if __name__ == '__main__':
    # Entry point: main() is defined elsewhere in this file (not shown here).
    main()
1c8fed7e472142a2a42ee1131ff8f6b28599bc16 | 1,295 | py | Python | tools/utils.py | valsworthen/toxic-comment-classification | 12ceb4d78410a14fba05e43f6f424cec52e6665d | [
"MIT"
] | 10 | 2018-03-26T05:46:39.000Z | 2020-04-30T08:03:18.000Z | tools/utils.py | valsworthen/toxic_comment_classification | 12ceb4d78410a14fba05e43f6f424cec52e6665d | [
"MIT"
] | null | null | null | tools/utils.py | valsworthen/toxic_comment_classification | 12ceb4d78410a14fba05e43f6f424cec52e6665d | [
"MIT"
] | null | null | null | """Utilities"""
import pandas as pd
import numpy as np
from attrdict import AttrDict
import yaml
def average_predictions(cv_predictions, n_splits, num_samples=153164, num_labels=6):
    """Arithmetic mean of k-fold predictions.

    :param cv_predictions: iterable of per-fold prediction arrays, each
        of shape (num_samples, num_labels)
    :param n_splits: number of folds, used as the divisor
    :param num_samples: number of rows in the accumulator
    :param num_labels: number of columns in the accumulator
    :return: np.ndarray of shape (num_samples, num_labels)
    """
    accumulator = np.zeros((num_samples, num_labels))
    for fold_preds in cv_predictions:
        accumulator = accumulator + fold_preds
    return accumulator / n_splits
def geom_average_predictions(cv_predictions, n_splits, num_samples=153164, num_labels=6):
    """Geometric mean of k-fold predictions.

    :param cv_predictions: iterable of per-fold prediction arrays, each
        of shape (num_samples, num_labels)
    :param n_splits: number of folds, used as the root of the product
    :param num_samples: number of rows in the accumulator
    :param num_labels: number of columns in the accumulator
    :return: np.ndarray of shape (num_samples, num_labels)
    """
    accumulator = np.ones((num_samples, num_labels))
    for fold_preds in cv_predictions:
        accumulator = accumulator * fold_preds
    return accumulator ** (1 / n_splits)
| 33.205128 | 93 | 0.67722 |
1c9014f0cf5d96c8108b4c96a94c876f92838ff8 | 530 | py | Python | dags/exercise1.py | mikef-nl/airflow-training-skeleton | 85a0e9103772be012a41ee0daa9f67ba401bfddc | [
"Apache-2.0"
] | null | null | null | dags/exercise1.py | mikef-nl/airflow-training-skeleton | 85a0e9103772be012a41ee0daa9f67ba401bfddc | [
"Apache-2.0"
] | null | null | null | dags/exercise1.py | mikef-nl/airflow-training-skeleton | 85a0e9103772be012a41ee0daa9f67ba401bfddc | [
"Apache-2.0"
] | null | null | null | import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
# Default arguments applied to every task created for this DAG.
args = {
    'owner': 'Mike',
    'start_date': airflow.utils.dates.days_ago(2),
}
# schedule_interval=None: the DAG never runs on a schedule, only when
# triggered manually.
dag = DAG(
    dag_id='exercise1',
    default_args=args,
    schedule_interval=None
)
# Five placeholder tasks wired in a diamond shape:
# task1 -> task2 -> (task3 and task4 in parallel) -> task5.
t1 = DummyOperator(task_id='task1', dag=dag)
t2 = DummyOperator(task_id='task2', dag=dag)
t3 = DummyOperator(task_id='task3', dag=dag)
t4 = DummyOperator(task_id='task4', dag=dag)
t5 = DummyOperator(task_id='task5', dag=dag)
t1 >> t2 >> [t3,t4] >> t5
1c90552cf52e653e519bda73228f741afee1058c | 3,148 | py | Python | pyhelp/scripts/produce_meteo_maps.py | jnsebgosselin/help | f0194a96ba7e1474fe1864d79447ee20cee949ec | [
"MIT"
] | 12 | 2019-03-11T12:38:35.000Z | 2021-06-26T03:40:18.000Z | pyhelp/scripts/produce_meteo_maps.py | jnsebgosselin/help | f0194a96ba7e1474fe1864d79447ee20cee949ec | [
"MIT"
] | 23 | 2018-11-22T15:16:12.000Z | 2022-03-25T12:55:33.000Z | pyhelp/scripts/produce_meteo_maps.py | jnsebgosselin/help | f0194a96ba7e1474fe1864d79447ee20cee949ec | [
"MIT"
] | 2 | 2019-04-18T17:47:00.000Z | 2021-08-31T04:45:30.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 10:54:25 2018
@author: jsgosselin
"""
# ---- Standard Library Imports
from itertools import product
import os.path as osp
import os
# ---- Third Party Imports
import netCDF4
from geopandas import GeoDataFrame
import pandas as pd
from shapely.geometry import Point, Polygon
import numpy as np
# Root folder holding the yearly InfoClimat daily weather grids (netCDF).
dirpath_netcdf = "D:/MeteoGrilleDaily"
# %% Get lat/lon from the netCDF
# The grid coordinates are the same for every year, so read them once
# from the 2000 file.
# NOTE(review): the dataset is opened in 'r+' (read/write) although it is
# only read -- 'r' would be safer; confirm before changing.
filename = osp.join(dirpath_netcdf, 'GCQ_v2_2000.nc')
netcdf_dset = netCDF4.Dataset(filename, 'r+')
lat = np.array(netcdf_dset['lat'])
lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
# %% Read the weather data from the InfoClimat grid
# Stack the daily precipitation and min/max temperature grids for the
# years 2000-2014 (range() upper bound is exclusive).
stack_precip = []
stack_tasmax = []
stack_tasmin = []
nyear = 0
for year in range(2000, 2015):
    print("\rProcessing year %d" % year, end=' ')
    filename = osp.join(dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
    netcdf_dset = netCDF4.Dataset(filename, 'r+')
    stack_precip.append(np.array(netcdf_dset['pr']))
    stack_tasmax.append(np.array(netcdf_dset['tasmax']))
    stack_tasmin.append(np.array(netcdf_dset['tasmin']))
    netcdf_dset.close()
    nyear += 1
print('')
# Concatenate along the time axis; daily mean temperature is approximated
# as the midpoint of the daily min and max.
daily_precip = np.vstack(stack_precip)
daily_tasmax = np.vstack(stack_tasmax)
daily_tasmin = np.vstack(stack_tasmin)
daily_tasavg = (daily_tasmax + daily_tasmin) / 2
# Yearly normals per grid cell: mean annual precipitation total and
# average daily temperatures over the whole period.
yearly_avg_precip = np.sum(daily_precip, axis=0) / nyear
yearly_avg_tasavg = np.average(daily_tasavg, axis=0)
yearly_avg_tasmax = np.average(daily_tasmax, axis=0)
yearly_avg_tasmin = np.average(daily_tasmin, axis=0)
# %% Create a grid
# Flatten the 2D (lat, lon) grid into 1D arrays, one entry per cell.
Np = len(lat) * len(lon)
geometry = []
arr_yearly_avg_precip = np.zeros(Np)
arr_avg_yearly_tasavg = np.zeros(Np)
arr_avg_yearly_tasmax = np.zeros(Np)
arr_avg_yearly_tasmin = np.zeros(Np)
i = 0
# Half cell size of the 0.1 degree grid (used by the Polygon variant below).
dx = dy = 0.1/2
for j, k in product(range(len(lat)), range(len(lon))):
    print("\rProcessing cell %d of %d" % (i, Np), end=' ')
    # Each grid cell is represented by its center point.
    point = Point((lon[k], lat[j]))
    # polygon = Polygon([(lon[k]-dx, lat[j]-dy),
    #                    (lon[k]-dx, lat[j]+dy),
    #                    (lon[k]+dx, lat[j]+dy),
    #                    (lon[k]+dx, lat[j]-dy)])
    geometry.append(point)
    arr_yearly_avg_precip[i] = yearly_avg_precip[j, k]
    arr_avg_yearly_tasavg[i] = yearly_avg_tasavg[j, k]
    arr_avg_yearly_tasmax[i] = yearly_avg_tasmax[j, k]
    arr_avg_yearly_tasmin[i] = yearly_avg_tasmin[j, k]
    i += 1
print("\rProcessing cell %d of %d" % (i, Np))
# %%
print('\rFormating the data in a shapefile...', end=' ')
df = pd.DataFrame(data={'precip': arr_yearly_avg_precip,
                        'tasavg': arr_avg_yearly_tasavg,
                        'tasmax': arr_avg_yearly_tasmax,
                        'tasmin': arr_avg_yearly_tasmin})
# NAD83 geographic coordinates (matches the InfoClimat grid).
crs = "+proj=longlat +ellps=GRS80 +datum=NAD83 +towgs84=0,0,0,0,0,0,0 +no_defs"
gdf = GeoDataFrame(df, crs=crs, geometry=geometry)
print('\rFormating the data in a shapefile... done')
print('\rSaving to Shapefile...', end=' ')
path_shp_out = ("D:/MeteoGrilleDaily/grid_yearly_meteo/grid_yearly_meteo.shp")
# NOTE(review): this creates a *directory* named like the .shp file and
# then writes into that path -- confirm the intent was not
# os.makedirs(osp.dirname(path_shp_out)).
if not osp.exists(path_shp_out):
    os.makedirs(path_shp_out)
gdf.to_file(path_shp_out)
print('\rSaving to Shapefile... done', end=' ')
| 29.420561 | 79 | 0.67249 |
1c90b62f02619f835bc7d89b23d75b9ecf0b6be0 | 1,803 | py | Python | platform/core/tests/test_activitylogs/test_service.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/tests/test_activitylogs/test_service.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/tests/test_activitylogs/test_service.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | # pylint:disable=ungrouped-imports
import uuid
import pytest
import activitylogs
from db.models.activitylogs import ActivityLog
from events.registry.experiment import EXPERIMENT_DELETED_TRIGGERED
from events.registry.user import USER_ACTIVATED
from factories.factory_experiments import ExperimentFactory
from factories.factory_users import UserFactory
from tests.base.case import BaseTest
| 36.795918 | 68 | 0.658902 |
1c9164529ab22e884811a536f6b8ba91eb8bbe19 | 420 | py | Python | tests/framework/test_ingress.py | praus/shapy | 7fa5512d9015b4921870f212495280fbb0675164 | [
"MIT"
] | 54 | 2015-01-14T10:14:11.000Z | 2022-02-25T17:12:10.000Z | tests/framework/test_ingress.py | praus/shapy | 7fa5512d9015b4921870f212495280fbb0675164 | [
"MIT"
] | 1 | 2015-12-29T07:37:17.000Z | 2015-12-30T06:17:41.000Z | tests/framework/test_ingress.py | praus/shapy | 7fa5512d9015b4921870f212495280fbb0675164 | [
"MIT"
] | 14 | 2015-02-10T15:29:48.000Z | 2021-09-22T03:01:13.000Z | import unittest
from shapy.framework.tcelements import *
from shapy.framework.executor import run
from tests import TCTestCase
| 24.705882 | 54 | 0.688095 |
1c91c9a08ce7e29a5358fe242bc8b960fc941c8f | 1,844 | py | Python | software/hippietrap/gradient.py | mayhem/led-chandelier | 899caa8d81e6aac6e954f78b4f5b4ab101bf5257 | [
"MIT"
] | 2 | 2018-09-20T08:36:11.000Z | 2019-08-25T20:06:11.000Z | software/hippietrap/gradient.py | mayhem/led-chandelier | 899caa8d81e6aac6e954f78b4f5b4ab101bf5257 | [
"MIT"
] | null | null | null | software/hippietrap/gradient.py | mayhem/led-chandelier | 899caa8d81e6aac6e954f78b4f5b4ab101bf5257 | [
"MIT"
] | 1 | 2020-12-12T18:21:18.000Z | 2020-12-12T18:21:18.000Z | from colorsys import hsv_to_rgb
from math import fabs, fmod
import os
from hippietrap.color import Color
| 29.741935 | 107 | 0.58026 |
1c95c503cb53578803e2dbe2dd22ba875018dd47 | 817 | py | Python | stix_shifter_modules/elastic/entry_point.py | 6un9-h0-Dan/stix-shifter | f99feee8c247b9fc1d79f6db623c301b49685b63 | [
"Apache-2.0"
] | 1 | 2020-04-06T21:28:19.000Z | 2020-04-06T21:28:19.000Z | stix_shifter_modules/elastic/entry_point.py | 6un9-h0-Dan/stix-shifter | f99feee8c247b9fc1d79f6db623c301b49685b63 | [
"Apache-2.0"
] | null | null | null | stix_shifter_modules/elastic/entry_point.py | 6un9-h0-Dan/stix-shifter | f99feee8c247b9fc1d79f6db623c301b49685b63 | [
"Apache-2.0"
] | null | null | null | from stix_shifter_utils.utils.entry_point_base import EntryPointBase
from stix_shifter_utils.modules.cim.stix_translation.cim_data_mapper import CimDataMapper
from stix_shifter_utils.modules.car.stix_translation.car_data_mapper import CarDataMapper
from .stix_translation.stix_to_elastic import StixToElastic | 74.272727 | 124 | 0.812729 |
1c962e345da89a5eb411a0b3f49cfb775dfe43b5 | 1,850 | py | Python | src/http_pick/pickergui.py | thomaspcole/http-pick | c470869878483241672c2928fd85458ab30555c4 | [
"MIT"
] | null | null | null | src/http_pick/pickergui.py | thomaspcole/http-pick | c470869878483241672c2928fd85458ab30555c4 | [
"MIT"
] | null | null | null | src/http_pick/pickergui.py | thomaspcole/http-pick | c470869878483241672c2928fd85458ab30555c4 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import (QMainWindow, QToolButton, QWidget, QHBoxLayout)
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
from math import floor
import sys | 40.217391 | 148 | 0.591892 |
1c967254ce0d2a6e7d37a5e738a1749e4d64b857 | 6,324 | py | Python | genetic_pwdcrack.py | robotenique/AI-programming | 41a690963b452165342cfd3caa81bfad13d1cc76 | [
"Unlicense"
] | 3 | 2018-04-05T16:38:48.000Z | 2020-11-15T21:24:57.000Z | genetic_pwdcrack.py | robotenique/AI-programming | 41a690963b452165342cfd3caa81bfad13d1cc76 | [
"Unlicense"
] | null | null | null | genetic_pwdcrack.py | robotenique/AI-programming | 41a690963b452165342cfd3caa81bfad13d1cc76 | [
"Unlicense"
] | null | null | null | """
Crack a password using a genetic algorithm!
"""
import random as rnd
def main():
    """Run the password-cracking genetic algorithm demo.

    Sets up the problem constants, verifies that the breeding parameters
    keep the population size constant across generations, then runs the GA
    and prints the final generation.
    """
    # Problem constants for the GA run.
    settings = {
        'password': "verylongwordpass",
        'size_population': 100,
        'best_sample': 20,
        'lucky_few': 20,
        'number_of_child': 5,
        'number_of_generations': 10000,  # Overkill >:D
        'chance_of_mutation': 0.5,
    }
    # Each breeding pair yields number_of_child children, so the next
    # generation has (best_sample + lucky_few) / 2 * number_of_child
    # members; that must equal size_population.
    parents = settings['best_sample'] + settings['lucky_few']
    if parents / 2 * settings['number_of_child'] != settings['size_population']:
        print ("population size not stable")
        return
    last_gen, _ = genetic_algorithm(**settings)
    print("Last generation: \n\n")
    print(last_gen)
def genetic_algorithm(**kwargs):
    """
    Execute the genetic algorithm.

    Takes the problem constants as keyword arguments (see main()), iterates
    for 'number_of_generations' generations (stopping early when the
    password is found), and returns the final population plus the historic
    of all generations.

    Bug fix: the historic previously aliased the initial population list
    (``hist = curr_pop``), so individual words and generation lists ended
    up mixed in one flat list; it now stores one population list per entry.
    """
    # Unpack the values from the dict
    password = kwargs['password']
    size_population = kwargs['size_population']
    best_sample = kwargs['best_sample']
    lucky_few = kwargs['lucky_few']
    number_of_child = kwargs['number_of_child']
    number_of_generations = kwargs['number_of_generations']
    chance_of_mutation = kwargs['chance_of_mutation']

    # The genetic algorithm
    curr_pop = initial_pop(size_population, password)
    hist = [curr_pop]  # one entry per generation, starting with generation 0
    last_found = -1
    for gen_index in range(number_of_generations):
        curr_pop = next_gen(curr_pop, password, best_sample, lucky_few,
                            number_of_child, chance_of_mutation)
        hist.append(curr_pop)
        if check_solution(curr_pop, password):
            last_found = gen_index
            break
    if last_found != -1:
        print(f"Found a solution in the {last_found} generation!!")
    else:
        print("No solution found! D':")
    return curr_pop, hist
def next_gen(curr_pop, password, best_sample, lucky_few, number_of_child, chance_of_mutation):
    """
    -> This is the main task of the Genetic Algorithm <-
    Produce the next generation from the current one:
    - rank every individual by fitness
    - select the best ones (plus some lucky picks) as breeders
    - breed them to create the children
    - apply the mutation layer and return the new population
    """
    ranked = compute_perf_pop(curr_pop, password)
    breeders = select_from_population(ranked, best_sample, lucky_few)
    children = create_children(breeders, number_of_child)
    return mutate_pop(children, chance_of_mutation)
def initial_pop(size, password):
    """
    Build the starting population: `size` random words, each with the
    same length as the password.
    """
    word_length = len(password)
    return [word_generate(word_length) for _ in range(size)]
def fitness(password, test_word):
    """
    The fitness function:
    fitness(test_word) = (# of correct chars) / (total chars) * 100,
    so 0 means no character matches and 100 means an exact match.

    Raises:
        ValueError: if the two words differ in length. (Previously a
        message was printed and None was returned implicitly, which made
        the sort in compute_perf_pop fail far from the actual cause.)
    """
    if len(test_word) != len(password):
        raise ValueError("Incompatible password...")
    matches = sum(1 for expected, actual in zip(password, test_word)
                  if expected == actual)
    return matches * 100 / len(password)
def compute_perf_pop(population, password):
    """
    Return (individual, fitness) pairs sorted best-first.
    """
    scored = {word: fitness(password, word) for word in population}
    # Best fitness first (descending order).
    return sorted(scored.items(), key=lambda pair: pair[1], reverse=True)
def select_from_population(pop_sorted, best_sample, lucky_few):
"""
Create the next breeders, with 'best_sample' individuals which have the
top fitness value from the population, and 'lucky_few' individuals which
are randomly selected.
"""
next_gen = []
for i in range(best_sample):
next_gen.append(pop_sorted[i][0])
# Simple lucky few: randomly select some elements from the population
for i in range(lucky_few):
next_gen.append(rnd.choice(pop_sorted)[0])
rnd.shuffle(next_gen)
return next_gen
def create_children(breeders, nof_childs):
    """
    Build the next population: breeder i is paired with breeder
    i + len(breeders)/2, and each pair produces `nof_childs` children.
    len(breeders) must be even.
    """
    half = len(breeders) // 2
    children = []
    for parent_a, parent_b in zip(breeders[:half], breeders[half:]):
        children.extend(create_child(parent_a, parent_b)
                        for _ in range(nof_childs))
    return children
def mutate_pop(population, chance):
    """
    Mutation layer: each individual is mutated (one random gene replaced)
    with probability `chance`. The list is modified in place and returned.
    """
    for idx, word in enumerate(population):
        if rnd.random() < chance:
            population[idx] = mutate_word(word)
    return population
def mutate_word(word):
"""
Mutate a letter(gene) from the word, then return it
"""
pos = int(rnd.random()*len(word))
word = word[:pos] + chr(97 + int(26*rnd.random())) + word[pos + 1:]
return word
def create_child(ind_1, ind_2):
"""
For each letter of the child, get a random gene from ind_1 or ind_2
in the i-th position.
"""
temp = [ind_1[i] if rnd.random() < 0.5 else ind_2[i] for i in range(len(ind_1))]
return "".join(temp)
def word_generate(length):
    """
    Generate a string of random lowercase ASCII letters of the given length.

    Bug fix: random.randint is inclusive on both ends, so the previous
    ``randint(0, 26)`` could produce chr(97 + 26) == '{', which is not a
    letter; the upper bound is now 25, matching the gene range used by
    mutate_word (``int(26 * random())`` yields 0..25).
    """
    return "".join(chr(97 + rnd.randint(0, 25)) for _ in range(length))
def check_solution(population, password):
    """
    Return True if any individual in the population equals the password.
    """
    return password in population
if __name__ == '__main__':
main()
| 34 | 108 | 0.669355 |