hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
731a5b94603a881cbdad31e6b399fc2db646e99b | 4,162 | py | Python | elegy/optimizer_test.py | sooheon/elegy | cad6f832cac1a34684c4f4f2c4a386cbfa817623 | [
"Apache-2.0"
] | null | null | null | elegy/optimizer_test.py | sooheon/elegy | cad6f832cac1a34684c4f4f2c4a386cbfa817623 | [
"Apache-2.0"
] | null | null | null | elegy/optimizer_test.py | sooheon/elegy | cad6f832cac1a34684c4f4f2c4a386cbfa817623 | [
"Apache-2.0"
] | null | null | null | import jax
import elegy
import unittest
import numpy as np
import jax.numpy as jnp
import optax
| 30.602941 | 88 | 0.606439 |
731a5f37b2d3af866a1a81886741f91cddda5c09 | 6,929 | py | Python | scripts/version.py | nfnty/docker | cdc68f57fdb6bd472b78d6ef6cbc77f430bd5089 | [
"MIT"
] | 54 | 2015-03-08T23:45:21.000Z | 2021-01-11T12:35:07.000Z | scripts/version.py | nfnty/docker | cdc68f57fdb6bd472b78d6ef6cbc77f430bd5089 | [
"MIT"
] | 4 | 2015-04-10T08:58:29.000Z | 2015-11-08T08:34:55.000Z | scripts/version.py | nfnty/docker | cdc68f57fdb6bd472b78d6ef6cbc77f430bd5089 | [
"MIT"
] | 16 | 2015-04-08T23:54:07.000Z | 2020-04-08T22:03:12.000Z | #!/usr/bin/python3
''' Check image package versions '''
import argparse
import distutils.version
import re
import subprocess
from typing import Any, Dict, Sequence, Tuple
import lxml.html # type: ignore
import requests
from termcolor import cprint
from utils.image import IMAGES, path_dockerfile
TIMEOUT = (31, 181) # (Connect, Read)
HEADERS = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0'}
def args_parse(arguments: Sequence[str] = None) -> argparse.Namespace:
''' Parse arguments '''
par0 = argparse.ArgumentParser(description='Image package version checker')
method = par0.add_mutually_exclusive_group(required=False)
method.add_argument(
'--include', metavar='IMAGE', action='append', choices=IMAGES.keys(),
help='Include image(s)',
)
method.add_argument(
'--exclude', metavar='IMAGE', action='append', choices=IMAGES.keys(),
help='Exclude image(s)',
)
return par0.parse_args(arguments)
def fetch(url: str, timeout: Tuple[int, int]) -> Any:
''' Fetch URL '''
try:
response = requests.get(url, headers=HEADERS, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as error:
raise RuntimeError('fetch: {0:s}\n{1:s}'.format(str(error), str(error.response.content)))
except OSError as error:
raise RuntimeError('fetch: {0:s}'.format(str(error)))
return lxml.html.document_fromstring(response.content)
def document_parse(document: Any, xpath: str, attribute: str,
regex: str) -> distutils.version.LooseVersion:
''' xpath version extractor '''
nodes = document.xpath(xpath)
if not nodes:
raise RuntimeError('Incorrect xpath: No nodes')
versions = []
for node in nodes:
if attribute:
string = node.get(attribute)
elif isinstance(node, str):
string = node
else:
string = node.text
if regex:
obj = re.search(regex, string,
flags=(re.MULTILINE | re.DOTALL)) # pylint: disable=no-member
if not obj:
continue
elif len(obj.groups()) > 1:
raise RuntimeError('Incorrect regex: More than 1 capture group')
string = obj.group(1)
if not string:
raise RuntimeError('Incorrect regex: Invalid capture group')
versions.append(distutils.version.LooseVersion(string))
if not versions:
raise RuntimeError('No matching versions')
version: distutils.version.LooseVersion = sorted(versions, reverse=True)[0]
if not version or not hasattr(version, 'vstring'):
raise RuntimeError('Version is invalid')
return version
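def _example_document_parse():
    # Illustrative sketch, not part of the original script: shows document_parse
    # picking the highest LooseVersion among matched nodes. The HTML snippet,
    # xpath and regex below are made-up assumptions.
    doc = lxml.html.document_fromstring(
        '<ul><li>pkg-1.2.0</li><li>pkg-1.10.1</li></ul>')
    version = document_parse(doc, '//li', None, r'pkg-([\d.]+)')
    assert version.vstring == '1.10.1'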
def version_scrape(url: str, xpath: str, attribute: str,
regex: str) -> distutils.version.LooseVersion:
''' Scrape latest version from url '''
document = fetch(url, TIMEOUT)
return document_parse(document, xpath, attribute, regex)
def version_pacman(package: str) -> Dict[str, distutils.version.LooseVersion]:
''' Return dict with repository versions of package '''
try:
output = subprocess.run([
'/usr/bin/expac', '--sync', '--search',
'%n %r %v',
r'^{0:s}$'.format(re.escape(package)),
], check=True, stdout=subprocess.PIPE).stdout.decode('UTF-8')
except subprocess.CalledProcessError:
raise RuntimeError('{0:s} not in any repository'.format(package))
versions: Dict[str, distutils.version.LooseVersion] = {}
for line in output.splitlines():
name, repo, version = line.split()
if name == package:
versions[repo] = distutils.version.LooseVersion(version)
return versions
def dockerfile_update(path: str, variable: str, version: str) -> None:
''' Update Dockerfiles with current version '''
with open(path, 'r') as fobj:
newfile, found = re.subn(
r'{0:s}=\'\S*\''.format(variable),
'{0:s}=\'{1:s}\''.format(variable, version),
fobj.read(),
)
if not found:
raise ValueError('Did not find ENV variable')
elif found > 1:
raise ValueError('More than 1: {0:s}'.format(variable))
with open(path, 'w') as fobj:
fobj.write(newfile)
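def _example_dockerfile_update():
    # Illustrative sketch, not part of the original script: demonstrates the
    # single in-place rewrite of a quoted variable assignment performed by
    # dockerfile_update. The file content and variable name are assumptions.
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.Dockerfile',
                                     delete=False) as tmp:
        tmp.write("ENV _VERSION='1.0'\n")
    dockerfile_update(tmp.name, '_VERSION', '2.0')
    with open(tmp.name) as fobj:
        assert fobj.read() == "ENV _VERSION='2.0'\n"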
def main() -> None: # pylint: disable=too-many-branches
''' Main '''
subprocess.check_call(['/usr/bin/sudo', '/usr/bin/pacman', '--sync', '--refresh'])
if ARGS.include:
images = {image: config for image, config in IMAGES.items() if image in ARGS.include}
elif ARGS.exclude:
images = {image: config for image, config in IMAGES.items() if image not in ARGS.exclude}
else:
images = IMAGES
for image, image_dict in sorted(images.items(), key=lambda item: item[0]):
cprint('\n{0:s}'.format(image), 'white', attrs=['underline'])
if 'Check' in image_dict and not image_dict['Check']:
print('Not checked!')
continue
if 'Packages' not in image_dict:
print('No packages!')
continue
for package, package_dict in image_dict['Packages'].items():
cprint('{0:s}:'.format(package), 'yellow')
for source, source_dict in package_dict['Sources'].items():
try:
source_dict['Version'] = version_scrape(
source_dict['URL'],
source_dict['XPath'],
source_dict['Attribute'] if 'Attribute' in source_dict else None,
source_dict['Regex'] if 'Regex' in source_dict else None,
)
except RuntimeError as error:
cprint('{0:s}: {1:s}'.format(source, str(error)), 'red')
source_dict['Version'] = None
try:
for repo, version in version_pacman(package).items():
package_dict['Sources'][repo] = {'Version': version}
except RuntimeError as error:
cprint(str(error), 'red')
for source, source_dict in package_dict['Sources'].items():
print('{0:15s}{1:s}'.format(
source,
source_dict['Version'].vstring if source_dict['Version'] else 'None',
))
if not package_dict['Sources'][package_dict['Download']]['Version']:
cprint('No Version for Download: {0:s}'.format(
package_dict['Download']), 'red')
continue
dockerfile_update(
path_dockerfile(image),
package_dict['Variable'],
package_dict['Sources'][package_dict['Download']]['Version'].vstring,
)
if __name__ == '__main__':
ARGS = args_parse()
main()
| 35.533333 | 97 | 0.593159 |
731bcc2e7423a542f77047dce4151ada325579ea | 2,441 | py | Python | nazrul.py | rakesh0703/Content_Parser_of_works_of_kazi_nazrul | c3e2060effe7b7576ee5b034a9aba3df648d6358 | [
"Apache-2.0"
] | null | null | null | nazrul.py | rakesh0703/Content_Parser_of_works_of_kazi_nazrul | c3e2060effe7b7576ee5b034a9aba3df648d6358 | [
"Apache-2.0"
] | null | null | null | nazrul.py | rakesh0703/Content_Parser_of_works_of_kazi_nazrul | c3e2060effe7b7576ee5b034a9aba3df648d6358 | [
"Apache-2.0"
] | null | null | null | # -- coding: UTF-8 --
"""
Spyder Editor
This is a temporary script file.
"""
from bs4 import BeautifulSoup
import sys
import os
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import urllib.parse,urllib.request,urllib.error
base="https://nazrul-rachanabali.nltr.org/"
page=urllib.request.urlopen(base).read();
soup=BeautifulSoup(page,'html5lib')
ba=soup.find_all("ul",{"class":["slidedoormenu"]})
#print(ba)
d=soup.div.ul.find_all('a')
#type(d[3])
article_page=(d[3]).get("href")
#soup.div.ul.li.a
newurl_2=base+article_page
page1=urllib.request.urlopen(newurl_2).read()
soup1=BeautifulSoup(page1,'html5lib')
e=soup1.find_all('a')
arr1=[]
arr4=[]
for link in e[1:9]:
f=link.get('href')
f=base+f
arr1.append(f)
arr4.append(link.get_text())
#for k in arr2:
for m in range(0,len(arr4)):
page1=urllib.request.urlopen(arr1[m]).read()
soup1=BeautifulSoup(page1,'html5lib')
x=soup1.find_all('div',id='data')
arr2=[];
arr3=[];
for i in x:
g=i.find_all('a')
for k in g[:-7]:
arr2.append(k.get('href'))
arr3.append(k.get_text())
for z in range(0,len(arr3)):
final_url=base+arr2[z]
#==============================================================================
# page1=urllib.request.urlopen(final_url).read()
# soup1=BeautifulSoup(page1,'html5lib')
# head = soup1.find_all("p",class_="head1")
# headd=head[0].get_text()
#==============================================================================
filenam = "D:\%s\%s"%(arr4[m],arr3[z])
if not os.path.exists(filenam):
os.makedirs(filenam)
for i in range(0,110):
if arr3[z].endswith(" "):
arr3[z]=arr3[z][:-1]
filename = "D:\%s\%s\%s_%d.txt"%(arr4[m],arr3[z],arr3[z],i)
fi = open(filename, "wb")
page1=urllib.request.urlopen(final_url).read()
soup1=BeautifulSoup(page1,'html5lib')
final_url=base+arr2[z]
h=soup1.find_all('div',id="data")
for j in h:
fi.write(j.text.encode("utf-8"))
s=j.text
if not s.split():
break
a,b=final_url.split('1&titleid=')
final_url=a+str(i+1)+"&titleid="+b
print('************'+final_url+'***********')
fi.close()
| 29.059524 | 87 | 0.530111 |
731c7020273e619e347b608e87b47d20ed636f00 | 3,515 | py | Python | core/handler.py | mh4x0f/kinproxy | 72dd24eb5ff5286c2bb57524124934a54614f9ec | [
"MIT"
] | 5 | 2018-01-20T15:33:14.000Z | 2021-06-29T04:26:44.000Z | core/handler.py | mh4x0f/kinproxy | 72dd24eb5ff5286c2bb57524124934a54614f9ec | [
"MIT"
] | null | null | null | core/handler.py | mh4x0f/kinproxy | 72dd24eb5ff5286c2bb57524124934a54614f9ec | [
"MIT"
] | 1 | 2019-03-08T18:46:05.000Z | 2019-03-08T18:46:05.000Z | try:
from mitmproxy import controller, proxy
from mitmproxy.proxy.server import ProxyServer
except:
from libmproxy import controller, proxy
from libmproxy.proxy.server import ProxyServer
from plugins import *
from threading import Thread
from core.config.settings import SettingsINI
# MIT License
#
# Copyright (c) 2018 Marcos Nesster
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 33.47619 | 80 | 0.657183 |
731d1cfc50fdedf83932598a710d90798e979c30 | 4,940 | py | Python | mapping/sandbox/graphslam/graphslam_pipeline.py | sameeptandon/sail-car-log | 0ee3d598bb09d389bcbd2ebf73cd4b2411e796be | [
"BSD-2-Clause"
] | 1 | 2021-02-24T03:11:13.000Z | 2021-02-24T03:11:13.000Z | mapping/sandbox/graphslam/graphslam_pipeline.py | sameeptandon/sail-car-log | 0ee3d598bb09d389bcbd2ebf73cd4b2411e796be | [
"BSD-2-Clause"
] | null | null | null | mapping/sandbox/graphslam/graphslam_pipeline.py | sameeptandon/sail-car-log | 0ee3d598bb09d389bcbd2ebf73cd4b2411e796be | [
"BSD-2-Clause"
] | 3 | 2015-03-18T14:36:04.000Z | 2018-07-04T02:57:24.000Z | import os
from os.path import join as pjoin
from subprocess import check_call
from ruffus import files, follows, pipeline_run, pipeline_printout, pipeline_printout_graph, jobs_limit
from graphslam_config import GRAPHSLAM_PATH,\
GRAPHSLAM_MATCH_DIR, GRAPHSLAM_OPT_POS_DIR, GRAPHSLAM_ALIGN_DIR,\
MATCHES_FILE, GPS_FILES, RSS_LIST, GRAPHSLAM_OUT_DIR, GRAPHSLAM_DIRS,\
GRAPHSLAM_MAPS_DIR, GRAPHSLAM_VIDEOS_DIR, GRAPHSLAM_EVAL_DIR
from pipeline_config import NUM_CPUS, SAIL_CAR_LOG_PATH
from pipeline_utils import print_and_call, touchf
# NOTE Have to rerun this after match_traces is run
def clean_pipelines():
for route, segment, split in RSS_LIST:
cmd = 'export SCL_ROUTE=%s; export SCL_SEGMENT=%s; export SCL_SPLIT=%s; python %s/mapping/pipeline/pipeline.py clean' % (route, segment, split, SAIL_CAR_LOG_PATH)
print_and_call(cmd)
def clean():
for d in GRAPHSLAM_DIRS:
print 'deleting %s' % d
if os.path.exists(d):
check_call('rm -r %s' % d, shell=True)
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print 'Usage: python graphslam_pipeline.py print,graph,run (task1,task2)'
sys.exit(1)
TORUN = [
]
if len(sys.argv) == 3:
TORUN = sys.argv[2].split(',')
CMDS = sys.argv[1].split(',')
tasks = {
'print': lambda: pipeline_printout(sys.stdout, TORUN,
forcedtorun_tasks=[], verbose=5),
'graph': lambda: pipeline_printout_graph('graph.jpg', 'jpg', TORUN,
forcedtorun_tasks=[],
no_key_legend=False),
'run': lambda: pipeline_run(TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'force': lambda: pipeline_run([],
forcedtorun_tasks=TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'printf': lambda: pipeline_printout(sys.stdout,
[],
forcedtorun_tasks=TORUN,
verbose=2),
'clean': clean,
'clean_pipelines': clean_pipelines
}
for key in tasks:
if key in CMDS:
tasks[key]()
| 37.424242 | 185 | 0.654049 |
731e4596b4a14f1da0dc95574358cfa12ef495f2 | 319 | py | Python | sandbox/wavelets.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | [
"MIT"
] | null | null | null | sandbox/wavelets.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | [
"MIT"
] | 59 | 2021-03-26T19:22:59.000Z | 2021-12-04T00:42:12.000Z | sandbox/wavelets.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel
ricker_2d_kernel = RickerWavelet2DKernel(5)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
print(ricker_2d_kernel)
| 22.785714 | 66 | 0.793103 |
731e657c5103db0d7c66fbe61179c7894a85d4d3 | 5,267 | py | Python | tests/test_errors.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
] | null | null | null | tests/test_errors.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
] | null | null | null | tests/test_errors.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
] | null | null | null | import os
import pytest
from firepit.exceptions import IncompatibleType
from firepit.exceptions import InvalidAttr
from firepit.exceptions import InvalidStixPath
from firepit.exceptions import InvalidViewname
from firepit.exceptions import StixPatternError
from .helpers import tmp_storage
def test_empty_results(fake_bundle_file, tmpdir):
"""Look for finding objects that aren't there"""
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('my_findings', 'x-ibm-finding', 'q1', "[x-ibm-finding:name = 'Whatever']")
findings = store.lookup('my_findings')
assert findings == []
| 35.587838 | 101 | 0.652933 |
731f66af557f8e0f3fe1a093bf5c18d9478212d8 | 11,798 | py | Python | script/run_scribus.py | csneofreak/public-domain-season-songs | d6e559e7cfe6e3a7ab784855a096d02ae8c656cd | [
"Unlicense"
] | 14 | 2015-12-18T10:52:15.000Z | 2021-01-11T14:43:47.000Z | script/run_scribus.py | csneofreak/public-domain-season-songs | d6e559e7cfe6e3a7ab784855a096d02ae8c656cd | [
"Unlicense"
] | 1 | 2015-12-05T19:30:01.000Z | 2015-12-05T19:30:01.000Z | script/run_scribus.py | csneofreak/public-domain-season-songs | d6e559e7cfe6e3a7ab784855a096d02ae8c656cd | [
"Unlicense"
] | 9 | 2015-03-11T04:09:23.000Z | 2021-12-18T21:44:47.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import json
import os
import math
import scribus
import simplebin
import inspect
from collections import defaultdict
PWD = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))


def pwd(filename):
    # helper assumed from the usage below (its original definition is missing
    # from this dump): resolve a path relative to this script's directory
    return os.path.join(PWD, filename)


DATA_FILE = pwd("data.json")
CACHE_FILE = pwd("cache.json")
MANUEL_PROCESSING_FILE = pwd("manual_processing.json")
FILES = pwd("lily_output/")
FAST = False # use this to debug
SPACING_SONGS = 10
EFFECTIVE_PAGE_HEIGHT = 255 + SPACING_SONGS
SPACING_HEADLINE_SONG = 18
SPACING_SONG_TEXT = 5
PAGE_NUM_HEIGHT = 5
BASELINE_GRID = 5
if __name__ == "__main__":
main()
| 37.693291 | 152 | 0.66418 |
73204039a36f632a872aa19aeb8f09ee3f95315d | 424 | py | Python | 12-transformar_metro.py | tainagirotto/exercicios-py | 39107c05a0a8e5230cd48876ad8e1ad6f0a2ff59 | [
"MIT"
] | null | null | null | 12-transformar_metro.py | tainagirotto/exercicios-py | 39107c05a0a8e5230cd48876ad8e1ad6f0a2ff59 | [
"MIT"
] | null | null | null | 12-transformar_metro.py | tainagirotto/exercicios-py | 39107c05a0a8e5230cd48876ad8e1ad6f0a2ff59 | [
"MIT"
] | null | null | null | # Read a number in meters and show its value in other units:
m = float(input('Enter the value in meters: '))
dm = m * 10
cm = m * 100
mm = m * 1000
km = m / 1000
hm = m / 100
dam = m / 10
print('The value in cm is {}'.format(cm))
print('The value in mm is {}'.format(mm))
print('The value in dm is {}'.format(dm))
print('The value in km is {}'.format(km))
print('The value in hm is {}'.format(hm))
print('The value in dam is {}'.format(dam))
| 24.941176 | 57 | 0.613208 |
73212f2cfd8e6dccfeaf70d354cab83a3bcc2ae2 | 3,059 | py | Python | src/urls.py | chunky2808/Hire-Me | 7a43fb2f555a5f46e285d24c18457c2ce1c0d225 | [
"MIT"
] | null | null | null | src/urls.py | chunky2808/Hire-Me | 7a43fb2f555a5f46e285d24c18457c2ce1c0d225 | [
"MIT"
] | 6 | 2020-02-12T00:41:15.000Z | 2022-03-11T23:20:37.000Z | src/urls.py | chunky2808/Hire-Me | 7a43fb2f555a5f46e285d24c18457c2ce1c0d225 | [
"MIT"
] | null | null | null | """src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from accounts import views as accounts_views
from hire import views as hire_views
from django.contrib.auth import views as auth_views
from chat_app import views as chat_views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', hire_views.mainee,name = 'maiee'),
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^accounts/signup/$', accounts_views.signup, name='signup'),
url(r'^accounts/signup/customer/$', accounts_views.CustomerSignUpView.as_view(), name='customer_signup'),
url(r'^accounts/signup/service/$', accounts_views.ServiceSignUpView.as_view(), name='service_signup'),
url(r'^chat/(?P<stri_id>\w+?)/', chat_views.chat, name='index'),
url(r'^chatbox/(?P<stri_id>\w+?)/', chat_views.chatbox, name='chat'),
url(r'^oauth/', include('social_django.urls', namespace='social')), # <--
url(r'^login/$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
url(r'^logout/$', auth_views.LogoutView.as_view(), name='logout'),
url(r'^services/$', hire_views.home, name='home'),
url(r'^services/new/$', hire_views.add_service, name='new_service'),
url(r'^services/(?P<pk>\d+)/$', hire_views.list_services, name='serve_list'),
url(r'^services/(?P<pk>\d+)/new/$', hire_views.list_services_new, name='new_serve_list'),
url(r'^services/(?P<pk>\d+)/delete/$', hire_views.delete_main, name='delete'),
url(r'^services/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.review, name='review'),
url(r'^services/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/new/$', hire_views.review_new, name='review_new'),
url(r'^worker_page/(?P<pk>\d+)/$', hire_views.worker_page, name='worker_page'),
url(r'^increment/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.increment, name='increment'),
url(r'^decrement/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.decrement, name='decrement'),
# url(r'^user/$', hire_views.model_form_upload, name='model_form_upload'),
url(r'^hello/$', hire_views.hello, name='hello'),
]
if settings.DEBUG == True:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
| 50.147541 | 119 | 0.676038 |
7322d738208c1e92a29dc1677393b7f139a60b9b | 1,546 | py | Python | re_compare/re_compare.py | gchase/re-compare | c717094053fd5938ea7f0a46dcfec75bc077cb7e | [
"MIT"
] | null | null | null | re_compare/re_compare.py | gchase/re-compare | c717094053fd5938ea7f0a46dcfec75bc077cb7e | [
"MIT"
] | null | null | null | re_compare/re_compare.py | gchase/re-compare | c717094053fd5938ea7f0a46dcfec75bc077cb7e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import argparse
import traceback
import os
import sys
from analysis import Analysis
from collector import Collector
from config import DEBUG, DEFAULT_LOG_FILE_DIR
if __name__ == '__main__':
main()
| 24.539683 | 82 | 0.641656 |
7323e7284674358cab716226cc5bccd1b52ec055 | 1,216 | py | Python | venv/Lib/site-packages/nipype/conftest.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | [
"MIT"
] | 585 | 2015-01-12T16:06:47.000Z | 2022-03-26T14:51:08.000Z | nipype/conftest.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
] | 2,329 | 2015-01-01T09:56:41.000Z | 2022-03-30T14:24:49.000Z | nipype/conftest.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
] | 487 | 2015-01-20T01:04:52.000Z | 2022-03-21T21:22:47.000Z | import os
import shutil
from tempfile import mkdtemp
import pytest
import numpy
import py.path as pp
NIPYPE_DATADIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), "testing/data")
)
temp_folder = mkdtemp()
data_dir = os.path.join(temp_folder, "data")
shutil.copytree(NIPYPE_DATADIR, data_dir)
def pytest_unconfigure(config):
# Delete temp folder after session is finished
shutil.rmtree(temp_folder)
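@pytest.fixture(scope="session")
def example_data_dir():
    # Illustrative sketch (the original fixtures are missing from this dump):
    # a session-scoped accessor for the temporary copy of the testing data
    # created above. The fixture name is an assumption.
    return data_dir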
| 26.434783 | 76 | 0.709704 |
732764ebd0702a98bf1fa40e238672b9d8162849 | 397 | py | Python | tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | from mock import Mock
from malcolm.testutil import ChildTestCase
from malcolm.modules.ADPandABlocks.blocks import pandablocks_runnable_block
| 33.083333 | 78 | 0.7733 |
73276ed229a6cddfa545672ed9e4e28191eeb79e | 2,939 | py | Python | pymbolic/mapper/coefficient.py | sv2518/pymbolic | 42687a410b1c355beec510b91c18f97e5137795b | [
"MIT"
] | null | null | null | pymbolic/mapper/coefficient.py | sv2518/pymbolic | 42687a410b1c355beec510b91c18f97e5137795b | [
"MIT"
] | null | null | null | pymbolic/mapper/coefficient.py | sv2518/pymbolic | 42687a410b1c355beec510b91c18f97e5137795b | [
"MIT"
] | null | null | null | __copyright__ = "Copyright (C) 2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pymbolic.mapper import Mapper
| 35.841463 | 77 | 0.639673 |
7328644eaa6b2ec01fefc42231719421b2897b5b | 1,958 | py | Python | day_06/balancer.py | anglerud/advent_of_code_2017 | eff27d43cd9eb7c60271887c80cb88f1ae50c48d | [
"MIT"
] | 3 | 2017-12-06T21:23:19.000Z | 2020-04-12T09:49:53.000Z | day_06/balancer.py | anglerud/advent_of_code_2017 | eff27d43cd9eb7c60271887c80cb88f1ae50c48d | [
"MIT"
] | null | null | null | day_06/balancer.py | anglerud/advent_of_code_2017 | eff27d43cd9eb7c60271887c80cb88f1ae50c48d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
""" """
import typing as t
import attr
import click
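@attr.s(frozen=True)
class Memory:
    # Minimal sketch of the Memory class used below (its original definition
    # is missing from this dump), assuming the day-6 reallocation rules:
    # frozen attrs instances are hashable, so arrangements can live in a set.
    banks = attr.ib(converter=tuple)

    def balance(self):
        ''' Redistribute the blocks of the fullest bank, one at a time. '''
        banks = list(self.banks)
        i = banks.index(max(banks))  # ties go to the lowest-indexed bank
        blocks, banks[i] = banks[i], 0
        while blocks:
            i = (i + 1) % len(banks)
            banks[i] += 1
            blocks -= 1
        return Memory(banks)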
def detect_loop(memory: Memory) -> t.Tuple[int, Memory]:
    ''' Find how many steps until we detect a loop; returns (steps, repeated memory). '''
arrangements_seen = set()
balancer_rounds = 0
while memory not in arrangements_seen:
arrangements_seen.add(memory)
memory = memory.balance()
balancer_rounds += 1
return balancer_rounds, memory
def main():
"""Entrypoint."""
balancer()
if __name__ == '__main__':
main()
| 23.035294 | 76 | 0.62666 |
7329f5fd535980ef38334fa719dd3596b7005058 | 176 | py | Python | python-while/exercise4.py | crobert7/Py-Basics | c1d1a1441de6cbee409c59ddda2b11bc7ee16df1 | [
"MIT"
] | null | null | null | python-while/exercise4.py | crobert7/Py-Basics | c1d1a1441de6cbee409c59ddda2b11bc7ee16df1 | [
"MIT"
] | null | null | null | python-while/exercise4.py | crobert7/Py-Basics | c1d1a1441de6cbee409c59ddda2b11bc7ee16df1 | [
"MIT"
] | null | null | null | word = input('Type a word: ')
while word != 'chupacabra':
word = input('Type a word: ')
if word == 'chupacabra':
print('You are out of the loop')
break | 25.142857 | 40 | 0.5625 |
732ac32a2f056f0d1b4317192e07425ea49f8e2a | 1,268 | bzl | Python | pw_build/selects.bzl | mspang/pigweed | 89ff5f98f38b1ff7a1ff0633c590479e9b592a14 | [
"Apache-2.0"
] | null | null | null | pw_build/selects.bzl | mspang/pigweed | 89ff5f98f38b1ff7a1ff0633c590479e9b592a14 | [
"Apache-2.0"
] | 1 | 2021-06-18T13:54:41.000Z | 2021-06-18T13:54:41.000Z | pw_build/selects.bzl | mspang/pigweed | 89ff5f98f38b1ff7a1ff0633c590479e9b592a14 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
_RTOS_NONE = "//pw_build/constraints/rtos:none"
# Common select for tagging a target as only compatible with host OS's. This
# select implements the logic '(Windows or Macos or Linux) and not RTOS'.
# Example usage:
# load("//pw_build:selects.bzl","TARGET_COMPATIBLE_WITH_HOST_SELECT")
# pw_cc_library(
# name = "some_host_only_lib",
# hdrs = ["host.h"],
# target_compatible_with = select(TARGET_COMPATIBLE_WITH_HOST_SELECT),
# )
TARGET_COMPATIBLE_WITH_HOST_SELECT = {
"@platforms//os:windows": [_RTOS_NONE],
"@platforms//os:macos": [_RTOS_NONE],
"@platforms//os:linux": [_RTOS_NONE],
"//conditions:default": ["@platforms//:incompatible"],
}
| 39.625 | 79 | 0.729495 |
732b698a8ba47881b21329d7619d19e7979a1509 | 1,433 | py | Python | subscriptions/models.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
] | null | null | null | subscriptions/models.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
] | 4 | 2021-03-30T14:10:30.000Z | 2021-09-22T19:29:56.000Z | subscriptions/models.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
] | null | null | null | # subscriptions/models.py
import uuid
from django.db import models
from accesses.models import Accesses, Services
| 37.710526 | 105 | 0.752966 |
732bbfe89e64414c6afc65b3cfb58bb41674d875 | 2,848 | py | Python | leboncrevard/job.py | mclbn/leboncrevard | ee1b2a445eeda8f8561b5c62289b994dff38cfa9 | [
"ISC"
] | 5 | 2017-03-14T00:28:13.000Z | 2019-02-06T15:38:21.000Z | leboncrevard/job.py | mclbn/leboncrevard | ee1b2a445eeda8f8561b5c62289b994dff38cfa9 | [
"ISC"
] | null | null | null | leboncrevard/job.py | mclbn/leboncrevard | ee1b2a445eeda8f8561b5c62289b994dff38cfa9 | [
"ISC"
] | 5 | 2017-02-25T07:31:26.000Z | 2019-02-06T15:38:27.000Z | import smtplib
import time
from email.mime.text import MIMEText
from leboncrevard import scrapper, config
| 33.904762 | 81 | 0.458567 |
732c359d55e1699fb9b02c52c8e5453f0946a5bf | 13,825 | py | Python | tsl/data/datamodule/splitters.py | TorchSpatiotemporal/tsl | da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0 | [
"MIT"
] | 4 | 2022-03-21T09:16:33.000Z | 2022-03-30T12:24:30.000Z | tsl/data/datamodule/splitters.py | TorchSpatiotemporal/tsl | da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0 | [
"MIT"
] | null | null | null | tsl/data/datamodule/splitters.py | TorchSpatiotemporal/tsl | da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0 | [
"MIT"
] | null | null | null | import functools
from copy import deepcopy
from datetime import datetime
from typing import Mapping, Callable, Union, Tuple, Optional
import numpy as np
from tsl.utils.python_utils import ensure_list
from ..spatiotemporal_dataset import SpatioTemporalDataset
from ..utils import SynchMode
__all__ = [
'Splitter',
'CustomSplitter',
'TemporalSplitter',
'AtTimeStepSplitter',
]
from ...typing import Index
class CustomSplitter(Splitter):
def fit(self, dataset: SpatioTemporalDataset):
_, test_idxs = self.test_split_fn(dataset, **self.test_kwargs)
val_kwargs = self.val_kwargs
if self.mask_test_indices_in_val and len(test_idxs):
val_kwargs = dict(**self.val_kwargs, mask=test_idxs)
train_idxs, val_idxs = self.val_split_fn(dataset, **val_kwargs)
self.set_indices(train_idxs, val_idxs, test_idxs)
class FixedIndicesSplitter(Splitter):
    ...


class TemporalSplitter(Splitter):
    ...
###
def indices_between(dataset: SpatioTemporalDataset,
first_ts: Union[Tuple, datetime] = None,
last_ts: Union[Tuple, datetime] = None):
if first_ts is not None:
if isinstance(first_ts, datetime):
pass
elif isinstance(first_ts, (tuple, list)) and len(first_ts) >= 3:
first_ts = datetime(*first_ts, tzinfo=dataset.index.tzinfo)
else:
raise TypeError("first_ts must be a datetime or a tuple")
if last_ts is not None:
if isinstance(last_ts, datetime):
pass
elif isinstance(last_ts, (tuple, list)) and len(last_ts) >= 3:
last_ts = datetime(*last_ts, tzinfo=dataset.index.tzinfo)
else:
raise TypeError("last_ts must be a datetime or a tuple")
first_day_loc, last_day_loc = dataset.index.slice_locs(first_ts, last_ts)
first_sample_loc = first_day_loc - dataset.horizon_offset
last_sample_loc = last_day_loc - dataset.horizon_offset - 1
indices_from_sample = np.where((first_sample_loc <= dataset.indices) & (
dataset.indices < last_sample_loc))[0]
return indices_from_sample
def split_at_ts(dataset, ts, mask=None):
from_day_idxs = indices_between(dataset, first_ts=ts)
prev_idxs = np.arange(
from_day_idxs[0] if len(from_day_idxs) else len(dataset))
if mask is not None:
from_day_idxs = np.setdiff1d(from_day_idxs, mask)
prev_idxs = np.setdiff1d(prev_idxs, mask)
return prev_idxs, from_day_idxs
def disjoint_months(dataset, months=None, synch_mode=SynchMode.WINDOW):
idxs = np.arange(len(dataset))
months = ensure_list(months)
# divide indices according to window or horizon
if synch_mode is SynchMode.WINDOW:
start = 0
end = dataset.window - 1
elif synch_mode is SynchMode.HORIZON:
start = dataset.horizon_offset
end = dataset.horizon_offset + dataset.horizon - 1
else:
raise ValueError('synch_mode can only be one of %s'
% [SynchMode.WINDOW, SynchMode.HORIZON])
# after idxs
indices = np.asarray(dataset._indices)
start_in_months = np.in1d(dataset.index[indices + start].month, months)
end_in_months = np.in1d(dataset.index[indices + end].month, months)
idxs_in_months = start_in_months & end_in_months
after_idxs = idxs[idxs_in_months]
# previous idxs
months = np.setdiff1d(np.arange(1, 13), months)
start_in_months = np.in1d(dataset.index[indices + start].month, months)
end_in_months = np.in1d(dataset.index[indices + end].month, months)
idxs_in_months = start_in_months & end_in_months
prev_idxs = idxs[idxs_in_months]
return prev_idxs, after_idxs
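def _example_split_at_ts():
    # Illustrative sketch, not part of the original module: split_at_ts only
    # touches the `index`, `horizon_offset` and `indices` attributes of the
    # dataset, so a tiny duck-typed stand-in with a pandas DatetimeIndex is
    # enough to show the train/test cut.
    import types
    import pandas as pd
    ds = types.SimpleNamespace(
        index=pd.date_range('2019-01-01', periods=10, freq='D'),
        horizon_offset=1,
        indices=np.arange(8),
    )
    train_idxs, test_idxs = split_at_ts(ds, datetime(2019, 1, 6))
    assert list(train_idxs) == [0, 1, 2, 3]
    assert list(test_idxs) == [4, 5, 6, 7]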
# SPLIT FUNCTIONS
# aliases
temporal = TemporalSplitter
at_ts = AtTimeStepSplitter
| 34.051724 | 115 | 0.650922 |
732d71e2f7609d24712a7e6d1541ad6047bd54bf | 3,483 | py | Python | demo.py | bringBackm/SSD | 6cbc9018fd7365d7c65cf6d4da90c14cced5e542 | [
"MIT"
] | null | null | null | demo.py | bringBackm/SSD | 6cbc9018fd7365d7c65cf6d4da90c14cced5e542 | [
"MIT"
] | null | null | null | demo.py | bringBackm/SSD | 6cbc9018fd7365d7c65cf6d4da90c14cced5e542 | [
"MIT"
] | null | null | null | import glob
import os
import torch
from PIL import Image
from tqdm import tqdm
from ssd.config import cfg
from ssd.data.datasets import COCODataset, VOCDataset
from ssd.modeling.predictor import Predictor
from ssd.modeling.vgg_ssd import build_ssd_model
import argparse
import numpy as np
from ssd.utils.viz import draw_bounding_boxes
if __name__ == '__main__':
main()
| 35.540816 | 128 | 0.669251 |
732dd28306f669f7a332fd1f3c061993b13d2ff5 | 800 | py | Python | quiz/bot/storage/shelter.py | shubham-king/guess-the-melody | 1658a17f41b39cfd212175a03e043b3be7bc0e56 | [
"MIT"
] | 4 | 2019-06-15T15:32:43.000Z | 2020-02-21T09:45:36.000Z | quiz/bot/storage/shelter.py | shubham-king/guess-the-melody | 1658a17f41b39cfd212175a03e043b3be7bc0e56 | [
"MIT"
] | 4 | 2020-07-05T07:20:26.000Z | 2021-04-25T21:05:53.000Z | quiz/bot/storage/shelter.py | shubham-king/guess-the-melody | 1658a17f41b39cfd212175a03e043b3be7bc0e56 | [
"MIT"
] | 7 | 2020-06-13T20:11:41.000Z | 2021-11-09T07:47:02.000Z | from shelve import DbfilenameShelf, open
from typing import Type
from quiz.config import Config
from quiz.types import ContextManager, DictAccess
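class _ExampleShelter:
    # Illustrative sketch (the original class is missing from this dump): a
    # minimal shelve-backed context manager consistent with the imports above.
    # Note that `open` here is shelve.open, imported at the top of the file.
    def __init__(self, path: str) -> None:
        self._path = path

    def __enter__(self) -> DbfilenameShelf:
        self._db = open(self._path)
        return self._db

    def __exit__(self, *exc) -> None:
        self._db.close()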
| 26.666667 | 65 | 0.665 |
73321c0acbdf4ab01ceed6d568aada4c5a634a97 | 125 | py | Python | src/applications/task310/apps.py | SergeyNazarovSam/SergeyPythonfirst | fd2bddf1f5ba28c6802be921177917f369f7ef2e | [
"MIT"
] | 2 | 2020-12-17T20:19:21.000Z | 2020-12-22T12:46:43.000Z | src/applications/task310/apps.py | alexander-sidorov/tms-z43 | 61ecd204f5de4e97ff0300f6ef91c36c2bcda31c | [
"MIT"
] | 4 | 2021-04-20T08:40:30.000Z | 2022-02-10T07:50:30.000Z | src/applications/task310/apps.py | SergeyNazarovSam/SergeyPythonfirst | fd2bddf1f5ba28c6802be921177917f369f7ef2e | [
"MIT"
] | 1 | 2021-02-10T06:42:19.000Z | 2021-02-10T06:42:19.000Z | from django.apps import AppConfig
| 17.857143 | 34 | 0.72 |
7332ce4b3b7c7b972d457f074400634cb61ce765 | 5,686 | py | Python | scripts/data_creation_v3.py | deepchecks/url_classification_dl | 029fddb78e019cf288adcc2fd46be3435536d469 | [
"CC0-1.0"
] | 3 | 2021-05-22T09:20:54.000Z | 2022-03-14T15:58:17.000Z | scripts/data_creation_v3.py | deepchecks/url_classification_dl | 029fddb78e019cf288adcc2fd46be3435536d469 | [
"CC0-1.0"
] | 1 | 2021-11-15T11:22:48.000Z | 2021-12-11T13:32:19.000Z | scripts/data_creation_v3.py | deepchecks/url_classification_dl | 029fddb78e019cf288adcc2fd46be3435536d469 | [
"CC0-1.0"
] | 6 | 2021-05-15T17:46:22.000Z | 2022-03-24T11:24:59.000Z | import whois
from datetime import datetime, timezone
import math
import pandas as pd
import numpy as np
from pyquery import PyQuery
from requests import get
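def _example_url_entropy(url: str) -> float:
    # Illustrative sketch (the original feature functions are missing from
    # this dump): a Shannon-entropy feature of the kind URL classifiers
    # commonly derive, using only the imports already present above.
    counts = pd.Series(list(url)).value_counts()
    probs = counts / len(url)
    return float(-(probs * probs.map(math.log2)).sum())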
| 29.769634 | 90 | 0.536933 |
7333549135a1f86b79763216b9dd3553195359bb | 5,175 | py | Python | rgnn_at_scale/models/gat.py | sigeisler/robustness_of_gnns_at_scale | 0f4844711ace599f54c2abc760b53680a80d6a32 | [
"MIT"
] | 11 | 2021-11-01T19:54:41.000Z | 2022-01-27T11:34:11.000Z | rgnn_at_scale/models/gat.py | sigeisler/robustness_of_gnns_at_scale | 0f4844711ace599f54c2abc760b53680a80d6a32 | [
"MIT"
] | 1 | 2021-12-13T21:14:56.000Z | 2022-01-16T17:37:36.000Z | rgnn_at_scale/models/gat.py | sigeisler/robustness_of_gnns_at_scale | 0f4844711ace599f54c2abc760b53680a80d6a32 | [
"MIT"
] | 2 | 2021-11-05T00:42:18.000Z | 2022-01-12T10:10:30.000Z | from typing import Any, Dict, Tuple
import torch
from torch_geometric.nn import GATConv
from torch_sparse import SparseTensor, set_diag
from rgnn_at_scale.aggregation import ROBUST_MEANS
from rgnn_at_scale.models.gcn import GCN
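class _ExampleGAT(torch.nn.Module):
    # Illustrative sketch (the model class is missing from this dump): a
    # minimal two-layer GAT wiring GATConv the way such wrappers typically
    # do; hidden size, heads and dropout values are assumptions.
    def __init__(self, n_features: int, n_classes: int,
                 n_filters: int = 16, heads: int = 8, dropout: float = 0.5):
        super().__init__()
        self.dropout = dropout
        self.conv1 = GATConv(n_features, n_filters, heads=heads)
        self.conv2 = GATConv(n_filters * heads, n_classes, heads=1)

    def forward(self, x, edge_index):
        x = torch.nn.functional.dropout(self.conv1(x, edge_index).relu(),
                                        p=self.dropout, training=self.training)
        return self.conv2(x, edge_index)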
| 36.443662 | 119 | 0.636329 |
7334d673ab4fa7b6545531cff68878e44e4b4835 | 902 | py | Python | code/renderer/randomize/material.py | jonathangranskog/shading-scene-representations | 9c9033a1ca05095c7e2ccfeb4da3046b687bef3d | [
"MIT"
] | 21 | 2020-09-28T10:38:04.000Z | 2022-03-12T08:46:09.000Z | code/renderer/randomize/material.py | jonathangranskog/shading-scene-representations | 9c9033a1ca05095c7e2ccfeb4da3046b687bef3d | [
"MIT"
] | null | null | null | code/renderer/randomize/material.py | jonathangranskog/shading-scene-representations | 9c9033a1ca05095c7e2ccfeb4da3046b687bef3d | [
"MIT"
] | 1 | 2020-12-16T14:56:21.000Z | 2020-12-16T14:56:21.000Z | import numpy as np
import pyrr
import os | 32.214286 | 184 | 0.586475 |
7335cb0ff48cfe398a0b353de5f1570850d9c8fa | 3,752 | py | Python | frappe/core/doctype/sms_settings/sms_settings.py | ektai/erp2Dodock | 5ad64b01cba9b07437f9a27751101258679379e8 | [
"MIT"
] | null | null | null | frappe/core/doctype/sms_settings/sms_settings.py | ektai/erp2Dodock | 5ad64b01cba9b07437f9a27751101258679379e8 | [
"MIT"
] | null | null | null | frappe/core/doctype/sms_settings/sms_settings.py | ektai/erp2Dodock | 5ad64b01cba9b07437f9a27751101258679379e8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw, msgprint
from frappe.utils import nowdate
from frappe.model.document import Document
import six
from six import string_types
def send_via_gateway(arg):
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
headers = get_headers(ss)
use_json = headers.get("Content-Type") == "application/json"
message = frappe.safe_decode(arg.get('message'))
args = {ss.message_parameter: message}
for d in ss.get("parameters"):
if not d.header:
args[d.parameter] = d.value
success_list = []
for d in arg.get('receiver_list'):
args[ss.receiver_parameter] = d
status = send_request(ss.sms_gateway_url, args, headers, ss.use_post, use_json)
if 200 <= status < 300:
success_list.append(d)
if len(success_list) > 0:
args.update(arg)
create_sms_log(args, success_list)
if arg.get('success_msg'):
frappe.msgprint(_("SMS sent to following numbers: {0}").format("\n" + "\n".join(success_list)))
# Create SMS Log
# ========================================================= | 27.188406 | 98 | 0.711354 |
7335d3017f92ccc28bd13ffbbbef33f7a8f4f467 | 481 | py | Python | blog/migrations/0041_auto_20190504_0855.py | akindele214/181hub_2 | 48b8814b5f66ad87f9a54721506076ddf70fe9bc | [
"MIT"
] | 1 | 2020-05-20T08:42:49.000Z | 2020-05-20T08:42:49.000Z | blog/migrations/0041_auto_20190504_0855.py | akindele214/181hub_2 | 48b8814b5f66ad87f9a54721506076ddf70fe9bc | [
"MIT"
] | 14 | 2020-03-24T17:31:08.000Z | 2022-03-11T23:59:30.000Z | blog/migrations/0041_auto_20190504_0855.py | akindele214/181hub_2 | 48b8814b5f66ad87f9a54721506076ddf70fe9bc | [
"MIT"
] | 1 | 2020-04-13T12:37:37.000Z | 2020-04-13T12:37:37.000Z | # Generated by Django 2.1.5 on 2019-05-04 07:55
import blog.formatChecker
from django.db import migrations, models
| 24.05 | 123 | 0.638254 |
73360c2d69e50730324e4dc6677481e54cc8e26d | 1,850 | py | Python | tardis/model/tests/test_csvy_model.py | Youssef15015/tardis | adde5b0114f23634fe5afef6937b285174ad6b55 | [
"BSD-3-Clause"
] | null | null | null | tardis/model/tests/test_csvy_model.py | Youssef15015/tardis | adde5b0114f23634fe5afef6937b285174ad6b55 | [
"BSD-3-Clause"
] | 2 | 2019-06-10T11:24:50.000Z | 2019-06-18T17:28:59.000Z | tardis/model/tests/test_csvy_model.py | Youssef15015/tardis | adde5b0114f23634fe5afef6937b285174ad6b55 | [
"BSD-3-Clause"
] | 1 | 2019-06-10T10:21:41.000Z | 2019-06-10T10:21:41.000Z | import numpy as np
import numpy.testing as npt
import tardis
import os
from astropy import units as u
from tardis.io.config_reader import Configuration
from tardis.model import Radial1DModel
import pytest
DATA_PATH = os.path.join(tardis.__path__[0],'model','tests','data')
| 44.047619 | 107 | 0.682162 |
73391ce9c005d2972ce3d22ec1870d858657b9ce | 34,911 | py | Python | wepppy/taudem/topaz_emulator.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | wepppy/taudem/topaz_emulator.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | wepppy/taudem/topaz_emulator.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | from typing import List
import os
import json
from os.path import join as _join
from os.path import exists as _exists
import math
from osgeo import gdal, osr
import numpy as np
from scipy.ndimage import label
from subprocess import Popen, PIPE
from pprint import pprint
from wepppy.all_your_base.geo import read_tif, centroid_px
from wepppy.watershed_abstraction.wepp_top_translator import WeppTopTranslator
from wepppy.watershed_abstraction.support import (
cummnorm_distance, compute_direction, representative_normalized_elevations,
weighted_slope_average, rect_to_polar, write_slp, HillSummary, ChannelSummary, CentroidSummary,
slp_asp_color, polygonize_netful, polygonize_bound, polygonize_subcatchments, json_to_wgs
)
from .taudem import TauDEMRunner
_USE_MPI = False
_DEBUG = False
class TauDEMTopazEmulator(TauDEMRunner):
    # class statement reconstructed from the super() calls below; the original
    # line is missing from this dump

    def run_streamnet(self, single_watershed=False):
super(TauDEMTopazEmulator, self).run_streamnet(single_watershed=single_watershed)
tau2top_translator = self.tau2topaz_translator_factory()
with open(self._net) as fp:
js = json.load(fp)
for i, feature in enumerate(js['features']):
topaz_id = tau2top_translator[feature['properties']['WSNO']]
js['features'][i]['properties']['TopazID'] = int(str(topaz_id) + '4')
with open(self._net, 'w') as fp:
json.dump(js, fp)
cmd = ['gdal_rasterize', '-a', 'TopazID', '-a_nodata', '0',
'-a_srs', 'epsg:{}'.format(self.epsg),
'-te', self.ul_x, self.lr_y, self.lr_x, self.ul_y,
'-tr', self.cellsize, self.cellsize,
'-ot', 'UInt16', self._net, self._channels]
cmd = [str(v) for v in cmd]
print(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
p.wait()
assert _exists(self._channels)
def build_channels(self, csa=None):
if csa is None:
csa = 100
wd = self.wd
self.run_pitremove()
self.run_d8flowdir()
self.run_aread8()
self.run_gridnet()
self.run_src_threshold(threshold=csa)
polygonize_netful(self._src, self._netful_shp)
def set_outlet(self, lng, lat):
self.run_moveoutletstostrm(lng=lng, lat=lat)
def build_subcatchments(self, threshold=None):
self.run_peukerdouglas()
self.run_peukerdouglas_stream_delineation(threshold=threshold)
self.run_streamnet()
self.run_dinfflowdir()
self.run_areadinf()
self.run_dinfdistdown()
json_to_wgs(self._net)
self.delineate_subcatchments()
polygonize_subcatchments(self._subwta, self._subwta_shp, self._subcatchments_shp)
self.make_bound()
polygonize_bound(self._bound, self._bound_shp)
def abstract_watershed(self, wepp_chn_type,
clip_hillslopes=False, clip_hillslope_length=300.0):
self.abstract_channels(wepp_chn_type=wepp_chn_type)
self.abstract_subcatchments(clip_hillslopes=clip_hillslopes,
clip_hillslope_length=clip_hillslope_length)
self.abstract_structure()
def abstract_channels(self, wepp_chn_type=None):
cellsize = self.cellsize
cellsize2 = self.cellsize2
translator = self.translator
        slopes = self.data_fetcher('dinf_slope', dtype=float)  # np.float was removed from numpy
        fvslop = self.data_fetcher('dinf_angle', dtype=float)
with open(self._net) as fp:
js = json.load(fp)
chn_d = {}
for feature in js['features']:
topaz_id = int(str(feature['properties']['TopazID'])[:-1])
catchment_id = feature['properties']['WSNO']
uslinkn01 = feature['properties']['USLINKNO1']
uslinkn02 = feature['properties']['USLINKNO2']
dslinkn0 = feature['properties']['DSLINKNO']
order = feature['properties']['strmOrder']
chn_id = int(str(topaz_id) + '4')
enz_coords = feature['geometry']['coordinates'] # listed bottom to top
# need to identify unique pixels
px_last, py_last = None, None
indx, indy = [], []
for e, n, z in enz_coords:
px, py = self.utm_to_px(e, n)
if px != px_last or py != py_last:
assert 0 <= px < slopes.shape[0], ((px, py), (e, n), slopes.shape)
assert 0 <= py < slopes.shape[1], ((px, py), (e, n), slopes.shape)
indx.append(px)
indy.append(py)
px_last, py_last = px, py
            # the pixels are listed bottom to top; we want them top to bottom,
            # as if we walked down the flowpath
indx = indx[::-1]
indy = indy[::-1]
flowpath = np.array([indx, indy]).T
_distance = flowpath[:-1, :] - flowpath[1:, :]
distance = np.sqrt(np.power(_distance[:, 0], 2.0) +
np.power(_distance[:, 1], 2.0))
slope = np.array([slopes[px, py] for px, py in zip(indx[:-1], indy[:-1])])
assert distance.shape == slope.shape, (distance.shape, slope.shape)
if len(indx) == 1:
px, py = indx[0], indy[0]
slope_scalar = float(slopes[px, py])
slope = np.array([slope_scalar, slope_scalar])
                # todo: don't think head and tail are being used anywhere, but
                # these are inconsistent with the case when there is more than
                # one pixel
head = enz_coords[-1][:-1]
tail = enz_coords[0][:-1]
direction = compute_direction(head, tail)
length = np.linalg.norm(np.array(head) - np.array(tail))
if length < cellsize:
length = cellsize
width = cellsize2 / length
distance_p = [0.0, 1.0]
elevs = representative_normalized_elevations(distance_p, list(slope))
else:
# need normalized distance_p to define slope
distance_p = cummnorm_distance(distance)
if len(slope) == 1:
slope = np.array([float(slope), float(slope)])
# calculate the length from the distance array
length = float(np.sum(distance) * cellsize)
width = float(cellsize)
# aspect = float(self._determine_aspect(indx, indy))
head = [v * cellsize for v in flowpath[-1]]
head = [float(v) for v in head]
tail = [v * cellsize for v in flowpath[0]]
tail = [float(v) for v in tail]
direction = compute_direction(head, tail)
elevs = representative_normalized_elevations(distance_p, list(slope))
slope_scalar = float(abs(elevs[-1]))
area = float(length) * float(width)
# calculate aspect
            aspect = np.mean(np.angle([complex(np.cos(rad), np.sin(rad)) for rad in fvslop[(indx, indy)]], deg=True))
isoutlet = dslinkn0 == -1
c_px, c_py = centroid_px(indx, indy)
centroid_lnglat = self.px_to_lnglat(c_px, c_py)
chn_enum = translator.chn_enum(chn_id=chn_id)
chn_d[str(chn_id)] = dict(chn_id=int(chn_id),
chn_enum=int(chn_enum),
order=int(order),
length=float(length),
width=float(width),
area=float(area),
elevs=[float(v) for v in elevs],
wepp_chn_type=wepp_chn_type,
head=head,
tail=tail,
aspect=float(aspect),
slopes=[float(v) for v in slope],
isoutlet=isoutlet,
direction=float(direction),
distance_p=[float(v) for v in distance_p],
centroid_px=[int(c_px), int(c_py)],
centroid_lnglat=[float(v) for v in centroid_lnglat],
slope_scalar=float(slope_scalar)
)
with open(self._abstracted_channels, 'w') as fp:
json.dump(chn_d, fp, indent=2, sort_keys=True)
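    @staticmethod
    def _example_flowpath_distances():
        # Illustrative sketch, not part of the original class: the per-pixel
        # segment-length computation used in abstract_channels above, shown
        # standalone. An L-shaped 3-pixel flowpath yields two unit steps (in
        # pixels); multiplying by cellsize converts them to map units.
        flowpath = np.array([[0, 0], [0, 1], [1, 1]])
        _distance = flowpath[:-1, :] - flowpath[1:, :]
        distance = np.sqrt(np.power(_distance[:, 0], 2.0) +
                           np.power(_distance[:, 1], 2.0))
        assert distance.tolist() == [1.0, 1.0]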
def abstract_subcatchments(self, clip_hillslopes=False, clip_hillslope_length=300.0):
"""
in: dinf_dd_horizontal, dinf_dd_vertical, dinf_dd_surface, dinf_slope, subwta
:return:
"""
cellsize = self.cellsize
cellsize2 = self.cellsize2
sub_ids = self.topaz_sub_ids
assert _exists(self._dinf_dd_horizontal), self._dinf_dd_horizontal
assert _exists(self._dinf_dd_vertical), self._dinf_dd_vertical
assert _exists(self._dinf_dd_surface), self._dinf_dd_surface
assert _exists(self._dinf_slope), self._dinf_slope
assert _exists(self._subwta), self._subwta
assert _exists(self._dinf_angle), self._dinf_angle
subwta = self.data_fetcher('subwta', dtype=np.uint16)
        lengths = self.data_fetcher('dinf_dd_horizontal', dtype=float)  # np.float was removed from numpy
        verticals = self.data_fetcher('dinf_dd_vertical', dtype=float)
        surface_lengths = self.data_fetcher('dinf_dd_surface', dtype=float)
        slopes = self.data_fetcher('dinf_slope', dtype=float)
        aspects = self.data_fetcher('dinf_angle', dtype=float)
chns_d = self.abstracted_channels
subs_d = {}
for sub_id in sub_ids:
            # identify corresponding channel
            chn_id = str(sub_id)[:-1] + '4'
            # identify indices of sub_id
raw_indx, raw_indy = np.where(subwta == sub_id)
area = float(len(raw_indx)) * cellsize2
indx, indy = [], []
for _x, _y in zip(raw_indx, raw_indy):
if lengths[_x, _y] >= 0.0:
indx.append(_x)
indy.append(_y)
if len(indx) == 0:
print('sub_id', sub_id)
print('raw_indx, raw_indy', raw_indx, raw_indy)
print(lengths[(raw_indx, raw_indy)])
print(surface_lengths[(raw_indx, raw_indy)])
print(slopes[(raw_indx, raw_indy)])
print(aspects[(raw_indx, raw_indy)])
width = length = math.sqrt(area)
_slp = np.mean(slopes[(raw_indx, raw_indy)])
w_slopes = [_slp, _slp]
distance_p = [0, 1]
fp_longest = None
fp_longest_length = length
fp_longest_slope = _slp
else:
# extract flowpath statistics
fp_lengths = lengths[(indx, indy)]
fp_lengths += cellsize
fp_verticals = verticals[(indx, indy)]
fp_surface_lengths = surface_lengths[(indx, indy)]
fp_surface_lengths += cellsize
fp_surface_areas = np.ceil(fp_surface_lengths) * cellsize
fp_slopes = slopes[(indx, indy)]
length = float(np.sum(fp_lengths * fp_surface_areas) / np.sum(fp_surface_areas))
if clip_hillslopes and length > clip_hillslope_length:
length = clip_hillslope_length
width = area / length
# if str(sub_id).endswith('1'):
# # determine representative length and width
# # Cochrane dissertation eq 3.4
#
# #print('sub_id', sub_id)
# #pprint('fp_lengths')
# #pprint(fp_lengths)
# #pprint('fp_surface_areas')
# #pprint(fp_surface_areas)
# length = float(np.sum(fp_lengths * fp_surface_areas) / np.sum(fp_surface_areas))
# width = area / length
#
# #print('area', area)
# #print('width', width)
# #print('length', length, '\n\n\n')
# else:
# width = chns_d[chn_id].length
# length = area / width
# determine representative slope profile
w_slopes, distance_p = weighted_slope_average(fp_surface_areas, fp_slopes, fp_lengths)
# calculate longest flowpath statistics
fp_longest = int(np.argmax(fp_lengths))
fp_longest_vertical = fp_verticals[fp_longest]
fp_longest_length = fp_lengths[fp_longest]
fp_longest_slope = fp_longest_vertical / fp_longest_length
# calculate slope for hillslope
elevs = representative_normalized_elevations(distance_p, w_slopes)
slope_scalar = float(abs(elevs[-1]))
# calculate aspect
_aspects = aspects[(indx, indy)]
            aspect = np.mean(np.angle([complex(np.cos(rad), np.sin(rad)) for rad in _aspects], deg=True))
# calculate centroid
c_px, c_py = centroid_px(raw_indx, raw_indy)
centroid_lnglat = self.px_to_lnglat(c_px, c_py)
direction = chns_d[chn_id].direction
if str(sub_id).endswith('2'):
direction += 90
if str(sub_id).endswith('3'):
direction -= 90
subs_d[str(sub_id)] = dict(sub_id=int(sub_id),
area=float(area),
length=float(length),
aspect=float(aspect),
direction=float(direction),
width=float(width),
w_slopes=list(w_slopes),
distance_p=list(distance_p),
centroid_lnglat=[float(v) for v in centroid_lnglat],
centroid_px=[int(c_px), int(c_py)],
elevs=list(elevs),
slope_scalar=float(slope_scalar),
fp_longest=fp_longest,
fp_longest_length=float(fp_longest_length),
fp_longest_slope=float(fp_longest_slope)
)
with open(self._abstracted_subcatchments, 'w') as fp:
json.dump(subs_d, fp, indent=2, sort_keys=True)
def abstract_structure(self, verbose=False):
translator = self.translator
topaz_network = self.topaz_network
# now we are going to define the lines of the structure file
# this doesn't handle impoundments
structure = []
for chn_id in translator.iter_chn_ids():
if verbose:
print('abstracting structure for channel %s...' % chn_id)
top = translator.top(chn_id=chn_id)
chn_enum = translator.chn_enum(chn_id=chn_id)
# right subcatchments end in 2
hright = top - 2
if not translator.has_top(hright):
hright = 0
# left subcatchments end in 3
hleft = top - 1
if not translator.has_top(hleft):
hleft = 0
# center subcatchments end in 1
hcenter = top - 3
if not translator.has_top(hcenter):
hcenter = 0
# define structure for channel
# the first item defines the channel
_structure = [chn_enum]
# network is defined from the NETW.TAB file that has
# already been read into {network}
# the 0s are appended to make sure it has a length of
# at least 3
chns = topaz_network[top] + [0, 0, 0]
# structure line with top ids
_structure += [hright, hleft, hcenter] + chns[:3]
# this is where we would handle impoundments
# for now no impoundments are assumed
_structure += [0, 0, 0]
# and translate topaz to wepp
structure.append([int(v) for v in _structure])
with open(self._structure, 'w') as fp:
for row in structure:
fp.write('\t'.join([str(v) for v in row]))
fp.write('\n')
def delineate_subcatchments(self, use_topaz_ids=True):
"""
in: pksrc, net
out: subwta
:return:
"""
w_data = self.data_fetcher('w', dtype=np.int32)
_src_data = self.data_fetcher('pksrc', dtype=np.int32)
src_data = np.zeros(_src_data.shape, dtype=np.int32)
src_data[np.where(_src_data == 1)] = 1
subwta = np.zeros(w_data.shape, dtype=np.uint16)
with open(self._net) as fp:
js = json.load(fp)
# identify pourpoints of the end node catchments
end_node_pourpoints = {}
for feature in js['features']:
catchment_id = feature['properties']['WSNO']
coords = feature['geometry']['coordinates']
uslinkn01 = feature['properties']['USLINKNO1']
uslinkn02 = feature['properties']['USLINKNO2']
end_node = uslinkn01 == -1 and uslinkn02 == -1
top = coords[-1][:-1]
if end_node:
end_node_pourpoints[catchment_id] = top
# make geojson with pourpoints as input for gage watershed
outlets_fn = _join(self.wd, 'outlets.geojson')
self._make_multiple_outlets_geojson(dst=outlets_fn, en_points_dict=end_node_pourpoints)
gw_fn = _join(self.wd, 'end_nodes_gw.tif')
self._run_gagewatershed(outlets_fn=outlets_fn, dst=gw_fn)
gw, _, _ = read_tif(gw_fn, dtype=np.int16)
for _pass in range(2):
for feature in js['features']:
topaz_id = int(str(feature['properties']['TopazID'])[:-1])
catchment_id = feature['properties']['WSNO']
coords = feature['geometry']['coordinates']
uslinkn01 = feature['properties']['USLINKNO1']
uslinkn02 = feature['properties']['USLINKNO2']
end_node = uslinkn01 == -1 and uslinkn02 == -1
if (end_node and _pass) or (not end_node and not _pass):
continue # this has already been processed
top = coords[-1]
bottom = coords[0]
top_px = self.utm_to_px(top[0], top[1])
bottom_px = self.utm_to_px(bottom[0], bottom[1])
# need a mask for the side subcatchments
catchment_data = np.zeros(w_data.shape, dtype=np.int32)
catchment_data[np.where(w_data == catchment_id)] = 1
if end_node:
                    # restrict the end node catchment to the catchment area.
# otherwise there are cases where it gets drainage from beyond the watershed
gw_sub = gw * catchment_data
# identify top subcatchment cells
gw_indx = np.where(gw_sub == catchment_id)
# copy the top subcatchment to the subwta raster
if use_topaz_ids:
subwta[gw_indx] = int(str(topaz_id) + '1')
else:
subwta[gw_indx] = int(str(catchment_id) + '1')
# remove end subcatchments from the catchment mask
catchment_data[np.where(subwta != 0)] = 0
# remove channels from catchment mask
catchment_data -= src_data
catchment_data = np.clip(catchment_data, a_min=0, a_max=1)
indx, indy = np.where(catchment_data == 1)
print(catchment_id, _pass, len(indx))
# the whole catchment drains through the top of the channel
if len(indx) == 0:
continue
if _DEBUG:
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(_join(self.wd, 'catchment_for_label_%05i.tif' % catchment_id),
xsize=subwta.shape[0], ysize=subwta.shape[1],
bands=1, eType=gdal.GDT_Int32,
options=['COMPRESS=LZW', 'PREDICTOR=2'])
dst_ds.SetGeoTransform(self.transform)
dst_ds.SetProjection(self.srs_wkt)
band = dst_ds.GetRasterBand(1)
band.WriteArray(catchment_data.T)
dst_ds = None
# we are going to crop the catchment for scipy.ndimage.label. It is really slow otherwise
# to do this we identify the bounds and then add a pad
pad = 1
x0, xend = np.min(indx), np.max(indx)
if x0 >= pad:
x0 -= pad
else:
x0 = 0
if xend < self.num_cols - pad:
xend += pad
else:
xend = self.num_cols - 1
y0, yend = np.min(indy), np.max(indy)
if y0 >= pad:
y0 -= pad
else:
y0 = 0
if yend < self.num_rows - pad:
yend += pad
else:
yend = self.num_rows - 1
# crop to just the side channel catchments
_catchment_data = catchment_data[x0:xend, y0:yend]
# use scipy.ndimage.label to identify side subcatchments
# todo: compare performance to opencv connectedComponents
# https://stackoverflow.com/questions/46441893/connected-component-labeling-in-python
subcatchment_data, n_labels = label(_catchment_data)
# isolated pixels in the channel can get misidentified as subcatchments
# this gets rid of those
subcatchment_data -= src_data[x0:xend, y0:yend]
# we only want the two largest subcatchments. These should be the side subcatchments
# so we need to identify which are the largest
sub_d = []
for i in range(n_labels):
s_indx, s_indy = np.where(subcatchment_data == i + 1)
sub_d.append(dict(rank=len(s_indx), s_indx=s_indx, s_indy=s_indy,
point=(x0 + np.mean(s_indx), y0 + np.mean(s_indy)),
origin=(float(bottom_px[0]), float(bottom_px[1])),
refvec=np.array(top_px, dtype=float) - np.array(bottom_px, dtype=float)
)
)
                # keep the two largest regions (the side subcatchments)
                sub_d = sorted(sub_d, key=lambda _d: _d['rank'], reverse=True)
                if len(sub_d) > 2:
                    sub_d = sub_d[:2]
                # then order them clockwise around the channel axis
                sub_d = sorted(sub_d, key=lambda _d: rect_to_polar(_d))
# assert len(sub_d) == 2
k = 2
for d in sub_d:
if use_topaz_ids:
subwta[x0:xend, y0:yend][d['s_indx'], d['s_indy']] = int(str(topaz_id) + str(k))
else:
subwta[x0:xend, y0:yend][d['s_indx'], d['s_indy']] = int(str(catchment_id) + str(k))
k += 1
channels = self.data_fetcher('channels', dtype=np.int32)
ind = np.where(subwta == 0)
subwta[ind] = channels[ind]
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(self._subwta, xsize=subwta.shape[0], ysize=subwta.shape[1],
bands=1, eType=gdal.GDT_UInt16, options=['COMPRESS=LZW', 'PREDICTOR=2'])
dst_ds.SetGeoTransform(self.transform)
dst_ds.SetProjection(self.srs_wkt)
band = dst_ds.GetRasterBand(1)
band.WriteArray(subwta.T)
band.SetNoDataValue(0)
dst_ds = None
def make_bound(self):
w_data = self.data_fetcher('w', dtype=np.int32)
bound = np.zeros(w_data.shape, dtype=np.int32)
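        # mark every cell that belongs to the watershed with 1, leaving 0 outside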
bound[np.where(w_data > 0)] = 1
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(self._bound, xsize=bound.shape[0], ysize=bound.shape[1],
bands=1, eType=gdal.GDT_Byte, options=['COMPRESS=LZW', 'PREDICTOR=2'])
dst_ds.SetGeoTransform(self.transform)
dst_ds.SetProjection(self.srs_wkt)
band = dst_ds.GetRasterBand(1)
band.WriteArray(bound.T)
band.SetNoDataValue(0)
dst_ds = None
def calculate_watershed_statistics(self):
bound = self.data_fetcher('bound', dtype=np.int32)
fvslop = self.data_fetcher('dinf_angle', dtype=np.float32)
relief = self.data_fetcher('fel', dtype=np.float32)
# calculate descriptive statistics
cellsize = self.cellsize
wsarea = float(np.sum(bound) * cellsize * cellsize)
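        # numpy masked arrays treat nonzero mask entries as masked, so invert the watershed mask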
mask = -1 * bound + 1
# determine area with slope > 30
fvslop_ma = np.ma.masked_array(fvslop, mask=mask)
indx, indy = np.ma.where(fvslop_ma > 0.3)
area_gt30 = float(len(indx) * cellsize * cellsize)
# determine ruggedness of watershed
relief_ma = np.ma.masked_array(relief, mask=mask)
minz = float(np.min(relief_ma))
maxz = float(np.max(relief_ma))
ruggedness = float((maxz - minz) / math.sqrt(wsarea))
indx, indy = np.ma.where(bound == 1)
ws_cen_px, ws_cen_py = int(np.round(np.mean(indx))), int(np.round(np.mean(indy)))
ws_centroid = self.px_to_lnglat(ws_cen_px, ws_cen_py)
outlet_top_id = None # todo
return dict(wsarea=wsarea,
area_gt30=area_gt30,
ruggedness=ruggedness,
minz=minz,
maxz=maxz,
ws_centroid=ws_centroid,
outlet_top_id=outlet_top_id,)
def write_slps(self, out_dir, channels=1, subcatchments=1, flowpaths=0):
"""
Writes slope files to the specified wat_dir. The channels,
subcatchments, and flowpaths args specify what slope files
should be written.
"""
if channels:
self._make_channel_slps(out_dir)
if subcatchments:
self._write_subcatchment_slps(out_dir)
if flowpaths:
raise NotImplementedError
| 37.619612 | 120 | 0.540661 |
733a0eff21e557f8f32c9d92815d4f668db0c2d8 | 47,930 | py | Python | PyRSM/utils.py | chdahlqvist/RSMmap | 53984967d612eaf4feb90ba4972109638f6cf70a | [
"MIT"
] | 3 | 2021-05-18T16:40:13.000Z | 2022-03-17T15:32:31.000Z | PyRSM/utils.py | chdahlqvist/RSMmap | 53984967d612eaf4feb90ba4972109638f6cf70a | [
"MIT"
] | null | null | null | PyRSM/utils.py | chdahlqvist/RSMmap | 53984967d612eaf4feb90ba4972109638f6cf70a | [
"MIT"
] | 1 | 2022-01-19T11:04:21.000Z | 2022-01-19T11:04:21.000Z | """
Set of functions used by the PyRSM class to compute detection maps and optimize the parameters
of the RSM algorithm and PSF-subtraction techniques via the auto-RSM and auto-S/N frameworks
"""
__author__ = 'Carl-Henrik Dahlqvist'
from scipy.interpolate import Rbf
import pandas as pd
import numpy.linalg as la
from vip_hci.var import get_annulus_segments, frame_center,prepare_matrix
from vip_hci.preproc.derotation import _define_annuli
import numpy as np
from vip_hci.preproc import cube_derotate, cube_collapse, check_pa_vector,check_scal_vector
from vip_hci.preproc.derotation import _find_indices_adi
from vip_hci.preproc.rescaling import _find_indices_sdi
import scipy as sp
from multiprocessing import cpu_count
from vip_hci.conf.utils_conf import pool_map, iterable
from vip_hci.pca.svd import get_eigenvectors
from vip_hci.llsg.llsg import _patch_rlrps
from vip_hci.preproc import cube_rescaling_wavelengths as scwave
import vip_hci as vip
from sklearn.decomposition import NMF as NMF_sklearn
def rot_scale(step,cube,cube_scaled,angle_list,scale_list, imlib, interpolation):
"""
    Function used to rescale the frames when relying on ADI+SDI before the computation of the reference PSF
    (step='ini'), and to rescale and derotate the frames to generate the cube of residuals used by the RSM
    algorithm (step='fin').
Parameters
----------
step: str
'ini' before the reference PSF computation and 'fin' after PSF subtraction.
cube: numpy ndarray, 3d or 4d
Original cube
cube_scaled: numpy ndarray, 3d
Cube of residuals to be rescaled and derotated (None for the step='ini')
angle_list : numpy ndarray, 1d
Parallactic angles for each frame of the ADI sequences.
scale_list: numpy ndarray, 1d, optional
Scaling factors in case of IFS data (ADI+mSDI cube). Usually, the
scaling factors are the central channel wavelength divided by the
shortest wavelength in the cube (more thorough approaches can be used
to get the scaling factors). This scaling factors are used to re-scale
the spectral channels and align the speckles. Default is None
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
"""
if cube.ndim == 4:
z, n, y_in, x_in = cube.shape
scale_list = check_scal_vector(scale_list)
if step=='ini':
# rescaled cube, aligning speckles for SDI
for i in range(n):
if i==0:
fin_cube = scwave(cube[:, i, :, :], scale_list,
imlib=imlib, interpolation=interpolation)[0]
fin_pa=np.repeat(angle_list[i],z)
fin_scale=scale_list
else:
fin_cube = np.append(fin_cube,scwave(cube[:, i, :, :], scale_list,
imlib=imlib, interpolation=interpolation)[0],axis=0)
fin_pa=np.append(fin_pa,np.repeat(angle_list[i],z),axis=0)
fin_scale=np.append(fin_scale,scale_list,axis=0)
return fin_cube,fin_pa,fin_scale
elif step=='fin':
cube_fin=np.zeros((n,y_in, x_in))
cube_rescaled = scwave(cube_scaled, scale_list,
full_output=True, inverse=True,
y_in=y_in, x_in=x_in, imlib=imlib,
interpolation=interpolation)[0]
cube_derotated=cube_derotate(cube_rescaled,angle_list, interpolation=interpolation,imlib=imlib)
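            # average the z rescaled spectral channels of each exposure back into a single ADI frame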
for i in range(n):
cube_fin[i]=np.mean(cube_derotated[(i*z):((i+1)*z),:,:],axis=0)
return cube_fin
if cube.ndim == 3:
if step=='ini':
return cube,angle_list,None
elif step=='fin':
cube_derotated=cube_derotate(cube_scaled,angle_list, interpolation=interpolation,imlib=imlib)
return cube_derotated
def remove_outliers(time_s, range_sel, k=5, t0=3):
"""
Hampel Filter to remove potential outliers in the set of selected parameters
for the annular mode of the auto-RSM framework
"""
vals=pd.DataFrame(data=time_s[range_sel])
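    # 1.4826 scales the median absolute deviation (MAD) to a Gaussian standard deviation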
L= 1.4826
rolling_median=vals.rolling(k).median()
difference=np.abs(rolling_median-vals)
median_abs_deviation=difference.rolling(k).median()
threshold= t0 *L * median_abs_deviation
outlier_idx=difference>threshold
vals[outlier_idx]=threshold[outlier_idx]
return(vals.to_numpy().reshape(-1))
def interpolation(time_s,range_sel):
"""
Interpolation algorithm for the RSM parameters
for the annular mode of the auto-RSM framework
"""
time_series=time_s.copy()
time_series[range_sel]=remove_outliers(time_series,range_sel)
fit = Rbf(range_sel,time_s[range_sel])
inter_point = np.linspace(range_sel[0],range_sel[-1]+1, num=(range_sel[-1]-range_sel[0]+1), endpoint=True)
return fit(inter_point)
def poly_fit(time_s,range_sel,poly_n):
"""
Smoothing procedure for the computation of the final radial thresholds
which are subtracted from the final RSM detection map in the final step
of the auto-RSM framework
"""
time_series=time_s.copy()
time_series[range_sel]=remove_outliers(time_series,range_sel)
fit_p=np.poly1d(np.polyfit(range_sel,time_series[range_sel], poly_n))
time_series=fit_p(range(len(time_series)))
return time_series
def get_time_series(mcube,ann_center):
"""
Function defining and ordering (anti-clockwise) the pixels composing
an annulus at a radial distance of ann_center for an ADI sequence mcube
"""
if mcube.ndim == 4:
indices = get_annulus_segments(mcube[0,0,:,:], ann_center,1,4,90)
else:
indices = get_annulus_segments(mcube[0], ann_center,1,4,90)
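    # order the pixels of the four quadrants so that, concatenated, they trace the annulus anti-clockwise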
tempind=np.vstack((indices[0][0],indices[0][1]))
ind = np.lexsort((tempind[0], tempind[1]))
indicesy=tempind[0,ind[::-1]]
indicesx=tempind[1,ind[::-1]]
tempind=np.vstack((indices[1][0],indices[1][1]))
ind = np.lexsort((-tempind[0], tempind[1]))
indicesy=np.hstack((indicesy,tempind[0,ind[::-1]]))
indicesx=np.hstack((indicesx,tempind[1,ind[::-1]]))
tempind=np.vstack((indices[2][0],indices[2][1]))
ind = np.lexsort((tempind[0], tempind[1]))
indicesy=np.hstack((indicesy,tempind[0,ind]))
indicesx=np.hstack((indicesx,tempind[1,ind]))
tempind=np.vstack((indices[3][0],indices[3][1]))
ind = np.lexsort((-tempind[0], tempind[1]))
indicesy=np.hstack((indicesy,tempind[0,ind]))
indicesx=np.hstack((indicesx,tempind[1,ind]))
return indicesy,indicesx
def perturb(frame,model_matrix,numbasis,evals_matrix, evecs_matrix, KL_basis_matrix,sci_mean_sub_matrix,refs_mean_sub_matrix, angle_list, fwhm, pa_threshold, ann_center):
"""
Function allowing the estimation of the PSF forward model when relying on KLIP
for the computation of the speckle field. The code is based on the PyKLIP library
    considering only the ADI case with a single number of principal components.
    For more details about the code, consult the PyKLIP library or the original articles
(Pueyo, L. 2016, ApJ, 824, 117 or
Ruffio, J.-B., Macintosh, B., Wang, J. J., & Pueyo, L. 2017, ApJ, 842)
"""
    # Selection of the reference library based on the given parallactic angle threshold
if pa_threshold != 0:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
models_ref = model_matrix[indices_left]
else:
models_ref = model_matrix
#Computation of the self-subtraction and over-subtraction for the current frame
model_sci = model_matrix[frame]
KL_basis=KL_basis_matrix[frame]
sci_mean_sub=sci_mean_sub_matrix[frame]
refs_mean_sub=refs_mean_sub_matrix[frame]
evals=evals_matrix[frame]
evecs=evecs_matrix[frame]
max_basis = KL_basis.shape[0]
N_pix = KL_basis.shape[1]
models_mean_sub = models_ref - np.nanmean(models_ref, axis=1)[:,None]
models_mean_sub[np.where(np.isnan(models_mean_sub))] = 0
model_sci_mean_sub = model_sci- np.nanmean(model_sci)
model_sci_mean_sub[np.where(np.isnan(model_sci_mean_sub))] = 0
model_sci_mean_sub_rows = np.reshape(model_sci_mean_sub,(1,N_pix))
sci_mean_sub_rows = np.reshape(sci_mean_sub,(1,N_pix))
delta_KL = np.zeros([max_basis, N_pix])
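    # perturbation of each KL mode induced by the planet model present in the science and reference frames (Pueyo 2016)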
models_mean_sub_X_refs_mean_sub_T = models_mean_sub.dot(refs_mean_sub.transpose())
for k in range(max_basis):
Zk = np.reshape(KL_basis[k,:],(1,KL_basis[k,:].size))
Vk = (evecs[:,k])[:,None]
diagVk_X_models_mean_sub_X_refs_mean_sub_T = (Vk.T).dot(models_mean_sub_X_refs_mean_sub_T)
models_mean_sub_X_refs_mean_sub_T_X_Vk = models_mean_sub_X_refs_mean_sub_T.dot(Vk)
DeltaZk = -(1/(2*np.sqrt(evals[k])))*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vk) + ((Vk.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zk)+(Vk.T).dot(models_mean_sub)
for j in range(k):
Zj = KL_basis[j, :][None,:]
Vj = evecs[:, j][:,None]
DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj)
for j in range(k+1, max_basis):
Zj = KL_basis[j, :][None,:]
Vj = evecs[:, j][:,None]
DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj)
delta_KL[k] = DeltaZk/np.sqrt(evals[k])
oversubtraction_inner_products = np.dot(model_sci_mean_sub_rows, KL_basis.T)
selfsubtraction_1_inner_products = np.dot(sci_mean_sub_rows, delta_KL.T)
selfsubtraction_2_inner_products = np.dot(sci_mean_sub_rows, KL_basis.T)
oversubtraction_inner_products[max_basis::] = 0
klipped_oversub = np.dot(oversubtraction_inner_products, KL_basis)
selfsubtraction_1_inner_products[0,max_basis::] = 0
selfsubtraction_2_inner_products[0,max_basis::] = 0
klipped_selfsub = np.dot(selfsubtraction_1_inner_products, KL_basis) + \
np.dot(selfsubtraction_2_inner_products, delta_KL)
return model_sci[None,:] - klipped_oversub - klipped_selfsub
def KLIP(cube, angle_list, nann=None, local=False, fwhm=4, asize=2, n_segments=1,delta_rot=1, ncomp=1,min_frames_lib=2, max_frames_lib=200,imlib='opencv',nframes=None, interpolation='lanczos4', collapse='median',full_output=False, verbose=1):
"""
Function allowing the estimation of the cube of residuals after
the subtraction of the speckle field modeled via the KLIP framework
"""
array = cube
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array')
if array.shape[0] != angle_list.shape[0]:
raise TypeError('Input vector or parallactic angles has wrong length')
n, y, _ = array.shape
angle_list = check_pa_vector(angle_list)
if asize is None:
annulus_width = int(np.ceil(2 * fwhm))
elif isinstance(asize, int):
annulus_width = asize
# Annulus parametrization
radius_int=fwhm
if local==True:
if nann> 2*annulus_width:
n_annuli = 5
radius_int=(nann//annulus_width-2)*annulus_width
else:
n_annuli = 4
radius_int=(nann//annulus_width-1)*annulus_width
else:
n_annuli = int((y / 2 - radius_int) / asize)
    # Definition of the number of segments for the different annuli
if isinstance(n_segments, int):
n_segments = [n_segments for _ in range(n_annuli)]
elif n_segments == 'auto':
n_segments = list()
n_segments.append(2)
n_segments.append(3)
ld = 2 * np.tan(360 / 4 / 2) * asize
for i in range(2, n_annuli):
radius = i * asize
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360 / ang)))
if verbose:
msg = '# annuli = {}, Ann width = {}, FWHM = {:.3f}'
print(msg.format(n_annuli, asize, fwhm))
print('PCA per annulus (or annular sectors):')
    # Definition of the annuli and the corresponding parallactic angle thresholds
cube_out = np.zeros_like(array)
for ann in range(n_annuli):
if isinstance(ncomp, list) or isinstance(ncomp, np.ndarray):
if len(ncomp) == n_annuli:
ncompann = ncomp[ann]
else:
msge = 'If ncomp is a list, it must match the number of annuli'
raise TypeError(msge)
else:
ncompann = ncomp
inner_radius = radius_int + ann * annulus_width
n_segments_ann = n_segments[ann]
if verbose:
print('{} : in_rad={}, n_segm={}'.format(ann+1, inner_radius,
n_segments_ann))
theta_init = 90
res_ann_par = _define_annuli(angle_list, ann, int((y / 2 - radius_int) / asize), fwhm,radius_int, annulus_width, delta_rot,n_segments_ann, verbose)
pa_thr, inner_radius, ann_center = res_ann_par
indices = get_annulus_segments(array[0], inner_radius, annulus_width,n_segments_ann,theta_init)
# Computation of the speckle field for the different frames and estimation of the cube of residuals
for j in range(n_segments_ann):
for k in range(array.shape[0]):
res =KLIP_patch(k,array[:, indices[j][0], indices[j][1]], ncompann, angle_list, fwhm, pa_thr, ann_center,nframes=nframes)
cube_out[k,indices[j][0], indices[j][1]] = res[3]
# Cube is derotated according to the parallactic angle and collapsed
cube_der = cube_derotate(cube_out, angle_list, imlib=imlib,interpolation=interpolation)
frame = cube_collapse(cube_der, mode=collapse)
if full_output:
return cube_out, cube_der, frame
else:
return frame
def KLIP_patch(frame, matrix, numbasis, angle_list, fwhm, pa_threshold, ann_center,nframes=None):
"""
Function allowing the computation via KLIP of the speckle field for a
    given sub-region of the original ADI sequence. Code inspired by the PyKLIP library.
"""
max_frames_lib=200
if pa_threshold != 0:
if ann_center > fwhm*20:
indices_left = _find_indices_adi(angle_list,frame,pa_threshold, truncate=True,max_frames=max_frames_lib)
else:
indices_left = _find_indices_adi(angle_list, frame,pa_threshold, truncate=False,nframes=nframes)
refs = matrix[indices_left]
else:
refs = matrix
sci = matrix[frame]
sci_mean_sub = sci - np.nanmean(sci)
#sci_mean_sub[np.where(np.isnan(sci_mean_sub))] = 0
refs_mean_sub = refs- np.nanmean(refs, axis=1)[:, None]
#refs_mean_sub[np.where(np.isnan(refs_mean_sub))] = 0
# Covariance matrix definition
covar_psfs = np.cov(refs_mean_sub)
covar_psfs *= (np.size(sci)-1)
tot_basis = covar_psfs.shape[0]
numbasis = np.clip(numbasis - 1, 0, tot_basis-1)
max_basis = np.max(numbasis) + 1
#Computation of the eigenvectors/values of the covariance matrix
evals, evecs = la.eigh(covar_psfs)
evals = np.copy(evals[int(tot_basis-max_basis):int(tot_basis)])
evecs = np.copy(evecs[:,int(tot_basis-max_basis):int(tot_basis)])
evals = np.copy(evals[::-1])
evecs = np.copy(evecs[:,::-1])
# Computation of the principal components
KL_basis = np.dot(refs_mean_sub.T,evecs)
KL_basis = KL_basis * (1. / np.sqrt(evals))[None,:]
KL_basis = KL_basis.T
N_pix = np.size(sci_mean_sub)
sci_rows = np.reshape(sci_mean_sub, (1,N_pix))
inner_products = np.dot(sci_rows, KL_basis.T)
inner_products[0,int(max_basis)::]=0
    # Projection of the science image on the selected principal components
    # to generate the speckle field model
klip_reconstruction = np.dot(inner_products, KL_basis)
    # Subtraction of the speckle field model from the original science image
    # to obtain the residual frame
sub_img_rows = sci_rows - klip_reconstruction
return evals,evecs,KL_basis,np.reshape(sub_img_rows, (N_pix)),refs_mean_sub,sci_mean_sub
def LOCI_FM(cube, psf, ann_center, angle_list,scale_list, asize,fwhm, Tol,delta_rot,delta_sep):
"""
    Computation of the optimal factors weighting the linear combination of reference
frames used to obtain the modeled speckle field for each frame and allowing the
determination of the forward modeled PSF. Estimation of the cube
of residuals based on the modeled speckle field.
"""
cube_res = np.zeros_like(cube)
ceny, cenx = frame_center(cube[0])
radius_int=ann_center-int(1.5*asize)
if radius_int<=0:
radius_int=1
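    # three consecutive annuli of width asize, the middle one centred on ann_center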
for ann in range(3):
n_segments_ann = 1
inner_radius_ann = radius_int + ann*asize
pa_threshold = _define_annuli(angle_list, ann, 3, asize,
radius_int, asize, delta_rot,
n_segments_ann, verbose=False)[0]
indices = get_annulus_segments(cube[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann)
ind_opt = get_annulus_segments(cube[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann,
optim_scale_fact=2)
ayxyx = [inner_radius_ann,pa_threshold, indices[0][0], indices[0][1],
ind_opt[0][0], ind_opt[0][1]]
matrix_res, ind_ref, coef, yy, xx = _leastsq_patch(ayxyx,
angle_list,scale_list,fwhm,cube,ann_center,'manhattan', 100,delta_sep,
'lstsq', Tol,formod=True,psf=psf)
if ann==1:
ind_ref_list=ind_ref
coef_list=coef
cube_res[:, yy, xx] = matrix_res
return cube_res, ind_ref_list,coef_list
def nmf_adisdi(cube, angle_list,scale_list=None, cube_ref=None, ncomp=1, scaling=None, max_iter=100,
random_state=None, mask_center_px=None, imlib='opencv',
interpolation='lanczos4', collapse='median', full_output=False,
verbose=True, **kwargs):
""" Non Negative Matrix Factorization for ADI or ADI+SDI sequences.This function embeds the
scikit-learn NMF algorithm solved through coordinate descent method.
"""
array,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
n, y, x = array.shape
matrix = prepare_matrix(array, scaling, mask_center_px, mode='fullfr',
verbose=verbose)
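    # shift all values to be non-negative, as required by NMF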
matrix += np.abs(matrix.min())
if cube_ref is not None:
matrix_ref = prepare_matrix(cube_ref, scaling, mask_center_px,
mode='fullfr', verbose=verbose)
matrix_ref += np.abs(matrix_ref.min())
mod = NMF_sklearn(n_components=ncomp, alpha=0, solver='cd', init='nndsvd',
max_iter=max_iter, random_state=random_state, **kwargs)
# H [ncomp, n_pixels]: Non-negative components of the data
if cube_ref is not None:
H = mod.fit(matrix_ref).components_
else:
H = mod.fit(matrix).components_
# W: coefficients [n_frames, ncomp]
W = mod.transform(matrix)
reconstructed = np.dot(W, H)
residuals = matrix - reconstructed
array_out = np.zeros_like(array)
for i in range(n):
array_out[i] = residuals[i].reshape(y,x)
cube_der=rot_scale('fin',cube,array_out,angle_list_t,scale_list_t, imlib, interpolation)
frame_fin = cube_collapse(cube_der, mode=collapse)
return cube_der,frame_fin
def annular_NMF(cube, angle_list, nann=None, local=False, fwhm=4, asize=2, n_segments=1, ncomp=20,imlib='opencv', interpolation='lanczos4', collapse='median',max_iter=100,
random_state=None,full_output=False, verbose=False):
"""
Function allowing the estimation of the cube of residuals after
the subtraction of the speckle field modeled via the NMF framework.
    This code is an adaptation of the VIP NMF function to the case of annular
computation of the modeled speckle fields
(only full-frame estimation in Gonzalez et al. AJ, 154:7,2017)
"""
array = cube
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array')
if array.shape[0] != angle_list.shape[0]:
raise TypeError('Input vector or parallactic angles has wrong length')
n, y, _ = array.shape
angle_list = check_pa_vector(angle_list)
if asize is None:
annulus_width = int(np.ceil(2 * fwhm))
elif isinstance(asize, int):
annulus_width = asize
# Annulus parametrization
radius_int=fwhm
if local==True:
if nann> 2*annulus_width:
n_annuli = 5
radius_int=(nann//annulus_width-2)*annulus_width
else:
n_annuli = 4
radius_int=(nann//annulus_width-1)*annulus_width
else:
n_annuli = int((y / 2 - radius_int) / asize)
    # Definition of the annuli and the corresponding parallactic angle thresholds
cube_out = np.zeros_like(array)
for ann in range(n_annuli):
inner_radius = radius_int + ann * annulus_width
if verbose:
print('{} : in_rad={}'.format(ann+1, inner_radius))
theta_init = 90
indices = get_annulus_segments(array[0], inner_radius, annulus_width,n_segments,theta_init)
# Computation of the speckle field for the different frames and estimation of the cube of residuals
for j in range(n_segments):
cube_out[:,indices[j][0], indices[j][1]] =NMF_patch(array[:, indices[j][0], indices[j][1]], ncomp, max_iter,random_state,verbose)
# Cube is derotated according to the parallactic angle and collapsed
cube_der = cube_derotate(cube_out, angle_list, imlib=imlib,interpolation=interpolation)
frame = cube_collapse(cube_der, mode=collapse)
if full_output:
return cube_out, cube_der, frame
else:
return frame
def NMF_patch(matrix, ncomp, max_iter,random_state,sklearn=False):
"""
Function allowing the computation via NMF of the speckle field for a
given sub-region of the original ADI sequence. The code is a partial reproduction of
the VIP function NMF_patch (Gonzalez et al. AJ, 154:7,2017)
"""
refs = matrix+ np.abs(matrix.min())
if sklearn==True:
mod = NMF_sklearn(n_components=ncomp, alpha=0, solver='cd', init='nndsvd',
max_iter=max_iter, random_state=random_state)
# H [ncomp, n_pixels]: Non-negative components of the data
H = mod.fit(refs).components_
W = mod.transform(refs)
reconstructed = np.dot(W, H)
else:
mod = NMF(X=refs, n_components=ncomp)
mod.SolveNMF(maxiters=max_iter, tol=0.001)
H=mod.H
W=mod.W
reconstructed = np.dot(W, H)
residuals = refs - reconstructed
return residuals
def NMF_patch_range(matrix, ncomp_range, max_iter,random_state,verbose):
"""
Function allowing the computation via NMF of the speckle field for a range of principal
components ncomp_range and a given sub-region of the original ADI sequence. The code is a
partial reproduction of the VIP function NMF_patch (Gonzalez et al. AJ, 154:7,2017)
"""
refs = matrix+ np.abs(matrix.min())
mod = NMF(X=refs, n_components=ncomp_range[len(ncomp_range)-1])
mod.SolveNMF(maxiters=max_iter, tol=0.001)
if verbose:
print('Done NMF with sklearn.NMF.')
residuals=[]
for i in ncomp_range:
H=mod.H[ncomp_range[0]:i,:]
W=mod.W[:,ncomp_range[0]:i]
reconstructed = np.dot(W, H)
residuals.append(refs - reconstructed)
return residuals
def annular_pca_adisdi(cube, angle_list,scale_list=None, radius_int=0, fwhm=4, asize=2, n_segments=1,
delta_rot=1,delta_sep=0.1, ncomp=1, svd_mode='lapack', nproc=None,
min_frames_lib=2, max_frames_lib=200, tol=1e-1, scaling=None,
imlib='opencv', interpolation='lanczos4', collapse='median',
full_output=False, verbose=False, cube_ref=None, weights=None):
""" PCA exploiting angular and spectral variability (ADI or ADI+SDI fashion).
"""
array,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
n, y, _ = array.shape
angle_list_t = check_pa_vector(angle_list_t)
n_annuli = int((y / 2 - radius_int) / asize)
if isinstance(delta_rot, tuple):
delta_rot = np.linspace(delta_rot[0], delta_rot[1], num=n_annuli)
elif isinstance(delta_rot, (int, float)):
delta_rot = [delta_rot] * n_annuli
if isinstance(n_segments, int):
n_segments = [n_segments for _ in range(n_annuli)]
elif n_segments == 'auto':
n_segments = list()
n_segments.append(2) # for first annulus
n_segments.append(3) # for second annulus
ld = 2 * np.tan(360 / 4 / 2) * asize
for i in range(2, n_annuli): # rest of annuli
radius = i * asize
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360 / ang)))
if verbose:
msg = 'N annuli = {}, FWHM = {:.3f}'
print(msg.format(n_annuli, fwhm))
print('PCA per annulus (or annular sectors):')
if nproc is None: # Hyper-threading "duplicates" the cores -> cpu_count/2
nproc = cpu_count() // 2
# The annuli are built, and the corresponding PA thresholds for frame
# rejection are calculated (at the center of the annulus)
cube_out = np.zeros_like(array)
for ann in range(n_annuli):
if isinstance(ncomp, tuple) or isinstance(ncomp, np.ndarray):
if len(ncomp) == n_annuli:
ncompann = ncomp[ann]
else:
raise TypeError('If `ncomp` is a tuple, it must match the '
'number of annuli')
else:
ncompann = ncomp
n_segments_ann = n_segments[ann]
res_ann_par = _define_annuli(angle_list_t, ann, n_annuli, fwhm,
radius_int, asize, delta_rot[ann],
n_segments_ann, verbose)
pa_thr, inner_radius, ann_center = res_ann_par
indices = get_annulus_segments(array[0], inner_radius, asize,
n_segments_ann)
# Library matrix is created for each segment and scaled if needed
for j in range(n_segments_ann):
yy = indices[j][0]
xx = indices[j][1]
matrix_segm = array[:, yy, xx] # shape [nframes x npx_segment]
if cube_ref is not None:
matrix_segm_ref = cube_ref[:, yy, xx]
else:
matrix_segm_ref = None
res = pool_map(nproc, do_pca_patch, matrix_segm, iterable(range(n)),
angle_list_t,scale_list_t, fwhm, pa_thr,delta_sep, ann_center, svd_mode,
ncompann, min_frames_lib, max_frames_lib, tol,
matrix_segm_ref)
res = np.array(res)
residuals = np.array(res[:, 0])
for fr in range(n):
cube_out[fr][yy, xx] = residuals[fr]
# Cube is derotated according to the parallactic angle and collapsed
cube_der=rot_scale('fin',cube,cube_out,angle_list_t,scale_list_t, imlib, interpolation)
frame = cube_collapse(cube_der, mode=collapse)
return cube_der, frame
def do_pca_patch(matrix, frame, angle_list,scale_list, fwhm, pa_threshold, delta_sep, ann_center,
svd_mode, ncomp, min_frames_lib, max_frames_lib, tol,
matrix_ref):
"""
Function doing the SVD/PCA for each frame patch. The code is a partial reproduction of
the VIP function do_pca_patch (Gonzalez et al. AJ, 154:7,2017)
"""
if scale_list is not None:
indices_left = np.intersect1d(_find_indices_adi(angle_list, frame,
pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, frame,
fwhm, delta_sep))
else:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
data_ref = matrix[indices_left]
if matrix_ref is not None:
# Stacking the ref and the target ref (pa thresh) libraries
data_ref = np.vstack((matrix_ref, data_ref))
curr_frame = matrix[frame] # current frame
V = get_eigenvectors(ncomp, data_ref, svd_mode, noise_error=tol)
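    # project the current frame onto the principal components and subtract the low-rank reconstruction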
transformed = np.dot(curr_frame, V.T)
reconstructed = np.dot(transformed.T, V)
residuals = curr_frame - reconstructed
return residuals, V.shape[0], data_ref.shape[0]
def do_pca_patch_range(matrix, frame, angle_list,scale_list, fwhm, pa_threshold,delta_sep, ann_center,
svd_mode, ncomp_range, min_frames_lib, max_frames_lib, tol,
matrix_ref):
"""
Function doing the SVD/PCA for each frame patch for a range of principal
component ncomp_range. The code is a partial reproduction of
the VIP function do_pca_patch (Gonzalez et al. AJ, 154:7,2017)
"""
if scale_list is not None:
indices_left = np.intersect1d(_find_indices_adi(angle_list, frame,
pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, frame,
fwhm, delta_sep))
else:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
data_ref = matrix[indices_left]
if matrix_ref is not None:
# Stacking the ref and the target ref (pa thresh) libraries
data_ref = np.vstack((matrix_ref, data_ref))
curr_frame = matrix[frame] # current frame
V = get_eigenvectors(ncomp_range[len(ncomp_range)-1], data_ref, svd_mode, noise_error=tol)
residuals=[]
for i in ncomp_range:
V_trunc=V[ncomp_range[0]:i,:]
transformed = np.dot(curr_frame, V_trunc.T)
reconstructed = np.dot(transformed.T, V_trunc)
residuals.append(curr_frame - reconstructed)
return residuals, V.shape[0], data_ref.shape[0]
def loci_adisdi(cube, angle_list,scale_list=None, fwhm=4, metric='manhattan',
dist_threshold=50, delta_rot=0.5,delta_sep=0.1, radius_int=0, asize=4,
n_segments=1, nproc=1, solver='lstsq', tol=1e-3,
optim_scale_fact=1, imlib='opencv', interpolation='lanczos4',
collapse='median', nann=None,local=False, verbose=True, full_output=False):
""" Least-squares model PSF subtraction for ADI or ADI+SDI. This code is an adaptation of the VIP
xloci function to provide, if required, the residuals after speckle field subtraction
for a given annulus.
"""
cube_rot_scale,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
y = cube_rot_scale.shape[1]
if not asize < y // 2:
raise ValueError("asize is too large")
angle_list = check_pa_vector(angle_list)
if local==True:
n_annuli = 3
radius_int=nann-asize
else:
n_annuli= int((y / 2 - radius_int) / asize)
if verbose:
print("Building {} annuli:".format(n_annuli))
if isinstance(delta_rot, tuple):
delta_rot = np.linspace(delta_rot[0], delta_rot[1], num=n_annuli)
elif isinstance(delta_rot, (int, float)):
delta_rot = [delta_rot] * n_annuli
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
annulus_width = asize
if isinstance(n_segments, int):
n_segments = [n_segments]*n_annuli
elif n_segments == 'auto':
n_segments = list()
n_segments.append(2) # for first annulus
n_segments.append(3) # for second annulus
ld = 2 * np.tan(360/4/2) * annulus_width
for i in range(2, n_annuli): # rest of annuli
radius = i * annulus_width
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360/ang)))
# annulus-wise least-squares combination and subtraction
cube_res = np.zeros_like(cube_rot_scale)
ayxyx = [] # contains per-segment data
for ann in range(n_annuli):
n_segments_ann = n_segments[ann]
inner_radius_ann = radius_int + ann*annulus_width
# angles
pa_threshold = _define_annuli(angle_list, ann, n_annuli, fwhm,
radius_int, asize, delta_rot[ann],
n_segments_ann, verbose)[0]
# indices
indices = get_annulus_segments(cube_rot_scale[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann)
ind_opt = get_annulus_segments(cube_rot_scale[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann,
optim_scale_fact=optim_scale_fact)
# store segment data for multiprocessing
ayxyx += [(inner_radius_ann+asize//2,pa_threshold, indices[nseg][0], indices[nseg][1],
ind_opt[nseg][0], ind_opt[nseg][1]) for nseg in
range(n_segments_ann)]
msg = 'Patch-wise least-square combination and subtraction:'
# reverse order of processing, as outer segments take longer
res_patch = pool_map(nproc, _leastsq_patch, iterable(ayxyx[::-1]),
angle_list_t,scale_list_t,fwhm,cube_rot_scale, None, metric, dist_threshold,delta_sep,
solver, tol, verbose=verbose, msg=msg,
progressbar_single=True)
for patch in res_patch:
matrix_res, yy, xx = patch
cube_res[:, yy, xx] = matrix_res
cube_der=rot_scale('fin',cube,cube_res,angle_list_t,scale_list_t, imlib, interpolation)
frame_der_median = cube_collapse(cube_der, collapse)
if verbose:
print('Done processing annuli')
return cube_der, frame_der_median
def _leastsq_patch(ayxyx, angle_list,scale_list,fwhm,cube, nann,metric, dist_threshold,delta_sep,
solver, tol,formod=False,psf=None):
"""
    Function allowing the estimation of the optimal factors for the modeled speckle field
    estimation via the LOCI framework. The code has been developed based on the VIP
    Python function _leastsq_patch, but additionally returns the set of coefficients used for
    the speckle field computation.
"""
ann_center,pa_threshold, yy, xx, yy_opti, xx_opti = ayxyx
ind_ref_list=[]
coef_list=[]
yy_opt=[]
xx_opt=[]
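    # keep only the optimization-region pixels that do not overlap the subtraction segment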
for j in range(0,len(yy_opti)):
if not any(x in np.where(yy==yy_opti[j])[0] for x in np.where(xx==xx_opti[j])[0]):
xx_opt.append(xx_opti[j])
yy_opt.append(yy_opti[j])
values = cube[:, yy, xx]
matrix_res = np.zeros((values.shape[0], yy.shape[0]))
values_opt = cube[:, yy_opti, xx_opti]
n_frames = cube.shape[0]
for i in range(n_frames):
if scale_list is not None:
ind_fr_i = np.intersect1d(_find_indices_adi(angle_list, i,
pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, i,
fwhm, delta_sep))
else:
ind_fr_i = _find_indices_adi(angle_list, i,
pa_threshold, truncate=False)
if len(ind_fr_i) > 0:
A = values_opt[ind_fr_i]
b = values_opt[i]
if solver == 'lstsq':
coef = np.linalg.lstsq(A.T, b, rcond=tol)[0] # SVD method
elif solver == 'nnls':
coef = sp.optimize.nnls(A.T, b)[0]
elif solver == 'lsq':
coef = sp.optimize.lsq_linear(A.T, b, bounds=(0, 1),
method='trf',
lsq_solver='lsmr')['x']
else:
raise ValueError("`solver` not recognized")
else:
msg = "No frames left in the reference set. Try increasing "
msg += "`dist_threshold` or decreasing `delta_rot`."
raise RuntimeError(msg)
if formod==True:
ind_ref_list.append(ind_fr_i)
coef_list.append(coef)
recon = np.dot(coef, values[ind_fr_i])
matrix_res[i] = values[i] - recon
if formod==True:
return matrix_res,ind_ref_list,coef_list, yy, xx,
else:
return matrix_res, yy,xx
def llsg_adisdi(cube, angle_list,scale_list, fwhm, rank=10, thresh=1, max_iter=10,
low_rank_ref=False, low_rank_mode='svd', auto_rank_mode='noise',
residuals_tol=1e-1, cevr=0.9, thresh_mode='soft', nproc=1,
asize=None, n_segments=4, azimuth_overlap=None, radius_int=None,
random_seed=None, imlib='opencv', interpolation='lanczos4',
high_pass=None, collapse='median', full_output=True, verbose=True,
debug=False):
""" Local low rank plus Gaussian PSF subtraction for ADI or ADI+SDI. This
code is an adaptation of the VIP llsg function.
"""
cube_rot_scale,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
list_l, list_s, list_g, f_l, frame_fin, f_g = vip.llsg.llsg(cube_rot_scale, angle_list_t, fwhm, rank=rank,asize=asize, thresh=1,n_segments=n_segments, max_iter=40, random_seed=10, nproc=nproc,full_output=True,verbose=False)
res_s=np.array(list_s)
residuals_cube_=cube_derotate(res_s[0],-angle_list_t)
cube_der=rot_scale('fin',cube,residuals_cube_,angle_list_t,scale_list_t, imlib, interpolation)
frame_fin=cube_collapse(cube_der, collapse)
return cube_der,frame_fin
def _decompose_patch(indices, i_patch,cube_init, n_segments_ann, rank, low_rank_ref,
low_rank_mode, thresh, thresh_mode, max_iter,
auto_rank_mode, cevr, residuals_tol, random_seed,
debug=False, full_output=False):
""" Patch decomposition from the LLSG VIP function.
"""
j = i_patch
yy = indices[j][0]
xx = indices[j][1]
data_segm = cube_init[:, yy, xx]
if low_rank_ref:
ref_segments = list(range(n_segments_ann))
ref_segments.pop(j)
for m, n in enumerate(ref_segments):
if m == 0:
yy_ref = indices[n][0]
xx_ref = indices[n][1]
else:
yy_ref = np.hstack((yy_ref, indices[n][0]))
xx_ref = np.hstack((xx_ref, indices[n][1]))
data_ref = cube_init[:, yy_ref, xx_ref]
else:
data_ref = data_segm
patch = _patch_rlrps(data_segm, data_ref, rank, low_rank_ref,
low_rank_mode, thresh, thresh_mode,
max_iter, auto_rank_mode, cevr,
residuals_tol, random_seed, debug=debug,
full_output=full_output)
return patch
_largenumber = 1E100
_smallnumber = 1E-5
| 38.590982 | 242 | 0.614542 |
733a51b0598b93f7ddad878e61c9f58e36f463d6 | 4,618 | py | Python | src/custom_dataset.py | devJWSong/transformer-multiturn-dialogue-pytorch | 4ddedaef45f31d75e88bdb909a4451173faec4c8 | [
"MIT"
] | 11 | 2021-03-22T10:22:42.000Z | 2021-09-15T23:50:46.000Z | src/custom_dataset.py | devjwsong/transformer-chatbot-pytorch | 4ddedaef45f31d75e88bdb909a4451173faec4c8 | [
"MIT"
] | 1 | 2021-12-10T04:52:39.000Z | 2021-12-10T04:52:40.000Z | src/custom_dataset.py | devjwsong/transformer-chatbot-pytorch | 4ddedaef45f31d75e88bdb909a4451173faec4c8 | [
"MIT"
] | null | null | null | from torch.utils.data import Dataset
from tqdm import tqdm
import torch
import pickle
import json
| 38.483333 | 129 | 0.529017 |
733ab2bdfefcfa168386562dc21d727ee4511840 | 1,588 | py | Python | research/codec/codec_example.py | FXTD-ODYSSEY/QBinder | b4b288e7c0ef09d2382e3d6678a5c41950257b76 | [
"MIT"
] | 13 | 2020-11-29T15:02:57.000Z | 2022-02-11T03:12:25.000Z | research/codec/codec_example.py | FXTD-ODYSSEY/QBinder | b4b288e7c0ef09d2382e3d6678a5c41950257b76 | [
"MIT"
] | 8 | 2020-11-30T02:47:56.000Z | 2021-05-19T03:44:16.000Z | research/codec/codec_example.py | FXTD-ODYSSEY/QtConfig | 978cddf26c0305677b65b04d206138970cb73762 | [
"MIT"
] | 2 | 2020-11-30T01:59:19.000Z | 2021-12-17T06:44:54.000Z | # -*- coding: future_fstrings -*-
import codecs
import pdb
import string
# NOTE https://stackoverflow.com/questions/38777818/how-do-i-properly-create-custom-text-codecs
# prepare map from numbers to letters
_encode_table = {str(number): bytes(letter) for number, letter in enumerate(string.ascii_lowercase)}
# prepare inverse map
_decode_table = {v: k for k, v in _encode_table.items()}
if __name__ == '__main__':
main() | 28.872727 | 100 | 0.714736 |
733b44cae4895b7b97c2632f68beb2990e9371cb | 370 | py | Python | benchmark_python_lkml.py | Ladvien/rust_lookml_parser | a99a9663f2e0ccd0e7eff0fb6ec4f4496032265c | [
"MIT"
] | null | null | null | benchmark_python_lkml.py | Ladvien/rust_lookml_parser | a99a9663f2e0ccd0e7eff0fb6ec4f4496032265c | [
"MIT"
] | null | null | null | benchmark_python_lkml.py | Ladvien/rust_lookml_parser | a99a9663f2e0ccd0e7eff0fb6ec4f4496032265c | [
"MIT"
] | null | null | null | import lkml
from time import time_ns
from rich import print
FILE_PATH = "/Users/ladvien/rusty_looker/src/resources/test.lkml"
with open(FILE_PATH, "r") as f:
lookml = f.read()
startTime = time_ns() // 1_000_000
result = lkml.load(lookml)
print(result)
executionTime = (time_ns() // 1_000_000) - startTime
print('Execution time in seconds: ' + str(executionTime)) | 26.428571 | 65 | 0.735135 |
733c41f200ce9ccff635234faca97343a23e5190 | 1,595 | py | Python | Linear_Regression.py | svdeepak99/TSA-Twitter_Sentiment_Analysis | 41c13682ccc110025c5fbd396c0982d54febc6cc | [
"MIT"
] | null | null | null | Linear_Regression.py | svdeepak99/TSA-Twitter_Sentiment_Analysis | 41c13682ccc110025c5fbd396c0982d54febc6cc | [
"MIT"
] | null | null | null | Linear_Regression.py | svdeepak99/TSA-Twitter_Sentiment_Analysis | 41c13682ccc110025c5fbd396c0982d54febc6cc | [
"MIT"
] | null | null | null | from keras.models import Sequential, load_model
from keras.layers import Dense
import csv
import numpy as np
import os
LOAD_MODEL = False
with open("Linear_Regression/Normalized_Attributes.csv", "r", newline='') as fp:
reader = csv.reader(fp)
headings = next(reader)
dataset = np.array(list(reader), dtype=np.float)
with open("Linear_Regression/VADER_Sentiment.csv", "r", newline='') as fp:
reader = csv.reader(fp)
outputs = np.array([x[0] for x in list(reader)])
if os.path.isfile("Linear_Regression/model/regression_full.h5") and LOAD_MODEL:
model = load_model("Linear_Regression/model/regression_full.h5")
else:
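    # a single linear unit on the 33 inputs trained with MSE amounts to ordinary linear regression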
model = Sequential()
model.add(Dense(1, input_dim = 33, activation='linear'))
model.compile(loss='mse', optimizer='rmsprop', metrics=['mse'])
model.fit(x=dataset, y=outputs, epochs=40, verbose=1)
model.save("Linear_Regression/model/regression_full.h5")
model.summary()
weights = model.get_weights()
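# for Dense(1), weights[0] is the 33x1 kernel and weights[1] the bias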
weights_list = []
for i, w in enumerate(weights[0]):
print(f'{i+1}) {headings[i]} : {w[0]}')
weights_list.append([headings[i], w[0]])
print(f'34) BIAS: {weights[1][0]}\n')
weights_list.append(['BIAS', weights[1][0]])
with open("Linear_Regression/Full_weights.csv", "w", newline='') as fp:
writer = csv.writer(fp)
writer.writerows(weights_list)
print(len(weights), len(weights[0]), len(weights[1]))
print(model.predict(dataset[:10]))
print(outputs[:10])
print(np.sum(dataset[0]*np.array([x[0] for x in weights[0]]))+weights[1][0], model.predict(np.array([dataset[0]])))
| 32.55102 | 116 | 0.680251 |
733c8d8b8ea4cf5eaafe8785802f0c3c067c38ff | 3,141 | py | Python | UserCode/bressler/multibubblescintillationcheck.py | cericdahl/SBCcode | 90a7841a5c1208d64f71a332289d9005a011aa21 | [
"MIT"
] | 4 | 2018-08-27T18:02:34.000Z | 2020-06-09T21:19:04.000Z | UserCode/bressler/multibubblescintillationcheck.py | SBC-Collaboration/SBC-Analysis | 90a7841a5c1208d64f71a332289d9005a011aa21 | [
"MIT"
] | null | null | null | UserCode/bressler/multibubblescintillationcheck.py | SBC-Collaboration/SBC-Analysis | 90a7841a5c1208d64f71a332289d9005a011aa21 | [
"MIT"
] | 4 | 2019-06-20T21:36:26.000Z | 2020-11-10T17:23:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 19:33:02 2021
@author: bressler
"""
import SBCcode as sbc
import numpy as np
import pulse_integrator as pi
import gc
if __name__ == "__main__":
main() | 36.523256 | 97 | 0.539 |
733cfbac832497fb734f7d0cde01792ad3325cd5 | 4,038 | py | Python | app/core/models.py | echosisdev/openmrs-disa-sync | 077331c5b96394f15cc90aba7ce4018e10d5608d | [
"MIT"
] | null | null | null | app/core/models.py | echosisdev/openmrs-disa-sync | 077331c5b96394f15cc90aba7ce4018e10d5608d | [
"MIT"
] | null | null | null | app/core/models.py | echosisdev/openmrs-disa-sync | 077331c5b96394f15cc90aba7ce4018e10d5608d | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models.signals import pre_save, post_save
from core.utils.constants import Constants
from core.utils.data_convertion import DataConversion
# def insert_formatted_nid(sender, instance, created, *args, **kwargs):
# if created:
# instance.formatted_nid = DataConversion.format_nid(instance.nid)
# print(instance.formatted_nid)
# post_save.connect(insert_formatted_nid, sender=ViralLoad)
| 38.826923 | 78 | 0.733779 |
733e97b6658e7e2eb8c13752d62cc0a274acaa1f | 1,533 | py | Python | ceibacli/job_schedulers/slurm.py | cffbots/ceiba-cli | 7e77199c1fe919f024c4707b65578fec320713a6 | [
"Apache-2.0"
] | 2 | 2020-11-10T08:52:15.000Z | 2020-11-10T08:52:17.000Z | ceibacli/job_schedulers/slurm.py | cffbots/ceiba-cli | 7e77199c1fe919f024c4707b65578fec320713a6 | [
"Apache-2.0"
] | 24 | 2020-09-22T09:58:38.000Z | 2021-01-14T11:02:33.000Z | ceibacli/job_schedulers/slurm.py | cffbots/ceiba-cli | 7e77199c1fe919f024c4707b65578fec320713a6 | [
"Apache-2.0"
] | 1 | 2022-02-03T13:46:07.000Z | 2022-02-03T13:46:07.000Z | """Interface to the `SLURM job scheduler <https://slurm.schedmd.com/documentation.html>`_
.. autofunction:: create_slurm_script
"""
from pathlib import Path
from typing import Any, Dict, List
from ..utils import Options
def create_slurm_script(opts: Options, jobs: List[Dict[str, Any]], jobs_metadata: List[Options]) -> str:
"""Create a script to run the workflow using the SLURM job schedule."""
slurm_file = Path("launch.sh")
# Get SLURM configuration
scheduler = opts.scheduler
# Use the configuration provided by the user
if scheduler.free_format is not None:
script = scheduler.free_format
else:
script = make_script(opts.scheduler)
# Append command to run the workflow
for meta, job in zip(jobs_metadata, jobs):
input_file = meta.input.absolute().as_posix()
workdir = opts.workdir.absolute().as_posix()
script += f'\ncd {workdir} && {opts.command} {input_file}'
with open(slurm_file, 'w') as handler:
handler.write(script)
return f"sbatch {slurm_file.absolute().as_posix()}"
def make_script(scheduler: Options) -> str:
"""Create a SLURM script using the ``scheduler`` options."""
arguments = {"cpus-per-task", "partition"}
script = f"""#!/bin/bash
#SBATCH -N {scheduler.nodes}
#SBATCH -t {scheduler.wall_time}
"""
# Add optional arguments
for arg in arguments:
value = scheduler.get(arg, None)
if value is not None:
script += f"#SBATCH --{arg} {value}\n"
return script
| 29.480769 | 104 | 0.666014 |
733ee42203016605540515b9f13fedcc898ddec0 | 5,290 | py | Python | thumbor/url.py | wking/thumbor | 97a55594a67e3cf3b5e7d09cde5944bc821eeb1e | [
"MIT"
] | null | null | null | thumbor/url.py | wking/thumbor | 97a55594a67e3cf3b5e7d09cde5944bc821eeb1e | [
"MIT"
] | null | null | null | thumbor/url.py | wking/thumbor | 97a55594a67e3cf3b5e7d09cde5944bc821eeb1e | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
import re
from urllib import quote
| 31.117647 | 119 | 0.479017 |
733f199984f8b993cc317085788a45b7c591ad8b | 1,084 | py | Python | Projects/herdimmunity/Person.py | Tech-at-DU/ACS-1111.1-Object-Oriented-Programming | e4405678282b7fbd78a3b8337116b61083417343 | [
"MIT"
] | null | null | null | Projects/herdimmunity/Person.py | Tech-at-DU/ACS-1111.1-Object-Oriented-Programming | e4405678282b7fbd78a3b8337116b61083417343 | [
"MIT"
] | null | null | null | Projects/herdimmunity/Person.py | Tech-at-DU/ACS-1111.1-Object-Oriented-Programming | e4405678282b7fbd78a3b8337116b61083417343 | [
"MIT"
] | 1 | 2021-11-01T21:04:52.000Z | 2021-11-01T21:04:52.000Z | import random
from Virus import Virus
| 37.37931 | 86 | 0.665129 |
733fd1e0e78df5be2052554568f783ebcd9a6ad0 | 1,741 | py | Python | Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py | ramonvaleriano/python- | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | [
"MIT"
] | null | null | null | Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py | ramonvaleriano/python- | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | [
"MIT"
] | null | null | null | Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py | ramonvaleriano/python- | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | [
"MIT"
] | null | null | null |
class Filme(Programa):
class Serie(Programa):
class Playlist:
vingadores = Filme('Vingadores - Guerra Infinita', 2018, 160)
atlanta = Serie('Atlanta', 2018, 2)
tmep = Filme('Todo mundo em pânico', 1999, 100)
demolidor = Serie('Demolidor', 2016, 2)
filmes_e_series = [vingadores, atlanta, demolidor, tmep]
playlist_fim_de_semana = Playlist('fim de semana', filmes_e_series)
print(f'Tamonho do playlist: {len(playlist_fim_de_semana)}')
for dados in playlist_fim_de_semana:
print(dados) | 24.871429 | 88 | 0.638713 |
73401ec9a9c7c85f7251558930f267232a9f7bb1 | 3,275 | py | Python | blackjack/game.py | cuiqui/blackjack | 5ecb0ae1c065fa284c2209222f6f958e1f514249 | [
"MIT"
] | null | null | null | blackjack/game.py | cuiqui/blackjack | 5ecb0ae1c065fa284c2209222f6f958e1f514249 | [
"MIT"
] | null | null | null | blackjack/game.py | cuiqui/blackjack | 5ecb0ae1c065fa284c2209222f6f958e1f514249 | [
"MIT"
] | null | null | null | import constants as c
from deck import Deck
from player import Human, RandomAI
if __name__ == '__main__':
g = Game()
g.new()
g.run()
| 31.490385 | 102 | 0.487328 |
73407d37b530e40b65a5d94f1bc5d3086355dead | 1,084 | py | Python | numba/tests/__init__.py | mawanda-jun/numba | 8c6658375c1f8fe50e1a5ccd11d4e7bf5a8053de | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2019-12-04T07:13:18.000Z | 2019-12-04T07:13:18.000Z | numba/tests/__init__.py | mawanda-jun/numba | 8c6658375c1f8fe50e1a5ccd11d4e7bf5a8053de | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | numba/tests/__init__.py | mawanda-jun/numba | 8c6658375c1f8fe50e1a5ccd11d4e7bf5a8053de | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-09-18T15:03:46.000Z | 2020-09-18T15:03:46.000Z | from numba import unittest_support as unittest
import gc
from os.path import dirname, join
import multiprocessing
import sys
import time
import warnings
from unittest.suite import TestSuite
from numba.testing import load_testsuite
from numba.testing import ddt # for backward compatibility
try:
import faulthandler
except ImportError:
faulthandler = None
else:
try:
# May fail in IPython Notebook with UnsupportedOperation
faulthandler.enable()
except Exception as e:
msg = "Failed to enable faulthandler due to:\n{err}"
warnings.warn(msg.format(err=e))
| 27.1 | 64 | 0.737085 |
7340e2ed735c34bf4441bf796759a517ee89ee90 | 5,377 | py | Python | src/clustar/fit.py | clustar/Clustar | 83e155feffc10c4bf172f8ec769fb3c5ffe1d579 | [
"MIT"
] | 4 | 2021-02-24T17:27:25.000Z | 2021-06-28T04:45:32.000Z | src/clustar/fit.py | clustar/Clustar | 83e155feffc10c4bf172f8ec769fb3c5ffe1d579 | [
"MIT"
] | 3 | 2021-04-05T14:53:26.000Z | 2021-06-27T20:17:14.000Z | src/clustar/fit.py | clustar/Clustar | 83e155feffc10c4bf172f8ec769fb3c5ffe1d579 | [
"MIT"
] | 1 | 2021-02-15T16:13:05.000Z | 2021-02-15T16:13:05.000Z | """
Clustar module for fitting-related methods.
This module is designed for the 'ClustarData' object. All listed methods take
an input parameter of a 'ClustarData' object and return a 'ClustarData' object
after processing the method. As a result, all changes are localized within the
'ClustarData' object.
Visit <https://clustar.github.io/> for additional information.
"""
from clustar import graph
from scipy import ndimage, stats
from shapely import affinity, geometry
import numpy as np
def compute_fit(cd):
"""
Computes the normalized bivariate gaussian fit for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
i = 0
while i < len(cd.groups):
group = cd.groups[i]
try:
rv = stats.multivariate_normal([group.stats.x_bar,
group.stats.y_bar],
group.stats.covariance_matrix)
except ValueError:
del cd.groups[i]
continue
bvg = rv.pdf(group.image.pos)
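        # scale the Gaussian so its peak matches the image peak, then store the relative residuals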
bvg *= np.max(group.image.data) / np.max(bvg)
group.res.data = 1 - (bvg / group.image.data)
group.fit.bvg = bvg
group.fit.rv = rv
i += 1
return cd
def compute_ellipse(cd):
"""
Computes the ellipse parameters and localized residuals for the 'Group'
objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
a = group.stats.x_len / 2
b = group.stats.y_len / 2
theta = np.linspace(0, np.pi * 2, 360)
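        # polar equation of an ellipse with semi-axes a and b, sampled over 360 angles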
r = a * b / np.sqrt((b * np.cos(theta)) ** 2 +
(a * np.sin(theta)) ** 2)
xy = np.stack([group.stats.x_bar + r * np.cos(theta),
group.stats.y_bar + r * np.sin(theta)], 1)
ellipse = affinity.rotate(geometry.Polygon(xy),
group.stats.degrees,
(group.stats.x_bar, group.stats.y_bar))
pos = np.array([[i, j] for i in range(group.image.data.shape[0])
for j in range(group.image.data.shape[1])])
inside = np.array([p for p in pos
if ellipse.contains(geometry.Point(p))])
outside = np.array([p for p in pos
if not ellipse.contains(geometry.Point(p))])
group.fit.ellipse = ellipse
group.res.pos = pos
group.res.inside = inside
group.res.outside = outside
return cd
def compute_metrics(cd):
"""
Computes the evaluation metrics for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = group.res
output = np.abs(res.data[res.inside[:, 0], res.inside[:, 1]])
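        # clamp the relative residuals inside the ellipse to the [0, 1] range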
output[output < 0] = 0
output[output > 1] = 1
bias = group.image.data[res.inside[:, 0], res.inside[:, 1]]
group.metrics.standard_deviation = np.std(output)
group.metrics.variance = group.metrics.standard_deviation ** 2
group.metrics.average = np.mean(output)
group.metrics.weighted_average = np.average(output, weights=bias)
group.res.output = output
return cd
def compute_peaks(cd):
"""
Computes the number of peaks along the major and minor axes for the
'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = np.array(group.res.data, copy=True)
res_out = group.res.outside
res[res_out[:, 0], res_out[:, 1]] = 0
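        # rotate the residuals so the major, then the minor, axis aligns with the image grid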
r_major = np.abs(ndimage.rotate(res, group.stats.degrees))
r_minor = np.abs(ndimage.rotate(res, group.stats.degrees + 90))
major_idx = graph.critical_points(r_major)
minor_idx = graph.critical_points(r_minor)
major_idx = [major_idx[i] for i in range(len(major_idx))
if i % 2 == 0]
minor_idx = [minor_idx[i] for i in range(len(minor_idx))
if i % 2 == 0]
group.fit.major_peaks = len(major_idx)
group.fit.minor_peaks = len(minor_idx)
group.res.clean = res
return cd
def validate(cd):
"""
Determines which 'Group' objects are flagged for manual review by using
the specified validation parameters.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
attribute = cd.params.metric.lower()
threshold = cd.params.threshold
for group in cd.groups:
metric = getattr(group.metrics, attribute)
if metric > threshold:
group.flag = True
cd.flag = True
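        # a two- or four-peak residual pattern along either axis overrides the flag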
if cd.params.evaluate_peaks and \
((group.fit.major_peaks in [2, 4]) or
(group.fit.minor_peaks in [2, 4])):
group.flag = False
cd.flag = False
return cd
| 29.382514 | 78 | 0.563139 |
73411a436e5f2edebb124c6122419fcdeef298b3 | 970 | py | Python | tests/wizard/namedwizardtests/urls.py | felixxm/django-formtools | ba62c6fa14edbd4197bda8ed0d23eb006ebebeba | [
"BSD-3-Clause"
] | null | null | null | tests/wizard/namedwizardtests/urls.py | felixxm/django-formtools | ba62c6fa14edbd4197bda8ed0d23eb006ebebeba | [
"BSD-3-Clause"
] | null | null | null | tests/wizard/namedwizardtests/urls.py | felixxm/django-formtools | ba62c6fa14edbd4197bda8ed0d23eb006ebebeba | [
"BSD-3-Clause"
] | 1 | 2019-11-04T22:52:19.000Z | 2019-11-04T22:52:19.000Z | from django.conf.urls import url
from .forms import (
CookieContactWizard, Page1, Page2, Page3, Page4, SessionContactWizard,
)
urlpatterns = [
url(r'^nwiz_session/(?P<step>.+)/$', get_named_session_wizard(), name='nwiz_session'),
url(r'^nwiz_session/$', get_named_session_wizard(), name='nwiz_session_start'),
url(r'^nwiz_cookie/(?P<step>.+)/$', get_named_cookie_wizard(), name='nwiz_cookie'),
url(r'^nwiz_cookie/$', get_named_cookie_wizard(), name='nwiz_cookie_start'),
]
| 32.333333 | 90 | 0.670103 |
73418fc41479ed48faa479be47ae0461c5d41885 | 907 | py | Python | setup.py | ajayp10/derive_event_pm4py | d1fd16c65081348b617dc0697b372a294c91023a | [
"MIT"
] | null | null | null | setup.py | ajayp10/derive_event_pm4py | d1fd16c65081348b617dc0697b372a294c91023a | [
"MIT"
] | null | null | null | setup.py | ajayp10/derive_event_pm4py | d1fd16c65081348b617dc0697b372a294c91023a | [
"MIT"
] | null | null | null | import pathlib
from setuptools import setup
CURRENT_PATH = pathlib.Path(__file__).parent
README = (CURRENT_PATH/"README.md").read_text()
setup(
name="derive_event_pm4py",
version="1.0.1",
description="It derives new events based on rules provided as inputs.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/ajayp10/derive_event_pm4py",
author="Ajay Pandi",
author_email="ajay.pandi@rwth-aachen.de",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=["derive_event"],
include_package_data=True,
install_requires=['pandas', 'numpy', 'pm4py',
],
entry_points={
"console_scripts": [
"derive=derive_event.derive:main",
]
},
) | 28.34375 | 75 | 0.651599 |
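For orientation, the console_scripts entry above means that, once the package is installed, running `derive` on the command line is equivalent to the following (sketch; it simply resolves the declared `derive_event.derive:main` target):
from derive_event.derive import main
if __name__ == "__main__":
    main()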
73425bf1b2ce90f77e267345bd3b090b0208b790 | 16,334 | py | Python | tests/service/ai/test_not_killing_itself_ai.py | jonashellmann/informaticup21-team-chillow | f2e519af0a5d9a9368d62556703cfb1066ebb58f | [
"MIT"
] | 3 | 2021-01-17T23:32:07.000Z | 2022-01-30T14:49:16.000Z | tests/service/ai/test_not_killing_itself_ai.py | jonashellmann/informaticup21-team-chillow | f2e519af0a5d9a9368d62556703cfb1066ebb58f | [
"MIT"
] | 2 | 2021-01-17T13:37:56.000Z | 2021-04-14T12:28:49.000Z | tests/service/ai/test_not_killing_itself_ai.py | jonashellmann/informaticup21-team-chillow | f2e519af0a5d9a9368d62556703cfb1066ebb58f | [
"MIT"
] | 2 | 2021-04-02T14:53:38.000Z | 2021-04-20T11:10:17.000Z | import unittest
from datetime import datetime, timezone
from typing import List
from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
| 52.185304 | 120 | 0.53355 |
73441d4a3b24e3d3313825da48a3c91f2e8b65de | 1,123 | py | Python | setup.py | meisanggou/ldapuser | 45a9e5eba8bbf173ce2ec87f9a32cff8db549e7c | [
"MIT"
] | null | null | null | setup.py | meisanggou/ldapuser | 45a9e5eba8bbf173ce2ec87f9a32cff8db549e7c | [
"MIT"
] | null | null | null | setup.py | meisanggou/ldapuser | 45a9e5eba8bbf173ce2ec87f9a32cff8db549e7c | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# coding: utf-8
# __author__ = 'meisanggou'
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
if sys.version_info <= (2, 7):
sys.stderr.write("ERROR: ldap-user requires Python Version 2.7 or above.\n")
sys.stderr.write("Your Python Version is %s.%s.%s.\n" % sys.version_info[:3])
sys.exit(1)
name = "ldap-user"
version = "0.5"
url = "https://github.com/meisanggou/ldapuser"
license = "MIT"
author = "meisanggou"
short_description = "use ldap verify user"
long_description = """use ldap verify user"""
keywords = "ldap-user"
install_requires = ["python-ldap", "six"]
entry_points = {'console_scripts': [
'jy-ldap-config=ldap_user.cli:create_config'
]}
setup(name=name,
version=version,
author=author,
author_email="zhouheng@gene.ac",
url=url,
packages=["ldap_user", "ldap_user/util"],
license=license,
description=short_description,
long_description=long_description,
keywords=keywords,
install_requires=install_requires,
entry_points=entry_points
)
| 25.522727 | 81 | 0.685663 |
73455aa40d8fdaf8fad425f0bc60becf47571215 | 4,387 | py | Python | tests/test_infection.py | chinapnr/covid-19-data | 409fa260c16e09b7ef820435c5086207bb5e40ef | [
"MIT"
] | 3 | 2020-05-27T01:21:50.000Z | 2020-08-20T07:54:42.000Z | tests/test_infection.py | chinapnr/covid-19-data | 409fa260c16e09b7ef820435c5086207bb5e40ef | [
"MIT"
] | 24 | 2020-03-26T10:45:34.000Z | 2020-04-06T06:13:50.000Z | tests/test_infection.py | chinapnr/covid-19-data | 409fa260c16e09b7ef820435c5086207bb5e40ef | [
"MIT"
] | null | null | null | import json
import pytest
| 35.096 | 90 | 0.581947 |
734586c386b99571285203cdfc4477ce123175a7 | 19,442 | py | Python | tests/test_util_owsutil.py | TimFranken/pydov | da393129207c93ab845a28525864c13374459dbf | [
"MIT"
] | null | null | null | tests/test_util_owsutil.py | TimFranken/pydov | da393129207c93ab845a28525864c13374459dbf | [
"MIT"
] | null | null | null | tests/test_util_owsutil.py | TimFranken/pydov | da393129207c93ab845a28525864c13374459dbf | [
"MIT"
] | null | null | null | """Module grouping tests for the pydov.util.owsutil module."""
import copy
import re
import pytest
from numpy.compat import unicode
from owslib.etree import etree
from owslib.fes import (
PropertyIsEqualTo,
FilterRequest,
)
from owslib.iso import MD_Metadata
from owslib.util import nspath_eval
from pydov.util import owsutil
from pydov.util.errors import (
MetadataNotFoundError,
FeatureCatalogueNotFoundError,
)
from pydov.util.location import (
Within,
Box,
)
from tests.test_search_boring import (
md_metadata,
mp_remote_md,
mp_remote_describefeaturetype,
mp_remote_fc,
location_md_metadata,
location_fc_featurecatalogue,
location_wfs_describefeaturetype,
)
from tests.test_search import (
wfs,
mp_wfs,
mp_remote_fc_notfound
)
def clean_xml(xml):
"""Clean the given XML string of namespace definition, namespace
prefixes and syntactical but otherwise meaningless differences.
Parameters
----------
xml : str
String representation of XML document.
Returns
-------
str
String representation of cleaned XML document.
"""
# remove xmlns namespace definitions
r = re.sub(r'[ ]+xmlns:[^=]+="[^"]+"', '', xml)
# remove namespace prefixes in tags
r = re.sub(r'<(/?)[^:]+:([^ >]+)([ >])', r'<\1\2\3', r)
# remove extra spaces in tags
r = re.sub(r'[ ]+/>', '/>', r)
# remove extra spaces between tags
r = re.sub(r'>[ ]+<', '><', r)
return r
| 40.419958 | 79 | 0.620152 |
7347c43851f55966f151bfafefba0299301f676e | 1,430 | py | Python | manage.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
] | null | null | null | manage.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
] | null | null | null | manage.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
manage
~~~~~~
Flask-Script Manager
"""
import os
from flask.ext.script import Manager
from flask.ext.migrate import MigrateCommand
from fbone import create_app
from fbone.extensions import db
from fbone.utils import PROJECT_PATH, MALE
from fbone.modules.user import User, ADMIN, ACTIVE
from fbone.modules.movies import Movie
from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand
app = create_app()
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config', required=False)
manager.add_command('create_user', CreateUserCommand())
manager.add_command('delete_user', DeleteUserCommand())
manager.add_command('list_users', ListUsersCommand())
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
| 23.442623 | 94 | 0.692308 |
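Flask-Script managers like the one above also accept ad-hoc commands; a hypothetical addition (not part of the original file, shown only to illustrate the pattern) would look like:
@manager.command
def initdb():
    """Create all database tables."""
    db.create_all()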
7347cda8008de4de3e2356287a34d7f4b9da7478 | 444 | py | Python | llvmsqlite_util/benchmarking/micro/aggregate.py | KowalskiThomas/LLVMSQLite | a36b85dfadf44b0a4008d9f01ebd79d5ca2cace4 | [
"blessing"
] | null | null | null | llvmsqlite_util/benchmarking/micro/aggregate.py | KowalskiThomas/LLVMSQLite | a36b85dfadf44b0a4008d9f01ebd79d5ca2cace4 | [
"blessing"
] | null | null | null | llvmsqlite_util/benchmarking/micro/aggregate.py | KowalskiThomas/LLVMSQLite | a36b85dfadf44b0a4008d9f01ebd79d5ca2cace4 | [
"blessing"
] | null | null | null | import os
sql_files = [x for x in os.listdir(".") if x.endswith("sql")]
sql_files = list(sorted(sql_files, key = lambda x : int(x.split('.')[0])))
result = ""
for i, f in enumerate(sql_files):
i = i + 1
i = f.replace(".sql", "")
with open(f) as sql:
result += f"--- Query {i}\n"
result += sql.read().strip()
result += "\n\n\n"
result = result.strip()
with open("output.txt", 'w') as f:
f.write(result) | 26.117647 | 74 | 0.554054 |
73486adc08d97e3620d3c9533949d0a3a23b6c00 | 2,882 | py | Python | demos/crane/main.py | Starli8ht/KivyMD | 5a3425d4e39e8615a0ba8b879db1eb5d7bfb3b49 | [
"MIT"
] | null | null | null | demos/crane/main.py | Starli8ht/KivyMD | 5a3425d4e39e8615a0ba8b879db1eb5d7bfb3b49 | [
"MIT"
] | null | null | null | demos/crane/main.py | Starli8ht/KivyMD | 5a3425d4e39e8615a0ba8b879db1eb5d7bfb3b49 | [
"MIT"
] | null | null | null | """
MDCrane demo
=============
.. seealso::
`Material Design spec,
Crane <https://material.io/design/material-studies/crane.html#>`
Crane is a travel app that helps users find and book travel, lodging, and
restaurant options that match their input preferences.
"""
import os
import sys
from pathlib import Path
from kivy.lang import Builder
from kivymd.app import MDApp
if getattr(sys, "frozen", False): # bundle mode with PyInstaller
os.environ["CRANE_ROOT"] = sys._MEIPASS
else:
os.environ["CRANE_ROOT"] = str(Path(__file__).parent)
KV_DIR = f"{os.path.dirname(__file__)}/libs/kv/"
for kv_file in os.listdir(KV_DIR):
with open(os.path.join(KV_DIR, kv_file), encoding="utf-8") as kv:
Builder.load_string(kv.read())
KV = """
#:import FadeTransition kivy.uix.screenmanager.FadeTransition
#:import CraneRootScreen libs.baseclass.root_screen.CraneRootScreen
ScreenManager:
transition: FadeTransition()
CraneRootScreen:
name: "crane root screen"
"""
MDCrane().run()
| 29.408163 | 77 | 0.519084 |
7349101381b3dbb9e23adbac5458b1fa8f012f0b | 8,368 | py | Python | tasks/lgutil/graph_net.py | HimmelStein/lg-flask | 562adfe16c3dd718faf694b8233586422d035e17 | [
"MIT"
] | null | null | null | tasks/lgutil/graph_net.py | HimmelStein/lg-flask | 562adfe16c3dd718faf694b8233586422d035e17 | [
"MIT"
] | null | null | null | tasks/lgutil/graph_net.py | HimmelStein/lg-flask | 562adfe16c3dd718faf694b8233586422d035e17 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from nltk.parse import DependencyGraph
from collections import defaultdict
import random
import sys
import copy
from json import dumps
from pprint import pprint
try:
from .lg_graph import LgGraph
except ImportError:
    sys.path.append("/Users/tdong/git/lg-flask/tasks/lgutil")
    from lg_graph import LgGraph
if __name__ == '__main__':
LgGraph0 = LgGraph()
LgGraph0.set_sample_snt_ldg_from_db(lan='de', table='pons', num=0)
GraphNet0 = GraphNet(ldg = LgGraph0)
GraphNet0.apply_graph_operation('remove-link-verb')
pprint(GraphNet0.to_json())
| 34.866667 | 113 | 0.518164 |
7349161371152ef9656dab45ddf6d709b3bf142a | 5,517 | py | Python | utils/transformations/char_level/char_dces_substitute.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | null | null | null | utils/transformations/char_level/char_dces_substitute.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | null | null | null | utils/transformations/char_level/char_dces_substitute.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# coding=UTF-8
"""
@Author: WEN Hao
@LastEditors: WEN Hao
@Description:
@Date: 2021-09-24
@LastEditTime: 2022-04-17
OpenAttackDCESSubstitute
"""
import random
from typing import NoReturn, List, Any, Optional
import numpy as np
from utils.transformations.base import CharSubstitute
from utils.assets import fetch
from utils.misc import DEFAULTS
__all__ = [
"CharacterDCESSubstitute",
]
_disallowed = [
"TAG",
"MALAYALAM",
"BAMUM",
"HIRAGANA",
"RUNIC",
"TAI",
"SUNDANESE",
"BATAK",
"LEPCHA",
"CHAM",
"TELUGU",
"DEVANGARAI",
"BUGINESE",
"MYANMAR",
"LINEAR",
"SYLOTI",
"PHAGS-PA",
"CHEROKEE",
"CANADIAN",
"YI",
"LYCIAN",
"HANGUL",
"KATAKANA",
"JAVANESE",
"ARABIC",
"KANNADA",
"BUHID",
"TAGBANWA",
"DESERET",
"REJANG",
"BOPOMOFO",
"PERMIC",
"OSAGE",
"TAGALOG",
"MEETEI",
"CARIAN",
"UGARITIC",
"ORIYA",
"ELBASAN",
"CYPRIOT",
"HANUNOO",
"GUJARATI",
"LYDIAN",
"MONGOLIAN",
"AVESTAN",
"MEROITIC",
"KHAROSHTHI",
"HUNGARIAN",
"KHUDAWADI",
"ETHIOPIC",
"PERSIAN",
"OSMANYA",
"ELBASAN",
"TIBETAN",
"BENGALI",
"TURKIC",
"THROWING",
"HANIFI",
"BRAHMI",
"KAITHI",
"LIMBU",
"LAO",
"CHAKMA",
"DEVANAGARI",
"ITALIC",
"CJK",
"MEDEFAIDRIN",
"DIAMOND",
"SAURASHTRA",
"ADLAM",
"DUPLOYAN",
]
_disallowed_codes = [
"1F1A4",
"A7AF",
]
| 23.576923 | 84 | 0.518035 |
734ac6f57c878180c7a2282d8eb947a2ce6b549f | 2,588 | py | Python | piptools/repositories/base.py | LaudateCorpus1/pip-tools | 53c927262d816c336664afee9b03996bfb8f9c44 | [
"BSD-3-Clause"
] | 2 | 2021-12-02T11:41:02.000Z | 2021-12-27T12:01:53.000Z | piptools/repositories/base.py | LaudateCorpus1/pip-tools | 53c927262d816c336664afee9b03996bfb8f9c44 | [
"BSD-3-Clause"
] | 3 | 2020-11-20T18:42:20.000Z | 2021-06-20T09:38:27.000Z | piptools/repositories/base.py | LaudateCorpus1/pip-tools | 53c927262d816c336664afee9b03996bfb8f9c44 | [
"BSD-3-Clause"
] | 2 | 2021-07-13T08:53:43.000Z | 2022-02-02T14:10:58.000Z | import optparse
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from typing import Iterator, Optional, Set
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.index import PyPI
from pip._internal.network.session import PipSession
from pip._internal.req import InstallRequirement
| 34.506667 | 88 | 0.693199 |
734b4343088715a23f5435206ac174b0bc22413c | 11,371 | py | Python | tfx/orchestration/portable/execution_publish_utils.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/portable/execution_publish_utils.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/portable/execution_publish_utils.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portable library for registering and publishing executions."""
import copy
import os
from typing import List, Mapping, MutableMapping, Optional, Sequence, cast
from absl import logging
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from ml_metadata.proto import metadata_store_pb2
def _check_validity(new_artifact: metadata_store_pb2.Artifact,
original_artifact: types.Artifact,
has_multiple_artifacts: bool) -> None:
"""Check the validity of new artifact against the original artifact."""
if new_artifact.type_id != original_artifact.type_id:
raise RuntimeError('Executor output should not change artifact type.')
if has_multiple_artifacts:
# If there are multiple artifacts in the executor output, their URIs should
# be a direct sub-dir of the system generated URI.
if os.path.dirname(new_artifact.uri) != original_artifact.uri:
raise RuntimeError(
'When there are multiple artifacts to publish, their URIs '
'should be direct sub-directories of the URI of the system generated '
'artifact.')
else:
# If there is only one output artifact, its URI should not be changed
if new_artifact.uri != original_artifact.uri:
# TODO(b/175426744): Data Binder will modify the uri.
logging.warning(
'When there is one artifact to publish, the URI of it should be '
'identical to the URI of system generated artifact.')
def publish_cached_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
) -> None:
"""Marks an existing execution as using cached outputs from a previous execution.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associated with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.CACHED
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
input_artifacts=None,
output_artifacts=output_artifacts)
def _set_execution_result_if_not_empty(
executor_output: Optional[execution_result_pb2.ExecutorOutput],
execution: metadata_store_pb2.Execution) -> bool:
"""Sets execution result as a custom property of the execution."""
if executor_output and (executor_output.execution_result.result_message or
executor_output.execution_result.metadata_details or
executor_output.execution_result.code):
# TODO(b/190001754): Consider either switching to base64 encoding or using
# a proto descriptor pool to circumvent TypeError which may be raised when
# converting embedded `Any` protos.
try:
execution_lib.set_execution_result(executor_output.execution_result,
execution)
except TypeError:
logging.exception(
'Skipped setting execution_result as custom property of the '
'execution due to error')
def publish_succeeded_execution(
metadata_handler: metadata.Metadata,
execution_id: int,
contexts: Sequence[metadata_store_pb2.Context],
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> Optional[MutableMapping[str, List[types.Artifact]]]:
"""Marks an existing execution as success.
Also publishes the output artifacts produced by the execution. This method
will also merge the executor produced info into system generated output
artifacts. The `last_know_state` of the execution will be changed to
`COMPLETE` and the output artifacts will be marked as `LIVE`.
Args:
metadata_handler: A handler to access MLMD.
execution_id: The id of the execution to mark successful.
contexts: MLMD contexts to associated with the execution.
output_artifacts: Output artifacts skeleton of the execution, generated by
the system. Each artifact will be linked with the execution through an
event with type OUTPUT.
executor_output: Executor outputs. `executor_output.output_artifacts` will
be used to update system-generated output artifacts passed in through
      `output_artifacts` arg. There are three constraints to the update: 1. The
      keys in `executor_output.output_artifacts` are expected to be a subset
      of the system-generated output artifacts dict. 2. An update to a certain
      key should contain all the artifacts under that key. 3. An update to an
artifact should not change the type of the artifact.
Returns:
    The possibly updated output_artifacts. Note that only outputs whose keys
    appear in executor_output are updated; the others are left untouched, so
    the result can be partially updated.
Raises:
RuntimeError: if the executor output to a output channel is partial.
"""
output_artifacts = copy.deepcopy(output_artifacts) or {}
output_artifacts = cast(MutableMapping[str, List[types.Artifact]],
output_artifacts)
if executor_output:
if not set(executor_output.output_artifacts.keys()).issubset(
output_artifacts.keys()):
raise RuntimeError(
'Executor output %s contains more keys than output skeleton %s.' %
(executor_output, output_artifacts))
for key, artifact_list in output_artifacts.items():
if key not in executor_output.output_artifacts:
continue
updated_artifact_list = executor_output.output_artifacts[key].artifacts
# We assume the original output dict must include at least one output
# artifact and all artifacts in the list share the same type.
original_artifact = artifact_list[0]
# Update the artifact list with what's in the executor output
artifact_list.clear()
# TODO(b/175426744): revisit this:
# 1) Whether multiple output is needed or not after TFX componets
# are upgraded.
# 2) If multiple output are needed and is a common practice, should we
# use driver instead to create the list of output artifact instead
# of letting executor to create them.
for proto_artifact in updated_artifact_list:
_check_validity(proto_artifact, original_artifact,
len(updated_artifact_list) > 1)
python_artifact = types.Artifact(original_artifact.artifact_type)
python_artifact.set_mlmd_artifact(proto_artifact)
artifact_list.append(python_artifact)
# Marks output artifacts as LIVE.
for artifact_list in output_artifacts.values():
for artifact in artifact_list:
artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(
metadata_handler, execution, contexts, output_artifacts=output_artifacts)
return output_artifacts
def publish_failed_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
"""Marks an existing execution as failed.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associated with the execution.
execution_id: The id of the execution.
executor_output: The output of executor.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.FAILED
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(metadata_handler, execution, contexts)
def publish_internal_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None
) -> None:
"""Marks an exeisting execution as as success and links its output to an INTERNAL_OUTPUT event.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associated with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type INTERNAL_OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
output_artifacts=output_artifacts,
output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT)
def register_execution(
metadata_handler: metadata.Metadata,
execution_type: metadata_store_pb2.ExecutionType,
contexts: Sequence[metadata_store_pb2.Context],
input_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
"""Registers a new execution in MLMD.
Along with the execution:
- the input artifacts will be linked to the execution.
- the contexts will be linked to both the execution and its input artifacts.
Args:
metadata_handler: A handler to access MLMD.
execution_type: The type of the execution.
contexts: MLMD contexts to associated with the execution.
input_artifacts: Input artifacts of the execution. Each artifact will be
linked with the execution through an event.
exec_properties: Execution properties. Will be attached to the execution.
Returns:
An MLMD execution that is registered in MLMD, with id populated.
"""
execution = execution_lib.prepare_execution(
metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING,
exec_properties)
return execution_lib.put_execution(
metadata_handler, execution, contexts, input_artifacts=input_artifacts)
| 43.734615 | 97 | 0.735731 |
734bd8fdc6b5e208d672c4c4eac90f446f5043c6 | 6,220 | py | Python | src/dctm/datasets.py | spotify-research/dctm | e813aca23c3f54bc55ace5b3342aaec5cc7dad60 | [
"Apache-2.0"
] | 11 | 2020-08-11T10:18:48.000Z | 2021-12-23T15:34:46.000Z | src/dctm/datasets.py | spotify-research/dctm | e813aca23c3f54bc55ace5b3342aaec5cc7dad60 | [
"Apache-2.0"
] | null | null | null | src/dctm/datasets.py | spotify-research/dctm | e813aca23c3f54bc55ace5b3342aaec5cc7dad60 | [
"Apache-2.0"
] | 2 | 2020-09-02T23:02:11.000Z | 2020-11-17T05:16:29.000Z | #
# Copyright 2020 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Datasets utilities.
If you use nltk you may need the following:
nltk.download('words')
nltk.download('punkt')
nltk.download('wordnet')
"""
import os
import nltk
import numpy as np
import pandas as pd
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS, CountVectorizer
from sklearn.utils import Bunch
ENGLISH_WORDS = set(nltk.corpus.words.words())
STEMMER = SnowballStemmer('english')
def get_neurips(filename: str):
"""Get NeurIPS dataset.
Args:
filename (str): Location of the file for NeurIPS dataset.
"""
df = pd.read_csv(filename, header=0, index_col=0)
year = np.array([x.split('_')[0] for x in df.columns])
# preprocess
df = df.loc[df.index.dropna()]
df = df.loc[~df.index.isin(ENGLISH_STOP_WORDS)]
df.index = [STEMMER.stem(x) for x in df.index.tolist()]
# merge same words together
df = df.groupby(level=0).sum()
vocabulary = df.sum(axis=1)
return df, year, vocabulary
import json
def print_to_file_for_gdtm(df, vocabulary, corpus, filename='test', path='.'):
"""Utility function to save datasets for gDTM.
Args:
        df (pd.DataFrame): Documents with a 'years' column used to split the corpus into time slices.
        vocabulary (list of str): The lexicon; written one token per line.
        corpus (sequence of scipy.sparse matrices): Bag-of-words rows aligned with df's index.
        filename (str, optional): Base name of the output files. Defaults to 'test'.
        path (str, optional): Output directory. Defaults to '.'.
"""
with open(os.path.join(path, '{}_corpus.txt'.format(filename)), 'w') as f:
n_times = df.years.unique().size
f.writelines('{}\n'.format(n_times))
for name, group in df.groupby('years')[0]:
n_docs = group.shape[0]
f.writelines('{}\n{}\n'.format(name.timestamp(), n_docs))
idx = group.index.values
# np.array([df.index.get_loc(x) for x in group.index])
for c in corpus[idx]:
d = c.todok()
f.writelines(
str(len(d)) + ' ' + ' '.join(
'{}:{}'.format(x[1], int(v))
for x, v in d.items()) + '\n')
with open(os.path.join(path, '{}_lexicon.txt'.format(filename)), 'w') as f:
f.writelines('\n'.join(vocabulary))
| 32.910053 | 79 | 0.635852 |
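Typical use of the loader above (the CSV path is hypothetical):
df, year, vocabulary = get_neurips("neurips_word_counts.csv")
print(df.shape)                # (n_stemmed_words, n_document_columns)
print(year[:3])                # year extracted from each column name
print(vocabulary.nlargest(5))  # most frequent stems across the corpus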
734d1d2e2ae1a6d7737d630a4bc5c6e70adf63d2 | 28,079 | py | Python | billingbudgets/google/cloud/billing_budgets_v1beta1/proto/budget_model_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 1 | 2019-12-09T11:40:28.000Z | 2019-12-09T11:40:28.000Z | billingbudgets/google/cloud/billing_budgets_v1beta1/proto/budget_model_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 1 | 2019-03-29T22:03:48.000Z | 2019-04-02T22:24:45.000Z | billingbudgets/google/cloud/billing_budgets_v1beta1/proto/budget_model_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 1 | 2019-03-29T18:26:16.000Z | 2019-03-29T18:26:16.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/billing/budgets_v1beta1/proto/budget_model.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.type import money_pb2 as google_dot_type_dot_money__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/billing/budgets_v1beta1/proto/budget_model.proto",
package="google.cloud.billing.budgets.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n(com.google.cloud.billing.budgets.v1beta1P\001ZKgoogle.golang.org/genproto/googleapis/cloud/billing/budgets/v1beta1;budgets"
),
serialized_pb=_b(
'\n=google/cloud/billing/budgets_v1beta1/proto/budget_model.proto\x12$google.cloud.billing.budgets.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x17google/type/money.proto"\xde\x03\n\x06\x42udget\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12H\n\rbudget_filter\x18\x03 \x01(\x0b\x32,.google.cloud.billing.budgets.v1beta1.FilterB\x03\xe0\x41\x01\x12G\n\x06\x61mount\x18\x04 \x01(\x0b\x32\x32.google.cloud.billing.budgets.v1beta1.BudgetAmountB\x03\xe0\x41\x02\x12Q\n\x0fthreshold_rules\x18\x05 \x03(\x0b\x32\x33.google.cloud.billing.budgets.v1beta1.ThresholdRuleB\x03\xe0\x41\x02\x12S\n\x10\x61ll_updates_rule\x18\x06 \x01(\x0b\x32\x34.google.cloud.billing.budgets.v1beta1.AllUpdatesRuleB\x03\xe0\x41\x01\x12\x11\n\x04\x65tag\x18\x07 \x01(\tB\x03\xe0\x41\x01:]\xea\x41Z\n$billingbudgets.googleapis.com/Budget\x12\x32\x62illingAccounts/{billing_account}/budgets/{budget}"\xa5\x01\n\x0c\x42udgetAmount\x12.\n\x10specified_amount\x18\x01 \x01(\x0b\x32\x12.google.type.MoneyH\x00\x12T\n\x12last_period_amount\x18\x02 \x01(\x0b\x32\x36.google.cloud.billing.budgets.v1beta1.LastPeriodAmountH\x00\x42\x0f\n\rbudget_amount"\x12\n\x10LastPeriodAmount"\xcd\x01\n\rThresholdRule\x12\x1e\n\x11threshold_percent\x18\x01 \x01(\x01\x42\x03\xe0\x41\x02\x12S\n\x0bspend_basis\x18\x02 \x01(\x0e\x32\x39.google.cloud.billing.budgets.v1beta1.ThresholdRule.BasisB\x03\xe0\x41\x01"G\n\x05\x42\x61sis\x12\x15\n\x11\x42\x41SIS_UNSPECIFIED\x10\x00\x12\x11\n\rCURRENT_SPEND\x10\x01\x12\x14\n\x10\x46ORECASTED_SPEND\x10\x02"H\n\x0e\x41llUpdatesRule\x12\x19\n\x0cpubsub_topic\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1b\n\x0eschema_version\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x90\x02\n\x06\x46ilter\x12\x15\n\x08projects\x18\x01 \x03(\tB\x03\xe0\x41\x01\x12\x66\n\x16\x63redit_types_treatment\x18\x04 \x01(\x0e\x32\x41.google.cloud.billing.budgets.v1beta1.Filter.CreditTypesTreatmentB\x03\xe0\x41\x01\x12\x15\n\x08services\x18\x03 \x03(\tB\x03\xe0\x41\x01"p\n\x14\x43reditTypesTreatment\x12&\n"CREDIT_TYPES_TREATMENT_UNSPECIFIED\x10\x00\x12\x17\n\x13INCLUDE_ALL_CREDITS\x10\x01\x12\x17\n\x13\x45XCLUDE_ALL_CREDITS\x10\x02\x42y\n(com.google.cloud.billing.budgets.v1beta1P\x01ZKgoogle.golang.org/genproto/googleapis/cloud/billing/budgets/v1beta1;budgetsb\x06proto3'
),
dependencies=[
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_type_dot_money__pb2.DESCRIPTOR,
],
)
_THRESHOLDRULE_BASIS = _descriptor.EnumDescriptor(
name="Basis",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.Basis",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="BASIS_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="CURRENT_SPEND", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="FORECASTED_SPEND",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=992,
serialized_end=1063,
)
_sym_db.RegisterEnumDescriptor(_THRESHOLDRULE_BASIS)
_FILTER_CREDITTYPESTREATMENT = _descriptor.EnumDescriptor(
name="CreditTypesTreatment",
full_name="google.cloud.billing.budgets.v1beta1.Filter.CreditTypesTreatment",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="CREDIT_TYPES_TREATMENT_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="INCLUDE_ALL_CREDITS",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="EXCLUDE_ALL_CREDITS",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1300,
serialized_end=1412,
)
_sym_db.RegisterEnumDescriptor(_FILTER_CREDITTYPESTREATMENT)
_BUDGET = _descriptor.Descriptor(
name="Budget",
full_name="google.cloud.billing.budgets.v1beta1.Budget",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.billing.budgets.v1beta1.Budget.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="display_name",
full_name="google.cloud.billing.budgets.v1beta1.Budget.display_name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="budget_filter",
full_name="google.cloud.billing.budgets.v1beta1.Budget.budget_filter",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="amount",
full_name="google.cloud.billing.budgets.v1beta1.Budget.amount",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="threshold_rules",
full_name="google.cloud.billing.budgets.v1beta1.Budget.threshold_rules",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="all_updates_rule",
full_name="google.cloud.billing.budgets.v1beta1.Budget.all_updates_rule",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="etag",
full_name="google.cloud.billing.budgets.v1beta1.Budget.etag",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b(
"\352AZ\n$billingbudgets.googleapis.com/Budget\0222billingAccounts/{billing_account}/budgets/{budget}"
),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=189,
serialized_end=667,
)
_BUDGETAMOUNT = _descriptor.Descriptor(
name="BudgetAmount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="specified_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.specified_amount",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_period_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.last_period_amount",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="budget_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.budget_amount",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=670,
serialized_end=835,
)
_LASTPERIODAMOUNT = _descriptor.Descriptor(
name="LastPeriodAmount",
full_name="google.cloud.billing.budgets.v1beta1.LastPeriodAmount",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=837,
serialized_end=855,
)
_THRESHOLDRULE = _descriptor.Descriptor(
name="ThresholdRule",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="threshold_percent",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.threshold_percent",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="spend_basis",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.spend_basis",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_THRESHOLDRULE_BASIS],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=858,
serialized_end=1063,
)
_ALLUPDATESRULE = _descriptor.Descriptor(
name="AllUpdatesRule",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="pubsub_topic",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule.pubsub_topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="schema_version",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule.schema_version",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1065,
serialized_end=1137,
)
_FILTER = _descriptor.Descriptor(
name="Filter",
full_name="google.cloud.billing.budgets.v1beta1.Filter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="projects",
full_name="google.cloud.billing.budgets.v1beta1.Filter.projects",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="credit_types_treatment",
full_name="google.cloud.billing.budgets.v1beta1.Filter.credit_types_treatment",
index=1,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="services",
full_name="google.cloud.billing.budgets.v1beta1.Filter.services",
index=2,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_FILTER_CREDITTYPESTREATMENT],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1140,
serialized_end=1412,
)
_BUDGET.fields_by_name["budget_filter"].message_type = _FILTER
_BUDGET.fields_by_name["amount"].message_type = _BUDGETAMOUNT
_BUDGET.fields_by_name["threshold_rules"].message_type = _THRESHOLDRULE
_BUDGET.fields_by_name["all_updates_rule"].message_type = _ALLUPDATESRULE
_BUDGETAMOUNT.fields_by_name[
"specified_amount"
].message_type = google_dot_type_dot_money__pb2._MONEY
_BUDGETAMOUNT.fields_by_name["last_period_amount"].message_type = _LASTPERIODAMOUNT
_BUDGETAMOUNT.oneofs_by_name["budget_amount"].fields.append(
_BUDGETAMOUNT.fields_by_name["specified_amount"]
)
_BUDGETAMOUNT.fields_by_name[
"specified_amount"
].containing_oneof = _BUDGETAMOUNT.oneofs_by_name["budget_amount"]
_BUDGETAMOUNT.oneofs_by_name["budget_amount"].fields.append(
_BUDGETAMOUNT.fields_by_name["last_period_amount"]
)
_BUDGETAMOUNT.fields_by_name[
"last_period_amount"
].containing_oneof = _BUDGETAMOUNT.oneofs_by_name["budget_amount"]
_THRESHOLDRULE.fields_by_name["spend_basis"].enum_type = _THRESHOLDRULE_BASIS
_THRESHOLDRULE_BASIS.containing_type = _THRESHOLDRULE
_FILTER.fields_by_name[
"credit_types_treatment"
].enum_type = _FILTER_CREDITTYPESTREATMENT
_FILTER_CREDITTYPESTREATMENT.containing_type = _FILTER
DESCRIPTOR.message_types_by_name["Budget"] = _BUDGET
DESCRIPTOR.message_types_by_name["BudgetAmount"] = _BUDGETAMOUNT
DESCRIPTOR.message_types_by_name["LastPeriodAmount"] = _LASTPERIODAMOUNT
DESCRIPTOR.message_types_by_name["ThresholdRule"] = _THRESHOLDRULE
DESCRIPTOR.message_types_by_name["AllUpdatesRule"] = _ALLUPDATESRULE
DESCRIPTOR.message_types_by_name["Filter"] = _FILTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Budget = _reflection.GeneratedProtocolMessageType(
"Budget",
(_message.Message,),
dict(
DESCRIPTOR=_BUDGET,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""A budget is a plan that describes what you expect to spend on Cloud
projects, plus the rules to execute as spend is tracked against that
plan, (for example, send an alert when 90% of the target spend is met).
Currently all plans are monthly budgets so the usage period(s) tracked
are implied (calendar months of usage back-to-back).
Attributes:
name:
Output only. Resource name of the budget. The resource name
implies the scope of a budget. Values are of the form
``billingAccounts/{billingAccountId}/budgets/{budgetId}``.
display_name:
User data for display name in UI. Validation: <= 60 chars.
budget_filter:
Optional. Filters that define which resources are used to
compute the actual spend against the budget.
amount:
Required. Budgeted amount.
threshold_rules:
Required. Rules that trigger alerts (notifications of
thresholds being crossed) when spend exceeds the specified
percentages of the budget.
all_updates_rule:
Optional. Rules to apply to all updates to the actual spend,
regardless of the thresholds set in ``threshold_rules``.
etag:
Optional. Etag to validate that the object is unchanged for a
read-modify-write operation. An empty etag will cause an
update to overwrite other changes.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.Budget)
),
)
_sym_db.RegisterMessage(Budget)
BudgetAmount = _reflection.GeneratedProtocolMessageType(
"BudgetAmount",
(_message.Message,),
dict(
DESCRIPTOR=_BUDGETAMOUNT,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""The budgeted amount for each usage period.
Attributes:
budget_amount:
Specification for what amount to use as the budget.
specified_amount:
A specified amount to use as the budget. ``currency_code`` is
optional. If specified, it must match the currency of the
billing account. The ``currency_code`` is provided on output.
last_period_amount:
Use the last period's actual spend as the budget for the
present period.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.BudgetAmount)
),
)
_sym_db.RegisterMessage(BudgetAmount)
LastPeriodAmount = _reflection.GeneratedProtocolMessageType(
"LastPeriodAmount",
(_message.Message,),
dict(
DESCRIPTOR=_LASTPERIODAMOUNT,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""Describes a budget amount targeted to last period's spend. At this time,
the amount is automatically 100% of last period's spend; that is, there
are no other options yet. Future configuration will be described here
(for example, configuring a percentage of last period's spend).
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.LastPeriodAmount)
),
)
_sym_db.RegisterMessage(LastPeriodAmount)
ThresholdRule = _reflection.GeneratedProtocolMessageType(
"ThresholdRule",
(_message.Message,),
dict(
DESCRIPTOR=_THRESHOLDRULE,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""ThresholdRule contains a definition of a threshold which triggers an
alert (a notification of a threshold being crossed) to be sent when
spend goes above the specified amount. Alerts are automatically e-mailed
to users with the Billing Account Administrator role or the Billing
Account User role. The thresholds here have no effect on notifications
sent to anything configured under ``Budget.all_updates_rule``.
Attributes:
threshold_percent:
Required. Send an alert when this threshold is exceeded. This
is a 1.0-based percentage, so 0.5 = 50%. Validation: non-
negative number.
spend_basis:
Optional. The type of basis used to determine if spend has
passed the threshold. Behavior defaults to CURRENT\_SPEND if
not set.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.ThresholdRule)
),
)
_sym_db.RegisterMessage(ThresholdRule)
AllUpdatesRule = _reflection.GeneratedProtocolMessageType(
"AllUpdatesRule",
(_message.Message,),
dict(
DESCRIPTOR=_ALLUPDATESRULE,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""AllUpdatesRule defines notifications that are sent on every update to
the billing account's spend, regardless of the thresholds defined using
threshold rules.
Attributes:
pubsub_topic:
Required. The name of the Cloud Pub/Sub topic where budget
related messages will be published, in the form
``projects/{project_id}/topics/{topic_id}``. Updates are sent
at regular intervals to the topic. The topic needs to be
created before the budget is created; see
https://cloud.google.com/billing/docs/how-to/budgets#manage-
notifications for more details. Caller is expected to have
``pubsub.topics.setIamPolicy`` permission on the topic when
it's set for a budget, otherwise, the API call will fail with
PERMISSION\_DENIED. See
https://cloud.google.com/pubsub/docs/access-control for more
details on Pub/Sub roles and permissions.
schema_version:
Required. The schema version of the notification. Only "1.0"
is accepted. It represents the JSON schema as defined in
https://cloud.google.com/billing/docs/how-
to/budgets#notification\_format
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.AllUpdatesRule)
),
)
_sym_db.RegisterMessage(AllUpdatesRule)
Filter = _reflection.GeneratedProtocolMessageType(
"Filter",
(_message.Message,),
dict(
DESCRIPTOR=_FILTER,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""A filter for a budget, limiting the scope of the cost to calculate.
Attributes:
projects:
Optional. A set of projects of the form
``projects/{project_id}``, specifying that usage from only
this set of projects should be included in the budget. If
omitted, the report will include all usage for the billing
account, regardless of which project the usage occurred on.
Only zero or one project can be specified currently.
credit_types_treatment:
Optional. If not set, default behavior is
``INCLUDE_ALL_CREDITS``.
services:
Optional. A set of services of the form
``services/{service_id}``, specifying that usage from only
this set of services should be included in the budget. If
omitted, the report will include usage for all the services.
The service names are available through the Catalog API:
https://cloud.google.com/billing/v1/how-tos/catalog-api.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.Filter)
),
)
_sym_db.RegisterMessage(Filter)
DESCRIPTOR._options = None
_BUDGET.fields_by_name["name"]._options = None
_BUDGET.fields_by_name["budget_filter"]._options = None
_BUDGET.fields_by_name["amount"]._options = None
_BUDGET.fields_by_name["threshold_rules"]._options = None
_BUDGET.fields_by_name["all_updates_rule"]._options = None
_BUDGET.fields_by_name["etag"]._options = None
_BUDGET._options = None
_THRESHOLDRULE.fields_by_name["threshold_percent"]._options = None
_THRESHOLDRULE.fields_by_name["spend_basis"]._options = None
_ALLUPDATESRULE.fields_by_name["pubsub_topic"]._options = None
_ALLUPDATESRULE.fields_by_name["schema_version"]._options = None
_FILTER.fields_by_name["projects"]._options = None
_FILTER.fields_by_name["credit_types_treatment"]._options = None
_FILTER.fields_by_name["services"]._options = None
# @@protoc_insertion_point(module_scope)
| 36.849081 | 2,327 | 0.654795 |
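Generated modules like this one are consumed by instantiating the registered message classes; a sketch with illustrative field values:
from google.type import money_pb2
budget = Budget(
    display_name="monthly-ml-budget",
    amount=BudgetAmount(
        specified_amount=money_pb2.Money(currency_code="USD", units=500)),
    threshold_rules=[ThresholdRule(
        threshold_percent=0.9, spend_basis=ThresholdRule.CURRENT_SPEND)],
)
print(budget.amount.specified_amount.units)  # 500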
734e2605b9fe6651d724a46a3b07b21d5d438537 | 4,010 | py | Python | torchreid/optim/sam.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
] | 1 | 2020-07-07T19:22:17.000Z | 2020-07-07T19:22:17.000Z | torchreid/optim/sam.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
] | 1 | 2020-06-04T15:22:09.000Z | 2020-06-04T15:22:09.000Z | torchreid/optim/sam.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
] | 4 | 2020-07-02T09:23:11.000Z | 2020-08-21T08:24:13.000Z | # Copyright 2020 Google Research
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
'''
Imported from: https://github.com/google-research/sam
'''
import torch
| 37.830189 | 131 | 0.56783 |
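The class body is truncated in this dump, but upstream SAM implementations expose a two-step interface; a sketch of the usual training step under that assumption (first_step/second_step and their arguments are taken on faith from the referenced repository, and model/criterion/inputs/targets are placeholders):
optimizer = SAM(model.parameters(), base_optimizer=torch.optim.SGD, rho=0.05, lr=0.1)
loss = criterion(model(inputs), targets)
loss.backward()
optimizer.first_step(zero_grad=True)   # climb to the adversarial point w + e(w)
criterion(model(inputs), targets).backward()
optimizer.second_step(zero_grad=True)  # base-optimizer update at the perturbed point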
735158944908fbafce88d97668526717a22003eb | 11,958 | py | Python | src/konfiger_stream.py | konfiger/konfiger-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
] | 4 | 2019-09-25T02:18:43.000Z | 2020-01-21T19:16:05.000Z | src/konfiger_stream.py | keyvaluedb/key-value-db-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
] | null | null | null | src/konfiger_stream.py | keyvaluedb/key-value-db-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
] | null | null | null |
"""
The MIT License
Copyright 2020 Adewale Azeez <azeezadewale98@gmail.com>.
"""
import os.path
from .konfiger_util import type_of, is_string, is_char, is_bool, escape_string, un_escape_string
| 45.816092 | 150 | 0.511373 |
735211327e137e292f3ce5c7750409c77a35d0dd | 2,674 | py | Python | matematik.py | Drummersbrother/math_for_school | 5eaa22320298d59b770e68b9640b34a525875132 | [
"MIT"
] | null | null | null | matematik.py | Drummersbrother/math_for_school | 5eaa22320298d59b770e68b9640b34a525875132 | [
"MIT"
] | null | null | null | matematik.py | Drummersbrother/math_for_school | 5eaa22320298d59b770e68b9640b34a525875132 | [
"MIT"
] | null | null | null | import math
import numpy as np
import collections
import scipy.stats as sst
import matplotlib.pyplot as plt
def list_or_starargs(func):
"""This is a decorator to specify that a function either takes iterable input in the form of an iterable or a list of passed arguments.
If other arguments are needed, the function will need to use kwargs.
This passes the list as the first argument."""
    def decorated(*args, **kwargs):
        # Reconstructed (the inner function body is truncated in this dump):
        # a single iterable positional argument is passed through as a list,
        # otherwise all positional arguments are collected into the list.
        if len(args) == 1 and hasattr(args[0], "__iter__"):
            return func(list(args[0]), **kwargs)
        return func(list(args), **kwargs)
    return decorated
def medel(data):
    """Returns the arithmetic mean of the input data ("medel" is Swedish for mean).
    Reconstructed here because the original definition is truncated in this dump."""
    return sum(data) / len(data)
def standardav(data, stick=False):
"""Returns the standard deviation of the input data, which has to be an iterable. stick specifies if it should be treated like
non-total set of values (divide by n-1 instead of n)."""
div_by = len(data) if (not stick) else (len(data) - 1)
medelv = medel(data)
return math.sqrt(sum([(val-medelv)**2 for val in data]) / div_by)
def normal_d(x, u, o):
"""Returns the value of a normal/standard distribution at the value x. u is Mu, and o is the standard deviation."""
return (1 / (o * math.sqrt(2*math.pi))) * (math.e ** (-(((x-u)**2) / (2 * (o**2)))))
| 33.848101 | 139 | 0.628646 |
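Quick check of the helpers above (values chosen so the results are exact):
data = [2, 4, 4, 4, 5, 5, 7, 9]
print(medel(data))                  # 5.0
print(standardav(data))             # 2.0 (population standard deviation)
print(round(normal_d(0, 0, 1), 4))  # 0.3989, the peak of the standard normal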
7352cdca72cd11a42b689b908ad454fb587ad295 | 5,362 | py | Python | docly/ioutils/__init__.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
] | 29 | 2020-12-31T08:27:32.000Z | 2022-02-15T08:48:51.000Z | docly/ioutils/__init__.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
] | 4 | 2020-12-30T18:18:54.000Z | 2021-08-03T14:42:35.000Z | docly/ioutils/__init__.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
] | 2 | 2022-01-04T17:58:22.000Z | 2022-02-05T13:04:14.000Z | import os
from pathlib import Path
import requests
import shutil
import sys
from distutils.version import LooseVersion
import time
from tqdm import tqdm
from docly.parser import parser as py_parser
from docly.tokenizers import tokenize_code_string
from docly import __version__
# from c2nl.objects import Code
UPDATE_CHECK_URL = "http://3.80.2.138:8584/vercheck/check-version/"
# UPDATE_CHECK_URL = "http://127.0.0.1:5000/vercheck/check-version/"
interaction_cache = lambda : Path(Path.home() / ".docly" / "interaction_cache")
CACHE_DIR = (Path().home() / ".docly" / "file_cache")
cache_exists = lambda : CACHE_DIR.exists()
make_cache_dir = lambda : os.mkdir(str(CACHE_DIR))
def download_from_url(url, dst):
"""
@param: url to download file
@param: dst place to put the file
"""
file_size = int(requests.head(url).headers["Content-Length"])
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=dst.split('/')[-1])
req = requests.get(url, headers=header, stream=True)
with(open(dst, 'ab')) as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return file_size
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes", "no", or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '{}}'".format(default))
while True:
print(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | 31.356725 | 82 | 0.610966 |
735333a22976a616b2a1727e7723502f1d1387bb | 21 | bzl | Python | java/version.bzl | symonk/selenium | 38e89630f1c4b1a0d3ac6e17765a6ccb58160f83 | [
"Apache-2.0"
] | null | null | null | java/version.bzl | symonk/selenium | 38e89630f1c4b1a0d3ac6e17765a6ccb58160f83 | [
"Apache-2.0"
] | null | null | null | java/version.bzl | symonk/selenium | 38e89630f1c4b1a0d3ac6e17765a6ccb58160f83 | [
"Apache-2.0"
] | null | null | null | SE_VERSION = "4.2.1"
| 10.5 | 20 | 0.619048 |
735414eb5a0cf25ba65326dd7cc3a0b2acaea272 | 2,978 | py | Python | scripts/preprocess_for_prediction.py | jmueller95/deepgrind | 1fdca224a5256b820fa817a529e79b70c8808d65 | [
"Apache-2.0"
] | null | null | null | scripts/preprocess_for_prediction.py | jmueller95/deepgrind | 1fdca224a5256b820fa817a529e79b70c8808d65 | [
"Apache-2.0"
] | null | null | null | scripts/preprocess_for_prediction.py | jmueller95/deepgrind | 1fdca224a5256b820fa817a529e79b70c8808d65 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import utils
def main():
# Parse the input file:
comet_df = pd.read_csv(snakemake.input[0], sep="\t", header=0,
usecols=["Peptide", "Charge1", "Charge2", "Charge3"],
index_col=False)
# Determine if MSMS and RT prediction will be performed jointly or separately
if "msms_model" in dict(snakemake.params) and "rt_model" in dict(snakemake.params):
_convert_for_msms(comet_df, style=snakemake.params['msms_model'].lower(),
output=snakemake.output['msms_prediction_input'])
_convert_for_rt(comet_df, style=snakemake.params['rt_model'].lower(),
output=snakemake.output['rt_prediction_input'])
else:
# If only one model was supplied, the prediction will be joint
# Only convert the input for msms in that case
_convert_for_msms(comet_df, style=snakemake.params['model'].lower(),
output=snakemake.output['prediction_input'])
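# The _convert_for_msms and _convert_for_rt helpers called above are not shown
# in this excerpt. A purely hypothetical placeholder sketch of their expected
# shape (the real conversion logic depends on each model's input format):
def _convert_for_msms(comet_df, style, output):
    # Placeholder: reformat the Comet peptide/charge table for the given
    # MSMS model style and write it to ``output``.
    comet_df.to_csv(output, sep="\t", index=False)
def _convert_for_rt(comet_df, style, output):
    # Placeholder: reformat the Comet peptide table for the given RT model style.
    comet_df.to_csv(output, sep="\t", index=False)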
if __name__ == '__main__':
main()
| 46.53125 | 131 | 0.624244 |
7354ded194b9ee5cde59d94c66a6556bf76f8b32 | 1,497 | py | Python | GettingStarted/gettingstarted.py | rohitp934/roadtoadatascientist | 50724b63c2692659cdd48e9ed20e856c231695fd | [
"MIT"
] | null | null | null | GettingStarted/gettingstarted.py | rohitp934/roadtoadatascientist | 50724b63c2692659cdd48e9ed20e856c231695fd | [
"MIT"
] | null | null | null | GettingStarted/gettingstarted.py | rohitp934/roadtoadatascientist | 50724b63c2692659cdd48e9ed20e856c231695fd | [
"MIT"
] | null | null | null | #importing necessary modules
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# Data and labels
Xtrain = [[182, 80, 34], [176, 70, 33], [161, 60, 28], [154, 55, 27], [166, 63, 30], [189, 90, 36], [175, 63, 28], [177, 71, 30], [159, 52, 27], [171, 72, 32], [181, 85, 34]]
Ytrain = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male']
Xval = [[163, 62, 28], [182, 80, 35], [150, 50, 24], [160, 57, 27], [175, 62, 30], [183, 67, 32], [177, 64, 29], [164, 62, 29], [157, 53, 23], [170, 73, 32], [169, 59, 29]]
Yval = ['female', 'male', 'female', 'female', 'male', 'male', 'female', 'female',
'female', 'male', 'female']
# initializing the ML models
knn = KNeighborsClassifier()
perceptron = Perceptron()
# Fitting the models
knn.fit(Xtrain, Ytrain)
perceptron.fit(Xtrain, Ytrain)
# Testing using our input data
pred_knn = knn.predict(Xval)
acc_knn = accuracy_score(Yval, pred_knn) * 100
print(f'Accuracy for knn: {acc_knn}')
pred_perceptron = perceptron.predict(Xval)
acc_perceptron = accuracy_score(Yval, pred_perceptron) * 100
print(f'Accuracy for perceptron: {acc_perceptron}')
# The best classifier out of the two models
index = np.argmax([acc_knn, acc_perceptron])
#argmax function assigns the index of the maximum value to the variable
classifiers = {0: 'KNN', 1:'PER'}
print(f'Best gender classifier is {classifiers[index]}')
| 38.384615 | 174 | 0.676687 |
7356af2b787834d2216080e3079e961a0d62871f | 909 | py | Python | libs/optimizers.py | bxtkezhan/AILabs | 6328aa65a3ce5f450389a5a848b641ba36f0e9c5 | [
"MIT"
] | null | null | null | libs/optimizers.py | bxtkezhan/AILabs | 6328aa65a3ce5f450389a5a848b641ba36f0e9c5 | [
"MIT"
] | null | null | null | libs/optimizers.py | bxtkezhan/AILabs | 6328aa65a3ce5f450389a5a848b641ba36f0e9c5 | [
"MIT"
] | null | null | null | import numpy as np
| 31.344828 | 72 | 0.563256 |
735716881d6460c9c4e13489b7256920b070c665 | 122,809 | py | Python | pennylane/transforms/qcut.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | pennylane/transforms/qcut.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | pennylane/transforms/qcut.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Xanadu Quantum Technologies Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions for performing quantum circuit cutting.
"""
import copy
import inspect
import string
import uuid
import warnings
from collections.abc import Sequence as SequenceType
from dataclasses import InitVar, dataclass
from functools import partial
from itertools import compress, product
from pathlib import Path
from typing import Any, Callable, ClassVar, Dict, List, Optional, Sequence, Tuple, Union
from networkx import MultiDiGraph, has_path, weakly_connected_components
import pennylane as qml
from pennylane import apply, expval
from pennylane import numpy as np
from pennylane.grouping import string_to_pauli_word
from pennylane.measurements import Expectation, MeasurementProcess, Sample
from pennylane.operation import Operation, Operator, Tensor
from pennylane.ops.qubit.non_parametric_ops import WireCut
from pennylane.tape import QuantumTape
from pennylane.wires import Wires
from .batch_transform import batch_transform
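# The MeasureNode and PrepareNode placeholder operations used throughout this
# module are not shown in this excerpt. A minimal sketch consistent with their
# usage below (single-wire placeholders whose ``id`` defaults to a fresh UUID
# so that cut pairs can later be matched up) might look like:
class MeasureNode(Operation):
    """Placeholder node for measurement operations that occur when a wire is cut."""
    num_wires = 1
    grad_method = None
    def __init__(self, *params, wires=None, do_queue=True, id=None):
        id = id or str(uuid.uuid4())  # unique ID used to pair measure/prepare nodes
        super().__init__(*params, wires=wires, do_queue=do_queue, id=id)
class PrepareNode(Operation):
    """Placeholder node for state preparations that occur when a wire is cut."""
    num_wires = 1
    grad_method = None
    def __init__(self, *params, wires=None, do_queue=True, id=None):
        id = id or str(uuid.uuid4())
        super().__init__(*params, wires=wires, do_queue=do_queue, id=id)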
def replace_wire_cut_node(node: WireCut, graph: MultiDiGraph):
"""
Replace a :class:`~.WireCut` node in the graph with a :class:`~.MeasureNode`
and :class:`~.PrepareNode`.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
node (WireCut): the :class:`~.WireCut` node to be replaced with a :class:`~.MeasureNode`
and :class:`~.PrepareNode`
graph (nx.MultiDiGraph): the graph containing the node to be replaced
**Example**
Consider the following circuit with a manually-placed wire cut:
.. code-block:: python
wire_cut = qml.WireCut(wires=0)
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut)
qml.RY(0.5, wires=0)
qml.expval(qml.PauliZ(0))
We can find the circuit graph and remove the wire cut node using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_node(wire_cut, graph)
"""
predecessors = graph.pred[node]
successors = graph.succ[node]
predecessor_on_wire = {}
for op, data in predecessors.items():
for d in data.values():
wire = d["wire"]
predecessor_on_wire[wire] = op
successor_on_wire = {}
for op, data in successors.items():
for d in data.values():
wire = d["wire"]
successor_on_wire[wire] = op
order = graph.nodes[node]["order"]
graph.remove_node(node)
for wire in node.wires:
predecessor = predecessor_on_wire.get(wire, None)
successor = successor_on_wire.get(wire, None)
meas = MeasureNode(wires=wire)
prep = PrepareNode(wires=wire)
# We are introducing a degeneracy in the order of the measure and prepare nodes
# here but the order can be inferred as MeasureNode always precedes
# the corresponding PrepareNode
graph.add_node(meas, order=order)
graph.add_node(prep, order=order)
graph.add_edge(meas, prep, wire=wire)
if predecessor is not None:
graph.add_edge(predecessor, meas, wire=wire)
if successor is not None:
graph.add_edge(prep, successor, wire=wire)
def replace_wire_cut_nodes(graph: MultiDiGraph):
"""
Replace each :class:`~.WireCut` node in the graph with a
:class:`~.MeasureNode` and :class:`~.PrepareNode`.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): The graph containing the :class:`~.WireCut` nodes
to be replaced
**Example**
Consider the following circuit with manually-placed wire cuts:
.. code-block:: python
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the circuit graph and remove all the wire cut nodes using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
"""
for op in list(graph.nodes):
if isinstance(op, WireCut):
replace_wire_cut_node(op, graph)
def _add_operator_node(graph: MultiDiGraph, op: Operator, order: int, wire_latest_node: dict):
"""
Helper function to add operators as nodes during tape to graph conversion.
"""
graph.add_node(op, order=order)
for wire in op.wires:
if wire_latest_node[wire] is not None:
parent_op = wire_latest_node[wire]
graph.add_edge(parent_op, op, wire=wire)
wire_latest_node[wire] = op
def tape_to_graph(tape: QuantumTape) -> MultiDiGraph:
"""
Converts a quantum tape to a directed multigraph.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): tape to be converted into a directed multigraph
Returns:
nx.MultiDiGraph: a directed multigraph that captures the circuit structure
of the input tape
**Example**
Consider the following tape:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.9, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
Its corresponding circuit graph can be found using
>>> qml.transforms.qcut.tape_to_graph(tape)
<networkx.classes.multidigraph.MultiDiGraph at 0x7fe41cbd7210>
"""
graph = MultiDiGraph()
wire_latest_node = {w: None for w in tape.wires}
for order, op in enumerate(tape.operations):
_add_operator_node(graph, op, order, wire_latest_node)
order += 1 # pylint: disable=undefined-loop-variable
for m in tape.measurements:
obs = getattr(m, "obs", None)
if obs is not None and isinstance(obs, Tensor):
if m.return_type is Sample:
raise ValueError(
"Sampling from tensor products of observables "
"is not supported in circuit cutting"
)
for o in obs.obs:
m_ = MeasurementProcess(m.return_type, obs=o)
_add_operator_node(graph, m_, order, wire_latest_node)
elif m.return_type is Sample and obs is None:
for w in m.wires:
s_ = qml.sample(qml.Projector([1], wires=w))
_add_operator_node(graph, s_, order, wire_latest_node)
else:
_add_operator_node(graph, m, order, wire_latest_node)
order += 1
return graph
# pylint: disable=too-many-branches
def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a graph into a collection of subgraphs as well as returning
the communication (`quotient <https://en.wikipedia.org/wiki/Quotient_graph>`__)
graph.
The input ``graph`` is fragmented by disconnecting each :class:`~.MeasureNode` and
:class:`~.PrepareNode` pair and finding the resultant disconnected subgraph fragments.
Each node of the communication graph represents a subgraph fragment and the edges
denote the flow of qubits between fragments due to the removed :class:`~.MeasureNode` and
:class:`~.PrepareNode` pairs.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
Tuple[Tuple[nx.MultiDiGraph], nx.MultiDiGraph]: the subgraphs of the cut graph
and the communication graph.
**Example**
Consider the following circuit with manually-placed wire cuts:
.. code-block:: python
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
>>> qml.transforms.qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
graph_copy = graph.copy()
cut_edges = []
measure_nodes = [n for n in graph.nodes if isinstance(n, MeasurementProcess)]
for node1, node2, wire_key in graph.edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2, wire_key))
graph_copy.remove_edge(node1, node2, key=wire_key)
subgraph_nodes = weakly_connected_components(graph_copy)
subgraphs = tuple(MultiDiGraph(graph_copy.subgraph(n)) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2, _ in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
if start_fragment != end_fragment:
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
else:
# The MeasureNode and PrepareNode pair live in the same fragment and did not result
# in a disconnection. We can therefore remove these nodes. Note that we do not need
# to worry about adding back an edge between the predecessor to node1 and the successor
# to node2 because our next step is to convert the fragment circuit graphs to tapes,
# a process that does not depend on edge connections in the subgraph.
subgraphs[start_fragment].remove_node(node1)
subgraphs[end_fragment].remove_node(node2)
terminal_indices = [i for i, s in enumerate(subgraphs) for n in measure_nodes if s.has_node(n)]
subgraphs_connected_to_measurements = []
subgraphs_indices_to_remove = []
prepare_nodes_removed = []
for i, s in enumerate(subgraphs):
if any(has_path(communication_graph, i, t) for t in terminal_indices):
subgraphs_connected_to_measurements.append(s)
else:
subgraphs_indices_to_remove.append(i)
prepare_nodes_removed.extend([n for n in s.nodes if isinstance(n, PrepareNode)])
measure_nodes_to_remove = [
m for p in prepare_nodes_removed for m, p_, _ in cut_edges if p is p_
]
communication_graph.remove_nodes_from(subgraphs_indices_to_remove)
for m in measure_nodes_to_remove:
for s in subgraphs_connected_to_measurements:
if s.has_node(m):
s.remove_node(m)
return subgraphs_connected_to_measurements, communication_graph
def _find_new_wire(wires: Wires) -> int:
"""Finds a new wire label that is not in ``wires``."""
ctr = 0
while ctr in wires:
ctr += 1
return ctr
# pylint: disable=protected-access
def graph_to_tape(graph: MultiDiGraph) -> QuantumTape:
"""
Converts a directed multigraph to the corresponding :class:`~.QuantumTape`.
To account for the possibility of needing to perform mid-circuit measurements, if any operations
follow a :class:`MeasureNode` operation on a given wire then these operations are mapped to a
new wire.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): directed multigraph to be converted to a tape
Returns:
QuantumTape: the quantum tape corresponding to the input graph
**Example**
Consider the following circuit:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.transforms.qcut.MeasureNode(wires=1)
qml.transforms.qcut.PrepareNode(wires=1)
qml.CNOT(wires=[1, 0])
qml.expval(qml.PauliZ(0))
This circuit contains operations that follow a :class:`~.MeasureNode`. These operations will
subsequently act on wire ``2`` instead of wire ``1``:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> tape = qml.transforms.qcut.graph_to_tape(graph)
>>> print(tape.draw())
0: ──RX(0.4)──╭C───────────────╭X──┤ ⟨Z⟩
1: ──RY(0.5)──╰X──MeasureNode──│───┤
2: ──PrepareNode───────────────╰C──┤
"""
wires = Wires.all_wires([n.wires for n in graph.nodes])
ordered_ops = sorted(
[(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0]
)
wire_map = {w: w for w in wires}
reverse_wire_map = {v: k for k, v in wire_map.items()}
copy_ops = [copy.copy(op) for _, op in ordered_ops if not isinstance(op, MeasurementProcess)]
copy_meas = [copy.copy(op) for _, op in ordered_ops if isinstance(op, MeasurementProcess)]
observables = []
with QuantumTape() as tape:
for op in copy_ops:
new_wires = Wires([wire_map[w] for w in op.wires])
# TODO: find a better way to update operation wires
op._wires = new_wires
apply(op)
if isinstance(op, MeasureNode):
assert len(op.wires) == 1
measured_wire = op.wires[0]
new_wire = _find_new_wire(wires)
wires += new_wire
original_wire = reverse_wire_map[measured_wire]
wire_map[original_wire] = new_wire
reverse_wire_map[new_wire] = original_wire
if copy_meas:
return_types = set(meas.return_type for meas in copy_meas)
if len(return_types) > 1:
raise ValueError(
"Only a single return type can be used for measurement "
"nodes in graph_to_tape"
)
return_type = return_types.pop()
if return_type not in {Sample, Expectation}:
raise ValueError(
"Invalid return type. Only expectation value and sampling measurements "
"are supported in graph_to_tape"
)
for meas in copy_meas:
obs = meas.obs
obs._wires = Wires([wire_map[w] for w in obs.wires])
observables.append(obs)
if return_type is Sample:
apply(meas)
if return_type is Expectation:
if len(observables) > 1:
qml.expval(Tensor(*observables))
else:
qml.expval(obs)
return tape
def _get_measurements(
group: Sequence[Operator], measurements: Sequence[MeasurementProcess]
) -> List[MeasurementProcess]:
"""Pairs each observable in ``group`` with the circuit ``measurements``.
Only a single measurement of an expectation value is currently supported
in ``measurements``.
Args:
group (Sequence[Operator]): a collection of observables
measurements (Sequence[MeasurementProcess]): measurements from the circuit
Returns:
List[MeasurementProcess]: the expectation values of ``g @ obs``, where ``g`` is iterated
over ``group`` and ``obs`` is the observable composing the single measurement
in ``measurements``
"""
if len(group) == 0:
# This ensures the measurements of the original tape are carried over to the
# following tape configurations in the absence of any MeasureNodes in the fragment
return measurements
n_measurements = len(measurements)
if n_measurements > 1:
raise ValueError(
"The circuit cutting workflow only supports circuits with a single output "
"measurement"
)
if n_measurements == 0:
return [expval(g) for g in group]
measurement = measurements[0]
if measurement.return_type is not Expectation:
raise ValueError(
"The circuit cutting workflow only supports circuits with expectation "
"value measurements"
)
obs = measurement.obs
return [expval(copy.copy(obs) @ g) for g in group]
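# The four single-wire state-preparation helpers referenced in PREPARE_SETTINGS
# below are not shown in this excerpt. Hedged sketches consistent with the
# I, X, H and H-S preparations visible in the expand_fragment_tape docstring
# example:
def _prep_zero_state(wire):
    qml.Identity(wire)  # |0>
def _prep_one_state(wire):
    qml.PauliX(wire)  # |1>
def _prep_plus_state(wire):
    qml.Hadamard(wire)  # |+>
def _prep_iplus_state(wire):
    qml.Hadamard(wire)  # |+i> = S H |0>
    qml.S(wires=wire)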
PREPARE_SETTINGS = [_prep_zero_state, _prep_one_state, _prep_plus_state, _prep_iplus_state]
def expand_fragment_tape(
tape: QuantumTape,
) -> Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]:
"""
Expands a fragment tape into a sequence of tapes for each configuration of the contained
:class:`MeasureNode` and :class:`PrepareNode` operations.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): the fragment tape containing :class:`MeasureNode` and
:class:`PrepareNode` operations to be expanded
Returns:
Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]: the
tapes corresponding to each configuration and the order of preparation nodes and
measurement nodes used in the expansion
**Example**
Consider the following circuit, which contains a :class:`~.MeasureNode` and
:class:`~.PrepareNode` operation:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.transforms.qcut.PrepareNode(wires=0)
qml.RX(0.5, wires=0)
qml.transforms.qcut.MeasureNode(wires=0)
We can expand over the measurement and preparation nodes using:
>>> tapes, prep, meas = qml.transforms.qcut.expand_fragment_tape(tape)
>>> for t in tapes:
... print(qml.drawer.tape_text(t, decimals=1))
0: ──I──RX(0.5)─┤  <I>  <Z>
0: ──I──RX(0.5)─┤  <X>
0: ──I──RX(0.5)─┤  <Y>
0: ──X──RX(0.5)─┤  <I>  <Z>
0: ──X──RX(0.5)─┤  <X>
0: ──X──RX(0.5)─┤  <Y>
0: ──H──RX(0.5)─┤  <I>  <Z>
0: ──H──RX(0.5)─┤  <X>
0: ──H──RX(0.5)─┤  <Y>
0: ──H──S──RX(0.5)─┤  <I>  <Z>
0: ──H──S──RX(0.5)─┤  <X>
0: ──H──S──RX(0.5)─┤  <Y>
"""
prepare_nodes = [o for o in tape.operations if isinstance(o, PrepareNode)]
measure_nodes = [o for o in tape.operations if isinstance(o, MeasureNode)]
wire_map = {mn.wires[0]: i for i, mn in enumerate(measure_nodes)}
n_meas = len(measure_nodes)
if n_meas >= 1:
measure_combinations = qml.grouping.partition_pauli_group(len(measure_nodes))
else:
measure_combinations = [[""]]
tapes = []
for prepare_settings in product(range(len(PREPARE_SETTINGS)), repeat=len(prepare_nodes)):
for measure_group in measure_combinations:
if n_meas >= 1:
group = [
string_to_pauli_word(paulis, wire_map=wire_map) for paulis in measure_group
]
else:
group = []
prepare_mapping = {
n: PREPARE_SETTINGS[s] for n, s in zip(prepare_nodes, prepare_settings)
}
with QuantumTape() as tape_:
for op in tape.operations:
if isinstance(op, PrepareNode):
w = op.wires[0]
prepare_mapping[op](w)
elif not isinstance(op, MeasureNode):
apply(op)
with qml.tape.stop_recording():
measurements = _get_measurements(group, tape.measurements)
for meas in measurements:
apply(meas)
tapes.append(tape_)
return tapes, prepare_nodes, measure_nodes
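# The remaining Monte Carlo preparation and measurement helpers referenced in
# MC_STATES/MC_MEASUREMENTS below are likewise not shown. Hedged sketches
# consistent with their use (|-> and |-i> preparations, and mid-circuit samples
# of Pauli observables with +/-1 eigenvalues, as asserted in
# qcut_processing_fn_mc):
def _prep_minus_state(wire):
    qml.PauliX(wire)  # |-> = H X |0>
    qml.Hadamard(wire)
def _prep_iminus_state(wire):
    qml.PauliX(wire)  # |-i> = S H X |0>
    qml.Hadamard(wire)
    qml.S(wires=wire)
def _identity(wire):
    qml.sample(qml.Identity(wires=wire))
def _pauliX(wire):
    qml.sample(qml.PauliX(wires=wire))
def _pauliY(wire):
    qml.sample(qml.PauliY(wires=wire))
def _pauliZ(wire):
    qml.sample(qml.PauliZ(wires=wire))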
MC_STATES = [
_prep_zero_state,
_prep_one_state,
_prep_plus_state,
_prep_minus_state,
_prep_iplus_state,
_prep_iminus_state,
_prep_zero_state,
_prep_one_state,
]
MC_MEASUREMENTS = [
_identity,
_identity,
_pauliX,
_pauliX,
_pauliY,
_pauliY,
_pauliZ,
_pauliZ,
]
def expand_fragment_tapes_mc(
tapes: Sequence[QuantumTape], communication_graph: MultiDiGraph, shots: int
) -> Tuple[List[QuantumTape], np.ndarray]:
"""
Expands fragment tapes into a sequence of random configurations of the contained pairs of
:class:`MeasureNode` and :class:`PrepareNode` operations.
For each pair, a measurement is sampled from
the Pauli basis and a state preparation is sampled from the corresponding pair of eigenstates.
A settings array is also given which tracks the configuration pairs. Since each of the 4
measurements has 2 possible eigenvectors, all configurations can be uniquely identified by
8 values. The number of rows is determined by the number of cuts and the number of columns
is determined by the number of shots.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`~.cut_circuit_mc` transform for more details.
Args:
tapes (Sequence[QuantumTape]): the fragment tapes containing :class:`MeasureNode` and
:class:`PrepareNode` operations to be expanded
communication_graph (nx.MultiDiGraph): the communication (quotient) graph of the fragmented
full graph
shots (int): number of shots
Returns:
Tuple[List[QuantumTape], np.ndarray]: the tapes corresponding to each configuration and the
settings that track each configuration pair
**Example**
Consider the following circuit that contains a sample measurement:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.sample(wires=[0, 1, 2])
We can generate the fragment tapes using the following workflow:
>>> g = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(g)
>>> subgraphs, communication_graph = qml.transforms.qcut.fragment_graph(g)
>>> tapes = [qml.transforms.qcut.graph_to_tape(sg) for sg in subgraphs]
We can then expand over the measurement and preparation nodes to generate random
configurations using:
.. code-block:: python
>>> configs, settings = qml.transforms.qcut.expand_fragment_tapes_mc(tapes, communication_graph, 3)
>>> print(settings)
[[1 6 2]]
>>> for i, (c1, c2) in enumerate(zip(configs[0], configs[1])):
... print(f"config {i}:")
... print(c1.draw())
... print("")
... print(c2.draw())
... print("")
...
config 0:
0: ──H──╭C──┤ Sample[|1⟩⟨1|]
1: ─────╰X──┤ Sample[I]
1: ──X──╭C──┤ Sample[|1⟩⟨1|]
2: ─────╰X──┤ Sample[|1⟩⟨1|]
config 1:
0: ──H──╭C──┤ Sample[|1⟩⟨1|]
1: ─────╰X──┤ Sample[Z]
1: ──I──╭C──┤ Sample[|1⟩⟨1|]
2: ─────╰X──┤ Sample[|1⟩⟨1|]
config 2:
0: ──H──╭C──┤ Sample[|1⟩⟨1|]
1: ─────╰X──┤ Sample[X]
1: ──H──╭C──┤ Sample[|1⟩⟨1|]
2: ─────╰X──┤ Sample[|1⟩⟨1|]
"""
pairs = [e[-1] for e in communication_graph.edges.data("pair")]
settings = np.random.choice(range(8), size=(len(pairs), shots), replace=True)
meas_settings = {pair[0].id: setting for pair, setting in zip(pairs, settings)}
prep_settings = {pair[1].id: setting for pair, setting in zip(pairs, settings)}
all_configs = []
for tape in tapes:
frag_config = []
for shot in range(shots):
with qml.tape.QuantumTape() as new_tape:
for op in tape.operations:
w = op.wires[0]
if isinstance(op, PrepareNode):
MC_STATES[prep_settings[op.id][shot]](w)
elif not isinstance(op, MeasureNode):
qml.apply(op)
for meas in tape.measurements:
qml.apply(meas)
for op in tape.operations:
meas_w = op.wires[0]
if isinstance(op, MeasureNode):
MC_MEASUREMENTS[meas_settings[op.id][shot]](meas_w)
frag_config.append(new_tape)
all_configs.append(frag_config)
return all_configs, settings
def _reshape_results(results: Sequence, shots: int) -> List[List]:
"""
Helper function to reshape ``results`` into a two-dimensional nested list whose number of rows
is determined by the number of shots and whose number of columns is determined by the number of
cuts.
"""
results = [qml.math.flatten(r) for r in results]
results = [results[i : i + shots] for i in range(0, len(results), shots)]
results = list(map(list, zip(*results))) # calculate list-based transpose
return results
def qcut_processing_fn_sample(
results: Sequence, communication_graph: MultiDiGraph, shots: int
) -> List:
"""
Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>`
transform. This removes superfluous mid-circuit measurement samples from fragment
circuit outputs.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details.
Args:
results (Sequence): a collection of sample-based execution results generated from the
random expansion of circuit fragments over measurement and preparation node configurations
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
shots (int): the number of shots
Returns:
List[tensor_like]: the sampled output for all terminal measurements over the number of shots given
"""
res0 = results[0]
results = _reshape_results(results, shots)
out_degrees = [d for _, d in communication_graph.out_degree]
samples = []
for result in results:
sample = []
for fragment_result, out_degree in zip(result, out_degrees):
sample.append(fragment_result[: -out_degree or None])
samples.append(np.hstack(sample))
return [qml.math.convert_like(np.array(samples), res0)]
def qcut_processing_fn_mc(
results: Sequence,
communication_graph: MultiDiGraph,
settings: np.ndarray,
shots: int,
classical_processing_fn: callable,
):
"""
Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>`
transform. This takes a user-specified classical function to act on bitstrings and
generates an expectation value.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details.
Args:
results (Sequence): a collection of sample-based execution results generated from the
random expansion of circuit fragments over measurement and preparation node configurations
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
settings (np.ndarray): Each element is one of 8 unique values that tracks the specific
measurement and preparation operations over all configurations. The number of rows is determined
by the number of cuts and the number of columns is determined by the number of shots.
shots (int): the number of shots
classical_processing_fn (callable): A classical postprocessing function to be applied to
the reconstructed bitstrings. The expected input is a bitstring; a flat array of length ``wires``
and the output should be a single number within the interval :math:`[-1, 1]`.
Returns:
float or tensor_like: the expectation value calculated in accordance to Eq. (35) of
`Peng et al. <https://arxiv.org/abs/1904.00102>`__
"""
res0 = results[0]
results = _reshape_results(results, shots)
out_degrees = [d for _, d in communication_graph.out_degree]
evals = (0.5, 0.5, 0.5, -0.5, 0.5, -0.5, 0.5, -0.5)
expvals = []
for result, setting in zip(results, settings.T):
sample_terminal = []
sample_mid = []
for fragment_result, out_degree in zip(result, out_degrees):
sample_terminal.append(fragment_result[: -out_degree or None])
sample_mid.append(fragment_result[-out_degree or len(fragment_result) :])
sample_terminal = np.hstack(sample_terminal)
sample_mid = np.hstack(sample_mid)
assert set(sample_terminal).issubset({np.array(0), np.array(1)})
assert set(sample_mid).issubset({np.array(-1), np.array(1)})
# following Eq.(35) of Peng et.al: https://arxiv.org/abs/1904.00102
f = classical_processing_fn(sample_terminal)
if not -1 <= f <= 1:
raise ValueError(
"The classical processing function supplied must "
"give output in the interval [-1, 1]"
)
sigma_s = np.prod(sample_mid)
t_s = f * sigma_s
c_s = np.prod([evals[s] for s in setting])
K = len(sample_mid)
expvals.append(8**K * c_s * t_s)
return qml.math.convert_like(np.mean(expvals), res0)
def _get_symbol(i):
"""Finds the i-th ASCII symbol. Works for lowercase and uppercase letters, allowing i up to
51."""
if i >= len(string.ascii_letters):
raise ValueError(
"Set the use_opt_einsum argument to True when applying more than "
f"{len(string.ascii_letters)} wire cuts to a circuit"
)
return string.ascii_letters[i]
# pylint: disable=too-many-branches
def contract_tensors(
tensors: Sequence,
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
r"""Contract tensors according to the edges specified in the communication graph.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Consider the three tensors :math:`T^{(1)}`, :math:`T^{(2)}`, and :math:`T^{(3)}`, along with
their contraction equation
.. math::
\sum_{ijklmn} T^{(1)}_{ij,km} T^{(2)}_{kl,in} T^{(3)}_{mn,jl}
Each tensor is the result of the tomography of a circuit fragment and has some indices
corresponding to state preparations (marked by the indices before the comma) and some indices
corresponding to measurements (marked by the indices after the comma).
An equivalent representation of the contraction equation is to use a directed multigraph known
as the communication/quotient graph. In the communication graph, each tensor is assigned a node
and edges are added between nodes to mark a contraction along an index. The communication graph
resulting from the above contraction equation is a complete directed graph.
In the communication graph provided by :func:`fragment_graph`, edges are composed of
:class:`PrepareNode` and :class:`MeasureNode` pairs. To correctly map back to the contraction
equation, we must keep track of the order of preparation and measurement indices in each tensor.
This order is specified in the ``prepare_nodes`` and ``measure_nodes`` arguments.
Args:
tensors (Sequence): the tensors to be contracted
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between the tensors
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of preparation indices in
each tensor
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of measurement indices in
each tensor
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
Returns:
float or tensor_like: the result of contracting the tensor network
**Example**
We first set up the tensors and their corresponding :class:`~.PrepareNode` and
:class:`~.MeasureNode` orderings:
.. code-block:: python
from pennylane.transforms import qcut
import networkx as nx
import numpy as np
tensors = [np.arange(4), np.arange(4, 8)]
prep = [[], [qcut.PrepareNode(wires=0)]]
meas = [[qcut.MeasureNode(wires=0)], []]
The communication graph describing edges in the tensor network must also be constructed:
.. code-block:: python
graph = nx.MultiDiGraph([(0, 1, {"pair": (meas[0][0], prep[1][0])})])
The network can then be contracted using:
>>> qml.transforms.qcut.contract_tensors(tensors, graph, prep, meas)
38
"""
# pylint: disable=import-outside-toplevel
if use_opt_einsum:
try:
from opt_einsum import contract, get_symbol
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the contract_tensors function. This package can be "
"installed using:\npip install opt_einsum"
) from e
else:
contract = qml.math.einsum
get_symbol = _get_symbol
ctr = 0
tensor_indxs = [""] * len(communication_graph.nodes)
meas_map = {}
for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)):
predecessors = communication_graph.pred[node]
for p in prep:
for _, pred_edges in predecessors.items():
for pred_edge in pred_edges.values():
meas_op, prep_op = pred_edge["pair"]
if p.id is prep_op.id:
symb = get_symbol(ctr)
ctr += 1
tensor_indxs[i] += symb
meas_map[meas_op] = symb
for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)):
successors = communication_graph.succ[node]
for m in meas:
for _, succ_edges in successors.items():
for succ_edge in succ_edges.values():
meas_op, _ = succ_edge["pair"]
if m.id is meas_op.id:
symb = meas_map[meas_op]
tensor_indxs[i] += symb
eqn = ",".join(tensor_indxs)
kwargs = {} if use_opt_einsum else {"like": tensors[0]}
return contract(eqn, *tensors, **kwargs)
CHANGE_OF_BASIS = qml.math.array(
[[1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 2.0, 0.0], [-1.0, -1.0, 0.0, 2.0], [1.0, -1.0, 0.0, 0.0]]
)
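# Each row of CHANGE_OF_BASIS maps the results obtained with the |0>, |1>, |+>,
# |+i> preparation settings (columns, in that order) onto a Pauli component:
# I = r0 + r1, X = 2*r_plus - r0 - r1, Y = 2*r_iplus - r0 - r1, Z = r0 - r1.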
def _process_tensor(results, n_prep: int, n_meas: int):
"""Convert a flat slice of an individual circuit fragment's execution results into a tensor.
This function performs the following steps:
1. Reshapes ``results`` into the intermediate shape ``(4,) * n_prep + (4**n_meas,)``
2. Shuffles the final axis to follow the standard product over measurement settings. E.g., for
``n_meas = 2`` the standard product is: II, IX, IY, IZ, XI, ..., ZY, ZZ while the input order
will be the result of ``qml.grouping.partition_pauli_group(2)``, i.e., II, IZ, ZI, ZZ, ...,
YY.
3. Reshapes into the final target shape ``(4,) * (n_prep + n_meas)``
4. Performs a change of basis for the preparation indices (the first ``n_prep`` indices) from
the |0>, |1>, |+>, |+i> basis to the I, X, Y, Z basis using ``CHANGE_OF_BASIS``.
Args:
results (tensor_like): the input execution results
n_prep (int): the number of preparation nodes in the corresponding circuit fragment
n_meas (int): the number of measurement nodes in the corresponding circuit fragment
Returns:
tensor_like: the corresponding fragment tensor
"""
n = n_prep + n_meas
dim_meas = 4**n_meas
# Step 1
intermediate_shape = (4,) * n_prep + (dim_meas,)
intermediate_tensor = qml.math.reshape(results, intermediate_shape)
# Step 2
grouped = qml.grouping.partition_pauli_group(n_meas)
grouped_flat = [term for group in grouped for term in group]
order = qml.math.argsort(grouped_flat)
if qml.math.get_interface(intermediate_tensor) == "tensorflow":
# TensorFlow does not support slicing
intermediate_tensor = qml.math.gather(intermediate_tensor, order, axis=-1)
else:
sl = [slice(None)] * n_prep + [order]
intermediate_tensor = intermediate_tensor[tuple(sl)]
# Step 3
final_shape = (4,) * n
final_tensor = qml.math.reshape(intermediate_tensor, final_shape)
# Step 4
change_of_basis = qml.math.convert_like(CHANGE_OF_BASIS, intermediate_tensor)
for i in range(n_prep):
axes = [[1], [i]]
final_tensor = qml.math.tensordot(change_of_basis, final_tensor, axes=axes)
axes = list(reversed(range(n_prep))) + list(range(n_prep, n))
# Use transpose to reorder indices. We must do this because tensordot returns a tensor whose
# indices are ordered according to the uncontracted indices of the first tensor, followed
# by the uncontracted indices of the second tensor. For example, calculating C_kj T_ij returns
# a tensor T'_ki rather than T'_ik.
final_tensor = qml.math.transpose(final_tensor, axes=axes)
final_tensor *= qml.math.power(2, -(n_meas + n_prep) / 2)
return final_tensor
def _to_tensors(
results,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
) -> List:
"""Process a flat list of execution results from all circuit fragments into the corresponding
tensors.
This function slices ``results`` according to the expected size of fragment tensors derived from
the ``prepare_nodes`` and ``measure_nodes`` and then passes onto ``_process_tensor`` for further
transformation.
Args:
results (tensor_like): A collection of execution results, provided as a flat tensor,
corresponding to the expansion of circuit fragments in the communication graph over
measurement and preparation node configurations. These results are processed into
tensors by this function.
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence whose length is equal to the
number of circuit fragments, with each element used here to determine the number of
preparation nodes in a given fragment
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence whose length is equal to the
number of circuit fragments, with each element used here to determine the number of
measurement nodes in a given fragment
Returns:
List[tensor_like]: the tensors for each circuit fragment in the communication graph
"""
ctr = 0
tensors = []
for p, m in zip(prepare_nodes, measure_nodes):
n_prep = len(p)
n_meas = len(m)
n = n_prep + n_meas
dim = 4**n
results_slice = results[ctr : dim + ctr]
tensors.append(_process_tensor(results_slice, n_prep, n_meas))
ctr += dim
if results.shape[0] != ctr:
raise ValueError(f"The results argument should be a flat list of length {ctr}")
return tensors
def qcut_processing_fn(
results: Sequence[Sequence],
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
"""Processing function for the :func:`cut_circuit() <pennylane.cut_circuit>` transform.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
results (Sequence[Sequence]): A collection of execution results generated from the
expansion of circuit fragments over measurement and preparation node configurations.
These results are processed into tensors and then contracted.
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of preparation indices in
each tensor
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of measurement indices in
each tensor
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
Returns:
float or tensor_like: the output of the original uncut circuit arising from contracting
the tensor network of circuit fragments
"""
flat_results = qml.math.concatenate(results)
tensors = _to_tensors(flat_results, prepare_nodes, measure_nodes)
result = contract_tensors(
tensors, communication_graph, prepare_nodes, measure_nodes, use_opt_einsum
)
return result
def _qcut_expand_fn(
tape: QuantumTape,
max_depth: int = 1,
auto_cutter: Union[bool, Callable] = False,
):
"""Expansion function for circuit cutting.
Expands operations until reaching a depth that includes :class:`~.WireCut` operations.
"""
for op in tape.operations:
if isinstance(op, WireCut):
return tape
if max_depth > 0:
return _qcut_expand_fn(tape.expand(), max_depth=max_depth - 1, auto_cutter=auto_cutter)
if not (auto_cutter is True or callable(auto_cutter)):
raise ValueError(
"No WireCut operations found in the circuit. Consider increasing the max_depth value if"
" operations or nested tapes contain WireCut operations."
)
return tape
def _cut_circuit_expand(
tape: QuantumTape,
use_opt_einsum: bool = False,
device_wires: Optional[Wires] = None,
max_depth: int = 1,
auto_cutter: Union[bool, Callable] = False,
**kwargs,
):
"""Main entry point for expanding operations until reaching a depth that
includes :class:`~.WireCut` operations."""
# pylint: disable=unused-argument
return _qcut_expand_fn(tape, max_depth, auto_cutter)
def _cut_circuit_mc_expand(
tape: QuantumTape,
classical_processing_fn: Optional[callable] = None,
max_depth: int = 1,
shots: Optional[int] = None,
device_wires: Optional[Wires] = None,
auto_cutter: Union[bool, Callable] = False,
**kwargs,
):
"""Main entry point for expanding operations in sample-based tapes until
reaching a depth that includes :class:`~.WireCut` operations."""
# pylint: disable=unused-argument, too-many-arguments
return _qcut_expand_fn(tape, max_depth, auto_cutter)
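# The ``cut_circuit`` and ``cut_circuit_mc`` batch transforms themselves are not
# shown in this excerpt. What follows is a condensed, hedged sketch of the
# pipelines they orchestrate, built only from the helpers defined above;
# argument validation and automatic cut placement via find_and_place_cuts are
# omitted for brevity, and max_depth/auto_cutter are consumed by the expand
# functions assigned below:
@batch_transform
def cut_circuit(tape, use_opt_einsum=False, device_wires=None, max_depth=1,
                auto_cutter=False, **kwargs):
    """Sketch: cut a tape containing WireCut operations into fragments and
    return the fragment configuration tapes together with a tensor-contraction
    processing function."""
    g = tape_to_graph(tape)
    replace_wire_cut_nodes(g)
    fragments, communication_graph = fragment_graph(g)
    fragment_tapes = [graph_to_tape(f) for f in fragments]
    if device_wires is not None:
        fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes]
    configurations, prepare_nodes, measure_nodes = [], [], []
    for t in fragment_tapes:
        tapes_, p, m = expand_fragment_tape(t)
        configurations.append(tapes_)
        prepare_nodes.append(p)
        measure_nodes.append(m)
    tapes = tuple(t for c in configurations for t in c)
    return tapes, partial(
        qcut_processing_fn,
        communication_graph=communication_graph,
        prepare_nodes=prepare_nodes,
        measure_nodes=measure_nodes,
        use_opt_einsum=use_opt_einsum,
    )
@batch_transform
def cut_circuit_mc(tape, classical_processing_fn=None, max_depth=1, shots=None,
                   device_wires=None, auto_cutter=False, **kwargs):
    """Sketch: Monte Carlo analogue of ``cut_circuit`` for sample-based tapes.
    In this sketch ``shots`` must be supplied explicitly (the real transform
    infers it from the device)."""
    g = tape_to_graph(tape)
    replace_wire_cut_nodes(g)
    fragments, communication_graph = fragment_graph(g)
    fragment_tapes = [graph_to_tape(f) for f in fragments]
    if device_wires is not None:
        fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes]
    configurations, settings = expand_fragment_tapes_mc(
        fragment_tapes, communication_graph, shots=shots
    )
    tapes = tuple(t for c in configurations for t in c)
    if classical_processing_fn:
        return tapes, partial(
            qcut_processing_fn_mc,
            communication_graph=communication_graph,
            settings=settings,
            shots=shots,
            classical_processing_fn=classical_processing_fn,
        )
    return tapes, partial(
        qcut_processing_fn_sample,
        communication_graph=communication_graph,
        shots=shots,
    )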
cut_circuit.expand_fn = _cut_circuit_expand
cut_circuit_mc.expand_fn = _cut_circuit_mc_expand
def remap_tape_wires(tape: QuantumTape, wires: Sequence) -> QuantumTape:
"""Map the wires of a tape to a new set of wires.
Given an :math:`n`-wire ``tape``, this function returns a new :class:`~.QuantumTape` with
operations and measurements acting on the first :math:`n` wires provided in the ``wires``
argument. The input ``tape`` is left unmodified.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): the quantum tape whose wires should be remapped
wires (Sequence): the new set of wires to map to
Returns:
QuantumTape: A remapped copy of the input tape
Raises:
ValueError: if the number of wires in ``tape`` exceeds ``len(wires)``
**Example**
Consider the following circuit that operates on wires ``[2, 3]``:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.5, wires=2)
qml.RY(0.6, wires=3)
qml.CNOT(wires=[2, 3])
qml.expval(qml.PauliZ(2) @ qml.PauliZ(3))
We can map from wires ``[2, 3]`` to ``[0, 1]`` using:
>>> new_wires = [0, 1]
>>> new_tape = qml.transforms.qcut.remap_tape_wires(tape, new_wires)
>>> print(new_tape.draw())
0: ──RX(0.5)──╭C──┤ ╭⟨Z ⊗ Z⟩
1: ──RY(0.6)──╰X──┤ ╰⟨Z ⊗ Z⟩
"""
if len(tape.wires) > len(wires):
raise ValueError(
f"Attempting to run a {len(tape.wires)}-wire circuit on a "
f"{len(wires)}-wire device. Consider increasing the number of wires in "
f"your device."
)
wire_map = dict(zip(tape.wires, wires))
copy_ops = [copy.copy(op) for op in tape.operations]
copy_meas = [copy.copy(op) for op in tape.measurements]
with QuantumTape() as new_tape:
for op in copy_ops:
new_wires = Wires([wire_map[w] for w in op.wires])
op._wires = new_wires
apply(op)
for meas in copy_meas:
obs = meas.obs
if isinstance(obs, Tensor):
for obs in obs.obs:
new_wires = Wires([wire_map[w] for w in obs.wires])
obs._wires = new_wires
else:
new_wires = Wires([wire_map[w] for w in obs.wires])
obs._wires = new_wires
apply(meas)
return new_tape
def _graph_to_hmetis(
graph: MultiDiGraph,
hyperwire_weight: int = 0,
edge_weights: Sequence[int] = None,
) -> Tuple[List[int], List[int], List[Union[int, float]]]:
"""Converts a ``MultiDiGraph`` into the
`hMETIS hypergraph input format <http://glaros.dtc.umn.edu/gkhome/fetch/sw/hmetis/manual.pdf>`__
conforming to KaHyPar's calling signature.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires.
Defaults to 0 which leads to no such insertion. If greater than 0, hyperedges will be
appended with the provided weight, to encourage the resulting fragments to cluster gates
on the same wire together.
edge_weights (Sequence[int]): Weights for regular edges in the graph. Defaults to ``None``,
which leads to unit-weighted edges.
Returns:
Tuple[List,List,List]: The 3 lists representing an (optionally weighted) hypergraph:
- Flattened list of adjacent node indices.
- List of starting indices for edges in the above adjacent-nodes-list.
- Optional list of edge weights. ``None`` if ``hyperwire_weight`` is equal to 0.
"""
nodes = list(graph.nodes)
edges = graph.edges(data="wire")
wires = {w for _, _, w in edges}
adj_nodes = [nodes.index(v) for ops in graph.edges(keys=False) for v in ops]
edge_splits = qml.math.cumsum([0] + [len(e) for e in graph.edges(keys=False)]).tolist()
edge_weights = (
edge_weights if edge_weights is not None and len(edges) == len(edge_weights) else None
)
if hyperwire_weight:
hyperwires = {w: set() for w in wires}
num_wires = len(hyperwires)
for v0, v1, wire in edges:
hyperwires[wire].update([nodes.index(v0), nodes.index(v1)])
for wire, nodes_on_wire in hyperwires.items():
nwv = len(nodes_on_wire)
edge_splits.append(nwv + edge_splits[-1])
adj_nodes = adj_nodes + list(nodes_on_wire)
assert len(edge_splits) == len(edges) + num_wires + 1
if isinstance(hyperwire_weight, (int, float)):
# assumes original edges having unit weights by default:
edge_weights = edge_weights or ([1] * len(edges))
wire_weights = [hyperwire_weight] * num_wires
edge_weights = edge_weights + wire_weights
return adj_nodes, edge_splits, edge_weights
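# Hedged illustration of the encoding above: a two-node graph with a single
# edge (node0 -> node1) and the default hyperwire_weight=0 would yield
# adj_nodes = [0, 1], edge_splits = [0, 2] and edge_weights = None.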
def kahypar_cut(
graph: MultiDiGraph,
num_fragments: int,
imbalance: int = None,
edge_weights: List[Union[int, float]] = None,
node_weights: List[Union[int, float]] = None,
fragment_weights: List[Union[int, float]] = None,
hyperwire_weight: int = 1,
seed: int = None,
config_path: Union[str, Path] = None,
trial: int = None,
verbose: bool = False,
) -> List[Tuple[Operation, Operation, Any]]:
"""Calls `KaHyPar <https://kahypar.org/>`__ to partition a graph.
.. warning::
Requires KaHyPar to be installed separately. For Linux and Mac users,
KaHyPar can be installed using ``pip install kahypar``. Windows users
can follow the instructions
`here <https://kahypar.org>`__ to compile from source.
Args:
graph (nx.MultiDiGraph): The graph to be partitioned.
num_fragments (int): Desired number of fragments.
imbalance (int): Imbalance factor of the partitioning. Defaults to KaHyPar's determination.
edge_weights (List[Union[int, float]]): Weights for edges. Defaults to unit-weighted edges.
node_weights (List[Union[int, float]]): Weights for nodes. Defaults to unit-weighted nodes.
fragment_weights (List[Union[int, float]]): Maximum size constraints by fragment. Defaults
to no such constraints, with ``imbalance`` the only parameter affecting fragment sizes.
hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires.
Setting it to 0 leads to no such insertion. If greater than 0, hyperedges will be
appended with the provided weight, to encourage the resulting fragments to cluster gates
on the same wire together. Defaults to 1.
seed (int): KaHyPar's seed. Defaults to the seed in the config file which defaults to -1,
i.e. unfixed seed.
config_path (str): KaHyPar's ``.ini`` config file path. Defaults to its SEA20 paper config.
trial (int): trial id for summary label creation. Defaults to ``None``.
verbose (bool): Flag for printing KaHyPar's output summary. Defaults to ``False``.
Returns:
List[Tuple[Operation, Operation, Any]]: List of cut edges.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.RZ(0.240, wires=0)
qml.RZ(0.133, wires="a")
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.expval(qml.PauliZ(wires=[0]))
We can let KaHyPar automatically find the optimal edges to place cuts:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_edges = qml.transforms.qcut.kahypar_cut(
graph=graph,
num_fragments=2,
)
>>> cut_edges
[(CNOT(wires=[0, 'a']), RZ(0.24, wires=[0]), 0)]
"""
# pylint: disable=too-many-arguments, import-outside-toplevel
try:
import kahypar
except ImportError as e:
raise ImportError(
"KaHyPar must be installed to use this method for automatic "
"cut placement. Try pip install kahypar or visit "
"https://kahypar.org/ for installation instructions."
) from e
adjacent_nodes, edge_splits, edge_weights = _graph_to_hmetis(
graph=graph, hyperwire_weight=hyperwire_weight, edge_weights=edge_weights
)
trial = 0 if trial is None else trial
ne = len(edge_splits) - 1
nv = max(adjacent_nodes) + 1
if edge_weights is not None or node_weights is not None:
edge_weights = edge_weights or [1] * ne
node_weights = node_weights or [1] * nv
hypergraph = kahypar.Hypergraph(
nv,
ne,
edge_splits,
adjacent_nodes,
num_fragments,
edge_weights,
node_weights,
)
else:
hypergraph = kahypar.Hypergraph(nv, ne, edge_splits, adjacent_nodes, num_fragments)
context = kahypar.Context()
config_path = config_path or str(Path(__file__).parent / "_cut_kKaHyPar_sea20.ini")
context.loadINIconfiguration(config_path)
context.setK(num_fragments)
if isinstance(imbalance, float):
context.setEpsilon(imbalance)
if isinstance(fragment_weights, SequenceType) and (len(fragment_weights) == num_fragments):
context.setCustomTargetBlockWeights(fragment_weights)
if not verbose:
context.suppressOutput(True)
# KaHyPar fixes seed to 42 by default, need to manually sample seed to randomize:
kahypar_seed = np.random.default_rng(seed).choice(2**15)
context.setSeed(kahypar_seed)
kahypar.partition(hypergraph, context)
cut_edge_mask = [hypergraph.connectivity(e) > 1 for e in hypergraph.edges()]
# compress() ignores the extra hyperwires at the end if there is any.
cut_edges = list(compress(graph.edges, cut_edge_mask))
if verbose:
fragment_sizes = [hypergraph.blockSize(p) for p in range(num_fragments)]
print(len(fragment_sizes), fragment_sizes)
return cut_edges
def place_wire_cuts(
graph: MultiDiGraph, cut_edges: Sequence[Tuple[Operation, Operation, Any]]
) -> MultiDiGraph:
"""Inserts a :class:`~.WireCut` node for each provided cut edge into a circuit graph.
Args:
graph (nx.MultiDiGraph): The original (tape-converted) graph to be cut.
cut_edges (Sequence[Tuple[Operation, Operation, Any]]): List of ``MultiDiGraph`` edges
to be replaced with a :class:`~.WireCut` node. Each 3-tuple represents the source node, the
target node, and the wire key of the (multi)edge.
Returns:
MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.expval(qml.PauliZ(wires=[0]))
>>> print(tape.draw())
0: ──RX(0.432)──╭C──┤ ⟨Z⟩
a: ──RY(0.543)──╰X──┤
If we know we want to place a :class:`~.WireCut` node between nodes ``RY(0.543, wires=["a"])`` and
``CNOT(wires=[0, 'a'])`` after the tape is constructed, we can first find the edge in the graph:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> op0, op1 = tape.operations[1], tape.operations[2]
>>> cut_edges = [e for e in graph.edges if e[0] is op0 and e[1] is op1]
>>> cut_edges
[(RY(0.543, wires=['a']), CNOT(wires=[0, 'a']), 0)]
Then feed it to this function for placement:
>>> cut_graph = qml.transforms.qcut.place_wire_cuts(graph=graph, cut_edges=cut_edges)
>>> cut_graph
<networkx.classes.multidigraph.MultiDiGraph at 0x7f7251ac1220>
And visualize the cut by converting back to a tape:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.432)──────╭C──┤ ⟨Z⟩
a: ──RY(0.543)──//──╰X──┤
"""
cut_graph = graph.copy()
for op0, op1, wire_key in cut_edges:
# Get info:
order = cut_graph.nodes[op0]["order"] + 1
wire = cut_graph.edges[(op0, op1, wire_key)]["wire"]
# Apply cut:
cut_graph.remove_edge(op0, op1, wire_key)
# Increment order for all subsequent gates:
for op, o in cut_graph.nodes(data="order"):
if o >= order:
cut_graph.nodes[op]["order"] += 1
# Add WireCut
wire_cut = WireCut(wires=wire)
cut_graph.add_node(wire_cut, order=order)
cut_graph.add_edge(op0, wire_cut, wire=wire)
cut_graph.add_edge(wire_cut, op1, wire=wire)
return cut_graph
def _remove_existing_cuts(graph: MultiDiGraph) -> MultiDiGraph:
"""Removes all existing, manually or automatically placed, cuts from a circuit graph, be it
``WireCut``s or ``MeasureNode``-``PrepareNode`` pairs.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
Returns:
(MultiDiGraph): Copy of the input graph with all its existing cuts removed.
"""
uncut_graph = graph.copy()
for op in list(graph.nodes):
if isinstance(op, WireCut):
uncut_graph.remove_node(op)
elif isinstance(op, MeasureNode):
for op1 in graph.neighbors(op):
if isinstance(op1, PrepareNode):
uncut_graph.remove_node(op)
uncut_graph.remove_node(op1)
if len([n for n in uncut_graph.nodes if isinstance(n, (MeasureNode, PrepareNode))]) > 0:
warnings.warn(
"The circuit contains `MeasureNode` or `PrepareNode` operations that are "
"not paired up correctly. Please check.",
UserWarning,
)
return uncut_graph
def find_and_place_cuts(
graph: MultiDiGraph,
cut_method: Callable = kahypar_cut,
cut_strategy: CutStrategy = None,
replace_wire_cuts=False,
local_measurement=False,
**kwargs,
) -> MultiDiGraph:
"""Automatically finds and places optimal :class:`~.WireCut` nodes into a given tape-converted graph
using a customizable graph partitioning function. Preserves existing placed cuts.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
cut_method (Callable): A graph partitioning function that takes an input graph and returns
a list of edges to be cut based on a given set of constraints and objective. Defaults
to :func:`kahypar_cut` which requires KaHyPar to be installed using
``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from
source for Windows users.
cut_strategy (CutStrategy): Strategy for optimizing cutting parameters based on device
constraints. Defaults to ``None`` in which case ``kwargs`` must be fully specified
for passing to the ``cut_method``.
replace_wire_cuts (bool): Whether to replace :class:`~.WireCut` nodes with
:class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. Defaults to ``False``.
local_measurement (bool): Whether to use the local-measurement circuit-cutting objective,
i.e. the maximum node-degree of the communication graph, for cut evaluation. Defaults
to ``False`` which assumes global measurement and uses the total number of cuts as the
cutting objective.
kwargs: Additional keyword arguments to be passed to the callable ``cut_method``.
Returns:
nx.MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 4-wire circuit with a single CNOT gate connecting the top (wires
``[0, 1]``) and bottom (wires ``["a", "b"]``) halves of the circuit. Note there's a
:class:`~.WireCut` manually placed into the circuit already.
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.1, wires=0)
qml.RY(0.2, wires=1)
qml.RX(0.3, wires="a")
qml.RY(0.4, wires="b")
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=["a", "b"])
qml.CNOT(wires=[1, "a"])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=["a", "b"])
qml.RX(0.5, wires="a")
qml.RY(0.6, wires="b")
qml.expval(qml.PauliX(wires=[0]) @ qml.PauliY(wires=["a"]) @ qml.PauliZ(wires=["b"]))
>>> print(tape.draw())
0: ──RX(0.1)──╭C──────────╭C───────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C──╰X───────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C──RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X──RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
Since the existing :class:`~.WireCut` doesn't sufficiently fragment the circuit, we can find the
remaining cuts using the default KaHyPar partitioner:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
)
Visualizing the newly-placed cut:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.1)──╭C──────────────╭C───────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C──//──╰X───────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C──RX(0.5)──────├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X──RY(0.6)──────╰┤ ⟨X ⊗ Y ⊗ Z⟩
We can then proceed with the usual process of replacing :class:`~.WireCut` nodes with
pairs of :class:`~.MeasureNode` and :class:`~.PrepareNode`, and then break the graph
into fragments. Or, alternatively, we can directly get such processed graph by passing
``replace_wire_cuts=True``:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
replace_wire_cuts=True,
)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX(0.1)──╭C──╭C──────┤ ⟨X⟩
1: ──RY(0.2)──╰X──│───MeasureNode
2: ──PrepareNode──╰X──┤

a: ──RX(0.3)──╭C──╭X──╭C──RX(0.5)──╭┤ ⟨Y ⊗ Z⟩
b: ──RY(0.4)──╰X──│───╰X──RY(0.6)──╰┤ ⟨Y ⊗ Z⟩
1: ──PrepareNode──╰C──MeasureNode
Alternatively, if all we want to do is to find the optimal way to fit a circuit onto a smaller
device, a :class:`~.CutStrategy` can be used to populate the necessary explorations of cutting
parameters. As an extreme example, if the only device at our disposal is a 2-qubit device, a
simple cut strategy is to specify the ``max_free_wires`` argument (or equivalently
directly passing a :class:`~.Device` to the ``device`` argument):
>>> cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2)
>>> print(cut_strategy.get_cut_kwargs(graph))
[{'num_fragments': 2, 'imbalance': 0.5714285714285714},
{'num_fragments': 3, 'imbalance': 1.4},
{'num_fragments': 4, 'imbalance': 1.75},
{'num_fragments': 5, 'imbalance': 2.3333333333333335},
{'num_fragments': 6, 'imbalance': 2.0},
{'num_fragments': 7, 'imbalance': 3.0},
{'num_fragments': 8, 'imbalance': 2.5},
{'num_fragments': 9, 'imbalance': 2.0},
{'num_fragments': 10, 'imbalance': 1.5},
{'num_fragments': 11, 'imbalance': 1.0},
{'num_fragments': 12, 'imbalance': 0.5},
{'num_fragments': 13, 'imbalance': 0.05},
{'num_fragments': 14, 'imbalance': 0.1}]
The printed list above shows all the possible cutting configurations one can attempt to perform
in order to search for the optimal cut. This is done by directly passing a
:class:`~.CutStrategy` to :func:`~.find_and_place_cuts`:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
cut_strategy=cut_strategy,
)
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX──//──╭C──//──────────╭C──//──────────╭┤ <X@Y@Z>
1: ──RY──//──╰X──//──╭C──//──╰X──────────────│┤
a: ──RX──//──╭C──//──╰X──//──╭C──//──RX──//──├┤ <X@Y@Z>
b: ──RY──//──╰X──//──────────╰X──//──RY──────╰┤ <X@Y@Z>
As one can tell, quite a few cuts have to be made in order to execute the circuit solely on
2-qubit devices. To verify, let's print the fragments:
>>> qml.transforms.qcut.replace_wire_cut_nodes(cut_graph)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX──MeasureNode

1: ──RY──MeasureNode

a: ──RX──MeasureNode

b: ──RY──MeasureNode

0: ──PrepareNode──╭C──MeasureNode
1: ──PrepareNode──╰X──MeasureNode

a: ──PrepareNode──╭C──MeasureNode
b: ──PrepareNode──╰X──MeasureNode

1: ──PrepareNode──╭C──MeasureNode
a: ──PrepareNode──╰X──MeasureNode

0: ──PrepareNode──╭C──MeasureNode
1: ──PrepareNode──╰X──┤

b: ──PrepareNode──╭X──MeasureNode
a: ──PrepareNode──╰C──MeasureNode

a: ──PrepareNode──RX──MeasureNode

b: ──PrepareNode──RY──┤ <Z>

0: ──PrepareNode──┤ <X>

a: ──PrepareNode──┤ <Y>
"""
cut_graph = _remove_existing_cuts(graph)
if isinstance(cut_strategy, CutStrategy):
cut_kwargs_probed = cut_strategy.get_cut_kwargs(cut_graph)
# Need to reseed if a seed is passed:
seed = kwargs.pop("seed", None)
seeds = np.random.default_rng(seed).choice(2**15, cut_strategy.trials_per_probe).tolist()
cut_edges_probed = {
(cut_kwargs["num_fragments"], trial_id): cut_method(
cut_graph,
**{
**cut_kwargs,
**kwargs,
"seed": seed,
}, # kwargs has higher precedence for colliding keys
)
for cut_kwargs in cut_kwargs_probed
for trial_id, seed in zip(range(cut_strategy.trials_per_probe), seeds)
}
valid_cut_edges = {}
for (num_partitions, _), cut_edges in cut_edges_probed.items():
        # The easiest way to tell whether a cut is valid is to simply build the fragment graph.
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
num_cuts = sum(isinstance(n, WireCut) for n in cut_graph.nodes)
replace_wire_cut_nodes(cut_graph)
frags, comm = fragment_graph(cut_graph)
max_frag_degree = max(dict(comm.degree()).values())
if _is_valid_cut(
fragments=frags,
num_cuts=num_cuts,
max_frag_degree=max_frag_degree,
num_fragments_requested=num_partitions,
cut_candidates=valid_cut_edges,
max_free_wires=cut_strategy.max_free_wires,
):
key = (len(frags), max_frag_degree)
valid_cut_edges[key] = cut_edges
if len(valid_cut_edges) < 1:
raise ValueError(
"Unable to find a circuit cutting that satisfies all constraints. "
"Are the constraints too strict?"
)
cut_edges = _get_optim_cut(valid_cut_edges, local_measurement=local_measurement)
else:
cut_edges = cut_method(cut_graph, **kwargs)
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
if replace_wire_cuts:
replace_wire_cut_nodes(cut_graph)
return cut_graph
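# Illustrative end-to-end sketch (not part of the original module). It assumes
# ``tape`` is the QuantumTape built in the docstring above and that
# ``tape_to_graph`` is available from the same qcut package:
#
#     graph = tape_to_graph(tape)
#     cut_graph = find_and_place_cuts(
#         graph,
#         cut_strategy=CutStrategy(max_free_wires=2),
#         replace_wire_cuts=True,
#     )
#     fragments, communication_graph = fragment_graph(cut_graph)
#     fragment_tapes = [graph_to_tape(f) for f in fragments]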
def _is_valid_cut(
fragments,
num_cuts,
max_frag_degree,
num_fragments_requested,
cut_candidates,
max_free_wires,
):
"""Helper function for determining if a cut is a valid canditate."""
# pylint: disable=too-many-arguments
k = len(fragments)
key = (k, max_frag_degree)
correct_num_fragments = k <= num_fragments_requested
best_candidate_yet = (key not in cut_candidates) or (len(cut_candidates[key]) > num_cuts)
# pylint: disable=no-member
    all_fragments_fit = all(
        len(graph_to_tape(f).wires) <= max_free_wires for f in fragments
    )
return correct_num_fragments and best_candidate_yet and all_fragments_fit
def _get_optim_cut(valid_cut_edges, local_measurement=False):
"""Picks out the best cut from a dict of valid candidate cuts."""
if local_measurement:
min_max_node_degree = min(max_node_degree for _, max_node_degree in valid_cut_edges)
optim_cuts = {
k: cut_edges
for (k, max_node_degree), cut_edges in valid_cut_edges.items()
if (max_node_degree == min_max_node_degree)
}
else:
min_cuts = min(len(cut_edges) for cut_edges in valid_cut_edges.values())
optim_cuts = {
k: cut_edges
for (k, _), cut_edges in valid_cut_edges.items()
if (len(cut_edges) == min_cuts)
}
return optim_cuts[min(optim_cuts)] # choose the lowest num_fragments among best ones.
| 38.741009 | 117 | 0.629905 |
735799fe024faf41da595642a3d8bdb3ba238a42 | 1,693 | py | Python | tools/SDKTool/src/ui/dialog/progress_bar_dialog.py | Passer-D/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 1,210 | 2020-08-18T07:57:36.000Z | 2022-03-31T15:06:05.000Z | tools/SDKTool/src/ui/dialog/progress_bar_dialog.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 37 | 2020-08-24T02:48:38.000Z | 2022-01-30T06:41:52.000Z | tools/SDKTool/src/ui/dialog/progress_bar_dialog.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 275 | 2020-08-18T08:35:16.000Z | 2022-03-31T15:06:07.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QProgressDialog
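# Hedged sketch of the dialog this module presumably defined (the original
# class body was not retained in this copy): a thin wrapper around
# QProgressDialog; all names and defaults below are assumptions.
class ProgressBarDialog(QWidget):
    def __init__(self, title='', label='', min_value=0, max_value=100, parent=None):
        super().__init__(parent)
        # Passing None as the cancel-button text removes the cancel button.
        self.process_bar = QProgressDialog(label, None, min_value, max_value, parent)
        self.process_bar.setWindowTitle(title)
        self.process_bar.setWindowModality(Qt.WindowModal)

    def set_value(self, value):
        # Clamp into the configured range before updating the bar.
        value = max(self.process_bar.minimum(), min(value, self.process_bar.maximum()))
        self.process_bar.setValue(value)

    def close_bar(self):
        self.process_bar.close()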
| 31.351852 | 111 | 0.705848 |
7358c21d44c9b2e4044c283c45da55bafa2452ee | 2,469 | py | Python | 9/main.py | misterwilliam/advent-of-code | b8ddcbc5efcf62c7e5e4244339029783ac9f90b6 | [
"MIT"
] | null | null | null | 9/main.py | misterwilliam/advent-of-code | b8ddcbc5efcf62c7e5e4244339029783ac9f90b6 | [
"MIT"
] | null | null | null | 9/main.py | misterwilliam/advent-of-code | b8ddcbc5efcf62c7e5e4244339029783ac9f90b6 | [
"MIT"
] | null | null | null | import itertools
import unittest
data = """Faerun to Norrath = 129
Faerun to Tristram = 58
Faerun to AlphaCentauri = 13
Faerun to Arbre = 24
Faerun to Snowdin = 60
Faerun to Tambi = 71
Faerun to Straylight = 67
Norrath to Tristram = 142
Norrath to AlphaCentauri = 15
Norrath to Arbre = 135
Norrath to Snowdin = 75
Norrath to Tambi = 82
Norrath to Straylight = 54
Tristram to AlphaCentauri = 118
Tristram to Arbre = 122
Tristram to Snowdin = 103
Tristram to Tambi = 49
Tristram to Straylight = 97
AlphaCentauri to Arbre = 116
AlphaCentauri to Snowdin = 12
AlphaCentauri to Tambi = 18
AlphaCentauri to Straylight = 91
Arbre to Snowdin = 129
Arbre to Tambi = 53
Arbre to Straylight = 40
Snowdin to Tambi = 15
Snowdin to Straylight = 99
Tambi to Straylight = 70"""
def CalcDistance(start, dest, distancePairs):
return distancePairs[frozenset((start, dest))]
def CalcPathLength(path, distance_pairs):
length = 0
    for i in range(len(path) - 1):
length += CalcDistance(path[i], path[i+1], distance_pairs)
return length
def LoadData(data):
distance_pairs = {}
cities = set()
for line in data.split("\n"):
start, _, dest, _, distance = line.split()
cities.add(start)
cities.add(dest)
distance_pairs[frozenset([start, dest])] = int(distance)
return cities, distance_pairs
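# Reconstructed helper (its original definition was not retained in this copy):
# every ordering of the cities is a candidate path, so enumerate permutations.
def GenPaths(cities):
    return itertools.permutations(cities)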
# ANSWER --------------------------------
cities, distance_pairs = LoadData(data)
longestLength = -1
for path in GenPaths(cities):
length = CalcPathLength(path, distance_pairs)
longestLength = max(longestLength, length)
print(longestLength)
# TESTS ---------------------------------
if __name__ == "__main__":
unittest.main() | 26.548387 | 85 | 0.665857 |
73592c3ecd42d5c4a472b3d8242eb4b399af73f6 | 1,000 | py | Python | 100-Exercicios/ex039.py | thedennerdev/ExerciciosPython-Iniciante | de36c4a09700353a9a1daa7f1320e416c6201a5c | [
"MIT"
] | null | null | null | 100-Exercicios/ex039.py | thedennerdev/ExerciciosPython-Iniciante | de36c4a09700353a9a1daa7f1320e416c6201a5c | [
"MIT"
] | null | null | null | 100-Exercicios/ex039.py | thedennerdev/ExerciciosPython-Iniciante | de36c4a09700353a9a1daa7f1320e416c6201a5c | [
"MIT"
] | null | null | null | # Python Exercise 39: Write a program that reads a young person's year of birth and reports, based on their age, whether they still have to enlist for military service, whether it is exactly time to enlist, or whether the enlistment deadline has already passed. The program should also show how many years are left until, or have passed since, the deadline.
import datetime
current_year = datetime.datetime.today().year
ano_nasc = int(input('Enter your year of birth: '))
idade_alistamento = current_year - ano_nasc
if idade_alistamento < 18:
    print('It is not yet time to enlist')
    print(f'You are {idade_alistamento} years old; {18 - idade_alistamento} years to go. Wait a little longer!')
elif idade_alistamento == 18:
    print(f'You are {idade_alistamento} years old')
    print('You are at enlistment age. Do not waste time!')
else:
    print('You have passed the enlistment deadline.')
    print(f'You are {idade_alistamento} years old; the deadline passed {idade_alistamento - 18} years ago. Regularize your situation!') | 62.5 | 315 | 0.75 |
735938898c03a603b4b3dd0bb3da69ebc37d8938 | 10,903 | py | Python | fish_dashboard/scrapyd/scrapyd_service.py | SylvanasSun/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
] | 60 | 2018-03-09T07:06:10.000Z | 2021-11-18T15:53:04.000Z | fish_dashboard/scrapyd/scrapyd_service.py | qiubaiying/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
] | 1 | 2018-04-03T11:05:54.000Z | 2018-04-03T20:06:41.000Z | fish_dashboard/scrapyd/scrapyd_service.py | qiubaiying/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
] | 8 | 2018-03-12T03:07:00.000Z | 2021-06-11T05:16:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from fish_core.utils.common_utils import format_dict_to_str, get_current_date, list_to_str, str_to_list
from fish_dashboard.scrapyd.model import ScrapydStatusVO, JobListDO, JobStatus, JobPriority, ProjectListVO, SpiderListVO
from fish_dashboard.scrapyd.scrapyd_db import SqlLite3Agent, ScrapydJobExtInfoSQLSet  # ScrapydJobExtInfoSQLSet is used below; this import location is an assumption
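# Assumed stand-in: this exception is raised in get_all_job_list() below, but
# its original definition was not retained in this copy of the module.
class ScrapydTimeoutException(Exception):
    """Raised when the Scrapyd server fails to respond in time."""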
def open_sqllite(sql_set):
agent = SqlLite3Agent(sql_set.DB_FILE_NAME)
agent.create_table(sql_set.CREATE_TABLE)
return agent
sqllite_agent = open_sqllite(ScrapydJobExtInfoSQLSet)
def cancel_job(agent, project_name, job_id):
"""
cancel a job.
If the job is pending, it will be removed. If the job is running, it will be terminated.
"""
prevstate = agent.cancel(project_name, job_id)['prevstate']
if prevstate == 'pending':
sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,))
def packing_job_ext_info(job_list_DO):
    """
    Pack additional information about the job into job_list_DO (a JobListDO).
    """
    ext_info = sqllite_agent.execute(ScrapydJobExtInfoSQLSet.SELECT_BY_ID, (job_list_DO.job_id,))
    if ext_info is None or len(ext_info) <= 0:
        return
    ext_info = ext_info[0]
    job_list_DO.args = ext_info[1]
    job_list_DO.priority = ext_info[2]
    job_list_DO.creation_time = ext_info[3]
    job_list_DO.logs_name = str_to_list(ext_info[4], ',')
    job_list_DO.logs_url = str_to_list(ext_info[5], ',')
def get_all_job_list(agent):
"""
    Get the job list of every project, then
    return three job lists split by status (pending, running, finished).
"""
project_list = agent.get_project_list()
if project_list['status'] == 'error':
raise ScrapydTimeoutException
project_list = project_list['projects']
pending_job_list = []
running_job_list = []
finished_job_list = []
for project_name in project_list:
job_list = agent.get_job_list(project_name)
# Extract latest version
project_version = agent.get_version_list(project_name)['versions'][-1:]
for pending_job in job_list['pending']:
pending_job_list.append(JobListDO(project_name=project_name,
project_version=project_version,
job_id=pending_job['id'],
spider_name=pending_job['spider'],
job_status=JobStatus.PENDING
))
for running_job in job_list['running']:
running_job_list.append(JobListDO(project_name=project_name,
project_version=project_version,
job_id=running_job['id'],
spider_name=running_job['spider'],
start_time=running_job['start_time'],
job_status=JobStatus.RUNNING
))
for finished_job in job_list['finished']:
finished_job_list.append(JobListDO(project_name=project_name,
project_version=project_version,
job_id=finished_job['id'],
spider_name=finished_job['spider'],
start_time=finished_job['start_time'],
end_time=finished_job['end_time'],
job_status=JobStatus.FINISHED
))
return pending_job_list, running_job_list, finished_job_list
def get_job_amounts(agent, project_name, spider_name=None):
"""
    Get the amounts of pending, running, and finished jobs.
"""
job_list = agent.get_job_list(project_name)
pending_job_list = job_list['pending']
running_job_list = job_list['running']
finished_job_list = job_list['finished']
job_amounts = {}
if spider_name is None:
job_amounts['pending'] = len(pending_job_list)
job_amounts['running'] = len(running_job_list)
job_amounts['finished'] = len(finished_job_list)
else:
job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name])
job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name])
job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name])
return job_amounts
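# Illustrative usage sketch (the agent construction is an assumption; the real
# project wires up its Scrapyd agent elsewhere):
#
#     pending, running, finished = get_all_job_list(agent)
#     print(get_job_amounts(agent, 'my_project', spider_name='my_spider'))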
| 46.199153 | 120 | 0.604879 |
7359bba7e09630706e6e5d4a81fb814a396993e5 | 1,750 | py | Python | apps/shared/storage.py | bensternthal/affiliates | e234b0ab925b33d71cb5ded3d51dccbcbb0e59c1 | [
"BSD-3-Clause"
] | null | null | null | apps/shared/storage.py | bensternthal/affiliates | e234b0ab925b33d71cb5ded3d51dccbcbb0e59c1 | [
"BSD-3-Clause"
] | null | null | null | apps/shared/storage.py | bensternthal/affiliates | e234b0ab925b33d71cb5ded3d51dccbcbb0e59c1 | [
"BSD-3-Clause"
] | null | null | null | import os
from tempfile import mkstemp
from django.conf import settings
from django.core.files import locks
from django.core.files.move import file_move_safe
from django.core.files.storage import FileSystemStorage
from django.utils.text import get_valid_filename
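# Hedged sketch of the storage class this module presumably defined (the
# original body was not retained in this copy): a FileSystemStorage variant
# that overwrites existing files instead of renaming new uploads.
class OverwritingStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        # Keep the requested name; _save below overwrites any existing file.
        return get_valid_filename(name)

    def _save(self, name, content):
        full_path = self.path(name)
        directory = os.path.dirname(full_path)
        os.makedirs(directory, exist_ok=True)
        # Write to a temp file in the destination dir, then move into place.
        fd, tmp_path = mkstemp(dir=directory)
        with os.fdopen(fd, 'wb') as tmp:
            locks.lock(tmp, locks.LOCK_EX)
            for chunk in content.chunks():
                tmp.write(chunk)
        file_move_safe(tmp_path, full_path, allow_overwrite=True)
        return name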
| 32.407407 | 79 | 0.652571 |
735a0631d562698eec79867185c8831049a8bf3f | 3,783 | py | Python | bin/dupeFinder.py | kebman/dupe-finder-py | 3ac23da711577466043b5032a4022516f4ccef95 | [
"BSD-3-Clause"
] | 1 | 2018-02-17T09:00:48.000Z | 2018-02-17T09:00:48.000Z | bin/dupeFinder.py | kebman/dupe-finder-py | 3ac23da711577466043b5032a4022516f4ccef95 | [
"BSD-3-Clause"
] | null | null | null | bin/dupeFinder.py | kebman/dupe-finder-py | 3ac23da711577466043b5032a4022516f4ccef95 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
import os
import hashlib
import datetime
import sqlite3
from sqlite3 import Error
def sha256(fname):
"""Return sha256 hash from input file (fname).
:param fname:
:return: Sha256 hash digest in hexadecimal"""
hash_sha256 = hashlib.sha256()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(65536), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def getHRT(timestamp):
"""Get human readable time from a Python timestamp.
:param timestamp:
:return: Human readable timestamp (HRT)"""
dtval = datetime.datetime.fromtimestamp(timestamp)
return dtval.strftime('%Y-%m-%d %H:%M:%S')
def getSQLT(timestamp):
"""Make timestamp for SQLite from Python timestamp, meaning a UNIX epoch INTEGER.
:param timestamp:
:return: SQLite compatible timestamp in the form of a UNIX epoch INTEGER"""
# I know this is a very small function, but now it's clear what SQL needs
return int(timestamp)
def create_connection(db_file):
"""Create a database connection to the SQLite database specified by db_file
:param db_file: database file
:return: Connection object or None"""
try:
connection = sqlite3.connect(db_file)
return connection
except Error as e:
print(e)
return None
def check_exists(connection, path):
"""Check the file path in the SQL filepaths table.
:param connection:
:param path:
:return: path id"""
exists = '''SELECT EXISTS(SELECT 1 FROM filepaths
WHERE filepath = ?);'''
cursor = connection.cursor()
cursor.execute(exists, (path,))
return cursor.fetchone()
def get_path(connection, path):
"""Get the file path in the SQL filepaths table.
:param connection:
:param path:
:return: path id"""
select = '''SELECT id FROM filepaths
WHERE filepath = ?;'''
cursor = connection.cursor()
cursor.execute(select, (path,))
return cursor.fetchone()[0]
def store_path(connection, path):
"""Store the file path in the SQL filepaths table.
:param connection:
:param path:
:return: path id"""
insert = '''INSERT OR IGNORE INTO filepaths(filepath)
VALUES(?)'''
cursor = connection.cursor()
cursor.execute(insert, (path,))
return cursor.lastrowid
def store_file(connection, file):
"""Store the file, hash and relevant file attributes in the SQL files table.
:param connection:
:param file:
:return: Filepath ID"""
sql = '''INSERT INTO files(filename, checksum, filesize, btime, ctime, mtime, filepath_id)
VALUES(?, ?, ?, ?, ?, ?, ?)'''
cursor = connection.cursor()
cursor.execute(sql, file)
return None
# return cursor.lastrowid
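# Hedged reconstruction of the missing driver (the original main() was not
# retained in this copy). Walks the working directory, hashes each file and
# stores its path and attributes; the database file name is an assumption.
def main():
    connection = create_connection('dupes.db')
    if connection is None:
        return
    for root, _dirs, fnames in os.walk('.'):
        store_path(connection, root)
        path_id = get_path(connection, root)
        for fname in fnames:
            full_path = os.path.join(root, fname)
            stat = os.stat(full_path)
            # st_ctime stands in for birth time, which not all platforms expose.
            record = (fname, sha256(full_path), stat.st_size,
                      getSQLT(stat.st_ctime), getSQLT(stat.st_ctime),
                      getSQLT(stat.st_mtime), path_id)
            store_file(connection, record)
    connection.commit()
    connection.close()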
if __name__ == '__main__':
main()
| 29.787402 | 111 | 0.694422 |
735afc924941206a74f98559fb49787e7b5af8e7 | 309 | py | Python | Python/factorialIterative.py | Ricardoengithub/Factorial | 0c45201bbe1ad94bf0d090381eb662cf2a281fda | [
"MIT"
] | null | null | null | Python/factorialIterative.py | Ricardoengithub/Factorial | 0c45201bbe1ad94bf0d090381eb662cf2a281fda | [
"MIT"
] | null | null | null | Python/factorialIterative.py | Ricardoengithub/Factorial | 0c45201bbe1ad94bf0d090381eb662cf2a281fda | [
"MIT"
] | null | null | null |
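# Hedged reconstruction: the function bodies were not retained in this copy,
# so this is a minimal iterative factorial consistent with the file name.
def factorial(n):
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result

def main():
    n = int(input('Enter a non-negative integer: '))
    print(f'{n}! = {factorial(n)}')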
if __name__ == "__main__":
main()
| 15.45 | 43 | 0.501618 |
735c291e6927c7998102106ab071603c6808076b | 5,919 | py | Python | code_old/sort.py | benwoo1110/A-List-of-Sorts-v2 | 2d404bda6c6ddc689e705cad6966f2a656ddac2f | [
"MIT"
] | 6 | 2020-06-29T01:57:44.000Z | 2022-01-14T09:00:03.000Z | code_old/sort.py | benwoo1110/A-List-of-Sorts-v2 | 2d404bda6c6ddc689e705cad6966f2a656ddac2f | [
"MIT"
] | null | null | null | code_old/sort.py | benwoo1110/A-List-of-Sorts-v2 | 2d404bda6c6ddc689e705cad6966f2a656ddac2f | [
"MIT"
] | 1 | 2021-03-26T04:30:37.000Z | 2021-03-26T04:30:37.000Z | ######################################
# Import and initialize the libraries #
#######################################
from code.pygame_objects import *
from code.algorithm.bubblesort import bubblesort
from code.algorithm.insertionsort import insertionsort
from code.algorithm.bogosort import bogosort
from code.algorithm.mergesort import mergesort
from code.algorithm.quicksort import quicksort
from code.algorithm.radixsort import radixsort
from code.algorithm.selectionsort import selectionsort
from code.algorithm.commonFunc import commonFunc
#################
# Setup logging #
#################
filename = os.path.basename(__file__).split('.')[0]
logger = log.get_logger(filename)
logger.info('Loading up {}...'.format(filename))
sort_screen = screen(
name = 'sort',
surfaceParameters = {
'frame': coord(w=1024, h=768)
},
objectsParameters = {
'background': {
'type': 'object',
'frame': {
'image': coord(w=1024, h=768)
},
},
'sort_title': {
'type': 'title',
'frame': {
'image': coord(w=1024, h=135)
},
},
'back': {
'type': 'button',
'frame': {
'box': coord(x=71, y=41, w=112, h=61),
'image': coord(x=71, y=41, w=112, h=61)
},
'runclass': runclass(action='go_back')
},
'info': {
'type': 'button',
'frame': {
'box': coord(x=841, y=40, w=112, h=61),
'image': coord(x=841, y=40, w=112, h=61),
},
'runclass': runclass(action='info')
},
'speed': {
'type': 'text',
'frame': {
'image': coord(x=349, y=630, w=254, h=40),
'text': coord(x=349, y=630, w=254, h=40)
},
'data': text(
text = '10',
editable = False,
suffix = ' sec per move',
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'moves': {
'type': 'text',
'frame': {
'image': coord(x=436, y=677, w=112, h=40),
'text': coord(x=436, y=677, w=112, h=40)
},
'data': moves(
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'time_taken': {
'type': 'text',
'frame': {
'image': coord(x=768, y=630, w=177, h=40),
'text': coord(x=768, y=630, w=177, h=40)
},
'data': timer(
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'list_length': {
'type': 'text',
'frame': {
'image': coord(x=759, y=677, w=112, h=186),
'text': coord(x=759, y=677, w=112, h=186)
},
'data': text(
text = '100',
editable = False,
suffix = ' bars',
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'sortbox': {
'type': 'object',
'frame': {
'box': coord(x=52, y=145, w=922, h=430),
'image': coord(x=52, y=145, w=922, h=430)
},
'data': sortbars(
bars=10,
),
'dataAddSelf': True,
}
}
)
runSort = {
'Bubble sort': bubblesort.run,
'Insertion sort': insertionsort.run,
'Merge sort': mergesort.run,
'Quick sort': quicksort.run,
'Radix sort': radixsort.run,
'Bogo sort': bogosort.run,
'Selection sort': selectionsort.run
}
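# Illustrative dispatch sketch (not part of the original file): the mapping
# above lets the GUI launch a sort by name; the run() signature is assumed.
#
#     runSort['Bubble sort'](sort_screen)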
| 31.994595 | 112 | 0.487751 |
735e36175591a886d021d1f42c7e0f23a0bc609d | 489 | py | Python | catkin_ws/src/tutorials/scripts/number_sub.py | vipulkumbhar/AuE893Spring19_VipulKumbhar | f741d5299b2804fd541b2bba64b8a4fba8521f33 | [
"MIT"
] | 3 | 2020-12-04T22:00:12.000Z | 2022-02-09T15:53:14.000Z | catkin_ws/src/tutorials/scripts/number_sub.py | vipulkumbhar/AuE893Spring19_VipulKumbhar | f741d5299b2804fd541b2bba64b8a4fba8521f33 | [
"MIT"
] | 1 | 2020-04-15T19:58:30.000Z | 2020-04-15T19:58:30.000Z | catkin_ws/src/tutorials/scripts/number_sub.py | vipulkumbhar/AuE893Spring19_VipulKumbhar | f741d5299b2804fd541b2bba64b8a4fba8521f33 | [
"MIT"
] | 1 | 2020-05-21T21:59:21.000Z | 2020-05-21T21:59:21.000Z | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int64
counter = 0
pub = None
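# Reconstructed subscriber callback (its original definition was not retained
# in this copy): accumulate incoming values and republish the running total.
def callback_number(msg):
    global counter
    counter += msg.data
    new_msg = Int64()
    new_msg.data = counter
    pub.publish(new_msg)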
if __name__ == '__main__':
rospy.init_node('number_counter')
sub = rospy.Subscriber("/number", Int64, callback_number)
    pub = rospy.Publisher("/number_count", Int64, queue_size=10)
rospy.spin()
| 18.807692 | 66 | 0.666667 |
735e8db4e1d5d21ba03d9d6374f1111bc5cde6f4 | 806 | py | Python | setup.py | eddo888/perdy | 616473e9bde3ad58dc1ebf054fb78a7cc48c3adf | [
"MIT"
] | null | null | null | setup.py | eddo888/perdy | 616473e9bde3ad58dc1ebf054fb78a7cc48c3adf | [
"MIT"
] | null | null | null | setup.py | eddo888/perdy | 616473e9bde3ad58dc1ebf054fb78a7cc48c3adf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import codecs
from os import path
from setuptools import setup
pwd = path.abspath(path.dirname(__file__))
with codecs.open(path.join(pwd, 'README.md'), 'r', encoding='utf8') as readme:
	long_description = readme.read()
version='1.7'
setup(
name='Perdy',
version=version,
license='MIT',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/eddo888/perdy',
download_url='https://github.com/eddo888/perdy/archive/%s.tar.gz'%version,
author='David Edson',
author_email='eddo888@tpg.com.au',
packages=[
'Perdy',
],
install_requires=[
'pytz',
'arrow',
'xmltodict',
'PyYAML',
'jsonpath',
'argcomplete',
'Baubles',
],
scripts=[
'bin/parser.py',
'bin/pyson.py',
'bin/colourize.py',
],
)
| 19.190476 | 77 | 0.682382 |
736056399bf64b21d6f7dca419596b81048da99f | 2,658 | py | Python | utils/slack_send.py | IntelliGrape/pennypincher | d0d503eb8a480bf28f308ff52834170cca5a53d7 | [
"MIT"
] | null | null | null | utils/slack_send.py | IntelliGrape/pennypincher | d0d503eb8a480bf28f308ff52834170cca5a53d7 | [
"MIT"
] | null | null | null | utils/slack_send.py | IntelliGrape/pennypincher | d0d503eb8a480bf28f308ff52834170cca5a53d7 | [
"MIT"
] | null | null | null | from tabulate import tabulate
from slack.errors import SlackApiError
import sys
import logging
import slack
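# Hedged sketch of the notifier this module presumably defined (the original
# body was not retained). slack.WebClient and chat_postMessage are the legacy
# slackclient v2 API matching the imports above; names below are assumptions.
def send_report(token, channel, headers, rows):
    client = slack.WebClient(token=token)
    text = '```' + tabulate(rows, headers=headers) + '```'
    try:
        client.chat_postMessage(channel=channel, text=text)
    except SlackApiError as e:
        logging.error('Slack notification failed: %s', e.response['error'])
        sys.exit(1)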
| 45.050847 | 122 | 0.598947 |
73612698a39e054c2b652bdecf1e853efdbc6d55 | 526 | py | Python | src/importer/importer.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | src/importer/importer.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | src/importer/importer.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | import logging
from abc import ABC, abstractmethod
from pony.orm import db_session, commit
log = logging.getLogger(__name__)
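# Hedged sketch of the abstract importer this module presumably defined (the
# original class body was not retained): subclasses yield rows and persist
# them inside a single pony.orm transaction.
class Importer(ABC):

    @abstractmethod
    def rows(self):
        """Yield the items to import."""

    @abstractmethod
    def save(self, row):
        """Persist a single row."""

    @db_session
    def run(self):
        for row in self.rows():
            self.save(row)
        commit()
        log.info('import finished: %s', self.__class__.__name__)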
| 21.04 | 52 | 0.653992 |
73617c822f5af71e4276c1b4c85554260d13ae06 | 982 | py | Python | news/pybo/migrations/0006_auto_20211010_0322.py | Smashh712/nrib | 375c9625e9efa6bb9a6f466312de3c6fcd5818a4 | [
"MIT"
] | null | null | null | news/pybo/migrations/0006_auto_20211010_0322.py | Smashh712/nrib | 375c9625e9efa6bb9a6f466312de3c6fcd5818a4 | [
"MIT"
] | null | null | null | news/pybo/migrations/0006_auto_20211010_0322.py | Smashh712/nrib | 375c9625e9efa6bb9a6f466312de3c6fcd5818a4 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-10-09 18:22
from django.db import migrations, models
| 28.882353 | 73 | 0.580448 |
7361d838090b7ba746e73857fad1d1b69e7ce317 | 852 | py | Python | anno_gen/modify_filesprocessed.py | KevinQian97/diva_toolbox | de83de7f7602665c92dca943ab2a0b4c1b2fdfde | [
"Apache-2.0"
] | null | null | null | anno_gen/modify_filesprocessed.py | KevinQian97/diva_toolbox | de83de7f7602665c92dca943ab2a0b4c1b2fdfde | [
"Apache-2.0"
] | null | null | null | anno_gen/modify_filesprocessed.py | KevinQian97/diva_toolbox | de83de7f7602665c92dca943ab2a0b4c1b2fdfde | [
"Apache-2.0"
] | 1 | 2021-09-29T04:10:10.000Z | 2021-09-29T04:10:10.000Z | import json
import os
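# Reconstructed helper (its original definition was not retained in this
# copy): map each processed file name to its index in the reference list.
def get_file_index(files):
    return {name: idx for idx, name in enumerate(files)}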
ref = json.load(open("/home/lijun/downloads/kf1_meta/references/kf1_all.json","r"))
files = ref["filesProcessed"]
print(len(files))
output = json.load(open("/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output.json","r"))
output["filesProcessed"] = files
jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output-mod.json"
with open(jname,'w') as j:
json.dump(output,j,indent=2,ensure_ascii=False)
file_dict = get_file_index(files)
jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/file-index.json"
with open(jname,'w') as j:
json.dump(file_dict,j,indent=2,ensure_ascii=False)
| 32.769231 | 107 | 0.738263 |
73622863ce396d64c3c5ebe2afec91bcbe2b4043 | 2,561 | py | Python | monotone_bipartition/search.py | mvcisback/monotone-bipartition | c92262fac14258ed25619681ebcb0f8734044d22 | [
"MIT"
] | 1 | 2017-05-17T22:47:33.000Z | 2017-05-17T22:47:33.000Z | monotone_bipartition/search.py | mvcisback/multidim-threshold | c92262fac14258ed25619681ebcb0f8734044d22 | [
"MIT"
] | 10 | 2019-04-01T17:05:14.000Z | 2020-05-01T17:23:18.000Z | monotone_bipartition/search.py | mvcisback/monotone-bipartition | c92262fac14258ed25619681ebcb0f8734044d22 | [
"MIT"
] | 4 | 2017-02-03T01:30:03.000Z | 2018-04-25T22:28:23.000Z | from enum import Enum, auto
import funcy as fn
import numpy as np
from monotone_bipartition import rectangles as mdtr
from monotone_bipartition import refine
EPS = 1e-4
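# The two definitions below are hedged stand-ins: binsearch() relies on them,
# but their original bodies were not retained in this copy of the module.
class SearchResultType(Enum):
    TRIVIALLY_FALSE = auto()
    TRIVIALLY_TRUE = auto()
    NON_TRIVIAL = auto()

def diagonal_convex_comb(r):
    """Map t in [0, 1] onto the bottom-left/top-right diagonal of rectangle r.

    Assumes rectangles expose ``bot`` and ``top`` corner coordinates.
    """
    bot, top = np.array(r.bot), np.array(r.top)
    return lambda t: (1 - t) * bot + t * top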
def binsearch(r, oracle, eps=EPS, find_lambda=False):
"""Binary search over the diagonal of the rectangle.
Returns the lower and upper approximation on the diagonal.
"""
f = diagonal_convex_comb(r)
feval = fn.compose(oracle, f)
lo, hi = 0, 1
# Early termination via bounds checks
if feval(lo):
result_type = SearchResultType.TRIVIALLY_TRUE
hi = 0
elif not feval(hi):
result_type = SearchResultType.TRIVIALLY_FALSE
else:
result_type = SearchResultType.NON_TRIVIAL
mid = lo
while hi - lo > eps:
mid = lo + (hi - lo) / 2
lo, hi = (lo, mid) if feval(mid) else (mid, hi)
if find_lambda:
if result_type == SearchResultType.TRIVIALLY_TRUE:
return result_type, -1
elif result_type == SearchResultType.TRIVIALLY_FALSE:
return result_type, 2
return result_type, (lo+hi)/2
else:
return result_type, mdtr.to_rec(zip(f(lo), f(hi)))
| 28.775281 | 77 | 0.636861 |
73623a0c8d94829ad21399f5bae6f22979a769e7 | 1,562 | py | Python | api/web/apps/auth/views.py | procool/itstructure | 6aa3a43e1a759f5509f130ddf911779645dc89d0 | [
"BSD-2-Clause"
] | null | null | null | api/web/apps/auth/views.py | procool/itstructure | 6aa3a43e1a759f5509f130ddf911779645dc89d0 | [
"BSD-2-Clause"
] | null | null | null | api/web/apps/auth/views.py | procool/itstructure | 6aa3a43e1a759f5509f130ddf911779645dc89d0 | [
"BSD-2-Clause"
] | null | null | null | from flask import url_for
from flaskcbv.view import View
from flaskcbv.conf import settings
from misc.mixins import HelperMixin
from misc.views import JSONView
| 26.474576 | 107 | 0.608195 |
7363b08e9959a774b4c96272382532b62b203a94 | 2,069 | py | Python | tests/test_heart_forest.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
] | 34 | 2020-03-06T07:53:43.000Z | 2022-03-13T06:12:29.000Z | tests/test_heart_forest.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
] | 6 | 2021-06-08T22:43:23.000Z | 2022-03-08T13:57:33.000Z | tests/test_heart_forest.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
] | 1 | 2020-11-30T21:20:32.000Z | 2020-11-30T21:20:32.000Z | from pykitml.testing import pktest_graph, pktest_nograph
if __name__ == '__main__':
try:
test_heart_forest.__wrapped__()
test_predict_heart_forest.__wrapped__()
except AssertionError:
pass | 29.140845 | 88 | 0.685839 |
736486ab642c356a4d5f9aa4e677a035c93276d3 | 25,682 | py | Python | pdf_audit.py | marctjones/perception | 9a9fe4e5cef6a2aa66544066d8c03e0e9c3b0528 | [
"MIT"
] | null | null | null | pdf_audit.py | marctjones/perception | 9a9fe4e5cef6a2aa66544066d8c03e0e9c3b0528 | [
"MIT"
] | null | null | null | pdf_audit.py | marctjones/perception | 9a9fe4e5cef6a2aa66544066d8c03e0e9c3b0528 | [
"MIT"
] | null | null | null | from globals import Globals
import os
import subprocess
import datetime as dt
from urllib import \
request as request
# urlopen
from io import \
StringIO, BytesIO
import string
import requests
import re
import csv
import threading
import utils as utils
import time
import datetime as datetime
import multiprocessing
from report import PDFItem
from PyPDF2 import PdfFileReader
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import resolve1
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
from pdfminer.layout import LAParams # , LTTextBox, LTTextLine
from threading import Thread, Event
stop_event = Event()
document = None  # module-level PDF document handle shared by the audit threads
| 43.825939 | 133 | 0.50035 |
7df69847b16a72c401c8d03768fb93c74d01b5c9 | 2,114 | py | Python | morepath/tests/test_method_directive.py | DuncanBetts/morepath | acad10489b051df9c512f6735a9338854745a599 | [
"BSD-3-Clause"
] | null | null | null | morepath/tests/test_method_directive.py | DuncanBetts/morepath | acad10489b051df9c512f6735a9338854745a599 | [
"BSD-3-Clause"
] | null | null | null | morepath/tests/test_method_directive.py | DuncanBetts/morepath | acad10489b051df9c512f6735a9338854745a599 | [
"BSD-3-Clause"
] | null | null | null | import morepath
from webtest import TestApp as Client
| 21.793814 | 72 | 0.580889 |
7df6fe1ea2b65847f447c2f9cd2b5b13e71d4aef | 14,020 | py | Python | edb/edgeql/tracer.py | hyperdrivetech/edgedb | 6d84d607889eca771e902f28c2329e388fd172b0 | [
"Apache-2.0"
] | null | null | null | edb/edgeql/tracer.py | hyperdrivetech/edgedb | 6d84d607889eca771e902f28c2329e388fd172b0 | [
"Apache-2.0"
] | null | null | null | edb/edgeql/tracer.py | hyperdrivetech/edgedb | 6d84d607889eca771e902f28c2329e388fd172b0 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2015-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import functools
import typing
from edb.schema import name as sn
from edb.schema import objects as so
from edb.edgeql import ast as qlast
def trace_refs(
qltree: qlast.Base,
*,
schema,
source: typing.Optional[sn.Name] = None,
subject: typing.Optional[sn.Name] = None,
path_prefix: typing.Optional[sn.Name] = None,
module: typing.Optional[str] = None,
objects: typing.Dict[str, object],
) -> typing.FrozenSet[sn.Name]:
"""Return a list of schema item names used in an expression."""
ctx = TracerContext(schema, module, objects,
source, subject, path_prefix)
trace(qltree, ctx=ctx)
return frozenset(ctx.refs)
def _resolve_type_expr(
texpr: qlast.TypeExpr, *,
ctx: TracerContext
) -> typing.Union[so.Object, UnionType]:
if isinstance(texpr, qlast.TypeName):
if texpr.subtypes:
return Type(name=texpr.maintype.name)
else:
refname = ctx.get_ref_name(texpr.maintype)
obj = ctx.objects.get(refname)
if obj is None:
obj = ctx.schema.get(refname)
else:
ctx.refs.add(refname)
return obj
elif isinstance(texpr, qlast.TypeOp):
if texpr.op == '|':
return UnionType([
_resolve_type_expr(texpr.left, ctx=ctx),
_resolve_type_expr(texpr.right, ctx=ctx),
])
else:
raise NotImplementedError(
f'unsupported type operation: {texpr.op}')
else:
raise NotImplementedError(
f'unsupported type expression: {texpr!r}'
)
| 27.984032 | 78 | 0.604922 |
7df75836ee916a28f4a031535dcb56b53a8daeb4 | 255 | py | Python | libraries/website/docs/snippets/envs/tree_to_list.py | justindujardin/mathy | 776ac528d4586d6ea558a7212adb3559ea487a3c | [
"MIT"
] | 95 | 2020-01-02T23:02:34.000Z | 2022-03-08T18:57:24.000Z | libraries/website/docs/snippets/envs/tree_to_list.py | justindujardin/mathy | 776ac528d4586d6ea558a7212adb3559ea487a3c | [
"MIT"
] | 44 | 2020-01-05T03:07:45.000Z | 2021-08-11T20:45:53.000Z | libraries/website/docs/snippets/envs/tree_to_list.py | justindujardin/mathy | 776ac528d4586d6ea558a7212adb3559ea487a3c | [
"MIT"
] | 5 | 2020-04-04T16:46:36.000Z | 2022-03-30T08:10:02.000Z | from typing import List
from mathy_core import ExpressionParser, MathExpression
parser = ExpressionParser()
expression: MathExpression = parser.parse("4 + 2x")
nodes: List[MathExpression] = expression.to_list()
# to_list() flattens the tree in visit order: [4, +, 2, *, x]
assert len(nodes) == 5
| 25.5 | 55 | 0.74902 |
7df75aa4524bb4f5a708857ab0d660fb8ccedfb8 | 603 | py | Python | math/0x04-convolutions_and_pooling/test/2-main.py | cbarros7/holbertonschool-machine_learning | 1edb4c253441f6319b86c9c590d1e7dd3fc32bf4 | [
"MIT"
] | 1 | 2022-03-09T19:12:22.000Z | 2022-03-09T19:12:22.000Z | math/0x04-convolutions_and_pooling/test/2-main.py | cbarros7/holbertonschool-machine_learning | 1edb4c253441f6319b86c9c590d1e7dd3fc32bf4 | [
"MIT"
] | null | null | null | math/0x04-convolutions_and_pooling/test/2-main.py | cbarros7/holbertonschool-machine_learning | 1edb4c253441f6319b86c9c590d1e7dd3fc32bf4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
convolve_grayscale_padding = __import__(
'2-convolve_grayscale_padding').convolve_grayscale_padding
if __name__ == '__main__':
dataset = np.load('../../supervised_learning/data/MNIST.npz')
images = dataset['X_train']
print(images.shape)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
images_conv = convolve_grayscale_padding(images, kernel, (2, 4))
print(images_conv.shape)
plt.imshow(images[0], cmap='gray')
plt.show()
plt.imshow(images_conv[0], cmap='gray')
plt.show()
| 27.409091 | 68 | 0.6733 |
7df78eabcc3fb72c6b36049cdb0e6b3517bdbd8a | 2,950 | py | Python | code.py | surojitnath/olympic-hero | aee1ddf291bf5097fa7fd5442483fbbe87ec001f | [
"MIT"
] | null | null | null | code.py | surojitnath/olympic-hero | aee1ddf291bf5097fa7fd5442483fbbe87ec001f | [
"MIT"
] | null | null | null | code.py | surojitnath/olympic-hero | aee1ddf291bf5097fa7fd5442483fbbe87ec001f | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data = pd.read_csv(path)    # 'path' is assumed to be provided by the exercise environment
data.rename(columns={'Total': 'Total_Medals'}, inplace=True)
data.head(10)
#Code starts here
# --------------
try:
    data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')
    data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'], 'Both', data['Better_Event'])
#print(data['Better_Event'])
Total_Count=data['Better_Event'].value_counts()
if(Total_Count[0]>Total_Count[1]):
better_event='Summer'
print(better_event)
print(data)
else:
better_event='Winter'
print(better_event)
except Exception:
print("code Failed")
else:
print("code passed Successfully")
# --------------
#Code starts here
top_countries= data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
#print(top_countries)
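#Reconstructed helper (original definition not retained in this copy): names
#of the ten countries with the most medals in the given column.
def top_ten(col):
    return list(top_countries.nlargest(10, col)['Country_Name'])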
top_10_summer = top_ten('Total_Summer')
top_10_winter = top_ten('Total_Winter')
top_10 = top_ten('Total_Medals')
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print("common",common)
# --------------
#Code starts here
summer_df =data[data['Country_Name'].isin(top_10_summer)]
winter_df =data[data['Country_Name'].isin(top_10_winter)]
top_df =data[data['Country_Name'].isin(top_10)]
# --------------
#Code starts here
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
data_1=data[:-1]
data_1['Total_Points']=pd.Series(data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total'])
print(data_1['Total_Points'])
most_points = max(data_1['Total_Points'])
print(most_points)
best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print(best_country)
# --------------
#Code starts here
best = pd.DataFrame(data[data['Country_Name']==best_country])
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
# Rotate X-axes labels
plt.xticks(rotation=45)
| 27.570093 | 109 | 0.694915 |
7df8cceb59a2bcfb8715aedd4215b42ada0971fd | 7,096 | py | Python | planes/kissSlope/kissSlopeWing2.py | alexpGH/blenderCadCamTools | 1db2a750ed227d46e174350a2e37c4951c669867 | [
"MIT"
] | 3 | 2020-12-28T11:58:26.000Z | 2021-05-31T03:03:04.000Z | planes/kissSlope/kissSlopeWing2.py | alexpGH/blenderCadCamTools | 1db2a750ed227d46e174350a2e37c4951c669867 | [
"MIT"
] | null | null | null | planes/kissSlope/kissSlopeWing2.py | alexpGH/blenderCadCamTools | 1db2a750ed227d46e174350a2e37c4951c669867 | [
"MIT"
] | null | null | null | import bpy
import math
import numpy as np
#=== add scripts dir to path
import sys
import os
#=== define path of scripts dir
libDir=bpy.path.abspath("//../../scripts/") # version1: relative to current file
#libDir="/where/you/placed/blenderCadCam/scripts/" #version 2: use an absolute path
if libDir not in sys.path:
sys.path.append(libDir)
#=== add local dir to path
dir = os.path.dirname(bpy.data.filepath)
if dir not in sys.path:
sys.path.append(dir)
#print(sys.path)
#=== blender imports modules only once even if the file changes. If we edit outside, we need to force a reload
from importlib import reload
#=== import scripts modules
import wingLib
reload(wingLib)
#===================================================================================================
#===
#===================================================================================================
if 0:
import ipdb
ipdb.set_trace()
ipdb.set_trace(context=5)
if 1:
#=== delete all but camera and lamp to start from a clean scene collection
wingLib.deleteAllButNames(['outl','outl2','myWing1','myWing2'])
#===================================================================================================
#=== basic geometry definition
#===================================================================================================
foilwidth=1.6
chAdditive=0.06 #we add this additive as a constant to the chordlength to generate a (towards the tip) increasing over-elliptic chord
chordlength=0.17
nSec=41*2
halfSpan=foilwidth/2.0
if 1:
#=============================================================
#=== prepare profiles
#=============================================================
f=libDir+'/AG25_resampled.dat'
cAG25, leAG25=wingLib.foilImport(f,'auto')
f=libDir+'/AG26_resampled.dat'
cAG26, leAG26=wingLib.foilImport(f,'auto')
f=libDir+'/AG14_resampled.dat'
cAG14, leAG14=wingLib.foilImport(f,'auto')
#f=libDir+'/AG27_resampled.dat'
#cAG27, leAG27=wingLib.foilImport(f,'auto')
#=== downsampling of the root profile - we don't need too fine a resolution for the CAM model
nPoints=100
cAG25r, leAG25r=wingLib.foildDataReduceToNpoints(cAG25,nPoints, True) #True: save trailing edge (keep 1st and last point)
pAG25r=wingLib.curveBezierFromPoints(cAG25r,'PAG25r',True,True)
#=== get & interpolate the outer profile on the root (necessary for morphing)
pAG26=wingLib.curveBezierFromPoints(cAG26,'PAG26',True,True)
pAG14=wingLib.curveBezierFromPoints(cAG14,'PAG14',True,True)
#pAG27=wingLib.curveBezierFromPoints(cAG27,'PAG27',True,True)
cAG14r=wingLib.interpolateBezier2on1(pAG25r, pAG14, leAG25r, leAG14, 40)
cAG26r=wingLib.interpolateBezier2on1(pAG25r, pAG26, leAG25r, leAG26, 40)
#cAG27_=wingLib.interpolateBezier2on1(pAG25, pAG27, leAG25, leAG27, 40)
#=== plot for check:
if 0:
pAG25=wingLib.curveBezierFromPoints(cAG25,'PAG25',True,True)
pAG14r=wingLib.curveBezierFromPoints(cAG14_,'PG14r',True,True)
pAG26r=wingLib.curveBezierFromPoints(cAG26_,'ProfileAG26r',True,True)
#=== clean-up
if 1:
wingLib.deleteByName('PAG25r')
wingLib.deleteByName('PAG14')
wingLib.deleteByName('PAG26')
# compile the coord dict for easy access
cDict={
"AG25": cAG25r,
"AG26": cAG26r,
"AG14": cAG14r,
#"AG27": cAG27_,
}
#=============================================================
#=== prepare base sections settings
#=============================================================
baseSectionsL=[]
baseSectionsL.append({"p":'AG25', "s":0.00*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG25', "s":0.05*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG26', "s":0.40*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG14', "s":0.95*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'})
baseSectionsL.append({"p":'AG14', "s":1.00*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'})
#=============================================================
#=== chordlength distribution
#=============================================================
#=== define section-wise ch extension
dChL=[]
dChL.append({"s": 0.00*halfSpan, "dy": chAdditive})
dChL.append({"s": 0.40*halfSpan, "dy": chAdditive})
dChL.append({"s": 0.95*halfSpan, "dy": chAdditive})
dChL.append({"s": 1.00*halfSpan, "dy": chAdditive})
#=== ellipse parameters
a=halfSpan
b=(chordlength-chAdditive)/2.0
#=== get/init the wing Data object
# for morphed profiles, le is the same
wingData=wingLib.WingFromSections(cDict, leAG25r, baseSectionsL, halfSpan, a, b, dChL)
if 1:
#=== get data for individual CAM sections
# get basic ellipse arc points in 1st and 2nd quadrant (the unshifted leading edge) and chordlength
x,y=wingLib.ellipseParamV(a,b,nSec)
ch=np.multiply(y,2.0)#
#==adapted chordlength
ch=wingLib.chordExtensionLinear(ch, x, dChL)
#shellthickness
#thickness=1.0
#=== set 2d profile to be used (gives us a function reference used later)
func4coords=wingData.coords
quality='none'
#plot Re(span)
if 0:
v=8.0# determined from stall velocity, see e.g. https://alexpgh.github.io/foss-toolchain-mpcnc/blenderKissSlope/#wing-loading-and-re
v2=9.7
#v3=15.0
#v4=30.0
#v5=45.0
nu=1.52E-05
outFile=bpy.path.abspath("//Fig_ReSpan_fast.png")
Re=[]
Re.append(np.multiply(ch,v/nu))
Re.append(np.multiply(ch,v2/nu))
#Re.append(np.multiply(ch,v3/nu))
#Re.append(np.multiply(ch,v4/nu))
#Re.append(np.multiply(ch,v5/nu))
numpy_array = np.array(Re)
transpose = numpy_array.T
#legend=[str(v)+' m/s', str(v2), str(v3),str(v4),str(v5)]
legend=[]
#n=int(len(Re)/2)+1
n=int(transpose.shape[0]/2)+1
#import ipdb
#ipdb.set_trace()
#ipdb.set_trace(context=5)
#wingLib.plotArray(x[0:n],Re[0:n],'Re(span)',outFile)
#wingLib.plotArray(x,Re,'Re(span)',outFile)
wingLib.plotArray(x[0:n],transpose[0:n,:],'Re(span)', legend, outFile)
import ipdb
ipdb.set_trace()
ipdb.set_trace(context=5)
#=== leading edge shift definition
LeShiftL=[]
LeShiftL.append(wingLib.LeShift('elliptic',0.04, 0.5, 1.0,foilwidth/2.0))
ysh=wingLib.applyLeShifts(x,y, LeShiftL)
#placeSections(x,ysh,ch)
sectionNames=wingLib.placeSectionsMinLimited(x,ysh,ch,0.001,func4coords,quality)
if 1:
wingLib.bridgeListOfEdgeLoopsCloseOuterWithFace(sectionNames,'myWing')
#shift to origin
bpy.context.object.location[1] = -chordlength/2.0
bpy.context.object.location[2] = 0.0
| 29.322314 | 140 | 0.567644 |
7dfb15185b5928b42e0c69caa80b31116a8fea1a | 1,715 | py | Python | saleor/order/migrations/0072_django_price_2.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 19 | 2019-12-03T17:28:07.000Z | 2021-09-10T21:30:52.000Z | saleor/order/migrations/0072_django_price_2.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 51 | 2019-12-06T08:06:07.000Z | 2021-05-06T02:10:50.000Z | saleor/order/migrations/0072_django_price_2.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 20 | 2020-02-03T00:38:59.000Z | 2022-01-03T13:07:52.000Z | # Generated by Django 2.2.4 on 2019-08-14 09:13
from django.conf import settings
from django.db import migrations, models
| 31.181818 | 85 | 0.58484 |
7dfb769eb03d5be318cb102a630728947e956816 | 9,382 | py | Python | miping/training/features.py | mclgoerg/MiningPersonalityInGerman | 4c5811a0f72100b7afef9695475a6de9251444b7 | [
"Apache-2.0"
] | 1 | 2020-09-11T01:11:19.000Z | 2020-09-11T01:11:19.000Z | miping/training/features.py | mclgoerg/MiningPersonalityInGerman | 4c5811a0f72100b7afef9695475a6de9251444b7 | [
"Apache-2.0"
] | null | null | null | miping/training/features.py | mclgoerg/MiningPersonalityInGerman | 4c5811a0f72100b7afef9695475a6de9251444b7 | [
"Apache-2.0"
] | 2 | 2020-08-12T15:57:06.000Z | 2020-12-17T18:11:03.000Z | import numpy as np
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from ..models.profile import Profile
from ..interfaces.helper import Helper
from ..interfaces.glove import GloVe
from .noGloveValueError import NoGloveValueError
| 32.351724 | 77 | 0.588361 |
7dfc55af75328775b1d9e9abc358301541231f7c | 1,383 | py | Python | tests/unit/test_serializers.py | launchpadrecruits/placebo | 7b6db70a341d935a2e250b76d1ea47e56e8c9d92 | [
"Apache-2.0"
] | 1 | 2019-06-10T13:52:41.000Z | 2019-06-10T13:52:41.000Z | tests/unit/test_serializers.py | launchpadrecruits/placebo | 7b6db70a341d935a2e250b76d1ea47e56e8c9d92 | [
"Apache-2.0"
] | 1 | 2018-10-01T13:11:50.000Z | 2018-10-01T13:11:50.000Z | tests/unit/test_serializers.py | launchpadrecruits/lpr-placebo | 7b6db70a341d935a2e250b76d1ea47e56e8c9d92 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
import json
from placebo.serializer import serialize, deserialize, utc
date_sample = {
"LoginProfile": {
"UserName": "baz",
"CreateDate": datetime.datetime(2015, 1, 4, 9, 1, 2, 0, tzinfo=utc),
}
}
date_json = """{"LoginProfile": {"CreateDate": {"__class__": "datetime", "day": 4, "hour": 9, "microsecond": 0, "minute": 1, "month": 1, "second": 2, "year": 2015}, "UserName": "baz"}}"""
| 33.731707 | 187 | 0.707158 |
7dfc5fe7b48790825f5784ca8956028cbaaac9a8 | 1,267 | py | Python | Chapter11/web_03.py | vabyte/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
] | 84 | 2018-08-09T09:30:03.000Z | 2022-01-04T23:20:38.000Z | Chapter11/web_03.py | jiro74/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
] | 1 | 2019-11-04T18:57:40.000Z | 2020-09-07T08:52:25.000Z | Chapter11/web_03.py | jiro74/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
] | 33 | 2018-09-26T11:05:55.000Z | 2022-03-15T10:31:10.000Z | import urllib.request
import urllib.parse
import json
def http_request(url, query=None, method=None, headers={}, data=None):
"""Perform an HTTP request and return the associated response."""
parts = vars(urllib.parse.urlparse(url))
if query:
parts['query'] = urllib.parse.urlencode(query)
url = urllib.parse.ParseResult(**parts).geturl()
r = urllib.request.Request(url=url, method=method, headers=headers,
data=data)
with urllib.request.urlopen(r) as resp:
msg, resp = resp.info(), resp.read()
if msg.get_content_type() == 'application/json':
resp = json.loads(resp.decode('utf-8'))
return msg, resp
if __name__ == '__main__':
msg, resp = http_request(
'https://httpbin.org/get',
query={
'a': 'Hello',
'b': 'World'
}
)
print(msg.get_content_type(), resp)
msg, resp = http_request('https://httpbin.org/bytes/16')
print(msg.get_content_type(), resp)
msg, resp = http_request('https://httpbin.org/post', method='POST',
data='This is my posted data!'.encode('ascii'),
headers={'Content-Type': 'text/plain'})
print(msg.get_content_type(), resp) | 31.675 | 75 | 0.594317 |
7dfda8cef5923a2a0d78158e8c874838389cfd46 | 3,678 | py | Python | src/oci/dns/models/external_master.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/dns/models/external_master.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/dns/models/external_master.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
| 27.244444 | 245 | 0.609027 |
7dff476f5b07538c175407ac6793f4c21aad8c8f | 899 | py | Python | cinder/tests/unit/backup/fake_service_with_verify.py | puremudassir/cinder | 99aad0d094e726d328ea815cea8ebdc14957da8c | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/backup/fake_service_with_verify.py | puremudassir/cinder | 99aad0d094e726d328ea815cea8ebdc14957da8c | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/backup/fake_service_with_verify.py | puremudassir/cinder | 99aad0d094e726d328ea815cea8ebdc14957da8c | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.backup import driver
from cinder.tests.unit.backup import fake_service
| 37.458333 | 78 | 0.719689 |
b400a722c717d6322475d075e5e6ca07343e213f | 2,195 | py | Python | src/fasttick.py | JevinJ/Bittrex-Notify | ea1057fa2fd59d191893eb7a4c31f35db789ba29 | [
"MIT"
] | 12 | 2017-08-15T08:40:44.000Z | 2018-01-30T20:55:20.000Z | src/fasttick.py | alimogh/BittrexNotify | ea1057fa2fd59d191893eb7a4c31f35db789ba29 | [
"MIT"
] | 5 | 2017-08-30T15:46:03.000Z | 2018-02-16T09:18:27.000Z | src/fasttick.py | alimogh/BittrexNotify | ea1057fa2fd59d191893eb7a4c31f35db789ba29 | [
"MIT"
] | 3 | 2017-08-28T17:58:03.000Z | 2017-12-05T02:05:18.000Z | import config
import misc
def heartbeat():
"""
Processes data from Bittrex into a simpler dictionary,
calls the save function on it, deletes the oldest
saved dictionary(if it's out of lookback range), and finally
creates a list of the best coins to be used in tkinter listboxes.
:return: A list containing triples of (coin name, increase rate, volume)
"""
data = misc.retrieve_data()
# Processing for saving latest data from Bittrex API
latest_data = {}
for d in data.get('result', {}):
name = d.get('Market', {}).get('MarketCurrencyLong', '')
last_price = d.get('Summary', {}).get('Last', 0.0)
last_vol = d.get('Summary', {}).get('BaseVolume', 0.0)
base_currency = d.get('Market', {}).get('BaseCurrency', '')
if base_currency == 'BTC' and last_price >= \
config.FASTTICK_MIN_PRICE and last_vol >= config.FASTTICK_MIN_VOL:
latest_data[name] = {'Summary': d['Summary']}
# Processing all data within 9 ticks + latest and returning
# rate for output in GUI
prev_data = list(misc.open_pickles('fasttick_history', config.FASTTICK_LOOKBACK))
prev_data.append(latest_data)
ticker_data = []
if prev_data:
for name in latest_data:
prev_changes = []
for i in range(len(prev_data)-1):
old_price = float(prev_data[i].get(name, {}).get('Summary', {}).get('Last', 0.0))
new_price = float(prev_data[i+1].get(name, {}).get('Summary', {}).get('Last', 0.0))
if old_price != 0:
change = (((new_price - old_price) / old_price) * 100)
prev_changes.append(change)
if prev_changes:
volume = float(latest_data.get(name, {}).get('Summary', {}).get('BaseVolume', 0.0))
average_rate = float((sum(prev_changes) / len(prev_changes)))
if average_rate >= config.FASTTICK_MIN_RATE:
ticker_data.append((name, average_rate, volume))
misc.save_pickle(latest_data, 'fasttick_history')
misc.delete_ancient_pickles('fasttick_history', config.FASTTICK_LOOKBACK)
return ticker_data | 45.729167 | 99 | 0.615034 |
b4012c4378e508ce63325920dec3916fc3ec12bc | 2,325 | py | Python | src/mlb_statsapi/model/api/game.py | power-edge/mlb_statsapi_etl | 9cca2ae059e8aab98ed460e7b71ad6eeeed09ffe | [
"Apache-2.0"
] | null | null | null | src/mlb_statsapi/model/api/game.py | power-edge/mlb_statsapi_etl | 9cca2ae059e8aab98ed460e7b71ad6eeeed09ffe | [
"Apache-2.0"
] | null | null | null | src/mlb_statsapi/model/api/game.py | power-edge/mlb_statsapi_etl | 9cca2ae059e8aab98ed460e7b71ad6eeeed09ffe | [
"Apache-2.0"
] | null | null | null | """
created by nikos at 4/26/21
"""
import datetime
from ..base import MLBStatsAPIEndpointModel
from mlb_statsapi.utils.stats_api_object import configure_api
YMDTHMS = '%Y-%m-%dT%H:%M:%SZ'
YYYYMMDD_HHMMSS = '%Y%m%d_%H%M%S'
MMDDYYYY_HHMMSS = '%m%d%Y_%H%M%S'
| 25.549451 | 64 | 0.667957 |