Dataset schema (seven columns per record; string columns report min/max length, numeric columns report min/max value):

    max_stars_repo_path    string     lengths 3 to 269
    max_stars_repo_name    string     lengths 4 to 119
    max_stars_count        int64      0 to 191k
    id                     string     lengths 1 to 7
    content                string     lengths 6 to 1.05M
    score                  float64    0.23 to 5.13
    int_score              int64      0 to 5

Each record below lists these seven fields in this order.
mapping/sandbox/graphslam/graphslam_pipeline.py
sameeptandon/sail-car-log
1
11900
<reponame>sameeptandon/sail-car-log import os from os.path import join as pjoin from subprocess import check_call from ruffus import files, follows, pipeline_run, pipeline_printout, pipeline_printout_graph, jobs_limit from graphslam_config import GRAPHSLAM_PATH,\ GRAPHSLAM_MATCH_DIR, GRAPHSLAM_OPT_POS_DIR, GRAPHSLAM_ALIGN_DIR,\ MATCHES_FILE, GPS_FILES, RSS_LIST, GRAPHSLAM_OUT_DIR, GRAPHSLAM_DIRS,\ GRAPHSLAM_MAPS_DIR, GRAPHSLAM_VIDEOS_DIR, GRAPHSLAM_EVAL_DIR from pipeline_config import NUM_CPUS, SAIL_CAR_LOG_PATH from pipeline_utils import print_and_call, touchf @files(None, MATCHES_FILE) def match_traces(dummy, output_file): cmd = 'python %s/match_traces.py %s' % (GRAPHSLAM_PATH, GRAPHSLAM_MATCH_DIR) print_and_call(cmd) # NOTE Have to rerun this after match_traces is run @follows('match_traces') @files(zip(GPS_FILES, [pjoin(GRAPHSLAM_OPT_POS_DIR, '--'.join(rss) + '.npz') for rss in RSS_LIST], GPS_FILES)) def solve_qps(gps_src_file, output_file, gps_tgt_file): cmd = 'python %s/solve_qp.py %s %s %s' % (GRAPHSLAM_PATH, gps_src_file, gps_tgt_file, output_file) print_and_call(cmd) @follows('solve_qps') @jobs_limit(1) @files(MATCHES_FILE, '%s/run_pipeline_sentinel' % GRAPHSLAM_OUT_DIR) def run_pipelines(dummy, sentinel): for route, segment, split in RSS_LIST: cmd = 'export SCL_ROUTE=%s; export SCL_SEGMENT=%s; export SCL_SPLIT=%s; python %s/mapping/pipeline/pipeline.py run estimate_normals' % (route, segment, split, SAIL_CAR_LOG_PATH) print_and_call(cmd) touchf('%s/run_pipeline_sentinel' % GRAPHSLAM_OUT_DIR) def clean_pipelines(): for route, segment, split in RSS_LIST: cmd = 'export SCL_ROUTE=%s; export SCL_SEGMENT=%s; export SCL_SPLIT=%s; python %s/mapping/pipeline/pipeline.py clean' % (route, segment, split, SAIL_CAR_LOG_PATH) print_and_call(cmd) @follows('run_pipelines') @files('%s/run_pipeline_sentinel' % GRAPHSLAM_OUT_DIR, '%s/chunk_and_align_sentinel' % GRAPHSLAM_ALIGN_DIR) def chunk_and_align(dummy, sentinel): cmd = 'python %s/chunk_and_align.py' % GRAPHSLAM_PATH print_and_call(cmd) touchf('%s/chunk_and_align_sentinel' % GRAPHSLAM_ALIGN_DIR) @follows('chunk_and_align') @files('%s/chunk_and_align_sentinel' % GRAPHSLAM_ALIGN_DIR, '%s/export_maps_sentinel' % GRAPHSLAM_MAPS_DIR) def export_maps(dummy, sentinel): cmd = 'python scripts/export_maps.py' print_and_call(cmd) touchf('%s/export_maps_sentinel' % GRAPHSLAM_MAPS_DIR) @follows('export_maps') @files('%s/export_maps_sentinel' % GRAPHSLAM_MAPS_DIR, '%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR) def align_maps(dummy, sentinel): cmd = 'python scripts/align_maps_all.py' print_and_call(cmd) touchf('%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR) @follows('align_maps') @files('%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR, '%s/eval_maps_sentinel' % GRAPHSLAM_EVAL_DIR) def eval_maps(dummy, sentinel): cmd = 'python scripts/eval_maps.py' print_and_call(cmd) touchf('%s/eval_maps_sentinel' % GRAPHSLAM_EVAL_DIR) @follows('eval_maps') @files('%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR, '%s/generate_videos_sentinel' % GRAPHSLAM_VIDEOS_DIR) def generate_videos(dummy, sentinel): cmd = 'python scripts/generate_videos.py' print_and_call(cmd) touchf('%s/generate_videos_sentinel' % GRAPHSLAM_VIDEOS_DIR) def clean(): for d in GRAPHSLAM_DIRS: print 'deleting %s' % d if os.path.exists(d): check_call('rm -r %s' % d, shell=True) if __name__ == '__main__': import sys if len(sys.argv) < 2: print 'Usage: python graphslam_pipeline.py print,graph,run (task1,task2)' sys.exit(1) TORUN = [ ] if len(sys.argv) == 3: TORUN = sys.argv[2].split(',') CMDS = sys.argv[1].split(',') 
tasks = { 'print': lambda: pipeline_printout(sys.stdout, TORUN, forcedtorun_tasks=[], verbose=5), 'graph': lambda: pipeline_printout_graph('graph.jpg', 'jpg', TORUN, forcedtorun_tasks=[], no_key_legend=False), 'run': lambda: pipeline_run(TORUN, multiprocess=NUM_CPUS, one_second_per_job=False), 'force': lambda: pipeline_run([], forcedtorun_tasks=TORUN, multiprocess=NUM_CPUS, one_second_per_job=False), 'printf': lambda: pipeline_printout(sys.stdout, [], forcedtorun_tasks=TORUN, verbose=2), 'clean': clean, 'clean_pipelines': clean_pipelines } for key in tasks: if key in CMDS: tasks[key]()
2.0625
2
sandbox/wavelets.py
EtalumaSupport/LumaViewPro
0
11901
import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel

ricker_2d_kernel = RickerWavelet2DKernel(5)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
print(ricker_2d_kernel)
2.96875
3
tests/test_errors.py
raymundl/firepit
0
11902
<filename>tests/test_errors.py import os import pytest from firepit.exceptions import IncompatibleType from firepit.exceptions import InvalidAttr from firepit.exceptions import InvalidStixPath from firepit.exceptions import InvalidViewname from firepit.exceptions import StixPatternError from .helpers import tmp_storage @pytest.fixture def invalid_bundle_file(): cwd = os.path.dirname(os.path.abspath(__file__)) return os.path.join(cwd, 'test_error_bundle.json') def test_local(invalid_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [invalid_bundle_file]) def test_extract_bad_stix_pattern(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) with pytest.raises(StixPatternError): store.extract('junk', 'ipv4-addr', 'q1', "whatever") def test_filter_bad_stix_pattern(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(StixPatternError): store.filter('junk', 'url', 'urls', "value = 'http://www26.example.com/page/176'") def test_filter_bad_input_view(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(InvalidViewname): store.filter('junk', 'url', 'urls OR 1', "[url:value = 'http://www26.example.com/page/176']") def test_sqli_1(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(InvalidViewname): store.lookup('urls" UNION ALL SELECT * FROM "q1_url') def test_sqli_2(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(InvalidAttr): store.values('url:fake.path', 'urls') with pytest.raises(InvalidStixPath): store.values('value" FROM "q1_ipv4-addr" UNION ALL SELECT "value', 'urls') def test_sqli_3(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") res = store.load('test_urls', [ { 'type': 'url', 'value': 'http://www26.example.com/page/176', 'risk': 'high', }, { 'type': 'url', 'value': 'http://www67.example.com/page/264', 'risk': 'high', } ]) with pytest.raises(InvalidViewname): store.join('sqli" AS SELECT * FROM "q1_url"; CREATE VIEW "marked', 'urls', 'value', 'test_urls', 'value') def test_empty_results(fake_bundle_file, tmpdir): """Look for finding objects that aren't there""" store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('my_findings', 'x-ibm-finding', 'q1', "[x-ibm-finding:name = 'Whatever']") findings = store.lookup('my_findings') assert findings == [] def test_lookup_bad_columns(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(InvalidAttr): store.lookup('urls', cols="1; select * from urls; --") def test_lookup_bad_offset(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(TypeError): store.lookup('urls', offset="1; select * from urls; --") def test_bad_groupby(fake_bundle_file, fake_csv_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', 
[fake_bundle_file]) store.extract('users', 'user-account', 'q1', "[ipv4-addr:value LIKE '10.%']") with pytest.raises(InvalidStixPath): store.assign('grouped_users', 'users', op='group', by='1,extractvalue(0x0a,concat(0x0a,(select database())))--') def test_assign_bad_columns(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(InvalidStixPath): store.assign('sorted', 'urls', op='sort', by='value LIMIT 1; SELECT * FROM "urls"') def test_sort_bad_limit(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('q1', [fake_bundle_file]) store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']") with pytest.raises(TypeError): store.assign('sorted', 'urls', op='sort', by='value', limit='1; SELECT 1; --') def test_merge_fail(fake_bundle_file, tmpdir): store = tmp_storage(tmpdir) store.cache('test-bundle', [fake_bundle_file]) store.extract('urls', 'url', 'test-bundle', "[url:value LIKE '%page/1%']") store.extract('ips', 'ipv4-addr', 'test-bundle', "[ipv4-addr:value != '8.8.8.8']") with pytest.raises(IncompatibleType): store.merge('merged', ['urls', 'ips'])
2.125
2
script/run_scribus.py
csneofreak/public-domain-season-songs
14
11903
#!/usr/bin/python # -*- coding: utf-8 -*- import time import json import os import math import scribus import simplebin import inspect from collections import defaultdict PWD = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) def pwd(path): return os.path.join(PWD, path); DATA_FILE = pwd("data.json") CACHE_FILE = pwd("cache.json") MANUEL_PROCESSING_FILE = pwd("manual_processing.json") FILES = pwd("lily_output/") FAST = False # use this to debug SPACING_SONGS = 10 EFFECTIVE_PAGE_HEIGHT = 255 + SPACING_SONGS SPACING_HEADLINE_SONG = 18 SPACING_SONG_TEXT = 5 PAGE_NUM_HEIGHT = 5 BASELINE_GRID = 5 def init(): scribus.openDoc(pwd("init.sla")) scribus.saveDocAs("/tmp/{}.sla".format(time.time())) scribus.setUnit(scribus.UNIT_MM) def front_matter(): # load pages from other document if not os.path.exists(pwd("front_matter.sla")): print "not front matter, file not found!" return scribus.openDoc(pwd("front_matter.sla")) pages = scribus.pageCount() scribus.closeDoc() scribus.importPage( pwd("front_matter.sla"), # filename tuple(range(1, pages+1)), # range of pages to import 1, # insert (1) or replace(0) 0, # where to insert ) scribus.gotoPage(pages+1) def fit_height(textbox): # come to a state that the text box does not overflow: width, height = scribus.getSize(textbox) to_add = height + 1 while scribus.textOverflows(textbox): scribus.sizeObject(width, height + to_add, textbox) to_add = to_add * 2 # reduce height step = height/2 overflows = False counter = 0 while step > 0.05 or overflows: counter += 1 width, old_height = scribus.getSize(textbox) if scribus.textOverflows(textbox): scribus.sizeObject(width, old_height + step, textbox) else: scribus.sizeObject(width, old_height - step, textbox) step = step * 0.5 overflows = scribus.textOverflows(textbox) def new_page(): scribus.newPage(-1) scribus.gotoPage(scribus.pageCount()) add_page_number() def add_page_number(): page_num = scribus.pageCount() page_width, page_height, margin_top, margin_left, margin_right, margin_bottom = page_size_margin(page_num) textbox = scribus.createText(margin_left, page_height-margin_bottom, page_width-margin_left-margin_right, PAGE_NUM_HEIGHT) scribus.setStyle("pagenumber_{}".format(get_style_suffix()), textbox) scribus.insertText(str(page_num), 0, textbox) scribus.deselectAll() def page_size_margin(page_num): size = scribus.getPageNSize(page_num) margin = scribus.getPageNMargins(page_num) return size + margin def get_style_suffix(): page_num = scribus.pageCount() style_suffix = "r" # is this really the right way? is there no shortcut provided by scribus? 
if page_num % 2 == 0: style_suffix = "l" return style_suffix def load_song(data, offset, settings): page_num = scribus.pageCount() page_width, page_height, margin_top, margin_left, margin_right, margin_bottom = page_size_margin(page_num) start_point = margin_top + offset new_width = page_width - margin_left - margin_right if not FAST: scribus.placeEPS(os.path.join(FILES, data["filename"]), 0, 0) eps = scribus.getSelectedObject() eps_width, eps_height = scribus.getSize(eps) #scribus.scaleGroup(new_width/eps_width) # slow on scribus 1.4; does something else on scribus 1.5 scribus.sizeObject(eps_width*0.86, eps_height*0.86, eps) scribus.moveObjectAbs(margin_left, start_point+SPACING_HEADLINE_SONG, eps) eps_width, eps_height = scribus.getSize(eps) else: eps_height = 0 scribus.deselectAll() textbox = scribus.createText(margin_left, start_point, new_width, 20) style_suffix = get_style_suffix() if data["composer"]: scribus.deselectAll() scribus.insertText(u"{}\n".format(data["composer"]), 0, textbox) scribus.selectText(0, 1, textbox) scribus.setStyle("subline_{}".format(style_suffix), textbox) if data["poet"]: scribus.deselectAll() scribus.insertText(u"{}\n".format(data["poet"]), 0, textbox) scribus.selectText(0, 1, textbox) scribus.setStyle("subline_{}".format(style_suffix), textbox) scribus.deselectAll() scribus.insertText(u"{}\n".format(data["name"]), 0, textbox) scribus.selectText(0, 1, textbox) scribus.setStyle("headline_{}".format(style_suffix), textbox) text = data["text"] text = [t.strip() for t in text if t.strip() != ""] # TODO: exit if text == [] textbox = scribus.createText(margin_left, start_point + eps_height + SPACING_HEADLINE_SONG + SPACING_SONG_TEXT, new_width, 50) scribus.setStyle("text", textbox) # let's see how many digits are in there: num_verses = len([l for l in text if l.isdigit()]) num_chars = 0 num_line_total = len(text) num_line_actually = 0 no_new_line = False verse_counter = 0 text_columns_height = 0 # TODO: should be None for num_line, line in enumerate(text): if line.strip == "": continue num_line_actually += 1 if line.isdigit(): print "#", verse_counter, math.ceil(num_verses * 0.5), num_verses, data["filename"] if verse_counter == math.ceil(num_verses*0.5): # this is the first verse that should be in the new column, so let's see what's the height print text_columns_height, num_line_actually text_columns_height = BASELINE_GRID * (num_line_actually -1) first_char = "\n" if num_line == 0: first_char = "" no_new_line = True line = u"{}{}.\t".format(first_char, line) scribus.insertText(line, -1, textbox) scribus.deselectAll() scribus.selectText(num_chars, len(line), textbox) #scribus.setStyle("num", textbox) # no character styles available #scribus.setFontSize(5, textbox) # TODO: testing only # BUG? 
scribus.setFont("Linux Libertine O Bold", textbox) num_chars += len(line) verse_counter += 1 else: if no_new_line: first_char = "" else: first_char = chr(28) no_new_line = False line = u"{}{}".format(first_char, line) scribus.insertText(line, -1, textbox) #scribus.deselectAll() #scribus.selectText(num_chars, len(line), textbox) #scribus.setStyle("text", textbox) num_chars += len(line) scribus.setColumnGap(5, textbox) columns = settings.get("columns", 2) scribus.setColumns(columns, textbox) if columns != 2: fit_height(textbox) else: scribus.sizeObject(new_width, text_columns_height, textbox) l, t = scribus.getPosition(textbox) scribus.moveObjectAbs(l, round(t/BASELINE_GRID)*BASELINE_GRID, textbox) if scribus.textOverflows(textbox): fit_height(textbox) # there are some cases,.. text_width, text_height = scribus.getSize(textbox) text_left, text_top = scribus.getPosition(textbox) return text_top + text_height - start_point + SPACING_SONGS, page_num def create_toc(data): if not scribus.objectExists("TOC"): new_page() page_width, page_height, margin_top, margin_left, margin_right, margin_bottom = page_size_margin(1) toc = scribus.createText(margin_left, margin_top, page_width-margin_right-margin_left, page_height-margin_top-margin_bottom) scribus.setNewName("TOC", toc) scribus.insertText("provide a textframe with name 'TOC' in front_matter.sla and i will not create the toc at the end of the document", 0, "TOC") text = "\n".join(("{}\t{}".format(title, pagenum) for (title, pagenum) in data)) scribus.insertText(text, -1, "TOC") def add_songs(all_songs, songs_double_page, manual_processing, songs_data, cache): # let's get the best sorting songs_combined = simplebin.best_fit(all_songs, EFFECTIVE_PAGE_HEIGHT) # sorting the songs alphabetic songs_sorted = sorted(songs_combined, key=lambda x: x[0]) # make sure the double page will be added on the left side page_num = scribus.pageCount() for double_page in songs_double_page: if not double_page in all_songs: continue offset = songs_sorted.index([double_page]) songs_sorted.insert(offset+1, None) # add a empty page after the song if (page_num + offset) % 2 != 0: # song is on right side, empty side on the left side. songs_sorted.insert(offset, songs_sorted.pop(offset+2)) # move next song before the double page # TODO: what if double sided song is last song? 
for songs in songs_sorted: current_pos = 0 if songs == None: # we added this for a song that should be set on double page new_page() continue for filename in songs: if not manual_processing[filename].get("show", True): continue data = songs_data[filename] height, page_num = load_song(data, current_pos, manual_processing[filename]) current_pos += math.ceil(height/BASELINE_GRID) * BASELINE_GRID cache[filename]["height"] = round(height, 2) cache[filename]["page"] = page_num scribus.progressSet(1) if current_pos != 0: new_page() def main(): cache = defaultdict(dict) try: with open(CACHE_FILE, "rb") as cache_file: cache = defaultdict(dict, json.load(cache_file)) except: pass with open(DATA_FILE, "rb") as data_file: songs_data = json.load(data_file) with open(MANUEL_PROCESSING_FILE, "rb") as manual_file: manual_processing = defaultdict(dict, json.load(manual_file)) scribus.statusMessage("Running script...") scribus.progressReset() scribus.progressTotal(len(songs_data)) init() front_matter() add_page_number() # trying to get the best sorting # setting all songs to the max height all_songs = dict(zip(songs_data.keys(), [EFFECTIVE_PAGE_HEIGHT] * len(songs_data))) # update according to cache for song_name, data in cache.iteritems(): all_songs[song_name] = min(data.get("height", EFFECTIVE_PAGE_HEIGHT), EFFECTIVE_PAGE_HEIGHT) # let's see which songs should be set on a double sided page: songs_double_page = filter(lambda x: manual_processing[x].get("double_page", False), manual_processing) for double_page in songs_double_page: all_songs[double_page] = EFFECTIVE_PAGE_HEIGHT # all double page songs should get a whole page despite their height appendix_filter = lambda a_s, boolean : {k:v for k,v in a_s.iteritems() if manual_processing[k].get("appendix", False) == boolean} main_songs = appendix_filter(all_songs, False) add_songs(main_songs, songs_double_page, manual_processing, songs_data, cache) appendix_songs = appendix_filter(all_songs, True) add_songs(appendix_songs, songs_double_page, manual_processing, songs_data, cache) toc = [] for filename in filter(lambda s: manual_processing[s].get("show", True), all_songs.keys()): toc.append((songs_data[filename]["name"], cache[filename].get("page", "XX"))) toc.sort(key=lambda (x,y): x) create_toc(toc) if scribus.haveDoc(): scribus.setRedraw(True) scribus.statusMessage("") scribus.progressReset() with open(CACHE_FILE, "wb") as cache_file: json.dump(cache, cache_file, indent=2) if __name__ == "__main__": main()
2.5
2
12-transformar_metro.py
tainagirotto/exercicios-py
0
11904
# Read a value in meters and show it in other units (cm, mm, dm, km, hm, dam):
m = float(input('Enter the value in meters: '))
dm = m * 10
cm = m * 100
mm = m * 1000
km = m / 1000
hm = m / 100
dam = m / 10
print('The value in cm is {}'.format(cm))
print('The value in millimeters is {}'.format(mm))
print('The value in dm is {}'.format(dm))
print('The value in km is {}'.format(km))
print('The value in hm is {}'.format(hm))
print('The value in dam is {}'.format(dam))
4.1875
4
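The conversion exercise in row 11904 above repeats one print per unit. If one wanted a table-driven variant, it could look like the short sketch below; the names FACTORS and convert_meters are hypothetical and not part of the original exercise.

# Hypothetical table-driven variant of the unit-conversion exercise above.
FACTORS = {'km': 0.001, 'hm': 0.01, 'dam': 0.1, 'dm': 10, 'cm': 100, 'mm': 1000}

def convert_meters(m):
    # Map each unit name to the converted value.
    return {unit: m * factor for unit, factor in FACTORS.items()}

print(convert_meters(2.5))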
src/urls.py
chunky2808/Hire-Me
0
11905
<filename>src/urls.py<gh_stars>0
"""src URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from accounts import views as accounts_views
from hire import views as hire_views
from django.contrib.auth import views as auth_views
from chat_app import views as chat_views
from django.conf.urls.static import static
from django.conf import settings

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', hire_views.mainee, name='maiee'),
    url(r'^accounts/', include('django.contrib.auth.urls')),
    url(r'^accounts/signup/$', accounts_views.signup, name='signup'),
    url(r'^accounts/signup/customer/$', accounts_views.CustomerSignUpView.as_view(), name='customer_signup'),
    url(r'^accounts/signup/service/$', accounts_views.ServiceSignUpView.as_view(), name='service_signup'),
    url(r'^chat/(?P<stri_id>\w+?)/', chat_views.chat, name='index'),
    url(r'^chatbox/(?P<stri_id>\w+?)/', chat_views.chatbox, name='chat'),
    url(r'^oauth/', include('social_django.urls', namespace='social')),  # <--
    url(r'^login/$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    url(r'^logout/$', auth_views.LogoutView.as_view(), name='logout'),
    url(r'^services/$', hire_views.home, name='home'),
    url(r'^services/new/$', hire_views.add_service, name='new_service'),
    url(r'^services/(?P<pk>\d+)/$', hire_views.list_services, name='serve_list'),
    url(r'^services/(?P<pk>\d+)/new/$', hire_views.list_services_new, name='new_serve_list'),
    url(r'^services/(?P<pk>\d+)/delete/$', hire_views.delete_main, name='delete'),
    url(r'^services/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.review, name='review'),
    url(r'^services/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/new/$', hire_views.review_new, name='review_new'),
    url(r'^worker_page/(?P<pk>\d+)/$', hire_views.worker_page, name='worker_page'),
    url(r'^increment/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.increment, name='increment'),
    url(r'^decrement/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.decrement, name='decrement'),
    # url(r'^user/$', hire_views.model_form_upload, name='model_form_upload'),
    url(r'^hello/$', hire_views.hello, name='hello'),
]

if settings.DEBUG == True:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
2.5625
3
re_compare/re_compare.py
gchase/re-compare
0
11906
#!/usr/bin/env python3
import logging
import argparse
import traceback
import os
import sys

from analysis import Analysis
from collector import Collector
from config import DEBUG, DEFAULT_LOG_FILE_DIR


def is_dir(dirname):
    if not os.path.isdir(dirname):
        msg = "{0} is not a directory".format(dirname)
        raise argparse.ArgumentTypeError(msg)
    else:
        return dirname


def main():
    if DEBUG:
        logging.basicConfig(
            stream=sys.stdout,
            level=logging.INFO,
            format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
            datefmt="%H:%M:%S")

    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=lambda x: is_dir(x))
    parser.add_argument(
        '--test_types',
        nargs="+",
        choices=['first_match', 'all_matches', 'consecutive_matches'])
    parser.add_argument('--log_files', nargs='+', type=argparse.FileType())
    parser.set_defaults(
        test_types=['first_match', 'all_matches', 'consecutive_matches'])
    args = parser.parse_args()

    if args.log_files:
        logging.info('starting analysis')
        Analysis(files=args.log_files).analyze_logs()
        logging.info('finished analysis')
    else:
        logging.info('starting collection')
        Collector(args.task).collect()
        logging.info('finished collection')
        logging.info('starting analysis')
        Analysis(logs_dir=DEFAULT_LOG_FILE_DIR).analyze_logs()


if __name__ == '__main__':
    main()
2.4375
2
venv/Lib/site-packages/nipype/conftest.py
richung99/digitizePlots
585
11907
<gh_stars>100-1000
import os
import shutil
from tempfile import mkdtemp

import pytest
import numpy
import py.path as pp

NIPYPE_DATADIR = os.path.realpath(
    os.path.join(os.path.dirname(__file__), "testing/data")
)
temp_folder = mkdtemp()
data_dir = os.path.join(temp_folder, "data")
shutil.copytree(NIPYPE_DATADIR, data_dir)


@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    doctest_namespace["np"] = numpy
    doctest_namespace["os"] = os
    doctest_namespace["pytest"] = pytest
    doctest_namespace["datadir"] = data_dir


@pytest.fixture(autouse=True)
def _docdir(request):
    """Grabbed from https://stackoverflow.com/a/46991331"""
    # Trigger ONLY for the doctests.
    doctest_plugin = request.config.pluginmanager.getplugin("doctest")
    if isinstance(request.node, doctest_plugin.DoctestItem):
        # Get the fixture dynamically by its name.
        tmpdir = pp.local(data_dir)
        # Chdir only for the duration of the test.
        with tmpdir.as_cwd():
            yield
    else:
        # For normal tests, we have to yield, since this is a yield-fixture.
        yield


def pytest_unconfigure(config):
    # Delete temp folder after session is finished
    shutil.rmtree(temp_folder)
1.9375
2
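The nipype conftest.py in row 11907 injects np, os, pytest, and datadir into pytest's doctest_namespace, so doctests collected from the package can use those names without importing them. A minimal sketch of a doctest that would rely on that fixture is below; the function is hypothetical and not part of nipype.

# Hypothetical module collected with --doctest-modules; `np` is supplied by
# the autouse add_np fixture from the conftest.py above, not imported here.
def scale(values, factor):
    """Multiply an array element-wise.

    >>> scale(np.array([1.0, 2.0]), 2.0)
    array([2., 4.])
    """
    return values * factor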
tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py
aaron-parsons/pymalcolm
0
11908
<reponame>aaron-parsons/pymalcolm
from mock import Mock

from malcolm.testutil import ChildTestCase
from malcolm.modules.ADPandABlocks.blocks import pandablocks_runnable_block


class TestADPandABlocksBlocks(ChildTestCase):
    def test_pandablocks_runnable_block(self):
        self.create_child_block(
            pandablocks_runnable_block, Mock(),
            mri_prefix="mri_prefix", pv_prefix="pv_prefix", config_dir="/tmp")
2.421875
2
pymbolic/mapper/coefficient.py
sv2518/pymbolic
0
11909
<filename>pymbolic/mapper/coefficient.py
__copyright__ = "Copyright (C) 2013 <NAME>"

__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

from pymbolic.mapper import Mapper


class CoefficientCollector(Mapper):
    def __init__(self, target_names=None):
        self.target_names = target_names

    def map_sum(self, expr):
        stride_dicts = [self.rec(ch) for ch in expr.children]

        result = {}
        for stride_dict in stride_dicts:
            for var, stride in stride_dict.items():
                if var in result:
                    result[var] += stride
                else:
                    result[var] = stride

        return result

    def map_product(self, expr):
        result = {}

        children_coeffs = [self.rec(child) for child in expr.children]

        idx_of_child_with_vars = None
        for i, child_coeffs in enumerate(children_coeffs):
            for k in child_coeffs:
                if k != 1:
                    if (idx_of_child_with_vars is not None
                            and idx_of_child_with_vars != i):
                        raise RuntimeError(
                                "nonlinear expression")
                    idx_of_child_with_vars = i

        other_coeffs = 1
        for i, child_coeffs in enumerate(children_coeffs):
            if i != idx_of_child_with_vars:
                assert len(child_coeffs) == 1
                other_coeffs *= child_coeffs[1]

        if idx_of_child_with_vars is None:
            return {1: other_coeffs}
        else:
            return {
                    var: other_coeffs*coeff
                    for var, coeff in
                    children_coeffs[idx_of_child_with_vars].items()}

        return result

    def map_constant(self, expr):
        return {1: expr}

    def map_algebraic_leaf(self, expr):
        if self.target_names is None or expr.name in self.target_names:
            return {expr: 1}
        else:
            return {1: expr}
1.976563
2
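CoefficientCollector in row 11909 walks an expression tree and returns a mapping from each variable (or the constant key 1) to its coefficient, raising RuntimeError for nonlinear products. A minimal usage sketch, assuming pymbolic's var helper and the mapper's standard call interface:

from pymbolic import var
from pymbolic.mapper.coefficient import CoefficientCollector

x, y = var("x"), var("y")
expr = 3 * x + 2 * y + 5

# Mapper instances are callable; each variable maps to its coefficient and
# the key 1 holds the constant term, e.g. {x: 3, y: 2, 1: 5}.
print(CoefficientCollector()(expr))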
day_06/balancer.py
anglerud/advent_of_code_2017
3
11910
#!/usr/bin/env python
# coding: utf-8
""" """
import typing as t

import attr
import click


@attr.s(frozen=True)
class Memory(object):
    banks: t.Tuple[int, ...] = attr.ib()

    def balance(self) -> 'Memory':
        mem = list(self.banks)
        num_banks = len(self.banks)

        # Find the amount of blocks to balance - remove them from that bank.
        blocks_to_balance = max(mem)
        bank_pointer = mem.index(blocks_to_balance)
        mem[bank_pointer] = 0

        # Rebalance
        balance_rounds = 0
        while blocks_to_balance > 0:
            # Advance the pointer.
            bank_pointer = (bank_pointer + 1) % num_banks
            mem[bank_pointer] += 1
            blocks_to_balance -= 1

        return Memory(
            banks=tuple(mem)
        )


def detect_loop(memory: Memory) -> t.Tuple[int, Memory]:
    """Find how many steps until we detect a loop."""
    arrangements_seen = set()
    balancer_rounds = 0

    while memory not in arrangements_seen:
        arrangements_seen.add(memory)
        memory = memory.balance()
        balancer_rounds += 1

    return balancer_rounds, memory


@click.group()
def balancer():
    """Balancing memory like they were spinning tops."""
    pass


@balancer.command()
@click.argument('memory_banks', type=click.File())
def distribute(memory_banks):
    banks = tuple(map(int, memory_banks.read().split()))
    memory = Memory(banks)

    steps, memory = detect_loop(memory)

    msg = f"Loop found after {steps} balance rounds."
    click.secho(msg, fg='green')


@balancer.command()
@click.argument('memory_banks', type=click.File())
def loop_size(memory_banks):
    banks = tuple(map(int, memory_banks.read().split()))
    memory = Memory(banks)

    _, memory = detect_loop(memory)
    loop_size, _ = detect_loop(memory)

    msg = f"Loop size is {loop_size}."
    click.secho(msg, fg='green')


def main():
    """Entrypoint."""
    balancer()


if __name__ == '__main__':
    main()
3.625
4
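Row 11910 implements Advent of Code 2017 day 6: Memory.balance redistributes the largest bank and detect_loop counts redistribution rounds until a previously seen arrangement recurs, returning that count together with the repeated arrangement. A short sketch driving it without the click CLI, using the example banks from the puzzle statement:

# Assumes Memory and detect_loop from the file above are in scope.
memory = Memory(banks=(0, 2, 7, 0))
steps, repeated = detect_loop(memory)    # 5 rounds until a repeat for this input
loop_length, _ = detect_loop(repeated)   # the cycle itself is 4 rounds long
print(steps, loop_length)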
python-while/exercise4.py
crobert7/Py-Basics
0
11911
word = input('Type a word: ')
while word != 'chupacabra':
    word = input('Type a word: ')
    if word == 'chupacabra':
        print('You are out of the loop')
        break
4.1875
4
pw_build/selects.bzl
mspang/pigweed
0
11912
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

_RTOS_NONE = "//pw_build/constraints/rtos:none"

# Common select for tagging a target as only compatible with host OS's. This
# select implements the logic '(Windows or Macos or Linux) and not RTOS'.
# Example usage:
#   load("//pw_build:selects.bzl","TARGET_COMPATIBLE_WITH_HOST_SELECT")
#   pw_cc_library(
#       name = "some_host_only_lib",
#       hdrs = ["host.h"],
#       target_compatible_with = select(TARGET_COMPATIBLE_WITH_HOST_SELECT),
#   )
TARGET_COMPATIBLE_WITH_HOST_SELECT = {
    "@platforms//os:windows": [_RTOS_NONE],
    "@platforms//os:macos": [_RTOS_NONE],
    "@platforms//os:linux": [_RTOS_NONE],
    "//conditions:default": ["@platforms//:incompatible"],
}
1.476563
1
subscriptions/models.py
emil-magnusson/py-on-api
0
11913
<gh_stars>0
# subscriptions/models.py
import uuid
from django.db import models
from accesses.models import Accesses, Services


class OperationalState(models.Model):
    operationalState = models.CharField(primary_key=True, max_length=50)

    def __str__(self):
        return self.operationalState


class Subscriptions(models.Model):
    subscriptionId = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    accessId = models.ForeignKey(Accesses, related_name='subscriptions', on_delete=models.PROTECT)
    service = models.ForeignKey(Services, on_delete=models.PROTECT)
    operationalState = models.ForeignKey(OperationalState, on_delete=models.PROTECT)
    spReference = models.CharField(max_length=50, default=uuid.uuid4().hex[:6].upper())
    spSubscriptionId = models.UUIDField(default=uuid.uuid4, editable=False)
    #option82 = models.OneToOneField(Option82, on_delete=models.PROTECT) ##dhcpIdentifier
    note = models.CharField(max_length=350, null=True, blank=True)
    ##characteristics

    def __str__(self):
        return '{} - {}'.format(self.service, self.subscriptionId)


class Equipment(models.Model):
    subscriptionId = models.ForeignKey(Subscriptions, related_name='equipment', on_delete=models.PROTECT)
    vendorId = models.CharField(max_length=250)
    macAddress = models.CharField(max_length=250)

    def __str__(self):
        return '{} - {}'.format(self.vendorId, self.macAddress)
2.0625
2
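One detail worth noting in the Subscriptions model of row 11913: default=uuid.uuid4().hex[:6].upper() is evaluated once at import time, so every new row receives the same default spReference. Django evaluates callable defaults per instance instead; a sketch of that fix follows, with a hypothetical helper name.

import uuid
from django.db import models

def make_sp_reference():
    # Called for each new row, instead of once when models.py is imported.
    return uuid.uuid4().hex[:6].upper()

# Hypothetical replacement for the field definition above:
#   spReference = models.CharField(max_length=50, default=make_sp_reference)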
leboncrevard/job.py
mclbn/leboncrevard
5
11914
<reponame>mclbn/leboncrevard
import smtplib
import time
from email.mime.text import MIMEText
from leboncrevard import scrapper, config


class LbcJob:
    def __init__(self, name, url, interval, recipients):
        self.name = name
        self.url = url
        self.scrapper = scrapper.LbcScrapper(url)
        self.interval = interval
        self.recipients = recipients
        self.outfile = name + ".csv"
        self.shouldrun = True

    def __eq__(self, other):
        if self.name != other.name:
            return False
        if self.url != other.url:
            return False
        # Ignoring interval and recipients for now
        # if self.interval != other.interval:
        #     return False
        # if self.recipients != other.recipients:
        #     return False
        return True

    def disable(self):
        self.shouldrun = False

    def enable(self):
        self.shouldrun = True

    def run(self):
        if not self.shouldrun:
            return
        if self.scrapper.test_connectivity() is False:
            print("No connectivity, aborting for now...")
            return False
        else:
            print("Starting scraping job: " + self.name)
            ads = self.scrapper.scrap()
            if ads is None:
                print("Nothing to scrap for " + self.name + ", aborting job.")
                return False
            text = ""
            hashes = ""
            f = open(self.outfile, "a+")
            f.seek(0)
            lines = f.read()
            for ad in ads:
                ad_hash = ad.get_hash()
                line = "\"" + ad.get_link() + "\"," + ad_hash
                if lines.find(line) != -1:
                    print("Known ad, skipping.")
                    continue
                if lines.find(ad_hash) != -1:
                    text += "Repost: "
                print("Unknown ad, sending...")
                text += ad.get_text()
                hashes += time.strftime("%d-%m-%y") + "," + line + "\n"
            if len(text) > 0:
                for recipient in self.recipients:
                    print(recipient)
                    try:
                        print("Sending mail...")
                        msg = MIMEText(text)
                        msg['Subject'] = "Nouvelles annonces (" + self.name + ")"
                        msg['From'] = config.SMTP_USER
                        msg['To'] = recipient
                        s = smtplib.SMTP(config.SMTP_SERVER)
                        s.ehlo()
                        s.starttls()
                        s.login(config.SMTP_USER, config.SMTP_PASS)
                        s.send_message(msg)
                        s.quit()
                        f.write(hashes)
                    except Exception as e:
                        print(str(e))
                        pass
            f.close()
2.40625
2
tsl/data/datamodule/splitters.py
TorchSpatiotemporal/tsl
4
11915
<reponame>TorchSpatiotemporal/tsl<gh_stars>1-10 import functools from copy import deepcopy from datetime import datetime from typing import Mapping, Callable, Union, Tuple, Optional import numpy as np from tsl.utils.python_utils import ensure_list from ..spatiotemporal_dataset import SpatioTemporalDataset from ..utils import SynchMode __all__ = [ 'Splitter', 'CustomSplitter', 'TemporalSplitter', 'AtTimeStepSplitter', ] from ...typing import Index class Splitter: r"""Base class for splitter module.""" def __init__(self): self.__indices = dict() self._fitted = False self.reset() def __new__(cls, *args, **kwargs) -> "Splitter": obj = super().__new__(cls) # track `fit` calls obj.fit = cls._track_fit(obj, obj.fit) return obj @staticmethod def _track_fit(obj: "Splitter", fn: callable) -> callable: """A decorator to track fit calls. When ``splitter.fit(...)`` is called, :obj:`splitter.fitted` is set to :obj:`True`. Args: obj: Object whose function will be tracked. fn: Function that will be wrapped. Returns: Decorated method to track :obj:`fit` calls. """ @functools.wraps(fn) def fit(dataset: SpatioTemporalDataset) -> dict: fn(dataset) obj._fitted = True return obj.indices return fit def __getstate__(self) -> dict: # avoids _pickle.PicklingError: Can't pickle <...>: it's not the same # object as <...> d = self.__dict__.copy() del d['fit'] return d def __call__(self, *args, **kwargs): return self.split(*args, **kwargs) def __repr__(self): lens = ", ".join(map(lambda kv: "%s=%s" % kv, self.lens().items())) return "%s(%s)" % (self.__class__.__name__, lens) @property def indices(self): return self.__indices @property def fitted(self): return self._fitted @property def train_idxs(self): return self.__indices.get('train') @property def val_idxs(self): return self.__indices.get('val') @property def test_idxs(self): return self.__indices.get('test') @property def train_len(self): return len(self.train_idxs) if self.train_idxs is not None else None @property def val_len(self): return len(self.val_idxs) if self.val_idxs is not None else None @property def test_len(self): return len(self.test_idxs) if self.test_idxs is not None else None def set_indices(self, train=None, val=None, test=None): if train is not None: self.__indices['train'] = train if val is not None: self.__indices['val'] = val if test is not None: self.__indices['test'] = test def reset(self): self.__indices = dict(train=None, val=None, test=None) self._fitted = False def lens(self) -> dict: return dict(train_len=self.train_len, val_len=self.val_len, test_len=self.test_len) def copy(self) -> "Splitter": copy = Splitter() copy.__dict__ = deepcopy(self.__dict__) return copy def fit(self, dataset: SpatioTemporalDataset): raise NotImplementedError def split(self, dataset: SpatioTemporalDataset) -> dict: if self.fitted: return self.indices else: return self.fit(dataset) class CustomSplitter(Splitter): def __init__(self, val_split_fn: Callable = None, test_split_fn: Callable = None, val_kwargs: Mapping = None, test_kwargs: Mapping = None, mask_test_indices_in_val: bool = True): super(CustomSplitter, self).__init__() self.val_split_fn = val_split_fn self.test_split_fn = test_split_fn self.val_kwargs = val_kwargs or dict() self.test_kwargs = test_kwargs or dict() self.mask_test_indices_in_val = mask_test_indices_in_val @property def val_policy(self): return self.val_split_fn.__name__ if callable( self.val_split_fn) else None @property def test_policy(self): return self.test_split_fn.__name__ if callable( self.test_split_fn) else None def fit(self, 
dataset: SpatioTemporalDataset): _, test_idxs = self.test_split_fn(dataset, **self.test_kwargs) val_kwargs = self.val_kwargs if self.mask_test_indices_in_val and len(test_idxs): val_kwargs = dict(**self.val_kwargs, mask=test_idxs) train_idxs, val_idxs = self.val_split_fn(dataset, **val_kwargs) self.set_indices(train_idxs, val_idxs, test_idxs) class FixedIndicesSplitter(Splitter): def __init__(self, train_idxs: Optional[Index] = None, val_idxs: Optional[Index] = None, test_idxs: Optional[Index] = None): super(FixedIndicesSplitter, self).__init__() self.set_indices(train_idxs, val_idxs, test_idxs) self._fitted = True def fit(self, dataset: SpatioTemporalDataset): pass class TemporalSplitter(Splitter): def __init__(self, val_len: int = None, test_len: int = None): super(TemporalSplitter, self).__init__() self._val_len = val_len self._test_len = test_len def fit(self, dataset: SpatioTemporalDataset): idx = np.arange(len(dataset)) val_len, test_len = self._val_len, self._test_len if test_len < 1: test_len = int(test_len * len(idx)) if val_len < 1: val_len = int(val_len * (len(idx) - test_len)) test_start = len(idx) - test_len val_start = test_start - val_len self.set_indices(idx[:val_start - dataset.samples_offset], idx[val_start:test_start - dataset.samples_offset], idx[test_start:]) @staticmethod def add_argparse_args(parser): parser.add_argument('--val-len', type=float or int, default=0.1) parser.add_argument('--test-len', type=float or int, default=0.2) return parser class AtTimeStepSplitter(Splitter): def __init__(self, first_val_ts: Union[Tuple, datetime] = None, first_test_ts: Union[Tuple, datetime] = None): super(AtTimeStepSplitter, self).__init__() self.first_val_ts = first_val_ts self.first_test_ts = first_test_ts def fit(self, dataset: SpatioTemporalDataset): train_idx, test_idx = split_at_ts(dataset, ts=self.first_test_ts) train_idx, val_idx = split_at_ts(dataset, ts=self.first_val_ts, mask=test_idx) return self.set_indices(train_idx, val_idx, test_idx) @staticmethod def add_argparse_args(parser): parser.add_argument('--first-val-ts', type=list or tuple, default=None) parser.add_argument('--first-test-ts', type=list or tuple, default=None) return parser ### def indices_between(dataset: SpatioTemporalDataset, first_ts: Union[Tuple, datetime] = None, last_ts: Union[Tuple, datetime] = None): if first_ts is not None: if isinstance(first_ts, datetime): pass elif isinstance(first_ts, (tuple, list)) and len(first_ts) >= 3: first_ts = datetime(*first_ts, tzinfo=dataset.index.tzinfo) else: raise TypeError("first_ts must be a datetime or a tuple") if last_ts is not None: if isinstance(last_ts, datetime): pass elif isinstance(last_ts, (tuple, list)) and len(last_ts) >= 3: last_ts = datetime(*last_ts, tzinfo=dataset.index.tzinfo) else: raise TypeError("last_ts must be a datetime or a tuple") first_day_loc, last_day_loc = dataset.index.slice_locs(first_ts, last_ts) first_sample_loc = first_day_loc - dataset.horizon_offset last_sample_loc = last_day_loc - dataset.horizon_offset - 1 indices_from_sample = np.where((first_sample_loc <= dataset.indices) & ( dataset.indices < last_sample_loc))[0] return indices_from_sample def split_at_ts(dataset, ts, mask=None): from_day_idxs = indices_between(dataset, first_ts=ts) prev_idxs = np.arange( from_day_idxs[0] if len(from_day_idxs) else len(dataset)) if mask is not None: from_day_idxs = np.setdiff1d(from_day_idxs, mask) prev_idxs = np.setdiff1d(prev_idxs, mask) return prev_idxs, from_day_idxs def disjoint_months(dataset, months=None, 
synch_mode=SynchMode.WINDOW): idxs = np.arange(len(dataset)) months = ensure_list(months) # divide indices according to window or horizon if synch_mode is SynchMode.WINDOW: start = 0 end = dataset.window - 1 elif synch_mode is SynchMode.HORIZON: start = dataset.horizon_offset end = dataset.horizon_offset + dataset.horizon - 1 else: raise ValueError('synch_mode can only be one of %s' % [SynchMode.WINDOW, SynchMode.HORIZON]) # after idxs indices = np.asarray(dataset._indices) start_in_months = np.in1d(dataset.index[indices + start].month, months) end_in_months = np.in1d(dataset.index[indices + end].month, months) idxs_in_months = start_in_months & end_in_months after_idxs = idxs[idxs_in_months] # previous idxs months = np.setdiff1d(np.arange(1, 13), months) start_in_months = np.in1d(dataset.index[indices + start].month, months) end_in_months = np.in1d(dataset.index[indices + end].month, months) idxs_in_months = start_in_months & end_in_months prev_idxs = idxs[idxs_in_months] return prev_idxs, after_idxs # SPLIT FUNCTIONS def split_function_builder(fn, *args, name=None, **kwargs): def wrapper_split_fn(dataset, length=None, mask=None): return fn(dataset, length=length, mask=mask, *args, **kwargs) wrapper_split_fn.__name__ = name or "wrapped__%s" % fn.__name__ return wrapper_split_fn def subset_len(length, set_size, period=None): if period is None: period = set_size if length is None or length <= 0: length = 0 if 0. < length < 1.: length = max(int(length * period), 1) elif period <= length < set_size: length = int(length / set_size * period) elif length > set_size: raise ValueError("Provided length of %i is greater than set_size %i" % ( length, set_size)) return length def tail_of_period(iterable, length, mask=None, period=None): size = len(iterable) period = period or size if mask is None: mask = [] indices = np.arange(size) length = subset_len(length, size, period) prev_idxs, after_idxs = [], [] for batch_idxs in [indices[i:i + period] for i in range(0, size, period)]: batch_idxs = np.setdiff1d(batch_idxs, mask) prev_idxs.extend(batch_idxs[:len(batch_idxs) - length]) after_idxs.extend(batch_idxs[len(batch_idxs) - length:]) return np.array(prev_idxs), np.array(after_idxs) def random(iterable, length, mask=None): size = len(iterable) if mask is None: mask = [] indices = np.setdiff1d(np.arange(size), mask) np.random.shuffle(indices) split_at = len(indices) - subset_len(length, size) res = [np.sort(indices[:split_at]), np.sort(indices[split_at:])] return res def past_pretest_days(dataset, length, mask): # get the first day of testing, as the first step of the horizon keep_until = np.min(mask) first_testing_day_idx = dataset._indices[keep_until] first_testing_day = dataset.index[ first_testing_day_idx + dataset.lookback + dataset.delay] # extract samples before first day of testing through the years tz_info = dataset.index.tzinfo years = sorted(set(dataset.index.year)) yearly_testing_loc = [] for year in years: ftd_year = datetime(year, first_testing_day.month, first_testing_day.day, tzinfo=tz_info) yearly_testing_loc.append(dataset.index.slice_locs(ftd_year)[0]) yearly_train_samples = [ np.where(dataset._indices < ytl - dataset.lookback - dataset.delay)[0] for ytl in yearly_testing_loc] # filter the years in which there are no such samples yearly_train_samples = [yts for yts in yearly_train_samples if len(yts) > 0] # for each year excluding the last take the last "val_len // n_years" samples yearly_val_len = length // len(yearly_train_samples) yearly_val_lens = [min(yearly_val_len, len(yts)) for 
yts in yearly_train_samples[:-1]] # for the last year, take the remaining number of samples needed to reach val_len # this value is always greater or equals to the other so we have at least the same number of validation samples # coming from the last year than the maximum among all the other years. yearly_val_lens.append(length - sum(yearly_val_lens)) # finally extracts the validation samples val_idxs = [idxs[-val_len:] for idxs, val_len in zip(yearly_train_samples, yearly_val_lens)] val_idxs = np.concatenate(val_idxs) # recompute training and test indices all_idxs = np.arange(len(dataset)) train_idxs = np.setdiff1d(all_idxs, val_idxs) return train_idxs, val_idxs def last_month(dataset, mask=None): if mask is not None: keep_until = np.min(mask) last_day_idx = dataset._indices[keep_until] last_day = dataset.index[last_day_idx] else: last_day = dataset.index[-1] split_day = (last_day.year, last_day.month, 1) return split_at_ts(dataset, split_day, mask) # aliases temporal = TemporalSplitter at_ts = AtTimeStepSplitter
2.09375
2
demo.py
bringBackm/SSD
0
11916
import glob import os import torch from PIL import Image from tqdm import tqdm from ssd.config import cfg from ssd.data.datasets import COCODataset, VOCDataset from ssd.modeling.predictor import Predictor from ssd.modeling.vgg_ssd import build_ssd_model import argparse import numpy as np from ssd.utils.viz import draw_bounding_boxes def run_demo(cfg, weights_file, iou_threshold, score_threshold, images_dir, output_dir, dataset_type): if dataset_type == "voc": class_names = VOCDataset.class_names elif dataset_type == 'coco': class_names = COCODataset.class_names else: raise NotImplementedError('Not implemented now.') device = torch.device(cfg.MODEL.DEVICE) model = build_ssd_model(cfg) model.load(weights_file) print('Loaded weights from {}.'.format(weights_file)) model = model.to(device) predictor = Predictor(cfg=cfg, model=model, iou_threshold=iou_threshold, score_threshold=score_threshold, device=device) cpu_device = torch.device("cpu") image_paths = glob.glob(os.path.join(images_dir, '*.jpg')) if not os.path.exists(output_dir): os.makedirs(output_dir) for image_path in tqdm(image_paths): image = Image.open(image_path).convert("RGB") image = np.array(image) output = predictor.predict(image) boxes, labels, scores = [o.to(cpu_device).numpy() for o in output] drawn_image = draw_bounding_boxes(image, boxes, labels, scores, class_names).astype(np.uint8) image_name = os.path.basename(image_path) Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name)) def main(): parser = argparse.ArgumentParser(description="SSD Demo.") parser.add_argument( "--config-file", default="configs/ssd300_voc0712.yaml", metavar="FILE", help="path to config file", type=str, ) parser.add_argument("--weights", default='weights/ssd300_voc0712_mAP77.83.pth',type=str, help="Trained weights.") parser.add_argument("--iou_threshold", type=float, default=0.5) parser.add_argument("--score_threshold", type=float, default=0.5) parser.add_argument("--images_dir", default='demo', type=str, help='Specify a image dir to do prediction.') parser.add_argument("--output_dir", default='demo/result', type=str, help='Specify a image dir to save predicted images.') parser.add_argument("--dataset_type", default="voc", type=str, help='Specify dataset type. Currently support voc and coco.') parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) args = parser.parse_args() print(args) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() print("Loaded configuration file {}".format(args.config_file)) #with open(args.config_file, "r") as cf: # config_str = "\n" + cf.read() # print(config_str) #print("Running with config:\n{}".format(cfg)) run_demo(cfg=cfg, weights_file=args.weights, iou_threshold=args.iou_threshold, score_threshold=args.score_threshold, images_dir=args.images_dir, output_dir=args.output_dir, dataset_type=args.dataset_type) if __name__ == '__main__': main()
2.34375
2
quiz/bot/storage/shelter.py
shubham-king/guess-the-melody
4
11917
<filename>quiz/bot/storage/shelter.py
from shelve import DbfilenameShelf, open
from typing import Type

from quiz.config import Config
from quiz.types import ContextManager, DictAccess


class Shelter(ContextManager, DictAccess):
    """Interface for bot shelter."""

    def __init__(self, config: Type[Config]) -> None:
        self._shelter: DbfilenameShelf = open(config.shelve_name)

    def __enter__(self) -> "Shelter":
        return self

    def __getitem__(self, item: str) -> int:
        return self._shelter[item]

    def __setitem__(self, key: str, value: int) -> None:
        self._shelter[key] = value

    def __delitem__(self, key: str) -> None:
        del self._shelter[key]

    def close(self) -> None:
        self._shelter.close()

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        self.close()
2.734375
3
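Shelter in row 11917 wraps a shelve file behind dict-style access and a context manager that closes the shelf on exit. A minimal usage sketch, assuming Config.shelve_name points at a writable path as in quiz.config:

# Assumes Shelter and Config from the module above are importable.
with Shelter(Config) as storage:
    storage["user:42"] = 3        # __setitem__ writes through to the shelf
    print(storage["user:42"])     # __getitem__ reads it back
    del storage["user:42"]        # __delitem__ removes the key
# __exit__ has closed the underlying DbfilenameShelf at this point.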
src/applications/task310/apps.py
SergeyNazarovSam/SergeyPythonfirst
2
11918
<gh_stars>1-10
from django.apps import AppConfig


class Task310Config(AppConfig):
    label = "task310"
    name = f"applications.{label}"
1.382813
1
scripts/data_creation_v3.py
deepchecks/url_classification_dl
3
11919
<filename>scripts/data_creation_v3.py import whois from datetime import datetime, timezone import math import pandas as pd import numpy as np from pyquery import PyQuery from requests import get class UrlFeaturizer(object): def __init__(self, url): self.url = url self.domain = url.split('//')[-1].split('/')[0] self.today = datetime.now().replace(tzinfo=None) try: self.whois = whois.query(self.domain).__dict__ except: self.whois = None try: self.response = get(self.url) self.pq = PyQuery(self.response.text) except: self.response = None self.pq = None ## URL string Features def entropy(self): string = self.url.strip() prob = [float(string.count(c)) / len(string) for c in dict.fromkeys(list(string))] entropy = sum([(p * math.log(p) / math.log(2.0)) for p in prob]) return entropy def ip(self): string = self.url flag = False if ("." in string): elements_array = string.strip().split(".") if(len(elements_array) == 4): for i in elements_array: if (i.isnumeric() and int(i)>=0 and int(i)<=255): flag=True else: flag=False break if flag: return 1 else: return 0 def numDigits(self): digits = [i for i in self.url if i.isdigit()] return len(digits) def urlLength(self): return len(self.url) def numParameters(self): params = self.url.split('&') return len(params) - 1 def numFragments(self): fragments = self.url.split('#') return len(fragments) - 1 def numSubDomains(self): subdomains = self.url.split('http')[-1].split('//')[-1].split('/') return len(subdomains)-1 def domainExtension(self): ext = self.url.split('.')[-1].split('/')[0] return ext ## URL domain features def hasHttp(self): return 'http:' in self.url def hasHttps(self): return 'https:' in self.url def daysSinceRegistration(self): if self.whois and self.whois['creation_date']: diff = self.today - self.whois['creation_date'].replace(tzinfo=None) diff = str(diff).split(' days')[0] return diff else: return 0 def daysSinceExpiration(self): if self.whois and self.whois['expiration_date']: diff = self.whois['expiration_date'].replace(tzinfo=None) - self.today diff = str(diff).split(' days')[0] return diff else: return 0 ## URL Page Features def bodyLength(self): if self.pq is not None: return len(self.pq('html').text()) if self.urlIsLive else 0 else: return 0 def numTitles(self): if self.pq is not None: titles = ['h{}'.format(i) for i in range(7)] titles = [self.pq(i).items() for i in titles] return len([item for s in titles for item in s]) else: return 0 def numImages(self): if self.pq is not None: return len([i for i in self.pq('img').items()]) else: return 0 def numLinks(self): if self.pq is not None: return len([i for i in self.pq('a').items()]) else: return 0 def scriptLength(self): if self.pq is not None: return len(self.pq('script').text()) else: return 0 def specialCharacters(self): if self.pq is not None: bodyText = self.pq('html').text() schars = [i for i in bodyText if not i.isdigit() and not i.isalpha()] return len(schars) else: return 0 def scriptToSpecialCharsRatio(self): v = self.specialCharacters() if self.pq is not None and v!=0: sscr = self.scriptLength()/v else: sscr = 0 return sscr def scriptTobodyRatio(self): v = self.bodyLength() if self.pq is not None and v!=0: sbr = self.scriptLength()/v else: sbr = 0 return sbr def bodyToSpecialCharRatio(self): v = self.bodyLength() if self.pq is not None and v!=0: bscr = self.specialCharacters()/v else: bscr = 0 return bscr def urlIsLive(self): return self.response == 200 def run(self): data = {} data['entropy'] = self.entropy() data['numDigits'] = self.numDigits() data['urlLength'] = 
self.urlLength() data['numParams'] = self.numParameters() data['hasHttp'] = self.hasHttp() data['hasHttps'] = self.hasHttps() data['urlIsLive'] = self.urlIsLive() data['bodyLength'] = self.bodyLength() data['numTitles'] = self.numTitles() data['numImages'] = self.numImages() data['numLinks'] = self.numLinks() data['scriptLength'] = self.scriptLength() data['specialChars'] = self.specialCharacters() data['ext'] = self.domainExtension() data['dsr'] = self.daysSinceRegistration() data['dse'] = self.daysSinceExpiration() data['sscr'] = self.scriptToSpecialCharsRatio() data['sbr'] = self.scriptTobodyRatio() data['bscr'] = self.bodyToSpecialCharRatio() data['num_%20'] = self.url.count("%20") data['num_@'] = self.url.count("@") data['has_ip'] = self.ip() return data
2.765625
3
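UrlFeaturizer in row 11919 computes string, WHOIS, and page features for a single URL, and run() returns them as a flat dict. Below is a sketch of batch featurization into a pandas DataFrame; note that constructing the object performs network I/O (a WHOIS query and an HTTP GET), so this needs connectivity and can be slow, with failures handled by the except branches above.

import pandas as pd

# Assumes UrlFeaturizer from the script above is importable.
urls = ["https://example.com/index.html", "http://example.org/login?user=1"]
rows = [UrlFeaturizer(u).run() for u in urls]
df = pd.DataFrame(rows, index=urls)
print(df[["urlLength", "numDigits", "hasHttp", "hasHttps"]])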
rgnn_at_scale/models/gat.py
sigeisler/robustness_of_gnns_at_scale
11
11920
from typing import Any, Dict, Tuple import torch from torch_geometric.nn import GATConv from torch_sparse import SparseTensor, set_diag from rgnn_at_scale.aggregation import ROBUST_MEANS from rgnn_at_scale.models.gcn import GCN class RGATConv(GATConv): """Extension of Pytorch Geometric's `GCNConv` to execute a robust aggregation function: - soft_k_medoid - soft_medoid (not scalable) - k_medoid - medoid (not scalable) - dimmedian Parameters ---------- mean : str, optional The desired mean (see above for the options), by default 'soft_k_medoid' mean_kwargs : Dict[str, Any], optional Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True) """ def __init__(self, mean='soft_k_medoid', mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True), **kwargs): kwargs['in_channels'] = 2 * [kwargs['in_channels']] super().__init__(**kwargs) self._mean = ROBUST_MEANS[mean] if mean is not None else None self._mean_kwargs = mean_kwargs def forward(self, arguments: Tuple[torch.Tensor, SparseTensor] = None) -> torch.Tensor: """Predictions based on the input. Parameters ---------- arguments : Sequence[torch.Tensor] [x, edge indices] or [x, edge indices, edge weights], by default None Returns ------- torch.Tensor the output of `GCNConv`. Raises ------ NotImplementedError if the arguments are not of length 2 or 3 """ if len(arguments) == 2: x, edge_index = arguments edge_weight = None elif len(arguments) == 3: x, edge_index, edge_weight = arguments else: raise NotImplementedError("This method is just implemented for two or three arguments") assert isinstance(edge_index, SparseTensor), 'GAT requires a SparseTensor as input' assert edge_weight is None, 'The weights must be passed via a SparseTensor' H, C = self.heads, self.out_channels assert x.dim() == 2, 'Static graphs not supported in `GATConv`.' x_l = x_r = self.lin_l(x).view(-1, H, C) alpha_l = (x_l * self.att_l).sum(dim=-1) alpha_r = (x_r * self.att_r).sum(dim=-1) if self.add_self_loops: edge_index = set_diag(edge_index) # propagate_type: (x: OptPairTensor, alpha: OptPairTensor) out = self.propagate(edge_index, x=(x_l, x_r), alpha=(alpha_l, alpha_r)) alpha = self._alpha * edge_index.storage.value()[:, None] self._alpha = None if self.concat: out = out.view(-1, self.heads * self.out_channels) else: out = out.mean(dim=1) if self.bias is not None: out += self.bias attention_matrix = edge_index.set_value(alpha, layout='coo') attention_matrix.storage._value = attention_matrix.storage._value.squeeze() x = self.lin_l(x) if self._mean is not None: x = self._mean(attention_matrix, x, **self._mean_kwargs) else: x = attention_matrix @ x x += self.bias return x class RGAT(GCN): """Generic Reliable Graph Neural Network (RGNN) implementation which currently supports a GCN architecture with the aggregation functions: - soft_k_medoid - soft_medoid (not scalable) - k_medoid - medoid (not scalable) - dimmedian and with the adjacency preprocessings: - SVD: <NAME>, <NAME>, <NAME>, and <NAME>. All you need is Low (rank): Defending against adversarial attacks on graphs. - GDC: <NAME>, <NAME>, and <NAME>. Diffusion Improves Graph Learning. - Jaccard: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Adversarial examples for graph data: Deep insights into attack and defense. 
Parameters ---------- mean : str, optional The desired mean (see above for the options), by default 'soft_k_medoid' mean_kwargs : Dict[str, Any], optional Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True) """ def __init__(self, mean: str = 'soft_k_medoid', mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True), **kwargs): self._mean_kwargs = dict(mean_kwargs) self._mean = mean super().__init__(**kwargs) assert not self.do_checkpoint, 'Checkpointing is not supported' def _build_conv_layer(self, in_channels: int, out_channels: int): return RGATConv(mean=self._mean, mean_kwargs=self._mean_kwargs, in_channels=in_channels, out_channels=out_channels) def _cache_if_option_is_set(self, callback, x, edge_idx, edge_weight): return SparseTensor.from_edge_index(edge_idx, edge_weight, (x.shape[0], x.shape[0])), None
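A hedged usage sketch for the RGAT record above: RGATConv's forward takes a single tuple of node features and a SparseTensor adjacency, so a call looks like conv((x, adj)). The feature sizes and edge index below are made up, and the rgnn_at_scale package is assumed to be installed, which is why those lines stay commented.

import torch
from torch_sparse import SparseTensor
# from rgnn_at_scale.models.gat import RGATConv   # import path taken from this record

x = torch.randn(6, 16)                             # 6 nodes, 16 features (made-up sizes)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6))

# conv = RGATConv(mean='soft_k_medoid', in_channels=16, out_channels=8, heads=1)
# out = conv((x, adj))                             # robust aggregation replaces the plain attention sum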
2.015625
2
code/renderer/randomize/material.py
jonathangranskog/shading-scene-representations
21
11921
<gh_stars>10-100 import numpy as np import pyrr import os class Material(): def __init__(self, color=np.ones(3, dtype=np.float32), emission=np.zeros(3, dtype=np.float32), roughness=1.0, ior=15.0, id=0, texture=None, texture_frequency=np.array([1.0, 1.0])): self.color = color self.emission = emission self.roughness = roughness self.ior = ior if texture is not None: self.texture = os.path.abspath(texture) else: self.texture = "" self.texture_frequency = texture_frequency self.id = id def as_dict(self): d = {} d["color"] = self.color.tolist() d["emission"] = self.emission.tolist() d["roughness"] = self.roughness d["ior"] = self.ior d["texture"] = self.texture d["texture_frequency"] = self.texture_frequency.tolist() d["id"] = self.id return d
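A short usage sketch for the Material record above; only the module path is an assumption, while the constructor arguments and as_dict() come from the class itself.

import json
import numpy as np
from material import Material   # module path is an assumption (file lives at code/renderer/randomize/material.py)

mat = Material(color=np.array([0.2, 0.6, 0.9], dtype=np.float32), roughness=0.4, id=3)
print(json.dumps(mat.as_dict(), indent=2))   # as_dict() returns plain lists/floats/strings, so it round-trips to JSON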
2.546875
3
frappe/core/doctype/sms_settings/sms_settings.py
ektai/erp2Dodock
0
11922
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe import _, throw, msgprint from frappe.utils import nowdate from frappe.model.document import Document import six from six import string_types class SMSSettings(Document): pass def validate_receiver_nos(receiver_list): validated_receiver_list = [] for d in receiver_list: if not d: break # remove invalid character for x in [' ','-', '(', ')']: d = d.replace(x, '') validated_receiver_list.append(d) if not validated_receiver_list: throw(_("Please enter valid mobile nos")) return validated_receiver_list @frappe.whitelist() def get_contact_number(contact_name, ref_doctype, ref_name): "returns mobile number of the contact" number = frappe.db.sql("""select mobile_no, phone from tabContact where name=%s and exists( select name from `tabDynamic Link` where link_doctype=%s and link_name=%s ) """, (contact_name, ref_doctype, ref_name)) return number and (number[0][0] or number[0][1]) or '' @frappe.whitelist() def send_sms(receiver_list, msg, sender_name = '', success_msg = True): import json if isinstance(receiver_list, string_types): receiver_list = json.loads(receiver_list) if not isinstance(receiver_list, list): receiver_list = [receiver_list] receiver_list = validate_receiver_nos(receiver_list) arg = { 'receiver_list' : receiver_list, 'message' : frappe.safe_decode(msg).encode('utf-8'), 'success_msg' : success_msg } if frappe.db.get_value('SMS Settings', None, 'sms_gateway_url'): send_via_gateway(arg) else: msgprint(_("Please Update SMS Settings")) def send_via_gateway(arg): ss = frappe.get_doc('SMS Settings', 'SMS Settings') headers = get_headers(ss) use_json = headers.get("Content-Type") == "application/json" message = frappe.safe_decode(arg.get('message')) args = {ss.message_parameter: message} for d in ss.get("parameters"): if not d.header: args[d.parameter] = d.value success_list = [] for d in arg.get('receiver_list'): args[ss.receiver_parameter] = d status = send_request(ss.sms_gateway_url, args, headers, ss.use_post, use_json) if 200 <= status < 300: success_list.append(d) if len(success_list) > 0: args.update(arg) create_sms_log(args, success_list) if arg.get('success_msg'): frappe.msgprint(_("SMS sent to following numbers: {0}").format("\n" + "\n".join(success_list))) def get_headers(sms_settings=None): if not sms_settings: sms_settings = frappe.get_doc('SMS Settings', 'SMS Settings') headers={'Accept': "text/plain, text/html, */*"} for d in sms_settings.get("parameters"): if d.header == 1: headers.update({d.parameter: d.value}) return headers def send_request(gateway_url, params, headers=None, use_post=False, use_json=False): import requests if not headers: headers = get_headers() kwargs = {"headers": headers} if use_json: kwargs["json"] = params elif use_post: kwargs["data"] = params else: kwargs["params"] = params if use_post: response = requests.post(gateway_url, **kwargs) else: response = requests.get(gateway_url, **kwargs) response.raise_for_status() return response.status_code # Create SMS Log # ========================================================= def create_sms_log(args, sent_to): sl = frappe.new_doc('SMS Log') sl.sent_on = nowdate() sl.message = args['message'].decode('utf-8') sl.no_of_requested_sms = len(args['receiver_list']) sl.requested_numbers = "\n".join(args['receiver_list']) sl.no_of_sent_sms = len(sent_to) sl.sent_to = "\n".join(sent_to) 
sl.flags.ignore_permissions = True sl.save()
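A hedged sketch of driving the send_sms helper above; it only works inside an initialised Frappe site with a filled-in 'SMS Settings' document, so the calls are shown commented with placeholder site and phone-number values.

import json
# import frappe
# from frappe.core.doctype.sms_settings.sms_settings import send_sms, validate_receiver_nos

# frappe.init(site="example.local"); frappe.connect()
# validate_receiver_nos([" +1 (555) 010-0100 "])   # strips ' ', '-', '(' and ')' from each number
# send_sms(json.dumps(["+15550100100"]), "Test message", success_msg=False)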
2.296875
2
blog/migrations/0041_auto_20190504_0855.py
akindele214/181hub_2
1
11923
# Generated by Django 2.1.5 on 2019-05-04 07:55 import blog.formatChecker from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0040_auto_20190504_0840'), ] operations = [ migrations.AlterField( model_name='videos', name='video', field=models.FileField(blank=True, null=True, upload_to='uploads/', validators=[blog.formatChecker.file_size]), ), ]
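The migration above points the FileField at a blog.formatChecker.file_size validator whose implementation is not part of this record; the following is a purely hypothetical sketch of what such a size validator usually looks like.

from django.core.exceptions import ValidationError

def file_size(value, limit_mb=50):
    # Hypothetical stand-in, not the project's real validator.
    if value.size > limit_mb * 1024 * 1024:
        raise ValidationError("File too large; keep it under %d MB." % limit_mb)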
1.679688
2
tardis/model/tests/test_csvy_model.py
Youssef15015/tardis
0
11924
<filename>tardis/model/tests/test_csvy_model.py import numpy as np import numpy.testing as npt import tardis import os from astropy import units as u from tardis.io.config_reader import Configuration from tardis.model import Radial1DModel import pytest DATA_PATH = os.path.join(tardis.__path__[0],'model','tests','data') @pytest.fixture(scope="module", params=['config_csvy_full.yml', 'config_csvy_nocsv_branch85.yml', 'config_csvy_nocsv_uniform.yml', 'config_csvy_nocsv_powerlaw.yml', 'config_csvy_nocsv_exponential.yml', 'config_csvy_full_rad.yml']) def full_filename(request): return os.path.join(DATA_PATH, request.param) def test_compare_models(full_filename): tardis_config = Configuration.from_yaml(full_filename) csvy_model = Radial1DModel.from_csvy(tardis_config) config_model = Radial1DModel.from_config(tardis_config) csvy_model_props = csvy_model.get_properties().keys() config_model_props = config_model.get_properties().keys() npt.assert_array_equal(csvy_model_props, config_model_props) for prop in config_model_props: csvy_model_val = csvy_model.get_properties()[prop] config_model_val = config_model.get_properties()[prop] if prop == 'homologous_density': npt.assert_array_almost_equal(csvy_model_val.density_0.value, config_model_val.density_0.value) npt.assert_array_almost_equal(csvy_model_val.time_0.value, config_model_val.time_0.value) else: if hasattr(config_model_val, 'value'): config_model_val = config_model_val.value csvy_model_val = csvy_model_val.value npt.assert_array_almost_equal(csvy_model_val, config_model_val)
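The comparison loop above strips astropy units with .value before calling numpy's testing helpers; below is a small self-contained illustration of that pattern, with made-up values unrelated to the tardis fixtures.

import numpy.testing as npt
from astropy import units as u

a = ([1.0, 2.0, 3.0] * u.km).to(u.m)              # Quantity carrying units
b = [1000.0, 2000.0, 3000.0] * u.m
npt.assert_array_almost_equal(a.value, b.value)   # compare the bare ndarrays once units are dropped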
2.140625
2
wepppy/taudem/topaz_emulator.py
hwbeeson/wepppy
0
11925
<gh_stars>0 from typing import List import os import json from os.path import join as _join from os.path import exists as _exists import math from osgeo import gdal, osr import numpy as np from scipy.ndimage import label from subprocess import Popen, PIPE from pprint import pprint from wepppy.all_your_base.geo import read_tif, centroid_px from wepppy.watershed_abstraction.wepp_top_translator import WeppTopTranslator from wepppy.watershed_abstraction.support import ( cummnorm_distance, compute_direction, representative_normalized_elevations, weighted_slope_average, rect_to_polar, write_slp, HillSummary, ChannelSummary, CentroidSummary, slp_asp_color, polygonize_netful, polygonize_bound, polygonize_subcatchments, json_to_wgs ) from .taudem import TauDEMRunner _USE_MPI = False _DEBUG = False class Node: def __init__(self, tau_id, network): self.data = tau_id d = network[tau_id] self.top = top = d['top'] self.bottom = bottom = d['bottom'] links = d['links'] if len(links) == 2: refvec = np.array(bottom, dtype=float) - np.array(top, dtype=float) links = sorted([dict(tau_id=_id, point=network[_id]['top'], origin=top, refvec=refvec) for _id in links], key=lambda _d: rect_to_polar(_d)) links = [_d['tau_id'] for _d in links] if len(links) > 0: self.left = Node(links[0], network) else: self.left = None if len(links) > 1: self.right = Node(links[1], network) else: self.right = None class TauDEMTopazEmulator(TauDEMRunner): def __init__(self, wd, dem, vector_ext='geojson'): super(TauDEMTopazEmulator, self).__init__(wd, dem, vector_ext) # subwta @property def _subwta(self): return _join(self.wd, 'subwta.tif') # subwta @property def _subwta_shp(self): return _join(self.wd, 'subwta.geojson') # subcatchments @property def _subcatchments_shp(self): return _join(self.wd, 'subcatchments.geojson') # bound @property def _bound(self): return _join(self.wd, 'bound.tif') # bound @property def _bound_shp(self): return _join(self.wd, 'bound.geojson') # net @property def _netful_shp(self): return _join(self.wd, 'netful.geojson') @property def _channels(self): return _join(self.wd, 'channels.tif') def topaz2tau_translator_factory(self): d = self.tau2topaz_translator_factory() return {v: k for k, v in d.items()} def run_streamnet(self, single_watershed=False): super(TauDEMTopazEmulator, self).run_streamnet(single_watershed=single_watershed) tau2top_translator = self.tau2topaz_translator_factory() with open(self._net) as fp: js = json.load(fp) for i, feature in enumerate(js['features']): topaz_id = tau2top_translator[feature['properties']['WSNO']] js['features'][i]['properties']['TopazID'] = int(str(topaz_id) + '4') with open(self._net, 'w') as fp: json.dump(js, fp) cmd = ['gdal_rasterize', '-a', 'TopazID', '-a_nodata', '0', '-a_srs', 'epsg:{}'.format(self.epsg), '-te', self.ul_x, self.lr_y, self.lr_x, self.ul_y, '-tr', self.cellsize, self.cellsize, '-ot', 'UInt16', self._net, self._channels] cmd = [str(v) for v in cmd] print(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) p.wait() assert _exists(self._channels) def build_channels(self, csa=None): if csa is None: csa = 100 wd = self.wd self.run_pitremove() self.run_d8flowdir() self.run_aread8() self.run_gridnet() self.run_src_threshold(threshold=csa) polygonize_netful(self._src, self._netful_shp) def set_outlet(self, lng, lat): self.run_moveoutletstostrm(lng=lng, lat=lat) def build_subcatchments(self, threshold=None): self.run_peukerdouglas() self.run_peukerdouglas_stream_delineation(threshold=threshold) self.run_streamnet() self.run_dinfflowdir() 
self.run_areadinf() self.run_dinfdistdown() json_to_wgs(self._net) self.delineate_subcatchments() polygonize_subcatchments(self._subwta, self._subwta_shp, self._subcatchments_shp) self.make_bound() polygonize_bound(self._bound, self._bound_shp) def abstract_watershed(self, wepp_chn_type, clip_hillslopes=False, clip_hillslope_length=300.0): self.abstract_channels(wepp_chn_type=wepp_chn_type) self.abstract_subcatchments(clip_hillslopes=clip_hillslopes, clip_hillslope_length=clip_hillslope_length) self.abstract_structure() @property def _abstracted_channels(self): return _join(self.wd, 'channels.json') @property def abstracted_channels(self): with open(self._abstracted_channels) as fp: summaries = json.load(fp) translator = self.translator chns_summary = {} for topaz_id, d in summaries.items(): wepp_id = translator.wepp(top=topaz_id) chn_enum = translator.chn_enum(top=topaz_id) slope_scalar = d['slope_scalar'] aspect = d['aspect'] chns_summary[topaz_id] = \ ChannelSummary( topaz_id=topaz_id, wepp_id=wepp_id, chn_enum=chn_enum, chn_type=d['wepp_chn_type'], isoutlet=d['isoutlet'], length=d['length'], width=d['width'], order=d['order'], aspect=aspect, head=d['head'], tail=d['tail'], direction=d['direction'], slope_scalar=slope_scalar, color=slp_asp_color(slope_scalar, aspect), area=d['area'], elevs=d['elevs'], distance_p=d['distance_p'], slopes=d['slopes'], centroid=CentroidSummary( px=d['centroid_px'], lnglat=d['centroid_lnglat'] ) ) return chns_summary @property def _abstracted_subcatchments(self): return _join(self.wd, 'subcatchments.json') @property def abstracted_subcatchments(self): with open(self._abstracted_subcatchments) as fp: summaries = json.load(fp) translator = self.translator subs_summary = {} for topaz_id, d in summaries.items(): wepp_id = translator.wepp(top=topaz_id) slope_scalar = d['slope_scalar'] aspect = d['aspect'] subs_summary[topaz_id] = \ HillSummary(topaz_id=topaz_id, wepp_id=wepp_id, w_slopes=d['w_slopes'], length=d['length'], width=d['width'], area=d['area'], direction=d['direction'], elevs=d['elevs'], aspect=aspect, slope_scalar=slope_scalar, color=slp_asp_color(slope_scalar, aspect), distance_p=d['distance_p'], centroid=CentroidSummary( px=d['centroid_px'], lnglat=d['centroid_lnglat'] ), fp_longest=d['fp_longest'], fp_longest_length=d['fp_longest_length'], fp_longest_slope=d['fp_longest_slope'] ) return subs_summary @property def _structure(self): return _join(self.wd, 'structure.tsv') @property def structure(self): with open(self._structure) as fp: return [[int(v) for v in line.split()] for line in fp.readlines()] def abstract_channels(self, wepp_chn_type=None): cellsize = self.cellsize cellsize2 = self.cellsize2 translator = self.translator slopes = self.data_fetcher('dinf_slope', dtype=np.float) fvslop = self.data_fetcher('dinf_angle', dtype=np.float) with open(self._net) as fp: js = json.load(fp) chn_d = {} for feature in js['features']: topaz_id = int(str(feature['properties']['TopazID'])[:-1]) catchment_id = feature['properties']['WSNO'] uslinkn01 = feature['properties']['USLINKNO1'] uslinkn02 = feature['properties']['USLINKNO2'] dslinkn0 = feature['properties']['DSLINKNO'] order = feature['properties']['strmOrder'] chn_id = int(str(topaz_id) + '4') enz_coords = feature['geometry']['coordinates'] # listed bottom to top # need to identify unique pixels px_last, py_last = None, None indx, indy = [], [] for e, n, z in enz_coords: px, py = self.utm_to_px(e, n) if px != px_last or py != py_last: assert 0 <= px < slopes.shape[0], ((px, py), (e, n), slopes.shape) 
assert 0 <= py < slopes.shape[1], ((px, py), (e, n), slopes.shape) indx.append(px) indy.append(py) px_last, py_last = px, py # the pixels are listed bottom to top we want them top to bottom as if we walked downt the flowpath indx = indx[::-1] indy = indy[::-1] flowpath = np.array([indx, indy]).T _distance = flowpath[:-1, :] - flowpath[1:, :] distance = np.sqrt(np.power(_distance[:, 0], 2.0) + np.power(_distance[:, 1], 2.0)) slope = np.array([slopes[px, py] for px, py in zip(indx[:-1], indy[:-1])]) assert distance.shape == slope.shape, (distance.shape, slope.shape) if len(indx) == 1: px, py = indx[0], indy[0] slope_scalar = float(slopes[px, py]) slope = np.array([slope_scalar, slope_scalar]) # todo: don't think head and tail are being used any where, but these # are inconsistent with case when there is more than one pixel head = enz_coords[-1][:-1] tail = enz_coords[0][:-1] direction = compute_direction(head, tail) length = np.linalg.norm(np.array(head) - np.array(tail)) if length < cellsize: length = cellsize width = cellsize2 / length distance_p = [0.0, 1.0] elevs = representative_normalized_elevations(distance_p, list(slope)) else: # need normalized distance_p to define slope distance_p = cummnorm_distance(distance) if len(slope) == 1: slope = np.array([float(slope), float(slope)]) # calculate the length from the distance array length = float(np.sum(distance) * cellsize) width = float(cellsize) # aspect = float(self._determine_aspect(indx, indy)) head = [v * cellsize for v in flowpath[-1]] head = [float(v) for v in head] tail = [v * cellsize for v in flowpath[0]] tail = [float(v) for v in tail] direction = compute_direction(head, tail) elevs = representative_normalized_elevations(distance_p, list(slope)) slope_scalar = float(abs(elevs[-1])) area = float(length) * float(width) # calculate aspect aspect = np.mean(np.angle([np.complex(np.cos(rad), np.sin(rad)) for rad in fvslop[(indx, indy)]], deg=True)) isoutlet = dslinkn0 == -1 c_px, c_py = centroid_px(indx, indy) centroid_lnglat = self.px_to_lnglat(c_px, c_py) chn_enum = translator.chn_enum(chn_id=chn_id) chn_d[str(chn_id)] = dict(chn_id=int(chn_id), chn_enum=int(chn_enum), order=int(order), length=float(length), width=float(width), area=float(area), elevs=[float(v) for v in elevs], wepp_chn_type=wepp_chn_type, head=head, tail=tail, aspect=float(aspect), slopes=[float(v) for v in slope], isoutlet=isoutlet, direction=float(direction), distance_p=[float(v) for v in distance_p], centroid_px=[int(c_px), int(c_py)], centroid_lnglat=[float(v) for v in centroid_lnglat], slope_scalar=float(slope_scalar) ) with open(self._abstracted_channels, 'w') as fp: json.dump(chn_d, fp, indent=2, sort_keys=True) @property def topaz_sub_ids(self): subwta = self.data_fetcher('subwta', dtype=np.uint16) sub_ids = sorted(list(set(subwta.flatten()))) if 0 in sub_ids: sub_ids.remove(0) sub_ids = [v for v in sub_ids if not str(v).endswith('4')] return sub_ids @property def topaz_chn_ids(self): with open(self._net) as fp: js = json.load(fp) chn_ids = [] for feature in js['features']: chn_ids.append(feature['properties']['TopazID']) return chn_ids @property def translator(self): return WeppTopTranslator(top_sub_ids=self.topaz_sub_ids, top_chn_ids=self.topaz_chn_ids) def abstract_subcatchments(self, clip_hillslopes=False, clip_hillslope_length=300.0): """ in: dinf_dd_horizontal, dinf_dd_vertical, dinf_dd_surface, dinf_slope, subwta :return: """ cellsize = self.cellsize cellsize2 = self.cellsize2 sub_ids = self.topaz_sub_ids assert _exists(self._dinf_dd_horizontal), 
self._dinf_dd_horizontal assert _exists(self._dinf_dd_vertical), self._dinf_dd_vertical assert _exists(self._dinf_dd_surface), self._dinf_dd_surface assert _exists(self._dinf_slope), self._dinf_slope assert _exists(self._subwta), self._subwta assert _exists(self._dinf_angle), self._dinf_angle subwta = self.data_fetcher('subwta', dtype=np.uint16) lengths = self.data_fetcher('dinf_dd_horizontal', dtype=np.float) verticals = self.data_fetcher('dinf_dd_vertical', dtype=np.float) surface_lengths = self.data_fetcher('dinf_dd_surface', dtype=np.float) slopes = self.data_fetcher('dinf_slope', dtype=np.float) aspects = self.data_fetcher('dinf_angle', dtype=np.float) chns_d = self.abstracted_channels subs_d = {} for sub_id in sub_ids: # identify cooresponding channel chn_id = str(sub_id)[:-1] + '4' # identify indicies of sub_id raw_indx, raw_indy = np.where(subwta == sub_id) area = float(len(raw_indx)) * cellsize2 indx, indy = [], [] for _x, _y in zip(raw_indx, raw_indy): if lengths[_x, _y] >= 0.0: indx.append(_x) indy.append(_y) if len(indx) == 0: print('sub_id', sub_id) print('raw_indx, raw_indy', raw_indx, raw_indy) print(lengths[(raw_indx, raw_indy)]) print(surface_lengths[(raw_indx, raw_indy)]) print(slopes[(raw_indx, raw_indy)]) print(aspects[(raw_indx, raw_indy)]) width = length = math.sqrt(area) _slp = np.mean(slopes[(raw_indx, raw_indy)]) w_slopes = [_slp, _slp] distance_p = [0, 1] fp_longest = None fp_longest_length = length fp_longest_slope = _slp else: # extract flowpath statistics fp_lengths = lengths[(indx, indy)] fp_lengths += cellsize fp_verticals = verticals[(indx, indy)] fp_surface_lengths = surface_lengths[(indx, indy)] fp_surface_lengths += cellsize fp_surface_areas = np.ceil(fp_surface_lengths) * cellsize fp_slopes = slopes[(indx, indy)] length = float(np.sum(fp_lengths * fp_surface_areas) / np.sum(fp_surface_areas)) if clip_hillslopes and length > clip_hillslope_length: length = clip_hillslope_length width = area / length # if str(sub_id).endswith('1'): # # determine representative length and width # # Cochrane dissertation eq 3.4 # # #print('sub_id', sub_id) # #pprint('fp_lengths') # #pprint(fp_lengths) # #pprint('fp_surface_areas') # #pprint(fp_surface_areas) # length = float(np.sum(fp_lengths * fp_surface_areas) / np.sum(fp_surface_areas)) # width = area / length # # #print('area', area) # #print('width', width) # #print('length', length, '\n\n\n') # else: # width = chns_d[chn_id].length # length = area / width # determine representative slope profile w_slopes, distance_p = weighted_slope_average(fp_surface_areas, fp_slopes, fp_lengths) # calculate longest flowpath statistics fp_longest = int(np.argmax(fp_lengths)) fp_longest_vertical = fp_verticals[fp_longest] fp_longest_length = fp_lengths[fp_longest] fp_longest_slope = fp_longest_vertical / fp_longest_length # calculate slope for hillslope elevs = representative_normalized_elevations(distance_p, w_slopes) slope_scalar = float(abs(elevs[-1])) # calculate aspect _aspects = aspects[(indx, indy)] aspect = np.mean(np.angle([np.complex(np.cos(rad), np.sin(rad)) for rad in _aspects], deg=True)) # calculate centroid c_px, c_py = centroid_px(raw_indx, raw_indy) centroid_lnglat = self.px_to_lnglat(c_px, c_py) direction = chns_d[chn_id].direction if str(sub_id).endswith('2'): direction += 90 if str(sub_id).endswith('3'): direction -= 90 subs_d[str(sub_id)] = dict(sub_id=int(sub_id), area=float(area), length=float(length), aspect=float(aspect), direction=float(direction), width=float(width), w_slopes=list(w_slopes), 
distance_p=list(distance_p), centroid_lnglat=[float(v) for v in centroid_lnglat], centroid_px=[int(c_px), int(c_py)], elevs=list(elevs), slope_scalar=float(slope_scalar), fp_longest=fp_longest, fp_longest_length=float(fp_longest_length), fp_longest_slope=float(fp_longest_slope) ) with open(self._abstracted_subcatchments, 'w') as fp: json.dump(subs_d, fp, indent=2, sort_keys=True) def abstract_structure(self, verbose=False): translator = self.translator topaz_network = self.topaz_network # now we are going to define the lines of the structure file # this doesn't handle impoundments structure = [] for chn_id in translator.iter_chn_ids(): if verbose: print('abstracting structure for channel %s...' % chn_id) top = translator.top(chn_id=chn_id) chn_enum = translator.chn_enum(chn_id=chn_id) # right subcatchments end in 2 hright = top - 2 if not translator.has_top(hright): hright = 0 # left subcatchments end in 3 hleft = top - 1 if not translator.has_top(hleft): hleft = 0 # center subcatchments end in 1 hcenter = top - 3 if not translator.has_top(hcenter): hcenter = 0 # define structure for channel # the first item defines the channel _structure = [chn_enum] # network is defined from the NETW.TAB file that has # already been read into {network} # the 0s are appended to make sure it has a length of # at least 3 chns = topaz_network[top] + [0, 0, 0] # structure line with top ids _structure += [hright, hleft, hcenter] + chns[:3] # this is where we would handle impoundments # for now no impoundments are assumed _structure += [0, 0, 0] # and translate topaz to wepp structure.append([int(v) for v in _structure]) with open(self._structure, 'w') as fp: for row in structure: fp.write('\t'.join([str(v) for v in row])) fp.write('\n') def delineate_subcatchments(self, use_topaz_ids=True): """ in: pksrc, net out: subwta :return: """ w_data = self.data_fetcher('w', dtype=np.int32) _src_data = self.data_fetcher('pksrc', dtype=np.int32) src_data = np.zeros(_src_data.shape, dtype=np.int32) src_data[np.where(_src_data == 1)] = 1 subwta = np.zeros(w_data.shape, dtype=np.uint16) with open(self._net) as fp: js = json.load(fp) # identify pourpoints of the end node catchments end_node_pourpoints = {} for feature in js['features']: catchment_id = feature['properties']['WSNO'] coords = feature['geometry']['coordinates'] uslinkn01 = feature['properties']['USLINKNO1'] uslinkn02 = feature['properties']['USLINKNO2'] end_node = uslinkn01 == -1 and uslinkn02 == -1 top = coords[-1][:-1] if end_node: end_node_pourpoints[catchment_id] = top # make geojson with pourpoints as input for gage watershed outlets_fn = _join(self.wd, 'outlets.geojson') self._make_multiple_outlets_geojson(dst=outlets_fn, en_points_dict=end_node_pourpoints) gw_fn = _join(self.wd, 'end_nodes_gw.tif') self._run_gagewatershed(outlets_fn=outlets_fn, dst=gw_fn) gw, _, _ = read_tif(gw_fn, dtype=np.int16) for _pass in range(2): for feature in js['features']: topaz_id = int(str(feature['properties']['TopazID'])[:-1]) catchment_id = feature['properties']['WSNO'] coords = feature['geometry']['coordinates'] uslinkn01 = feature['properties']['USLINKNO1'] uslinkn02 = feature['properties']['USLINKNO2'] end_node = uslinkn01 == -1 and uslinkn02 == -1 if (end_node and _pass) or (not end_node and not _pass): continue # this has already been processed top = coords[-1] bottom = coords[0] top_px = self.utm_to_px(top[0], top[1]) bottom_px = self.utm_to_px(bottom[0], bottom[1]) # need a mask for the side subcatchments catchment_data = np.zeros(w_data.shape, dtype=np.int32) 
catchment_data[np.where(w_data == catchment_id)] = 1 if end_node: # restrict the end node catchment the catchment area. # otherwise there are cases where it gets drainage from beyond the watershed gw_sub = gw * catchment_data # identify top subcatchment cells gw_indx = np.where(gw_sub == catchment_id) # copy the top subcatchment to the subwta raster if use_topaz_ids: subwta[gw_indx] = int(str(topaz_id) + '1') else: subwta[gw_indx] = int(str(catchment_id) + '1') # remove end subcatchments from the catchment mask catchment_data[np.where(subwta != 0)] = 0 # remove channels from catchment mask catchment_data -= src_data catchment_data = np.clip(catchment_data, a_min=0, a_max=1) indx, indy = np.where(catchment_data == 1) print(catchment_id, _pass, len(indx)) # the whole catchment drains through the top of the channel if len(indx) == 0: continue if _DEBUG: driver = gdal.GetDriverByName('GTiff') dst_ds = driver.Create(_join(self.wd, 'catchment_for_label_%05i.tif' % catchment_id), xsize=subwta.shape[0], ysize=subwta.shape[1], bands=1, eType=gdal.GDT_Int32, options=['COMPRESS=LZW', 'PREDICTOR=2']) dst_ds.SetGeoTransform(self.transform) dst_ds.SetProjection(self.srs_wkt) band = dst_ds.GetRasterBand(1) band.WriteArray(catchment_data.T) dst_ds = None # we are going to crop the catchment for scipy.ndimage.label. It is really slow otherwise # to do this we identify the bounds and then add a pad pad = 1 x0, xend = np.min(indx), np.max(indx) if x0 >= pad: x0 -= pad else: x0 = 0 if xend < self.num_cols - pad: xend += pad else: xend = self.num_cols - 1 y0, yend = np.min(indy), np.max(indy) if y0 >= pad: y0 -= pad else: y0 = 0 if yend < self.num_rows - pad: yend += pad else: yend = self.num_rows - 1 # crop to just the side channel catchments _catchment_data = catchment_data[x0:xend, y0:yend] # use scipy.ndimage.label to identify side subcatchments # todo: compare performance to opencv connectedComponents # https://stackoverflow.com/questions/46441893/connected-component-labeling-in-python subcatchment_data, n_labels = label(_catchment_data) # isolated pixels in the channel can get misidentified as subcatchments # this gets rid of those subcatchment_data -= src_data[x0:xend, y0:yend] # we only want the two largest subcatchments. 
These should be the side subcatchments # so we need to identify which are the largest sub_d = [] for i in range(n_labels): s_indx, s_indy = np.where(subcatchment_data == i + 1) sub_d.append(dict(rank=len(s_indx), s_indx=s_indx, s_indy=s_indy, point=(x0 + np.mean(s_indx), y0 + np.mean(s_indy)), origin=(float(bottom_px[0]), float(bottom_px[1])), refvec=np.array(top_px, dtype=float) - np.array(bottom_px, dtype=float) ) ) # sort clockwise sub_d = sorted(sub_d, key=lambda _d: _d['rank'], reverse=True) if len(sub_d) > 2: sub_d = sub_d[:2] sub_d = sorted(sub_d, key=lambda _d: rect_to_polar(_d)) # assert len(sub_d) == 2 k = 2 for d in sub_d: if use_topaz_ids: subwta[x0:xend, y0:yend][d['s_indx'], d['s_indy']] = int(str(topaz_id) + str(k)) else: subwta[x0:xend, y0:yend][d['s_indx'], d['s_indy']] = int(str(catchment_id) + str(k)) k += 1 channels = self.data_fetcher('channels', dtype=np.int32) ind = np.where(subwta == 0) subwta[ind] = channels[ind] driver = gdal.GetDriverByName('GTiff') dst_ds = driver.Create(self._subwta, xsize=subwta.shape[0], ysize=subwta.shape[1], bands=1, eType=gdal.GDT_UInt16, options=['COMPRESS=LZW', 'PREDICTOR=2']) dst_ds.SetGeoTransform(self.transform) dst_ds.SetProjection(self.srs_wkt) band = dst_ds.GetRasterBand(1) band.WriteArray(subwta.T) band.SetNoDataValue(0) dst_ds = None def make_bound(self): w_data = self.data_fetcher('w', dtype=np.int32) bound = np.zeros(w_data.shape, dtype=np.int32) bound[np.where(w_data > 0)] = 1 driver = gdal.GetDriverByName('GTiff') dst_ds = driver.Create(self._bound, xsize=bound.shape[0], ysize=bound.shape[1], bands=1, eType=gdal.GDT_Byte, options=['COMPRESS=LZW', 'PREDICTOR=2']) dst_ds.SetGeoTransform(self.transform) dst_ds.SetProjection(self.srs_wkt) band = dst_ds.GetRasterBand(1) band.WriteArray(bound.T) band.SetNoDataValue(0) dst_ds = None def calculate_watershed_statistics(self): bound = self.data_fetcher('bound', dtype=np.int32) fvslop = self.data_fetcher('dinf_angle', dtype=np.float32) relief = self.data_fetcher('fel', dtype=np.float32) # calculate descriptive statistics cellsize = self.cellsize wsarea = float(np.sum(bound) * cellsize * cellsize) mask = -1 * bound + 1 # determine area with slope > 30 fvslop_ma = np.ma.masked_array(fvslop, mask=mask) indx, indy = np.ma.where(fvslop_ma > 0.3) area_gt30 = float(len(indx) * cellsize * cellsize) # determine ruggedness of watershed relief_ma = np.ma.masked_array(relief, mask=mask) minz = float(np.min(relief_ma)) maxz = float(np.max(relief_ma)) ruggedness = float((maxz - minz) / math.sqrt(wsarea)) indx, indy = np.ma.where(bound == 1) ws_cen_px, ws_cen_py = int(np.round(np.mean(indx))), int(np.round(np.mean(indy))) ws_centroid = self.px_to_lnglat(ws_cen_px, ws_cen_py) outlet_top_id = None # todo return dict(wsarea=wsarea, area_gt30=area_gt30, ruggedness=ruggedness, minz=minz, maxz=maxz, ws_centroid=ws_centroid, outlet_top_id=outlet_top_id,) @property def topaz_network(self): tau2top = self.tau2topaz_translator_factory() network = self.network top_network = {} for tau_id, d in network.items(): topaz_id = int(str(tau2top[tau_id]) + '4') links = [int(str(tau2top[_tau_id]) + '4') for _tau_id in d['links']] top_network[topaz_id] = links return top_network def tau2topaz_translator_factory(self): tree = Node(self.outlet_tau_id, self.network) def preorder_traverse(node): res = [] if node: res.append(node.data) res.extend(preorder_traverse(node.left)) res.extend(preorder_traverse(node.right)) return res tau_ids = preorder_traverse(tree) if _DEBUG: print('network', tau_ids) d = {tau_id: i+2 for i, tau_id 
in enumerate(tau_ids)} return d def write_slps(self, out_dir, channels=1, subcatchments=1, flowpaths=0): """ Writes slope files to the specified wat_dir. The channels, subcatchments, and flowpaths args specify what slope files should be written. """ if channels: self._make_channel_slps(out_dir) if subcatchments: self._write_subcatchment_slps(out_dir) if flowpaths: raise NotImplementedError def _make_channel_slps(self, out_dir): channels = self.abstracted_channels translator = self.translator chn_ids = channels.keys() chn_enums = sorted([translator.chn_enum(chn_id=v) for v in chn_ids]) # watershed run requires a slope file defining all of the channels in the # 99.1 format. Here we write a combined channel slope file and a slope # file for each individual channel fp2 = open(_join(out_dir, 'channels.slp'), 'w') fp2.write('99.1\n') fp2.write('%i\n' % len(chn_enums)) for chn_enum in chn_enums: top = translator.top(chn_enum=chn_enum) chn_id = str(top) d = channels[chn_id] _chn_wepp_width = d.chn_wepp_width write_slp(d.aspect, d.width, _chn_wepp_width, d.length, d.slopes, d.distance_p, fp2, 99.1) fp2.close() def _write_subcatchment_slps(self, out_dir): subcatchments = self.abstracted_subcatchments cellsize = self.cellsize for sub_id, d in subcatchments.items(): slp_fn = _join(out_dir, 'hill_%s.slp' % sub_id) fp = open(slp_fn, 'w') write_slp(d.aspect, d.width, cellsize, d.length, d.w_slopes, d.distance_p, fp, 97.3) fp.close()
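A hedged end-to-end sketch of how the TauDEMTopazEmulator above appears meant to be driven, based only on the methods in this record; TauDEM binaries, the DEM path, outlet coordinates and thresholds are all placeholders, so the calls are left commented.

# from wepppy.taudem.topaz_emulator import TauDEMTopazEmulator

# emu = TauDEMTopazEmulator(wd='/tmp/taudem_run', dem='/data/dem.tif')
# emu.build_channels(csa=100)                        # pitremove -> d8flowdir -> aread8 -> netful polygon
# emu.set_outlet(lng=-116.95, lat=46.73)             # snap the outlet onto the stream raster
# emu.build_subcatchments(threshold=10)              # streamnet, subwta/bound rasters and polygons
# emu.abstract_watershed(wepp_chn_type='Default')    # channels, subcatchments and structure files
# emu.write_slps(out_dir='/tmp/taudem_run/slps')     # WEPP slope files for channels and hillslopes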
1.984375
2
PyRSM/utils.py
chdahlqvist/RSMmap
3
11926
<reponame>chdahlqvist/RSMmap """ Set of functions used by the PyRSM class to compute detection maps and optimize the parameters of the RSM algorithm and PSF-subtraction techniques via the auto-RSM and auto-S/N frameworks """ __author__ = '<NAME>' from scipy.interpolate import Rbf import pandas as pd import numpy.linalg as la from vip_hci.var import get_annulus_segments, frame_center,prepare_matrix from vip_hci.preproc.derotation import _define_annuli import numpy as np from vip_hci.preproc import cube_derotate, cube_collapse, check_pa_vector,check_scal_vector from vip_hci.preproc.derotation import _find_indices_adi from vip_hci.preproc.rescaling import _find_indices_sdi import scipy as sp from multiprocessing import cpu_count from vip_hci.conf.utils_conf import pool_map, iterable from vip_hci.pca.svd import get_eigenvectors from vip_hci.llsg.llsg import _patch_rlrps from vip_hci.preproc import cube_rescaling_wavelengths as scwave import vip_hci as vip from sklearn.decomposition import NMF as NMF_sklearn def check_delta_sep(scale_list,delta_sep,minradius,fwhm,c): wl = np.asarray(scale_list) wl_ref = wl[len(wl)//2] sep_lft = (wl_ref - wl) / wl_ref * ((minradius + fwhm * delta_sep) / fwhm) sep_rgt = (wl - wl_ref) / wl_ref * ((minradius - fwhm * delta_sep) / fwhm) map_lft = sep_lft >= delta_sep map_rgt = sep_rgt >= delta_sep indices = np.nonzero(map_lft | map_rgt)[0] if indices.size == 0: raise RuntimeError(("No frames left after radial motion threshold for cube {}. Try " "decreasing the value of `delta_sep`").format(c)) def rot_scale(step,cube,cube_scaled,angle_list,scale_list, imlib, interpolation): """ Function used to rescale the frames when relying on ADI+SDI before the computation the reference PSF (step='ini') and rescale and derotate the frames to generate the cube of residuals used by the RSM algorithm (step='fin'). Parameters ---------- step: str 'ini' before the reference PSF computation and 'fin' after PSF subtraction. cube: numpy ndarray, 3d or 4d Original cube cube_scaled: numpy ndarray, 3d Cube of residuals to be rescaled and derotated (None for the step='ini') angle_list : numpy ndarray, 1d Parallactic angles for each frame of the ADI sequences. scale_list: numpy ndarray, 1d, optional Scaling factors in case of IFS data (ADI+mSDI cube). Usually, the scaling factors are the central channel wavelength divided by the shortest wavelength in the cube (more thorough approaches can be used to get the scaling factors). This scaling factors are used to re-scale the spectral channels and align the speckles. Default is None imlib : str, optional See the documentation of the ``vip_hci.preproc.frame_rotate`` function. interpolation : str, optional See the documentation of the ``vip_hci.preproc.frame_rotate`` function. 
""" if cube.ndim == 4: z, n, y_in, x_in = cube.shape scale_list = check_scal_vector(scale_list) if step=='ini': # rescaled cube, aligning speckles for SDI for i in range(n): if i==0: fin_cube = scwave(cube[:, i, :, :], scale_list, imlib=imlib, interpolation=interpolation)[0] fin_pa=np.repeat(angle_list[i],z) fin_scale=scale_list else: fin_cube = np.append(fin_cube,scwave(cube[:, i, :, :], scale_list, imlib=imlib, interpolation=interpolation)[0],axis=0) fin_pa=np.append(fin_pa,np.repeat(angle_list[i],z),axis=0) fin_scale=np.append(fin_scale,scale_list,axis=0) return fin_cube,fin_pa,fin_scale elif step=='fin': cube_fin=np.zeros((n,y_in, x_in)) cube_rescaled = scwave(cube_scaled, scale_list, full_output=True, inverse=True, y_in=y_in, x_in=x_in, imlib=imlib, interpolation=interpolation)[0] cube_derotated=cube_derotate(cube_rescaled,angle_list, interpolation=interpolation,imlib=imlib) for i in range(n): cube_fin[i]=np.mean(cube_derotated[(i*z):((i+1)*z),:,:],axis=0) return cube_fin if cube.ndim == 3: if step=='ini': return cube,angle_list,None elif step=='fin': cube_derotated=cube_derotate(cube_scaled,angle_list, interpolation=interpolation,imlib=imlib) return cube_derotated def remove_outliers(time_s, range_sel, k=5, t0=3): """ Hampel Filter to remove potential outliers in the set of selected parameters for the annular mode of the auto-RSM framework """ vals=pd.DataFrame(data=time_s[range_sel]) L= 1.4826 rolling_median=vals.rolling(k).median() difference=np.abs(rolling_median-vals) median_abs_deviation=difference.rolling(k).median() threshold= t0 *L * median_abs_deviation outlier_idx=difference>threshold vals[outlier_idx]=threshold[outlier_idx] return(vals.to_numpy().reshape(-1)) def interpolation(time_s,range_sel): """ Interpolation algorithm for the RSM parameters for the annular mode of the auto-RSM framework """ time_series=time_s.copy() time_series[range_sel]=remove_outliers(time_series,range_sel) fit = Rbf(range_sel,time_s[range_sel]) inter_point = np.linspace(range_sel[0],range_sel[-1]+1, num=(range_sel[-1]-range_sel[0]+1), endpoint=True) return fit(inter_point) def poly_fit(time_s,range_sel,poly_n): """ Smoothing procedure for the computation of the final radial thresholds which are subtracted from the final RSM detection map in the final step of the auto-RSM framework """ time_series=time_s.copy() time_series[range_sel]=remove_outliers(time_series,range_sel) fit_p=np.poly1d(np.polyfit(range_sel,time_series[range_sel], poly_n)) time_series=fit_p(range(len(time_series))) return time_series def get_time_series(mcube,ann_center): """ Function defining and ordering (anti-clockwise) the pixels composing an annulus at a radial distance of ann_center for an ADI sequence mcube """ if mcube.ndim == 4: indices = get_annulus_segments(mcube[0,0,:,:], ann_center,1,4,90) else: indices = get_annulus_segments(mcube[0], ann_center,1,4,90) tempind=np.vstack((indices[0][0],indices[0][1])) ind = np.lexsort((tempind[0], tempind[1])) indicesy=tempind[0,ind[::-1]] indicesx=tempind[1,ind[::-1]] tempind=np.vstack((indices[1][0],indices[1][1])) ind = np.lexsort((-tempind[0], tempind[1])) indicesy=np.hstack((indicesy,tempind[0,ind[::-1]])) indicesx=np.hstack((indicesx,tempind[1,ind[::-1]])) tempind=np.vstack((indices[2][0],indices[2][1])) ind = np.lexsort((tempind[0], tempind[1])) indicesy=np.hstack((indicesy,tempind[0,ind])) indicesx=np.hstack((indicesx,tempind[1,ind])) tempind=np.vstack((indices[3][0],indices[3][1])) ind = np.lexsort((-tempind[0], tempind[1])) indicesy=np.hstack((indicesy,tempind[0,ind])) 
indicesx=np.hstack((indicesx,tempind[1,ind])) return indicesy,indicesx def perturb(frame,model_matrix,numbasis,evals_matrix, evecs_matrix, KL_basis_matrix,sci_mean_sub_matrix,refs_mean_sub_matrix, angle_list, fwhm, pa_threshold, ann_center): """ Function allowing the estimation of the PSF forward model when relying on KLIP for the computation of the speckle field. The code is based on the PyKLIP library considering only the ADI case with a singlle number of principal components considered. For more details about the code, consider the PyKLIP library or the originall articles (Pueyo, L. 2016, ApJ, 824, 117 or <NAME>., <NAME>., <NAME>., & <NAME>. 2017, ApJ, 842) """ #Selection of the reference library based on the given parralactic angle threshold if pa_threshold != 0: indices_left = _find_indices_adi(angle_list, frame, pa_threshold, truncate=False) models_ref = model_matrix[indices_left] else: models_ref = model_matrix #Computation of the self-subtraction and over-subtraction for the current frame model_sci = model_matrix[frame] KL_basis=KL_basis_matrix[frame] sci_mean_sub=sci_mean_sub_matrix[frame] refs_mean_sub=refs_mean_sub_matrix[frame] evals=evals_matrix[frame] evecs=evecs_matrix[frame] max_basis = KL_basis.shape[0] N_pix = KL_basis.shape[1] models_mean_sub = models_ref - np.nanmean(models_ref, axis=1)[:,None] models_mean_sub[np.where(np.isnan(models_mean_sub))] = 0 model_sci_mean_sub = model_sci- np.nanmean(model_sci) model_sci_mean_sub[np.where(np.isnan(model_sci_mean_sub))] = 0 model_sci_mean_sub_rows = np.reshape(model_sci_mean_sub,(1,N_pix)) sci_mean_sub_rows = np.reshape(sci_mean_sub,(1,N_pix)) delta_KL = np.zeros([max_basis, N_pix]) models_mean_sub_X_refs_mean_sub_T = models_mean_sub.dot(refs_mean_sub.transpose()) for k in range(max_basis): Zk = np.reshape(KL_basis[k,:],(1,KL_basis[k,:].size)) Vk = (evecs[:,k])[:,None] diagVk_X_models_mean_sub_X_refs_mean_sub_T = (Vk.T).dot(models_mean_sub_X_refs_mean_sub_T) models_mean_sub_X_refs_mean_sub_T_X_Vk = models_mean_sub_X_refs_mean_sub_T.dot(Vk) DeltaZk = -(1/(2*np.sqrt(evals[k])))*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vk) + ((Vk.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zk)+(Vk.T).dot(models_mean_sub) for j in range(k): Zj = KL_basis[j, :][None,:] Vj = evecs[:, j][:,None] DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj) for j in range(k+1, max_basis): Zj = KL_basis[j, :][None,:] Vj = evecs[:, j][:,None] DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj) delta_KL[k] = DeltaZk/np.sqrt(evals[k]) oversubtraction_inner_products = np.dot(model_sci_mean_sub_rows, KL_basis.T) selfsubtraction_1_inner_products = np.dot(sci_mean_sub_rows, delta_KL.T) selfsubtraction_2_inner_products = np.dot(sci_mean_sub_rows, KL_basis.T) oversubtraction_inner_products[max_basis::] = 0 klipped_oversub = np.dot(oversubtraction_inner_products, KL_basis) selfsubtraction_1_inner_products[0,max_basis::] = 0 selfsubtraction_2_inner_products[0,max_basis::] = 0 klipped_selfsub = np.dot(selfsubtraction_1_inner_products, KL_basis) + \ np.dot(selfsubtraction_2_inner_products, delta_KL) return model_sci[None,:] - klipped_oversub - klipped_selfsub def KLIP(cube, angle_list, nann=None, local=False, fwhm=4, asize=2, n_segments=1,delta_rot=1, ncomp=1,min_frames_lib=2, max_frames_lib=200,imlib='opencv',nframes=None, 
interpolation='lanczos4', collapse='median',full_output=False, verbose=1): """ Function allowing the estimation of the cube of residuals after the subtraction of the speckle field modeled via the KLIP framework """ array = cube if array.ndim != 3: raise TypeError('Input array is not a cube or 3d array') if array.shape[0] != angle_list.shape[0]: raise TypeError('Input vector or parallactic angles has wrong length') n, y, _ = array.shape angle_list = check_pa_vector(angle_list) if asize is None: annulus_width = int(np.ceil(2 * fwhm)) elif isinstance(asize, int): annulus_width = asize # Annulus parametrization radius_int=fwhm if local==True: if nann> 2*annulus_width: n_annuli = 5 radius_int=(nann//annulus_width-2)*annulus_width else: n_annuli = 4 radius_int=(nann//annulus_width-1)*annulus_width else: n_annuli = int((y / 2 - radius_int) / asize) # Definition of the number of segment for the diifferent annuli if isinstance(n_segments, int): n_segments = [n_segments for _ in range(n_annuli)] elif n_segments == 'auto': n_segments = list() n_segments.append(2) n_segments.append(3) ld = 2 * np.tan(360 / 4 / 2) * asize for i in range(2, n_annuli): radius = i * asize ang = np.rad2deg(2 * np.arctan(ld / (2 * radius))) n_segments.append(int(np.ceil(360 / ang))) if verbose: msg = '# annuli = {}, Ann width = {}, FWHM = {:.3f}' print(msg.format(n_annuli, asize, fwhm)) print('PCA per annulus (or annular sectors):') # Definition of the annuli and the corresmponding parralactic angle threshold cube_out = np.zeros_like(array) for ann in range(n_annuli): if isinstance(ncomp, list) or isinstance(ncomp, np.ndarray): if len(ncomp) == n_annuli: ncompann = ncomp[ann] else: msge = 'If ncomp is a list, it must match the number of annuli' raise TypeError(msge) else: ncompann = ncomp inner_radius = radius_int + ann * annulus_width n_segments_ann = n_segments[ann] if verbose: print('{} : in_rad={}, n_segm={}'.format(ann+1, inner_radius, n_segments_ann)) theta_init = 90 res_ann_par = _define_annuli(angle_list, ann, int((y / 2 - radius_int) / asize), fwhm,radius_int, annulus_width, delta_rot,n_segments_ann, verbose) pa_thr, inner_radius, ann_center = res_ann_par indices = get_annulus_segments(array[0], inner_radius, annulus_width,n_segments_ann,theta_init) # Computation of the speckle field for the different frames and estimation of the cube of residuals for j in range(n_segments_ann): for k in range(array.shape[0]): res =KLIP_patch(k,array[:, indices[j][0], indices[j][1]], ncompann, angle_list, fwhm, pa_thr, ann_center,nframes=nframes) cube_out[k,indices[j][0], indices[j][1]] = res[3] # Cube is derotated according to the parallactic angle and collapsed cube_der = cube_derotate(cube_out, angle_list, imlib=imlib,interpolation=interpolation) frame = cube_collapse(cube_der, mode=collapse) if full_output: return cube_out, cube_der, frame else: return frame def KLIP_patch(frame, matrix, numbasis, angle_list, fwhm, pa_threshold, ann_center,nframes=None): """ Function allowing the computation via KLIP of the speckle field for a given sub-region of the original ADI sequence. 
Code inspired by the PyKLIP librabry """ max_frames_lib=200 if pa_threshold != 0: if ann_center > fwhm*20: indices_left = _find_indices_adi(angle_list,frame,pa_threshold, truncate=True,max_frames=max_frames_lib) else: indices_left = _find_indices_adi(angle_list, frame,pa_threshold, truncate=False,nframes=nframes) refs = matrix[indices_left] else: refs = matrix sci = matrix[frame] sci_mean_sub = sci - np.nanmean(sci) #sci_mean_sub[np.where(np.isnan(sci_mean_sub))] = 0 refs_mean_sub = refs- np.nanmean(refs, axis=1)[:, None] #refs_mean_sub[np.where(np.isnan(refs_mean_sub))] = 0 # Covariance matrix definition covar_psfs = np.cov(refs_mean_sub) covar_psfs *= (np.size(sci)-1) tot_basis = covar_psfs.shape[0] numbasis = np.clip(numbasis - 1, 0, tot_basis-1) max_basis = np.max(numbasis) + 1 #Computation of the eigenvectors/values of the covariance matrix evals, evecs = la.eigh(covar_psfs) evals = np.copy(evals[int(tot_basis-max_basis):int(tot_basis)]) evecs = np.copy(evecs[:,int(tot_basis-max_basis):int(tot_basis)]) evals = np.copy(evals[::-1]) evecs = np.copy(evecs[:,::-1]) # Computation of the principal components KL_basis = np.dot(refs_mean_sub.T,evecs) KL_basis = KL_basis * (1. / np.sqrt(evals))[None,:] KL_basis = KL_basis.T N_pix = np.size(sci_mean_sub) sci_rows = np.reshape(sci_mean_sub, (1,N_pix)) inner_products = np.dot(sci_rows, KL_basis.T) inner_products[0,int(max_basis)::]=0 #Projection of the science image on the selected prinicpal component #to generate the speckle field model klip_reconstruction = np.dot(inner_products, KL_basis) # Subtraction of the speckle field model from the riginal science image #to obtain the residual frame sub_img_rows = sci_rows - klip_reconstruction return evals,evecs,KL_basis,np.reshape(sub_img_rows, (N_pix)),refs_mean_sub,sci_mean_sub def LOCI_FM(cube, psf, ann_center, angle_list,scale_list, asize,fwhm, Tol,delta_rot,delta_sep): """ Computation of the optimal factors weigthing the linear combination of reference frames used to obtain the modeled speckle field for each frame and allowing the determination of the forward modeled PSF. Estimation of the cube of residuals based on the modeled speckle field. """ cube_res = np.zeros_like(cube) ceny, cenx = frame_center(cube[0]) radius_int=ann_center-int(1.5*asize) if radius_int<=0: radius_int=1 for ann in range(3): n_segments_ann = 1 inner_radius_ann = radius_int + ann*asize pa_threshold = _define_annuli(angle_list, ann, 3, asize, radius_int, asize, delta_rot, n_segments_ann, verbose=False)[0] indices = get_annulus_segments(cube[0], inner_radius=inner_radius_ann, width=asize, nsegm=n_segments_ann) ind_opt = get_annulus_segments(cube[0], inner_radius=inner_radius_ann, width=asize, nsegm=n_segments_ann, optim_scale_fact=2) ayxyx = [inner_radius_ann,pa_threshold, indices[0][0], indices[0][1], ind_opt[0][0], ind_opt[0][1]] matrix_res, ind_ref, coef, yy, xx = _leastsq_patch(ayxyx, angle_list,scale_list,fwhm,cube,ann_center,'manhattan', 100,delta_sep, 'lstsq', Tol,formod=True,psf=psf) if ann==1: ind_ref_list=ind_ref coef_list=coef cube_res[:, yy, xx] = matrix_res return cube_res, ind_ref_list,coef_list def nmf_adisdi(cube, angle_list,scale_list=None, cube_ref=None, ncomp=1, scaling=None, max_iter=100, random_state=None, mask_center_px=None, imlib='opencv', interpolation='lanczos4', collapse='median', full_output=False, verbose=True, **kwargs): """ Non Negative Matrix Factorization for ADI or ADI+SDI sequences.This function embeds the scikit-learn NMF algorithm solved through coordinate descent method. 
""" array,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation) n, y, x = array.shape matrix = prepare_matrix(array, scaling, mask_center_px, mode='fullfr', verbose=verbose) matrix += np.abs(matrix.min()) if cube_ref is not None: matrix_ref = prepare_matrix(cube_ref, scaling, mask_center_px, mode='fullfr', verbose=verbose) matrix_ref += np.abs(matrix_ref.min()) mod = NMF_sklearn(n_components=ncomp, alpha=0, solver='cd', init='nndsvd', max_iter=max_iter, random_state=random_state, **kwargs) # H [ncomp, n_pixels]: Non-negative components of the data if cube_ref is not None: H = mod.fit(matrix_ref).components_ else: H = mod.fit(matrix).components_ # W: coefficients [n_frames, ncomp] W = mod.transform(matrix) reconstructed = np.dot(W, H) residuals = matrix - reconstructed array_out = np.zeros_like(array) for i in range(n): array_out[i] = residuals[i].reshape(y,x) cube_der=rot_scale('fin',cube,array_out,angle_list_t,scale_list_t, imlib, interpolation) frame_fin = cube_collapse(cube_der, mode=collapse) return cube_der,frame_fin def annular_NMF(cube, angle_list, nann=None, local=False, fwhm=4, asize=2, n_segments=1, ncomp=20,imlib='opencv', interpolation='lanczos4', collapse='median',max_iter=100, random_state=None,full_output=False, verbose=False): """ Function allowing the estimation of the cube of residuals after the subtraction of the speckle field modeled via the NMF framework. This codes is an adaptation of the VIP NMF function to the case of annular computation of the modeled speckle fields (only full-frame estimation in Gonzalez et al. AJ, 154:7,2017) """ array = cube if array.ndim != 3: raise TypeError('Input array is not a cube or 3d array') if array.shape[0] != angle_list.shape[0]: raise TypeError('Input vector or parallactic angles has wrong length') n, y, _ = array.shape angle_list = check_pa_vector(angle_list) if asize is None: annulus_width = int(np.ceil(2 * fwhm)) elif isinstance(asize, int): annulus_width = asize # Annulus parametrization radius_int=fwhm if local==True: if nann> 2*annulus_width: n_annuli = 5 radius_int=(nann//annulus_width-2)*annulus_width else: n_annuli = 4 radius_int=(nann//annulus_width-1)*annulus_width else: n_annuli = int((y / 2 - radius_int) / asize) # Definition of the annuli and the corresponding parralactic angle threshold cube_out = np.zeros_like(array) for ann in range(n_annuli): inner_radius = radius_int + ann * annulus_width if verbose: print('{} : in_rad={}'.format(ann+1, inner_radius)) theta_init = 90 indices = get_annulus_segments(array[0], inner_radius, annulus_width,n_segments,theta_init) # Computation of the speckle field for the different frames and estimation of the cube of residuals for j in range(n_segments): cube_out[:,indices[j][0], indices[j][1]] =NMF_patch(array[:, indices[j][0], indices[j][1]], ncomp, max_iter,random_state,verbose) # Cube is derotated according to the parallactic angle and collapsed cube_der = cube_derotate(cube_out, angle_list, imlib=imlib,interpolation=interpolation) frame = cube_collapse(cube_der, mode=collapse) if full_output: return cube_out, cube_der, frame else: return frame def NMF_patch(matrix, ncomp, max_iter,random_state,sklearn=False): """ Function allowing the computation via NMF of the speckle field for a given sub-region of the original ADI sequence. The code is a partial reproduction of the VIP function NMF_patch (Gonzalez et al. 
AJ, 154:7,2017) """ refs = matrix+ np.abs(matrix.min()) if sklearn==True: mod = NMF_sklearn(n_components=ncomp, alpha=0, solver='cd', init='nndsvd', max_iter=max_iter, random_state=random_state) # H [ncomp, n_pixels]: Non-negative components of the data H = mod.fit(refs).components_ W = mod.transform(refs) reconstructed = np.dot(W, H) else: mod = NMF(X=refs, n_components=ncomp) mod.SolveNMF(maxiters=max_iter, tol=0.001) H=mod.H W=mod.W reconstructed = np.dot(W, H) residuals = refs - reconstructed return residuals def NMF_patch_range(matrix, ncomp_range, max_iter,random_state,verbose): """ Function allowing the computation via NMF of the speckle field for a range of principal components ncomp_range and a given sub-region of the original ADI sequence. The code is a partial reproduction of the VIP function NMF_patch (Gonzalez et al. AJ, 154:7,2017) """ refs = matrix+ np.abs(matrix.min()) mod = NMF(X=refs, n_components=ncomp_range[len(ncomp_range)-1]) mod.SolveNMF(maxiters=max_iter, tol=0.001) if verbose: print('Done NMF with sklearn.NMF.') residuals=[] for i in ncomp_range: H=mod.H[ncomp_range[0]:i,:] W=mod.W[:,ncomp_range[0]:i] reconstructed = np.dot(W, H) residuals.append(refs - reconstructed) return residuals def annular_pca_adisdi(cube, angle_list,scale_list=None, radius_int=0, fwhm=4, asize=2, n_segments=1, delta_rot=1,delta_sep=0.1, ncomp=1, svd_mode='lapack', nproc=None, min_frames_lib=2, max_frames_lib=200, tol=1e-1, scaling=None, imlib='opencv', interpolation='lanczos4', collapse='median', full_output=False, verbose=False, cube_ref=None, weights=None): """ PCA exploiting angular and spectral variability (ADI or ADI+SDI fashion). """ array,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation) n, y, _ = array.shape angle_list_t = check_pa_vector(angle_list_t) n_annuli = int((y / 2 - radius_int) / asize) if isinstance(delta_rot, tuple): delta_rot = np.linspace(delta_rot[0], delta_rot[1], num=n_annuli) elif isinstance(delta_rot, (int, float)): delta_rot = [delta_rot] * n_annuli if isinstance(n_segments, int): n_segments = [n_segments for _ in range(n_annuli)] elif n_segments == 'auto': n_segments = list() n_segments.append(2) # for first annulus n_segments.append(3) # for second annulus ld = 2 * np.tan(360 / 4 / 2) * asize for i in range(2, n_annuli): # rest of annuli radius = i * asize ang = np.rad2deg(2 * np.arctan(ld / (2 * radius))) n_segments.append(int(np.ceil(360 / ang))) if verbose: msg = 'N annuli = {}, FWHM = {:.3f}' print(msg.format(n_annuli, fwhm)) print('PCA per annulus (or annular sectors):') if nproc is None: # Hyper-threading "duplicates" the cores -> cpu_count/2 nproc = cpu_count() // 2 # The annuli are built, and the corresponding PA thresholds for frame # rejection are calculated (at the center of the annulus) cube_out = np.zeros_like(array) for ann in range(n_annuli): if isinstance(ncomp, tuple) or isinstance(ncomp, np.ndarray): if len(ncomp) == n_annuli: ncompann = ncomp[ann] else: raise TypeError('If `ncomp` is a tuple, it must match the ' 'number of annuli') else: ncompann = ncomp n_segments_ann = n_segments[ann] res_ann_par = _define_annuli(angle_list_t, ann, n_annuli, fwhm, radius_int, asize, delta_rot[ann], n_segments_ann, verbose) pa_thr, inner_radius, ann_center = res_ann_par indices = get_annulus_segments(array[0], inner_radius, asize, n_segments_ann) # Library matrix is created for each segment and scaled if needed for j in range(n_segments_ann): yy = indices[j][0] xx = indices[j][1] matrix_segm = array[:, yy, xx] # 
shape [nframes x npx_segment] if cube_ref is not None: matrix_segm_ref = cube_ref[:, yy, xx] else: matrix_segm_ref = None res = pool_map(nproc, do_pca_patch, matrix_segm, iterable(range(n)), angle_list_t,scale_list_t, fwhm, pa_thr,delta_sep, ann_center, svd_mode, ncompann, min_frames_lib, max_frames_lib, tol, matrix_segm_ref) res = np.array(res) residuals = np.array(res[:, 0]) for fr in range(n): cube_out[fr][yy, xx] = residuals[fr] # Cube is derotated according to the parallactic angle and collapsed cube_der=rot_scale('fin',cube,cube_out,angle_list_t,scale_list_t, imlib, interpolation) frame = cube_collapse(cube_der, mode=collapse) return cube_der, frame def do_pca_patch(matrix, frame, angle_list,scale_list, fwhm, pa_threshold, delta_sep, ann_center, svd_mode, ncomp, min_frames_lib, max_frames_lib, tol, matrix_ref): """ Function doing the SVD/PCA for each frame patch. The code is a partial reproduction of the VIP function do_pca_patch (Gonzalez et al. AJ, 154:7,2017) """ if scale_list is not None: indices_left = np.intersect1d(_find_indices_adi(angle_list, frame, pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, frame, fwhm, delta_sep)) else: indices_left = _find_indices_adi(angle_list, frame, pa_threshold, truncate=False) data_ref = matrix[indices_left] if matrix_ref is not None: # Stacking the ref and the target ref (pa thresh) libraries data_ref = np.vstack((matrix_ref, data_ref)) curr_frame = matrix[frame] # current frame V = get_eigenvectors(ncomp, data_ref, svd_mode, noise_error=tol) transformed = np.dot(curr_frame, V.T) reconstructed = np.dot(transformed.T, V) residuals = curr_frame - reconstructed return residuals, V.shape[0], data_ref.shape[0] def do_pca_patch_range(matrix, frame, angle_list,scale_list, fwhm, pa_threshold,delta_sep, ann_center, svd_mode, ncomp_range, min_frames_lib, max_frames_lib, tol, matrix_ref): """ Function doing the SVD/PCA for each frame patch for a range of principal component ncomp_range. The code is a partial reproduction of the VIP function do_pca_patch (Gonzalez et al. AJ, 154:7,2017) """ if scale_list is not None: indices_left = np.intersect1d(_find_indices_adi(angle_list, frame, pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, frame, fwhm, delta_sep)) else: indices_left = _find_indices_adi(angle_list, frame, pa_threshold, truncate=False) data_ref = matrix[indices_left] if matrix_ref is not None: # Stacking the ref and the target ref (pa thresh) libraries data_ref = np.vstack((matrix_ref, data_ref)) curr_frame = matrix[frame] # current frame V = get_eigenvectors(ncomp_range[len(ncomp_range)-1], data_ref, svd_mode, noise_error=tol) residuals=[] for i in ncomp_range: V_trunc=V[ncomp_range[0]:i,:] transformed = np.dot(curr_frame, V_trunc.T) reconstructed = np.dot(transformed.T, V_trunc) residuals.append(curr_frame - reconstructed) return residuals, V.shape[0], data_ref.shape[0] def loci_adisdi(cube, angle_list,scale_list=None, fwhm=4, metric='manhattan', dist_threshold=50, delta_rot=0.5,delta_sep=0.1, radius_int=0, asize=4, n_segments=1, nproc=1, solver='lstsq', tol=1e-3, optim_scale_fact=1, imlib='opencv', interpolation='lanczos4', collapse='median', nann=None,local=False, verbose=True, full_output=False): """ Least-squares model PSF subtraction for ADI or ADI+SDI. This code is an adaptation of the VIP xloci function to provide, if required, the residuals after speckle field subtraction for a given annulus. 
""" cube_rot_scale,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation) y = cube_rot_scale.shape[1] if not asize < y // 2: raise ValueError("asize is too large") angle_list = check_pa_vector(angle_list) if local==True: n_annuli = 3 radius_int=nann-asize else: n_annuli= int((y / 2 - radius_int) / asize) if verbose: print("Building {} annuli:".format(n_annuli)) if isinstance(delta_rot, tuple): delta_rot = np.linspace(delta_rot[0], delta_rot[1], num=n_annuli) elif isinstance(delta_rot, (int, float)): delta_rot = [delta_rot] * n_annuli if nproc is None: nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores annulus_width = asize if isinstance(n_segments, int): n_segments = [n_segments]*n_annuli elif n_segments == 'auto': n_segments = list() n_segments.append(2) # for first annulus n_segments.append(3) # for second annulus ld = 2 * np.tan(360/4/2) * annulus_width for i in range(2, n_annuli): # rest of annuli radius = i * annulus_width ang = np.rad2deg(2 * np.arctan(ld / (2 * radius))) n_segments.append(int(np.ceil(360/ang))) # annulus-wise least-squares combination and subtraction cube_res = np.zeros_like(cube_rot_scale) ayxyx = [] # contains per-segment data for ann in range(n_annuli): n_segments_ann = n_segments[ann] inner_radius_ann = radius_int + ann*annulus_width # angles pa_threshold = _define_annuli(angle_list, ann, n_annuli, fwhm, radius_int, asize, delta_rot[ann], n_segments_ann, verbose)[0] # indices indices = get_annulus_segments(cube_rot_scale[0], inner_radius=inner_radius_ann, width=asize, nsegm=n_segments_ann) ind_opt = get_annulus_segments(cube_rot_scale[0], inner_radius=inner_radius_ann, width=asize, nsegm=n_segments_ann, optim_scale_fact=optim_scale_fact) # store segment data for multiprocessing ayxyx += [(inner_radius_ann+asize//2,pa_threshold, indices[nseg][0], indices[nseg][1], ind_opt[nseg][0], ind_opt[nseg][1]) for nseg in range(n_segments_ann)] msg = 'Patch-wise least-square combination and subtraction:' # reverse order of processing, as outer segments take longer res_patch = pool_map(nproc, _leastsq_patch, iterable(ayxyx[::-1]), angle_list_t,scale_list_t,fwhm,cube_rot_scale, None, metric, dist_threshold,delta_sep, solver, tol, verbose=verbose, msg=msg, progressbar_single=True) for patch in res_patch: matrix_res, yy, xx = patch cube_res[:, yy, xx] = matrix_res cube_der=rot_scale('fin',cube,cube_res,angle_list_t,scale_list_t, imlib, interpolation) frame_der_median = cube_collapse(cube_der, collapse) if verbose: print('Done processing annuli') return cube_der, frame_der_median def _leastsq_patch(ayxyx, angle_list,scale_list,fwhm,cube, nann,metric, dist_threshold,delta_sep, solver, tol,formod=False,psf=None): """ Function allowing th estimation of the optimal factors for the modeled speckle field estimation via the LOCI framework. The code has been developped based on the VIP python function _leastsq_patch, but return additionnaly the set of coefficients used for the speckle field computation. 
""" ann_center,pa_threshold, yy, xx, yy_opti, xx_opti = ayxyx ind_ref_list=[] coef_list=[] yy_opt=[] xx_opt=[] for j in range(0,len(yy_opti)): if not any(x in np.where(yy==yy_opti[j])[0] for x in np.where(xx==xx_opti[j])[0]): xx_opt.append(xx_opti[j]) yy_opt.append(yy_opti[j]) values = cube[:, yy, xx] matrix_res = np.zeros((values.shape[0], yy.shape[0])) values_opt = cube[:, yy_opti, xx_opti] n_frames = cube.shape[0] for i in range(n_frames): if scale_list is not None: ind_fr_i = np.intersect1d(_find_indices_adi(angle_list, i, pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, i, fwhm, delta_sep)) else: ind_fr_i = _find_indices_adi(angle_list, i, pa_threshold, truncate=False) if len(ind_fr_i) > 0: A = values_opt[ind_fr_i] b = values_opt[i] if solver == 'lstsq': coef = np.linalg.lstsq(A.T, b, rcond=tol)[0] # SVD method elif solver == 'nnls': coef = sp.optimize.nnls(A.T, b)[0] elif solver == 'lsq': coef = sp.optimize.lsq_linear(A.T, b, bounds=(0, 1), method='trf', lsq_solver='lsmr')['x'] else: raise ValueError("`solver` not recognized") else: msg = "No frames left in the reference set. Try increasing " msg += "`dist_threshold` or decreasing `delta_rot`." raise RuntimeError(msg) if formod==True: ind_ref_list.append(ind_fr_i) coef_list.append(coef) recon = np.dot(coef, values[ind_fr_i]) matrix_res[i] = values[i] - recon if formod==True: return matrix_res,ind_ref_list,coef_list, yy, xx, else: return matrix_res, yy,xx def llsg_adisdi(cube, angle_list,scale_list, fwhm, rank=10, thresh=1, max_iter=10, low_rank_ref=False, low_rank_mode='svd', auto_rank_mode='noise', residuals_tol=1e-1, cevr=0.9, thresh_mode='soft', nproc=1, asize=None, n_segments=4, azimuth_overlap=None, radius_int=None, random_seed=None, imlib='opencv', interpolation='lanczos4', high_pass=None, collapse='median', full_output=True, verbose=True, debug=False): """ Local low rank plus Gaussian PSF subtraction for ADI or ADI+SDI. This code is an adaptation of the VIP llsg function. """ cube_rot_scale,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation) list_l, list_s, list_g, f_l, frame_fin, f_g = vip.llsg.llsg(cube_rot_scale, angle_list_t, fwhm, rank=rank,asize=asize, thresh=1,n_segments=n_segments, max_iter=40, random_seed=10, nproc=nproc,full_output=True,verbose=False) res_s=np.array(list_s) residuals_cube_=cube_derotate(res_s[0],-angle_list_t) cube_der=rot_scale('fin',cube,residuals_cube_,angle_list_t,scale_list_t, imlib, interpolation) frame_fin=cube_collapse(cube_der, collapse) return cube_der,frame_fin def _decompose_patch(indices, i_patch,cube_init, n_segments_ann, rank, low_rank_ref, low_rank_mode, thresh, thresh_mode, max_iter, auto_rank_mode, cevr, residuals_tol, random_seed, debug=False, full_output=False): """ Patch decomposition from the LLSG VIP function. 
""" j = i_patch yy = indices[j][0] xx = indices[j][1] data_segm = cube_init[:, yy, xx] if low_rank_ref: ref_segments = list(range(n_segments_ann)) ref_segments.pop(j) for m, n in enumerate(ref_segments): if m == 0: yy_ref = indices[n][0] xx_ref = indices[n][1] else: yy_ref = np.hstack((yy_ref, indices[n][0])) xx_ref = np.hstack((xx_ref, indices[n][1])) data_ref = cube_init[:, yy_ref, xx_ref] else: data_ref = data_segm patch = _patch_rlrps(data_segm, data_ref, rank, low_rank_ref, low_rank_mode, thresh, thresh_mode, max_iter, auto_rank_mode, cevr, residuals_tol, random_seed, debug=debug, full_output=full_output) return patch _largenumber = 1E100 _smallnumber = 1E-5 class NMF: """ Nonnegative Matrix Factorization - Build a set of nonnegative basis components given a dataset with Heteroscedastic uncertainties and missing data with a vectorized update rule. Algorithm: -- Iterative multiplicative update rule Input: -- X: m x n matrix, the dataset Optional Input/Output: -- n_components: desired size of the basis set, default 5 -- V: m x n matrix, the weight, (usually) the inverse variance -- M: m x n binary matrix, the mask, False means missing/undesired data -- H: n_components x n matrix, the H matrix, usually interpreted as the coefficients -- W: m x n_components matrix, the W matrix, usually interpreted as the basis set Comments: -- Between W and H, which one is the basis set and which one is the coefficient depends on how you interpret the data, because you can simply transpose everything as in X-WH versus X^T - (H^T)(W^T) -- Everything needs to be non-negative References: -- <NAME>, 2016 A Vectorized Algorithm for Nonnegative Matrix Factorization with Heteroskedastic Uncertainties and Missing Data AJ/PASP, (to be submitted) -- <NAME>. and <NAME>. 2007 K-corrections and Filter Transformations in the Ultraviolet, Optical, and Near-infrared The Astronomical Journal, 133, 734 -- <NAME>., & <NAME>., 2001 Algorithms for non-negative matrix factorization Advances in neural information processing systems, pp. 556-562 """ def __init__(self, X, W=None, H=None, V=None, M=None, n_components=5): """ Initialization Required Input: X -- the input data set Optional Input/Output: -- n_components: desired size of the basis set, default 5 -- V: m x n matrix, the weight, (usually) the inverse variance -- M: m x n binary matrix, the mask, False means missing/undesired data -- H: n_components x n matrix, the H matrix, usually interpreted as the coefficients -- W: m x n_components matrix, the W matrix, usually interpreted as the basis set """ # I'm making a copy for the safety of everything; should not be a bottleneck self.X = np.copy(X) if (np.count_nonzero(self.X<0)>0): print("There are negative values in X. Setting them to be zero...", flush=True) self.X[self.X<0] = 0. self.n_components = n_components self.maxiters = 100 self.tol = _smallnumber np.random.seed(10) if (W is None): self.W = np.random.rand(self.X.shape[0], self.n_components) else: if (W.shape != (self.X.shape[0], self.n_components)): raise ValueError("Initial W has wrong shape.") self.W = np.copy(W) if (np.count_nonzero(self.W<0)>0): print("There are negative values in W. Setting them to be zero...", flush=True) self.W[self.W<0] = 0. if (H is None): self.H = np.random.rand(self.n_components, self.X.shape[1]) else: if (H.shape != (self.n_components, self.X.shape[1])): raise ValueError("Initial H has wrong shape.") self.H = np.copy(H) if (np.count_nonzero(self.H<0)>0): print("There are negative values in H. 
Setting them to be zero...", flush=True) self.H[self.H<0] = 0. if (V is None): self.V = np.ones(self.X.shape) else: if (V.shape != self.X.shape): raise ValueError("Initial V(Weight) has wrong shape.") self.V = np.copy(V) if (np.count_nonzero(self.V<0)>0): print("There are negative values in V. Setting them to be zero...", flush=True) self.V[self.V<0] = 0. if (M is None): self.M = np.ones(self.X.shape, dtype=np.bool) else: if (M.shape != self.X.shape): raise ValueError("M(ask) has wrong shape.") if (M.dtype != np.bool): raise TypeError("M(ask) needs to be boolean.") self.M = np.copy(M) # Set masked elements to be zero self.V[(self.V*self.M)<=0] = 0 self.V_size = np.count_nonzero(self.V) @property def cost(self): """ Total cost of a given set s """ diff = self.X - np.dot(self.W, self.H) chi2 = np.einsum('ij,ij', self.V*diff, diff)/self.V_size return chi2 def SolveNMF(self, W_only=False, H_only=False, maxiters=None, tol=None): """ Construct the NMF basis Keywords: -- W_only: Only update W, assuming H is known -- H_only: Only update H, assuming W is known -- Only one of them can be set Optional Input: -- tol: convergence criterion, default 1E-5 -- maxiters: allowed maximum number of iterations, default 1000 Output: -- chi2: reduced final cost -- time_used: time used in this run """ if (maxiters is not None): self.maxiters = maxiters if (tol is not None): self.tol = tol chi2 = self.cost oldchi2 = _largenumber if (W_only and H_only): return (chi2, 0.) V = np.copy(self.V) VT = V.T #XV = self.X*self.V XV = np.multiply(V, self.X) XVT = np.multiply(VT, self.X.T) niter = 0 while (niter < self.maxiters) and ((oldchi2-chi2)/oldchi2 > self.tol): # Update H if (not W_only): H_up = np.dot(XVT, self.W) WHVT = np.multiply(VT, np.dot(self.W, self.H).T) H_down = np.dot(WHVT, self.W) self.H = self.H*H_up.T/H_down.T # Update W if (not H_only): W_up = np.dot(XV, self.H.T) WHV = np.multiply(V, np.dot(self.W, self.H)) W_down = np.dot(WHV, self.H.T) self.W = self.W*W_up/W_down # chi2 oldchi2 = chi2 chi2 = self.cost return
1.84375
2
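A minimal usage sketch for the NMF class defined in the record above, assuming only NumPy is available (the class still uses the deprecated np.bool alias, so an older NumPy where that alias exists is assumed; the toy matrix below is made up and the return value of SolveNMF is not relied on):

import numpy as np

# Toy non-negative data matrix: 6 samples x 4 features, values are illustrative only.
X = np.random.rand(6, 4)

model = NMF(X, n_components=2)          # random W (6x2) and H (2x4) initialisation
model.SolveNMF(maxiters=200, tol=1e-5)  # multiplicative updates until the relative chi2 improvement drops below tol

approx = np.dot(model.W, model.H)       # non-negative rank-2 approximation of X
print(model.cost)                       # weighted reduced chi-squared, sum(V*(X - WH)**2) / nnz(V)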
src/custom_dataset.py
devJWSong/transformer-multiturn-dialogue-pytorch
11
11927
<reponame>devJWSong/transformer-multiturn-dialogue-pytorch from torch.utils.data import Dataset from tqdm import tqdm import torch import pickle import json class CustomDataset(Dataset): def __init__(self, args, tokenizer, data_type): assert data_type in ["train", "valid", "test"] print(f"Loading {data_type} data...") with open(f"{args.task_dir}/{data_type}.pickle", "rb") as f: dials = pickle.load(f) with open(f"{args.task_dir}/data_info.json", "r") as f: data_info = json.load(f) self.src_idxs = [] # (N, T, S_L) self.num_valid_turns = [] # (N) self.trg_idxs = [] # (N, T_L) max_pers = data_info["max_num_pers"] num_contexts = max_pers + args.max_turns for dial in tqdm(dials): hists = [] persona1, persona2, turns = dial['persona1'], dial['persona2'], dial['turns'] pers = [] # The system's persona will be handled as extra histories without a speacker token. (or maybe empty...) for per in persona2: token_idxs = [args.bos_id] + tokenizer.encode(per) + [args.eos_id] pers.append(token_idxs) for t, turn in enumerate(turns): if t % 2 == 0: # Speaker 1: User token_idxs = [args.bos_id, args.sp1_id] + tokenizer.encode(turn) + [args.eos_id] else: # Speacker 2: System token_idxs = [args.bos_id, args.sp2_id] + tokenizer.encode(turn) + [args.eos_id] hists.append(token_idxs) hists = [self.trunc(token_idxs, args.src_max_len, args.eos_id) for token_idxs in hists] if len(pers) > 0: pers = [self.trunc(token_idxs, args.src_max_len, args.eos_id) for token_idxs in pers] for i in range(len(hists)): if i % 2 == 1: self.trg_idxs.append(hists[i]) start, end = i-args.max_turns, i if start < 0: start = 0 context = hists[start:end] assert len(context) > 0 if len(pers) > 0: context = pers + context self.num_valid_turns.append(len(context)) if len(context) < num_contexts: num_extras = num_contexts - len(context) context += [[args.bos_id, args.eos_id]] * num_extras assert len(context) == num_contexts self.src_idxs.append(context) # Padding for c, context in enumerate(self.src_idxs): for i, utter in enumerate(self.src_idxs[c]): token_idxs = self.src_idxs[c][i] self.src_idxs[c][i] = self.padding(token_idxs, args.src_max_len, args.pad_id) assert len(self.src_idxs) == len(self.trg_idxs) assert len(self.src_idxs) == len(self.num_valid_turns) def __len__(self): return len(self.src_idxs) def __getitem__(self, idx): return self.src_idxs[idx], self.num_valid_turns[idx], self.trg_idxs[idx] def padding(self, token_idxs, max_len, pad_id): num_extras = max_len - len(token_idxs) token_idxs += [pad_id] * num_extras return token_idxs def trunc(self, token_idxs, max_len, eos_id): token_idxs = token_idxs[:max_len] token_idxs[-1] = eos_id return token_idxs class PadCollate(): def __init__(self, pad_id): self.pad_id = pad_id def pad_collate(self, batch): src_idxs, num_valid_turns, trg_idxs = [], [], [] for seqs in batch: src_idxs.append(seqs[0]) num_valid_turns.append(seqs[1]) trg_idxs.append(torch.LongTensor(seqs[2])) trg_idxs = torch.nn.utils.rnn.pad_sequence(trg_idxs, batch_first=True, padding_value=self.pad_id) # (B, T_L) try: return torch.LongTensor(src_idxs).contiguous(), torch.LongTensor(num_valid_turns).contiguous(), trg_idxs.contiguous() except: print(f"batch size: {len(src_idxs)}") for b in range(len(src_idxs)): print(f"num turns: {len(src_idxs[b])}") print(f"batch size: {len(num_valid_turns)}") print(num_valid_turns) print(trg_idxs.shape) exit()
2.375
2
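The trunc and padding helpers in the CustomDataset record above force every utterance to end in EOS and to be exactly src_max_len tokens long once padded; a standalone sketch of that logic (the token ids, eos_id and pad_id below are made-up values):

def trunc(token_idxs, max_len, eos_id):
    # Keep at most max_len tokens and force the last one to be EOS.
    token_idxs = token_idxs[:max_len]
    token_idxs[-1] = eos_id
    return token_idxs

def padding(token_idxs, max_len, pad_id):
    # Right-pad with pad_id up to max_len.
    return token_idxs + [pad_id] * (max_len - len(token_idxs))

eos_id, pad_id = 2, 0
utterance = [1, 5, 6, 7, 8, 9, 2]                       # hypothetical token ids
print(padding(trunc(utterance, 5, eos_id), 8, pad_id))  # [1, 5, 6, 7, 2, 0, 0, 0]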
research/codec/codec_example.py
FXTD-ODYSSEY/QBinder
13
11928
# -*- coding: future_fstrings -*- import codecs import pdb import string # NOTE https://stackoverflow.com/questions/38777818/how-do-i-properly-create-custom-text-codecs # prepare map from numbers to letters _encode_table = {str(number): bytes(letter) for number, letter in enumerate(string.ascii_lowercase)} # prepare inverse map _decode_table = {v: k for k, v in _encode_table.items()} def custom_encode(text): # example encoder that converts ints to letters print "custom_encode",text # see https://docs.python.org/3/library/codecs.html#codecs.Codec.encode return b''.join(_encode_table[x] for x in text), len(text) def custom_decode(binary): # example decoder that converts letters to ints print "custom_decode",binary # see https://docs.python.org/3/library/codecs.html#codecs.Codec.decode return ''.join(_decode_table[x] for x in binary), len(binary) def custom_search_function(encoding_name): return codecs.CodecInfo(encode=custom_encode, decode=custom_decode, name='Reasons') def main(): # register your custom codec # note that CodecInfo.name is used later codecs.register(custom_search_function) binary = 'abcdefg' # decode letters to numbers pdb.set_trace() text = binary.decode('Reasons') print(text) # encode numbers to letters binary2 = text.encode('Reasons') print(binary2) # fstring = 'f"hello {text}"'.decode('future-fstrings') # print fstring # encode(decode(...)) should be an identity function assert binary == binary2 if __name__ == '__main__': main()
3.421875
3
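The codec example above targets Python 2 (print statements, implicit str/bytes). A rough Python 3 sketch of the same idea — registering a search function and round-tripping through a custom text codec — follows; only the codec name 'Reasons' is taken from the file, the digit-to-letter mapping is simplified to single digits, and everything else is an assumption:

import codecs
import string

# Single digit character -> letter byte ('0' -> b'a', '1' -> b'b', ...).
_enc = {str(i): bytes([ord(c)]) for i, c in enumerate(string.ascii_lowercase[:10])}
_dec = {v[0]: k for k, v in _enc.items()}

def custom_encode(text, errors='strict'):
    return b''.join(_enc[ch] for ch in text), len(text)

def custom_decode(binary, errors='strict'):
    binary = bytes(binary)  # the codec machinery may hand over a buffer-like object
    return ''.join(_dec[b] for b in binary), len(binary)

def search(name):
    # Lookup names are normalised, so compare case-insensitively.
    if name.lower() == 'reasons':
        return codecs.CodecInfo(custom_encode, custom_decode, name='reasons')
    return None

codecs.register(search)
assert '0123'.encode('Reasons') == b'abcd'
assert b'abcd'.decode('Reasons') == '0123'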
benchmark_python_lkml.py
Ladvien/rust_lookml_parser
0
11929
import lkml from time import time_ns from rich import print FILE_PATH = "/Users/ladvien/rusty_looker/src/resources/test.lkml" with open(FILE_PATH, "r") as f: lookml = f.read() startTime = time_ns() // 1_000_000 result = lkml.load(lookml) print(result) executionTime = (time_ns() // 1_000_000) - startTime print('Execution time in milliseconds: ' + str(executionTime))
2.46875
2
Linear_Regression.py
svdeepak99/TSA-Twitter_Sentiment_Analysis
0
11930
<filename>Linear_Regression.py from keras.models import Sequential, load_model from keras.layers import Dense import csv import numpy as np import os LOAD_MODEL = False with open("Linear_Regression/Normalized_Attributes.csv", "r", newline='') as fp: reader = csv.reader(fp) headings = next(reader) dataset = np.array(list(reader), dtype=np.float) with open("Linear_Regression/VADER_Sentiment.csv", "r", newline='') as fp: reader = csv.reader(fp) outputs = np.array([x[0] for x in list(reader)]) if os.path.isfile("Linear_Regression/model/regression_full.h5") and LOAD_MODEL: model = load_model("Linear_Regression/model/regression_full.h5") else: model = Sequential() model.add(Dense(1, input_dim = 33, activation='linear')) model.compile(loss='mse', optimizer='rmsprop', metrics=['mse']) model.fit(x=dataset, y=outputs, epochs=40, verbose=1) model.save("Linear_Regression/model/regression_full.h5") model.summary() weights = model.get_weights() weights_list = [] for i, w in enumerate(weights[0]): print(f'{i+1}) {headings[i]} : {w[0]}') weights_list.append([headings[i], w[0]]) print(f'34) BIAS: {weights[1][0]}\n') weights_list.append(['BIAS', weights[1][0]]) with open("Linear_Regression/Full_weights.csv", "w", newline='') as fp: writer = csv.writer(fp) writer.writerows(weights_list) print(len(weights), len(weights[0]), len(weights[1])) print(model.predict(dataset[:10])) print(outputs[:10]) print(np.sum(dataset[0]*np.array([x[0] for x in weights[0]]))+weights[1][0], model.predict(np.array([dataset[0]])))
2.890625
3
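A single Dense(1, activation='linear') unit trained with MSE is plain linear regression, which is why the script above can sanity-check model.predict against a dot product of the inputs with the exported weights plus the bias. A tiny NumPy illustration of that identity (the weight, bias and input values are made up):

import numpy as np

w = np.array([0.5, -1.2, 2.0])  # hypothetical learned weights, one per input feature
b = 0.3                         # hypothetical learned bias
x = np.array([1.0, 0.0, 2.5])   # one input row

prediction = np.dot(x, w) + b   # what model.predict(x[None, :]) computes for a linear Dense(1) layer
print(prediction)               # 0.5*1.0 + (-1.2)*0.0 + 2.0*2.5 + 0.3 = 5.8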
UserCode/bressler/multibubblescintillationcheck.py
cericdahl/SBCcode
4
11931
<filename>UserCode/bressler/multibubblescintillationcheck.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Mar 2 19:33:02 2021 @author: bressler """ import SBCcode as sbc import numpy as np import pulse_integrator as pi import gc def check_multibub_scintillation(run, event, at0, PMTgain, PMTwindow): tstart = PMTwindow[0] t_end= PMTwindow[1] scintillation_signal = 0 datadir = '/bluearc/storage/SBC-17-data' e = sbc.DataHandling.GetSBCEvent.GetEvent(datadir+'/'+run,event) cgate = e["fastDAQ"]["CAMgate"] fdt = e["fastDAQ"]["time"] LED_on = [fdt[i] for i in range(len(cgate)) if cgate[i]<-0.5] look_times = [x for x in LED_on if (x < 0 and abs(x-at0)<tstart)] #print(str(len(LED_on)/len(fdt))) if len(look_times)>0: LED_during_window = True else: LED_during_window = False dcam = np.diff(cgate) fdt = e["fastDAQ"]["time"] camOffTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5] pmttracetime = e["PMTtraces"]["t0_sec"][:,0]+e["PMTtraces"]["t0_frac"][:,0] d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e) pmtalign = d["PMT_trigt0_sec"]+d["PMT_trigt0_frac"] tracetimes = pmttracetime - pmtalign i=0 # to match the indexing of the pre-made code I had 1??? candidate = 0 for t in (tracetimes-at0): # loop through every PMT trace for the event if t<t_end and t>tstart: # if the trace time is within 500 microsec before acoustic t0 """ lastCamOff = 0 for k in range(len(camOffTimes)): if t+at0 > camOffTimes[k]: lastCamOff = camOffTimes[k] elif t+at0 < camOffTimes[k]: break if t+at0-lastCamOff > 25e-6: # if the trace time is more than 25 microseconds away from a camera gate rise """ #take abs to get positive area: trace = np.fabs(e["PMTtraces"]["traces"][i][0]) #if ch0 saturated, stitch in low res channel: if max(trace) == 128: trace = pi.stitchTraces(trace,np.fabs(e["PMTtraces"]["traces"][i][1])) dt = e["PMTtraces"]["dt"][i][0] #integrate and convert to phe: [phe,n,totInt,pktimes] = pi.SBC_pulse_integrator_bressler(trace,dt) if phe != None: phe /= PMTgain #keep track of largest candidate: if phe > candidate: candidate = phe i+=1 #i.e. if there is a candidate PMT trace with area greater than zero if candidate > 0: scintillation_signal = candidate gc.collect() return [LED_during_window, scintillation_signal] def main(): returned = check_multibub_scintillation('/bluearc/storage/SBC-17-data/20170703_5', 5, -0.1) if __name__ == "__main__": main()
2.21875
2
app/core/models.py
echosisdev/openmrs-disa-sync
0
11932
from django.db import models from django.db.models.signals import pre_save, post_save from core.utils.constants import Constants from core.utils.data_convertion import DataConversion class ExcelFile(models.Model): file_name = models.FileField(upload_to='uploads') date_created = models.DateTimeField(auto_now_add=True) activated = models.BooleanField(default=False) def __str__(self): return f'File Id{self.id} File name {self.file_name}' class CsvFile(models.Model): file_name = models.FileField(upload_to='uploads') date_uploaded = models.DateTimeField(auto_now_add=True) activated = models.BooleanField(default=False) def __str__(self): return f'File Id{self.id} File name {self.file_name}' class ViralLoad(models.Model): laboratory_id = models.CharField(max_length=100, null=True, blank=True) sector = models.CharField(max_length=30, blank=True, null=True) number_orig_lab = models.CharField(max_length=100, blank=True, null=True) province = models.CharField(max_length=100, blank=True, null=True) district = models.CharField(max_length=100, blank=True, null=True) health_facility = models.CharField(max_length=100, blank=True, null=True) patient_name = models.CharField(max_length=100, blank=True, null=True) gender = models.CharField(max_length=100, blank=True, null=True) reference = models.CharField(max_length=100, blank=True, null=True) capture_date = models.DateField(null=True, blank=True) access_date = models.DateField(null=True, blank=True) nid = models.CharField(max_length=100, blank=True, null=True) viral_load = models.CharField(max_length=100, null=True, blank=True) viral_load_qualitative = models.CharField( max_length=100, blank=True, null=True) synced = models.BooleanField(default=False) formatted_nid = models.CharField(max_length=100, blank=True, null=True) class Meta: verbose_name = 'Viral Load' verbose_name_plural = 'Viral Loads' def __str__(self): return self.patient_name class Patient(models.Model): patient_uuid = models.CharField(max_length=500) #person_id = models.IntegerField() nid = models.CharField(max_length=100, blank=True, null=True) patient_name = models.CharField(max_length=100, blank=True, null=True) def __str__(self): return self.patient_name class Encounter(models.Model): encounterDatetime = models.DateTimeField(auto_now_add=True) patient = models.ForeignKey(Patient, on_delete=models.CASCADE) encounterType_uuid = models.CharField( max_length=255, default=Constants().get_uuids().get('encounter_type')) location_uuid = models.CharField( max_length=255, default=Constants().get_uuids().get('hpt')) form_uuid = models.CharField( max_length=255, default=Constants().get_uuids().get('form')) synced = models.BooleanField(default=False) def __str__(self): return self.patient.name class Observation(models.Model): patient = models.ForeignKey( Patient, on_delete=models.CASCADE) obsDateTime = models.DateTimeField(auto_now_add=True) concept = models.CharField(max_length=255) value_numeric = models.PositiveIntegerField(null=True, blank=True) value_coded = models.PositiveIntegerField(null=True, blank=True) value_datetime = models.DateTimeField(null=True, blank=True) encounter = models.ForeignKey(Encounter, on_delete=models.CASCADE) location = models.CharField( max_length=255, default=Constants().get_uuids().get('hpt')) value = models.CharField(max_length=255) voided = models.BooleanField(default=False) synced = models.BooleanField(default=False) def __str__(self): return self.id # def insert_formatted_nid(sender, instance, created, *args, **kwargs): # if created: # instance.formatted_nid = 
DataConversion.format_nid(instance.nid) # print(instance.formatted_nid) # post_save.connect(insert_formatted_nid, sender=ViralLoad)
2.046875
2
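The commented-out block at the end of the models module above sketches a post_save receiver for ViralLoad; wired up it would look roughly like the following (a sketch only — it assumes DataConversion.format_nid exists as referenced in that comment and lives in the same module):

from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=ViralLoad)
def insert_formatted_nid(sender, instance, created, **kwargs):
    # Only fill in the formatted NID when the row is first created.
    if created and instance.nid:
        instance.formatted_nid = DataConversion.format_nid(instance.nid)
        # Restrict the save so only this column is written back (the guard on
        # `created` also prevents the re-triggered signal from looping).
        instance.save(update_fields=["formatted_nid"])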
ceibacli/job_schedulers/slurm.py
cffbots/ceiba-cli
2
11933
<gh_stars>1-10 """Interface to the `SLURM job scheduler <https://slurm.schedmd.com/documentation.html>`_ .. autofunction:: create_slurm_script """ from pathlib import Path from typing import Any, Dict, List from ..utils import Options def create_slurm_script(opts: Options, jobs: List[Dict[str, Any]], jobs_metadata: List[Options]) -> str: """Create a script to run the workflow using the SLURM job schedule.""" slurm_file = Path("launch.sh") # Get SLURM configuration scheduler = opts.scheduler # Use the configuration provided by the user if scheduler.free_format is not None: script = scheduler.free_format else: script = make_script(opts.scheduler) # Append command to run the workflow for meta, job in zip(jobs_metadata, jobs): input_file = meta.input.absolute().as_posix() workdir = opts.workdir.absolute().as_posix() script += f'\ncd {workdir} && {opts.command} {input_file}' with open(slurm_file, 'w') as handler: handler.write(script) return f"sbatch {slurm_file.absolute().as_posix()}" def make_script(scheduler: Options) -> str: """Create a SLURM script using the ``scheduler`` options.""" arguments = {"cpus-per-task", "partition"} script = f"""#!/bin/bash #SBATCH -N {scheduler.nodes} #SBATCH -t {scheduler.wall_time} """ # Add optional arguments for arg in arguments: value = scheduler.get(arg, None) if value is not None: script += f"#SBATCH --{arg} {value}\n" return script
2.4375
2
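For reference, with nodes=1, wall_time='01:00:00', cpus-per-task=4 and no partition, make_script above would emit roughly the following launch header (the optional lines are appended in set-iteration order, so treat this as illustrative output rather than a guaranteed rendering):

#!/bin/bash
#SBATCH -N 1
#SBATCH -t 01:00:00
#SBATCH --cpus-per-task 4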
thumbor/url.py
wking/thumbor
0
11934
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/thumbor/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com <EMAIL> import re from urllib import quote class Url(object): unsafe_or_hash = r'(?:(?:(?P<unsafe>unsafe)|(?P<hash>.+?))/)?' debug = '(?:(?P<debug>debug)/)?' meta = '(?:(?P<meta>meta)/)?' trim = '(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\d+)?)/)?' crop = '(?:(?P<crop_left>\d+)x(?P<crop_top>\d+):(?P<crop_right>\d+)x(?P<crop_bottom>\d+)/)?' fit_in = '(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?' dimensions = '(?:(?P<horizontal_flip>-)?(?P<width>(?:\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\d+|orig))?/)?' halign = r'(?:(?P<halign>left|right|center)/)?' valign = r'(?:(?P<valign>top|bottom|middle)/)?' smart = r'(?:(?P<smart>smart)/)?' filters = r'(?:filters:(?P<filters>.+?\))/)?' image = r'(?P<image>.+)' compiled_regex = None @classmethod def regex(cls, has_unsafe_or_hash=True): reg = ['/?'] if has_unsafe_or_hash: reg.append(cls.unsafe_or_hash) reg.append(cls.debug) reg.append(cls.meta) reg.append(cls.trim) reg.append(cls.crop) reg.append(cls.fit_in) reg.append(cls.dimensions) reg.append(cls.halign) reg.append(cls.valign) reg.append(cls.smart) reg.append(cls.filters) reg.append(cls.image) return ''.join(reg) @classmethod def parse_decrypted(cls, url): if cls.compiled_regex: reg = cls.compiled_regex else: reg = cls.compiled_regex = re.compile(cls.regex(has_unsafe_or_hash=False)) result = reg.match(url) if not result: return None result = result.groupdict() int_or_0 = lambda value: 0 if value is None else int(value) values = { 'debug': result['debug'] == 'debug', 'meta': result['meta'] == 'meta', 'trim': result['trim'], 'crop': { 'left': int_or_0(result['crop_left']), 'top': int_or_0(result['crop_top']), 'right': int_or_0(result['crop_right']), 'bottom': int_or_0(result['crop_bottom']) }, 'adaptive': result['adaptive'] == 'adaptive', 'full': result['full'] == 'full', 'fit_in': result['fit_in'] == 'fit-in', 'width': result['width'] == 'orig' and 'orig' or int_or_0(result['width']), 'height': result['height'] == 'orig' and 'orig' or int_or_0(result['height']), 'horizontal_flip': result['horizontal_flip'] == '-', 'vertical_flip': result['vertical_flip'] == '-', 'halign': result['halign'] or 'center', 'valign': result['valign'] or 'middle', 'smart': result['smart'] == 'smart', 'filters': result['filters'] or '', 'image': 'image' in result and result['image'] or None } return values @classmethod # NOQA def generate_options(cls, debug=False, width=0, height=0, smart=False, meta=False, trim=None, adaptive=False, full=False, fit_in=False, horizontal_flip=False, vertical_flip=False, halign='center', valign='middle', crop_left=None, crop_top=None, crop_right=None, crop_bottom=None, filters=None): url = [] if debug: url.append('debug') if meta: url.append('meta') if trim: if isinstance(trim, bool): url.append('trim') else: url.append('trim:%s' % trim) crop = crop_left or crop_top or crop_right or crop_bottom if crop: url.append('%sx%s:%sx%s' % ( crop_left, crop_top, crop_right, crop_bottom )) if fit_in: fit_ops = [] if adaptive: fit_ops.append('adaptive') if full: fit_ops.append('full') fit_ops.append('fit-in') url.append('-'.join(fit_ops)) if horizontal_flip: width = '-%s' % width if vertical_flip: height = '-%s' % height if width or height: url.append('%sx%s' % (width, height)) if halign != 'center': url.append(halign) if valign != 'middle': 
url.append(valign) if smart: url.append('smart') if filters: url.append('filters:%s' % filters) return '/'.join(url) @classmethod def encode_url(kls, url): return quote(url, '/:?%=&()~",\'$')
2.234375
2
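Url.generate_options above simply joins the enabled path segments and parse_decrypted undoes that with one regular expression; a quick round-trip illustration (the sizes and filter are chosen arbitrarily; the module targets Python 2 via its urllib import, although these two methods are plain re/string code):

options = Url.generate_options(width=300, height=200, smart=True, filters='quality(80)')
print(options)  # '300x200/smart/filters:quality(80)'

parsed = Url.parse_decrypted('/300x200/smart/filters:quality(80)/some/image.jpg')
print(parsed['width'], parsed['height'], parsed['smart'], parsed['image'])
# 300 200 True some/image.jpg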
Projects/herdimmunity/Person.py
Tech-at-DU/ACS-1111.1-Object-Oriented-Programming
0
11935
import random from Virus import Virus class Person: ''' The simulation will contain people who will make up a population.''' def __init__(self, is_vaccinated, infection=None): ''' We start out with is_alive = True All other values will be set by the simulation through the parameters when it instantiates each Person object. ''' self.is_alive = True #boolean self.is_vaccinated = is_vaccinated #boolean self.infection = infection #virus object def did_survive_infection(self): ''' Generate a random number between 0.0 and 1.0 and compare to the virus's mortality_num. If the random number is smaller, person dies from the disease. Set the person's is_alive attribute to False If Person survives, they become vaccinated and they have no infection (set the vaccinated attribute to True and the infection to None) Return True if they survived the infection and False if they did not. ''' #TODO: finish this method pass
3.84375
4
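did_survive_infection above is left as a TODO; a minimal sketch of the behaviour its docstring describes, assuming the Virus object exposes a mortality_num attribute in the 0.0-1.0 range (the docstring implies it, but the Virus class itself is not shown):

def did_survive_infection(self):
    # Person dies if the random draw falls below the virus's mortality number.
    if random.random() < self.infection.mortality_num:
        self.is_alive = False
        return False
    # Survivors become vaccinated and clear the infection.
    self.is_vaccinated = True
    self.infection = None
    return True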
Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py
ramonvaleriano/python-
0
11936
<reponame>ramonvaleriano/python-<filename>Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py class Programa: def __init__(self, nome, ano): self._nome = nome.title() self.ano = ano self._likes = 0 @property def likes(self): return self._likes def dar_like(self): self._likes += 1 @property def nome(self): return self._nome @nome.setter def nome(self, novo_nome): self._nome = novo_nome.title() def __str__(self): return f'{self.nome} - {self.ano} - {self.likes}' class Filme(Programa): def __init__(self, nome, ano, duracao): super().__init__(nome, ano) self.duracao = duracao def __str__(self): return f'{self.nome} - {self.ano} - {self.duracao} min - {self.likes}' class Serie(Programa): def __init__(self, nome, ano, temporadas): super(Serie, self).__init__(nome, ano) self.temporadas = temporadas def __str__(self): return f'{self.nome} - {self.ano} - {self.temporadas} temporadas - {self.likes}' class Playlist: def __init__(self, nome, programas): self.nome = nome.title() self._programas = programas def __getitem__(self, item): return self._programas[item] @property def listagem(self): return self._programas def __len__(self): return len(self._programas) vingadores = Filme('Vigadores - <NAME>', 2018, 160) atlanta = Serie('Atlatan', 2018, 2) tmep = Filme('Todo mundo em pânico', 1999, 100) demolidor = Serie('Demolidor', 2016, 2) filmes_e_series = [vingadores, atlanta, demolidor, tmep] playlist_fim_de_semana = Playlist('fim de semana', filmes_e_series) print(f'Tamonho do playlist: {len(playlist_fim_de_semana)}') for dados in playlist_fim_de_semana: print(dados)
3.765625
4
blackjack/game.py
cuiqui/blackjack
0
11937
import constants as c from deck import Deck from player import Human, RandomAI class Game: def __init__(self): self.deck = None self.players = None self.scores = None self.rounds_left = None self.game_over = False def new(self): self.game_over = False self.rounds_left = c.ROUNDS self.players = [Human(), RandomAI()] self.scores = {str(k): 0 for k in self.players} self.new_round() def new_round(self): self.deck = Deck() self.deck.shuffle() for player in self.players: player.hand = [] self.deal(player=player, quantity=c.INITIAL_HAND) def deal(self, player, quantity=1): for card in self.deck.draw(quantity): player.hand.append(card) def turn(self, player): score = None action = player.play() if action == 'hit': self.deal(player) if player.get_score() > c.POINTS: score = 0 elif action == 'stay': score = player.get_score() return score def balance(self, scores): print('----- Scores -----') print(f'Round scores (points made in round): {scores}') tie = True winner = scores.popitem() for k, v in scores.items(): if v > winner[1]: winner = (k, v) tie = False elif v < winner[1]: tie = False if not tie: self.scores[winner[0]] += 1 print(f'General scores (rounds won by each): {self.scores}') def run(self): # while there are still rounds left while self.rounds_left: # set round scores to empty scores = {} # for each player, do a whole turn, which can involve # multiple actions, i.e., two or more "hits" for player in self.players: print(f'---- {str(player)} turn ----') # turn is not over until we receive a score, # whether it's 0, which means it overstepped # or 0 < x <= 21 turn_over = False while not turn_over: # do a turn until we get a score, if we don't # have a score, that means that the engine # "hit" and didn't overstepped, so it's still # its turn. score = self.turn(player) if score is not None: print(f'Hand: {[str(e) for e in player.hand]}, points: {player.get_score()}') # store scores for this player in this round # and hand control over scores[str(player)] = score turn_over = True # do a balance after finishing round self.balance(scores) # begin new round and reduce rounds left by 1 self.new_round() self.rounds_left -= 1 print(f'Rounds left: {self.rounds_left}') if __name__ == '__main__': g = Game() g.new() g.run()
3.328125
3
numba/tests/__init__.py
mawanda-jun/numba
1
11938
<reponame>mawanda-jun/numba from numba import unittest_support as unittest import gc from os.path import dirname, join import multiprocessing import sys import time import warnings from unittest.suite import TestSuite from numba.testing import load_testsuite from numba.testing import ddt # for backward compatibility try: import faulthandler except ImportError: faulthandler = None else: try: # May fail in IPython Notebook with UnsupportedOperation faulthandler.enable() except Exception as e: msg = "Failed to enable faulthandler due to:\n{err}" warnings.warn(msg.format(err=e)) def load_tests(loader, tests, pattern): suite = TestSuite() suite.addTests(load_testsuite(loader, dirname(__file__))) # Numba CUDA tests are located in a separate directory: cuda_dir = join(dirname(dirname(__file__)), 'cuda/tests') suite.addTests(loader.discover(cuda_dir)) # Numba ROC tests are located in a separate directory roc_dir = join(dirname(dirname(__file__)), 'roc/tests') suite.addTests(loader.discover(roc_dir)) return suite
2.109375
2
src/clustar/fit.py
clustar/Clustar
4
11939
""" Clustar module for fitting-related methods. This module is designed for the 'ClustarData' object. All listed methods take an input parameter of a 'ClustarData' object and return a 'ClustarData' object after processing the method. As a result, all changes are localized within the 'ClustarData' object. Visit <https://clustar.github.io/> for additional information. """ from clustar import graph from scipy import ndimage, stats from shapely import affinity, geometry import numpy as np def compute_fit(cd): """ Computes the normalized bivariate gaussian fit for the 'Group' objects. Parameters ---------- cd : ClustarData 'ClustarData' object required for processing. Returns ------- ClustarData """ i = 0 while i < len(cd.groups): group = cd.groups[i] try: rv = stats.multivariate_normal([group.stats.x_bar, group.stats.y_bar], group.stats.covariance_matrix) except ValueError: del cd.groups[i] continue bvg = rv.pdf(group.image.pos) bvg *= np.max(group.image.data) / np.max(bvg) group.res.data = 1 - (bvg / group.image.data) group.fit.bvg = bvg group.fit.rv = rv i += 1 return cd def compute_ellipse(cd): """ Computes the ellipse parameters and localized residuals for the 'Group' objects. Parameters ---------- cd : ClustarData 'ClustarData' object required for processing. Returns ------- ClustarData """ for group in cd.groups: a = group.stats.x_len / 2 b = group.stats.y_len / 2 theta = np.linspace(0, np.pi * 2, 360) r = a * b / np.sqrt((b * np.cos(theta)) ** 2 + (a * np.sin(theta)) ** 2) xy = np.stack([group.stats.x_bar + r * np.cos(theta), group.stats.y_bar + r * np.sin(theta)], 1) ellipse = affinity.rotate(geometry.Polygon(xy), group.stats.degrees, (group.stats.x_bar, group.stats.y_bar)) pos = np.array([[i, j] for i in range(group.image.data.shape[0]) for j in range(group.image.data.shape[1])]) inside = np.array([p for p in pos if ellipse.contains(geometry.Point(p))]) outside = np.array([p for p in pos if not ellipse.contains(geometry.Point(p))]) group.fit.ellipse = ellipse group.res.pos = pos group.res.inside = inside group.res.outside = outside return cd def compute_metrics(cd): """ Computes the evaluation metrics for the 'Group' objects. Parameters ---------- cd : ClustarData 'ClustarData' object required for processing. Returns ------- ClustarData """ for group in cd.groups: res = group.res output = np.abs(res.data[res.inside[:, 0], res.inside[:, 1]]) output[output < 0] = 0 output[output > 1] = 1 bias = group.image.data[res.inside[:, 0], res.inside[:, 1]] group.metrics.standard_deviation = np.std(output) group.metrics.variance = group.metrics.standard_deviation ** 2 group.metrics.average = np.mean(output) group.metrics.weighted_average = np.average(output, weights=bias) group.res.output = output return cd def compute_peaks(cd): """ Computes the number of peaks along the major and minor axes for the 'Group' objects. Parameters ---------- cd : ClustarData 'ClustarData' object required for processing. 
Returns ------- ClustarData """ for group in cd.groups: res = np.array(group.res.data, copy=True) res_out = group.res.outside res[res_out[:, 0], res_out[:, 1]] = 0 r_major = np.abs(ndimage.rotate(res, group.stats.degrees)) r_minor = np.abs(ndimage.rotate(res, group.stats.degrees + 90)) major_idx = graph.critical_points(r_major) minor_idx = graph.critical_points(r_minor) major_idx = [major_idx[i] for i in range(len(major_idx)) if i % 2 == 0] minor_idx = [minor_idx[i] for i in range(len(minor_idx)) if i % 2 == 0] group.fit.major_peaks = len(major_idx) group.fit.minor_peaks = len(minor_idx) group.res.clean = res return cd def validate(cd): """ Determines which 'Group' objects are flagged for manual review by using the specified validation parameters. Parameters ---------- cd : ClustarData 'ClustarData' object required for processing. Returns ------- ClustarData """ attribute = cd.params.metric.lower() threshold = cd.params.threshold for group in cd.groups: metric = getattr(group.metrics, attribute) if metric > threshold: group.flag = True cd.flag = True if cd.params.evaluate_peaks and \ ((group.fit.major_peaks in [2, 4]) or (group.fit.minor_peaks in [2, 4])): group.flag = False cd.flag = False return cd
2.65625
3
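compute_ellipse above traces the group boundary with the polar form of an ellipse, r(theta) = a*b / sqrt((b*cos(theta))**2 + (a*sin(theta))**2), before rotating it by the fitted position angle; a quick numerical check of that formula at the two axis endpoints (the a and b values are arbitrary):

import numpy as np

a, b = 5.0, 2.0  # semi-major and semi-minor axis lengths
r = lambda t: a * b / np.sqrt((b * np.cos(t)) ** 2 + (a * np.sin(t)) ** 2)

print(r(0.0))        # 5.0 -> radius along the major axis
print(r(np.pi / 2))  # ~2.0 (up to float rounding) -> radius along the minor axis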
tests/wizard/namedwizardtests/urls.py
felixxm/django-formtools
0
11940
<filename>tests/wizard/namedwizardtests/urls.py from django.conf.urls import url from .forms import ( CookieContactWizard, Page1, Page2, Page3, Page4, SessionContactWizard, ) def get_named_session_wizard(): return SessionContactWizard.as_view( [('form1', Page1), ('form2', Page2), ('form3', Page3), ('form4', Page4)], url_name='nwiz_session', done_step_name='nwiz_session_done' ) def get_named_cookie_wizard(): return CookieContactWizard.as_view( [('form1', Page1), ('form2', Page2), ('form3', Page3), ('form4', Page4)], url_name='nwiz_cookie', done_step_name='nwiz_cookie_done' ) urlpatterns = [ url(r'^nwiz_session/(?P<step>.+)/$', get_named_session_wizard(), name='nwiz_session'), url(r'^nwiz_session/$', get_named_session_wizard(), name='nwiz_session_start'), url(r'^nwiz_cookie/(?P<step>.+)/$', get_named_cookie_wizard(), name='nwiz_cookie'), url(r'^nwiz_cookie/$', get_named_cookie_wizard(), name='nwiz_cookie_start'), ]
2.09375
2
setup.py
ajayp10/derive_event_pm4py
0
11941
<gh_stars>0 import pathlib from setuptools import setup CURRENT_PATH = pathlib.Path(__file__).parent README = (CURRENT_PATH/"README.md").read_text() setup( name="derive_event_pm4py", version="1.0.1", description="It derives new events based on rules provided as inputs.", long_description=README, long_description_content_type="text/markdown", url="https://github.com/ajayp10/derive_event_pm4py", author="<NAME>", author_email="<EMAIL>", license="MIT", classifiers=[ "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", ], packages=["derive_event"], include_package_data=True, install_requires=['pandas', 'numpy', 'pm4py', ], entry_points={ "console_scripts": [ "derive=derive_event.derive:main", ] }, )
1.453125
1
tests/service/ai/test_not_killing_itself_ai.py
jonashellmann/informaticup21-team-chillow
3
11942
import unittest from datetime import datetime, timezone from typing import List from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI from chillow.model.action import Action from chillow.model.cell import Cell from chillow.model.direction import Direction from chillow.model.game import Game from chillow.model.player import Player from chillow.service.game_service import GameService class NotKillingItselfAITest(unittest.TestCase): def test_ai_should_choose_the_own_non_killing_itself_action(self): player1 = Player(1, 0, 0, Direction.up, 1, True, "") player2 = Player(2, 4, 4, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell([player1]), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell([player2])]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 3) actions: List[Action] = sut.find_surviving_actions(game_service, 3) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 1) def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself(self): player1 = Player(1, 0, 1, Direction.up, 1, True, "") player2 = Player(2, 4, 4, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player1]), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell([player2])]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 3) actions: List[Action] = sut.find_surviving_actions(game_service, 3) self.assertTrue(Action.change_nothing in actions) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 2) def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself2(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell([player2]), Cell(), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 3) actions: List[Action] = sut.find_surviving_actions(game_service, 3) self.assertTrue(Action.turn_left in actions) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 2) def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_in_turn_6(self): player1 = Player(1, 0, 4, Direction.up, 3, True, "") player2 = Player(2, 0, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player1]), Cell(), Cell(), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) game_service.turn.turn_ctr = 6 sut = 
NotKillingItselfAI(player1, [], 4, 0, 3) actions: List[Action] = sut.find_surviving_actions(game_service, 1) self.assertTrue(Action.slow_down in actions) self.assertTrue(Action.turn_right in actions) self.assertTrue(Action.speed_up in actions) self.assertTrue(len(actions) == 3) def test_ai_should_not_choose_speed_up_if_max_speed_is_allready_reached(self): MAX_SPEED = 3 player1 = Player(1, 0, 4, Direction.up, MAX_SPEED, True, "") player2 = Player(2, 0, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player1]), Cell(), Cell(), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], MAX_SPEED, 0, 3) actions: List[Action] = sut.find_surviving_actions(game_service, 1) self.assertTrue(Action.slow_down in actions) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 2) def test_ai_should_calc_action_with_max_distance(self): player1 = Player(1, 0, 4, Direction.up, 1, True, "") player2 = Player(2, 0, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player1]), Cell(), Cell(), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 3) actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up, Action.change_nothing, Action.turn_right]) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 1) def test_ai_should_calc_all_action_with_max_distance_with_max_worse_distance(self): MAX_WORSE_DISTANCE = 1 player1 = Player(1, 0, 4, Direction.up, 1, True, "") player2 = Player(2, 4, 4, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player1]), Cell(), Cell(), Cell(), Cell([player2])]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, MAX_WORSE_DISTANCE, 3) actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up, Action.change_nothing, Action.turn_right]) self.assertTrue(Action.speed_up in actions) self.assertTrue(Action.change_nothing in actions) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 3) def test_get_information(self): player = Player(1, 0, 4, Direction.up, 1, True, "") sut = NotKillingItselfAI(player, [], 3, 1, 3) expected = "max_speed=3, max_worse_distance=1, depth=3" result = sut.get_information() self.assertEqual(expected, result) def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_with_depth_greater_than_one(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], 
[Cell([player2]), Cell([player2]), Cell(), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell(), Cell()], [Cell([player2]), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 2) actions: List[Action] = sut.find_surviving_actions(game_service, 2) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 1) def test_ai_should_choose_empty_list_with_depth_greater_than_one_and_no_surviving_action(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()], [Cell([player2]), Cell(), Cell([player2]), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 2) actions: List[Action] = sut.find_surviving_actions(game_service, 2) self.assertTrue(len(actions) == 0) def test_ai_should_choose_correct_list_with_depth_three_and_surviving_action(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()], [Cell([player2]), Cell(), Cell(), Cell(), Cell()], [Cell(), Cell(), Cell(), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 3) actions: List[Action] = sut.find_surviving_actions(game_service, 3) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 1) def test_ai_should_choose_empty_list_with_depth_three_and_no_surviving_action(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()], [Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()], [Cell(), Cell(), Cell([player2]), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 3) actions: List[Action] = sut.find_surviving_actions(game_service, 3) self.assertTrue(len(actions) == 0) def test_ai_should_choose_best_list_of_actions_by_depth_from_lower_depth(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()], [Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()], [Cell(), Cell(), Cell([player2]), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, 
timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 5) actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 1) def test_ai_should_choose_best_list_of_actions_by_depth(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()], [Cell([player2]), Cell(), Cell(), Cell([player2]), Cell()], [Cell(), Cell(), Cell([player2]), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 5) actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 1) def test_ai_should_choose_best_list_of_actions_in_lowest_possible_depth(self): player1 = Player(1, 1, 2, Direction.up, 1, True, "") player2 = Player(2, 1, 1, Direction.down, 3, True, "") players = [player1, player2] cells = [[Cell(), Cell(), Cell(), Cell(), Cell()], [Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()], [Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()], [Cell([player2]), Cell(), Cell([player2]), Cell([player2]), Cell()], [Cell(), Cell(), Cell([player2]), Cell(), Cell()]] time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc) game = Game(5, 5, cells, players, 2, True, time) game_service = GameService(game) sut = NotKillingItselfAI(player1, [], 3, 0, 5) actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service) self.assertTrue(Action.turn_left in actions) self.assertTrue(Action.turn_right in actions) self.assertTrue(len(actions) == 2)
3
3
setup.py
meisanggou/ldapuser
0
11943
<reponame>meisanggou/ldapuser #! /usr/bin/env python # coding: utf-8 # __author__ = 'meisanggou' try: from setuptools import setup except ImportError: from distutils.core import setup import sys if sys.version_info <= (2, 7): sys.stderr.write("ERROR: ldap-user requires Python Version 2.7 or above.\n") sys.stderr.write("Your Python Version is %s.%s.%s.\n" % sys.version_info[:3]) sys.exit(1) name = "ldap-user" version = "0.5" url = "https://github.com/meisanggou/ldapuser" license = "MIT" author = "meisanggou" short_description = "use ldap verify user" long_description = """use ldap verify user""" keywords = "ldap-user" install_requires = ["python-ldap", "six"] entry_points = {'console_scripts': [ 'jy-ldap-config=ldap_user.cli:create_config' ]} setup(name=name, version=version, author=author, author_email="<EMAIL>", url=url, packages=["ldap_user", "ldap_user/util"], license=license, description=short_description, long_description=long_description, keywords=keywords, install_requires=install_requires, entry_points=entry_points )
1.945313
2
tests/test_infection.py
chinapnr/covid-19-data
3
11944
import json import pytest @pytest.mark.usefixtures('client', 'headers') class TestInfection: def test_infection_region_tc01(self, client, headers): # db has data BETWEEN 2020-03-22 2020-03-24 region = 'China' payload = { 'region': region, 'start_date': '2020-03-22', 'end_date': '2020-03-24', 'include_hmt': 'false' } response = client.get('/infection/region', params=payload, headers=headers) assert response.status_code == 200 print("response: ", response.text) response_data = json.loads(response.text)['data'] assert response_data def test_infection_region_tc02(self, client, headers): # db has no data BETWEEN 2020-03-25 2020-03-26 region = 'China' payload = { 'region': region, 'start_date': '2020-03-25', 'end_date': '2020-03-26', 'include_hmt': 'false' } response = client.get('/infection/region', params=payload, headers=headers) assert response.status_code == 200 print("response: ", response.text) response_data = json.loads(response.text)['data'] assert response_data def test_infection_region_tc03(self, client, headers): # db has data BETWEEN 2020-03-22 2020-03-24 # look up detail region = 'China' payload = { 'region': region, 'start_date': '2020-03-22', 'end_date': '2020-03-24', 'include_hmt': 'true' } response = client.get('/infection/region', params=payload, headers=headers) assert response.status_code == 200 print("response: ", response.text) response_data = json.loads(response.text)['data'] assert response_data def test_infection_region_tc04(self, client, headers): # db has data BETWEEN 2020-03-22 2020-03-24 # look up detail region = 'China' payload = { 'region': region, 'start_date': '2020-03-22', # 'end_date': '2020-03-24', 'include_hmt': 'true' } response = client.get('/infection/region', params=payload, headers=headers) assert response.status_code == 200 print("response: ", response.text) response_data = json.loads(response.text)['data'] assert response_data def test_infection_region_tc05(self, client, headers): # db has data BETWEEN 2020-03-22 2020-03-24 # look up detail region = 'China' payload = { 'region': region, 'start_date': '2020-01-22', # 'end_date': '2020-03-24', 'include_hmt': 'true' } response = client.get('/infection/region', params=payload, headers=headers) assert response.status_code == 400 print("response: ", response.text) response_data = json.loads(response.text)['code'] assert response_data == "30018" def test_infection_region_detail(self, client, headers): region = 'China' payload = { 'region': region, 'start_date': '2020-03-22', 'end_date': '2020-03-24', 'include_hmt': 'true' } response = client.get('/infection/region/detail', params=payload, headers=headers) assert response.status_code == 200 print("response: ", response.text) response_data = json.loads(response.text)['data'] assert response_data @pytest.mark.skip def test_infection_area(self, client, headers): region = 'China' area = 'Chongqing' payload = { 'region': region, 'area': area, 'start_date': '2020-03-22', 'end_date': '2020-03-24' } response = client.get('/infection/area', params=payload, headers=headers) assert response.status_code == 200 print("response: ", response.text) response_data = json.loads(response.text)['data'] assert response_data def test_infection_global(self, client, headers): response = client.get('/infection/global', headers=headers) assert response.status_code == 200 print("response: ", response.text) response_data = json.loads(response.text)['data'] assert response_data
2.3125
2
tests/test_util_owsutil.py
TimFranken/pydov
0
11945
"""Module grouping tests for the pydov.util.owsutil module.""" import copy import re import pytest from numpy.compat import unicode from owslib.etree import etree from owslib.fes import ( PropertyIsEqualTo, FilterRequest, ) from owslib.iso import MD_Metadata from owslib.util import nspath_eval from pydov.util import owsutil from pydov.util.errors import ( MetadataNotFoundError, FeatureCatalogueNotFoundError, ) from pydov.util.location import ( Within, Box, ) from tests.test_search_boring import ( md_metadata, mp_remote_md, mp_remote_describefeaturetype, mp_remote_fc, location_md_metadata, location_fc_featurecatalogue, location_wfs_describefeaturetype, ) from tests.test_search import ( wfs, mp_wfs, mp_remote_fc_notfound ) def clean_xml(xml): """Clean the given XML string of namespace definition, namespace prefixes and syntactical but otherwise meaningless differences. Parameters ---------- xml : str String representation of XML document. Returns ------- str String representation of cleaned XML document. """ # remove xmlns namespace definitions r = re.sub(r'[ ]+xmlns:[^=]+="[^"]+"', '', xml) # remove namespace prefixes in tags r = re.sub(r'<(/?)[^:]+:([^ >]+)([ >])', r'<\1\2\3', r) # remove extra spaces in tags r = re.sub(r'[ ]+/>', '/>', r) # remove extra spaces between tags r = re.sub(r'>[ ]+<', '><', r) return r class TestOwsutil(object): """Class grouping tests for the pydov.util.owsutil module.""" def test_get_csw_base_url(self, wfs): """Test the owsutil.get_csw_base_url method. Test whether the CSW base URL of the dov-pub:Boringen layer is correct. Parameters ---------- wfs : pytest.fixture returning owslib.wfs.WebFeatureService WebFeatureService based on the local GetCapabilities. """ contentmetadata = wfs.contents['dov-pub:Boringen'] assert owsutil.get_csw_base_url(contentmetadata) == \ 'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw' def test_get_csw_base_url_nometadataurls(self, wfs): """Test the owsutil.get_csw_base_url method for a layer without metdata urls. Test whether a MetadataNotFoundError is raised. Parameters ---------- wfs : pytest.fixture returning owslib.wfs.WebFeatureService WebFeatureService based on the local GetCapabilities. """ contents = copy.deepcopy(wfs.contents) contentmetadata = contents['dov-pub:Boringen'] contentmetadata.metadataUrls = [] with pytest.raises(MetadataNotFoundError): owsutil.get_csw_base_url(contentmetadata) def test_get_featurecatalogue_uuid(self, md_metadata): """Test the owsutil.get_featurecatalogue_uuid method. Test whether the featurecatalogue uuid of the dov-pub:Boringen layer is correct. Parameters ---------- md_metadata : pytest.fixture providing owslib.iso.MD_Metadata Parsed metadata describing the Boringen WFS layer in more detail, in the ISO 19115/19139 format. """ assert owsutil.get_featurecatalogue_uuid(md_metadata) == \ 'c0cbd397-520f-4ee1-aca7-d70e271eeed6' def test_get_featurecatalogue_uuid_nocontentinfo(self, md_metadata): """Test the owsutil.get_featurecatalogue_uuid method when the metadata is missing the gmd:contentInfo element. Test whether a FeatureCatalogueNotFoundError is raised. Parameters ---------- md_metadata : pytest.fixture providing owslib.iso.MD_Metadata Parsed metadata describing the Boringen WFS layer in more detail, in the ISO 19115/19139 format. 
""" tree = etree.fromstring(md_metadata.xml) root = tree.find('{http://www.isotc211.org/2005/gmd}MD_Metadata') for ci in tree.findall( './/{http://www.isotc211.org/2005/gmd}contentInfo'): root.remove(ci) md_metadata.xml = etree.tostring(tree) with pytest.raises(FeatureCatalogueNotFoundError): owsutil.get_featurecatalogue_uuid(md_metadata) def test_get_featurecatalogue_uuid_nouuidref(self, md_metadata): """Test the owsutil.get_featurecatalogue_uuid method when the gmd:contentInfo element is missing a 'uuidref' attribute. Test whether a FeatureCatalogueNotFoundError is raised. Parameters ---------- md_metadata : pytest.fixture providing owslib.iso.MD_Metadata Parsed metadata describing the Boringen WFS layer in more detail, in the ISO 19115/19139 format. """ tree = etree.fromstring(md_metadata.xml) for ci in tree.findall(nspath_eval( 'gmd:MD_Metadata/gmd:contentInfo/' 'gmd:MD_FeatureCatalogueDescription/' 'gmd:featureCatalogueCitation', {'gmd': 'http://www.isotc211.org/2005/gmd'})): ci.attrib.pop('uuidref') md_metadata.xml = etree.tostring(tree) with pytest.raises(FeatureCatalogueNotFoundError): owsutil.get_featurecatalogue_uuid(md_metadata) def test_get_namespace(self, wfs, mp_remote_describefeaturetype): """Test the owsutil.get_namespace method. Test whether the namespace of the dov-pub:Boringen layer is correct. Parameters ---------- wfs : pytest.fixture returning owslib.wfs.WebFeatureService WebFeatureService based on the local GetCapabilities. mp_remote_describefeaturetype : pytest.fixture Monkeypatch the call to a remote DescribeFeatureType of the dov-pub:Boringen layer. """ assert owsutil.get_namespace(wfs, 'dov-pub:Boringen') == \ 'http://dov.vlaanderen.be/ocdov/dov-pub' def test_get_remote_featurecatalogue(self, mp_remote_fc): """Test the owsutil.get_remote_featurecatalogue method. Test whether the feature catalogue of the dov-pub:Boringen layer matches the format described in the docs. Parameters ---------- mp_remote_fc : pytest.fixture Monkeypatch the call to get the remote feature catalogue of the dov-pub:Boringen layer. """ fc = owsutil.get_remote_featurecatalogue( 'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw', 'c0cbd397-520f-4ee1-aca7-d70e271eeed6') assert type(fc) is dict assert 'definition' in fc assert type(fc['definition']) in (str, unicode) assert 'attributes' in fc assert type(fc['attributes']) is dict attrs = fc['attributes'] if len(attrs) > 0: for attr in attrs.values(): assert type(attr) is dict assert 'definition' in attr assert type(attr['definition']) in (str, unicode) assert 'values' in attr assert type(attr['values']) is list if len(attr['values']) > 0: for v in attr['values']: assert type(v) in (str, unicode) assert len(attr['values']) == len(set(attr['values'])) assert 'multiplicity' in attr mp = attr['multiplicity'] assert type(mp) is tuple assert len(mp) == 2 assert mp[0] in (0, 1) assert (type(mp[1]) is int and mp[1] > 0) or mp[1] == 'Inf' def test_get_remote_featurecataloge_baduuid(self, mp_remote_fc_notfound): """Test the owsutil.get_remote_featurecatalogue method with an inexistent feature catalogue uuid. Test whether a FeatureCatalogueNotFoundError is raised. Parameters ---------- mp_remote_fc_notfound : pytest.fixture Monkeypatch the call to get an inexistent remote featurecatalogue. 
""" with pytest.raises(FeatureCatalogueNotFoundError): owsutil.get_remote_featurecatalogue( 'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw', 'badfc000-0000-0000-0000-badfc00badfc') def test_get_remote_metadata(self, md_metadata): """Test the owsutil.get_remote_metadata method. Test whether the resulting MD_Metadata is correct. Parameters ---------- md_metadata : pytest.fixture returning owslib.iso.MD_Metadata Parsed metadata describing the Boringen WFS layer in more detail, in the ISO 19115/19139 format. """ assert type(md_metadata) is MD_Metadata def test_get_remote_metadata_nometadataurls(self, wfs): """Test the owsutil.get_remote_metadata method when the WFS layer missed metadata URLs. Test whether a MetadataNotFoundError is raised. Parameters ---------- wfs : pytest.fixture returning owslib.wfs.WebFeatureService WebFeatureService based on the local GetCapabilities. """ contents = copy.deepcopy(wfs.contents) contentmetadata = contents['dov-pub:Boringen'] contentmetadata.metadataUrls = [] with pytest.raises(MetadataNotFoundError): owsutil.get_remote_metadata(contentmetadata) def test_wfs_build_getfeature_request_onlytypename(self): """Test the owsutil.wfs_build_getfeature_request method with only a typename specified. Test whether the XML of the WFS GetFeature call is generated correctly. """ xml = owsutil.wfs_build_getfeature_request('dov-pub:Boringen') assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml( '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' 'service="WFS" version="1.1.0" ' 'xsi:schemaLocation="http://www.opengis.net/wfs ' 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query ' 'typeName="dov-pub:Boringen"><ogc:Filter ' 'xmlns:ogc="http://www.opengis.net/ogc"/></wfs:Query></wfs' ':GetFeature>') def test_wfs_build_getfeature_request_bbox_nogeometrycolumn(self): """Test the owsutil.wfs_build_getfeature_request method with a location argument but without the geometry_column argument. Test whether an AttributeError is raised. """ with pytest.raises(AttributeError): xml = owsutil.wfs_build_getfeature_request( 'dov-pub:Boringen', location=Within(Box(151650, 214675, 151750, 214775))) def test_wfs_build_getfeature_request_bbox(self): """Test the owsutil.wfs_build_getfeature_request method with a typename, box and geometry_column. Test whether the XML of the WFS GetFeature call is generated correctly. """ xml = owsutil.wfs_build_getfeature_request( 'dov-pub:Boringen', location=Within(Box(151650, 214675, 151750, 214775)), geometry_column='geom') assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml( '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' 'service="WFS" version="1.1.0" ' 'xsi:schemaLocation="http://www.opengis.net/wfs ' 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query ' 'typeName="dov-pub:Boringen"><ogc:Filter ' 'xmlns:ogc="http://www.opengis.net/ogc"><ogc:Within> ' '<ogc:PropertyName>geom</ogc:PropertyName><gml:Envelope ' 'xmlns:gml="http://www.opengis.net/gml" srsDimension="2" ' 'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml' ':lowerCorner>151650.000 ' '214675.000</gml:lowerCorner><gml:upperCorner>151750.000 ' '214775.000</gml:upperCorner></gml:Envelope></ogc:Within></ogc' ':Filter></wfs:Query></wfs:GetFeature>') def test_wfs_build_getfeature_request_propertyname(self): """Test the owsutil.wfs_build_getfeature_request method with a list of propertynames. 
Test whether the XML of the WFS GetFeature call is generated correctly. """ xml = owsutil.wfs_build_getfeature_request( 'dov-pub:Boringen', propertyname=['fiche', 'diepte_tot_m']) assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml( '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' 'service="WFS" version="1.1.0" ' 'xsi:schemaLocation="http://www.opengis.net/wfs ' 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query ' 'typeName="dov-pub:Boringen"> ' '<wfs:PropertyName>fiche</wfs:PropertyName> ' '<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter/> ' '</wfs:Query> </wfs:GetFeature>') def test_wfs_build_getfeature_request_filter(self): """Test the owsutil.wfs_build_getfeature_request method with an attribute filter. Test whether the XML of the WFS GetFeature call is generated correctly. """ query = PropertyIsEqualTo(propertyname='gemeente', literal='Herstappe') filter_request = FilterRequest() filter_request = filter_request.setConstraint(query) try: filter_request = etree.tostring(filter_request, encoding='unicode') except LookupError: # Python2.7 without lxml uses 'utf-8' instead. filter_request = etree.tostring(filter_request, encoding='utf-8') xml = owsutil.wfs_build_getfeature_request( 'dov-pub:Boringen', filter=filter_request) assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml( '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' 'service="WFS" version="1.1.0" ' 'xsi:schemaLocation="http://www.opengis.net/wfs ' 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query ' 'typeName="dov-pub:Boringen"> <ogc:Filter> ' '<ogc:PropertyIsEqualTo> ' '<ogc:PropertyName>gemeente</ogc:PropertyName> ' '<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> ' '</ogc:Filter> </wfs:Query> </wfs:GetFeature>') def test_wfs_build_getfeature_request_bbox_filter(self): """Test the owsutil.wfs_build_getfeature_request method with an attribute filter, a box and a geometry_column. Test whether the XML of the WFS GetFeature call is generated correctly. """ query = PropertyIsEqualTo(propertyname='gemeente', literal='Herstappe') filter_request = FilterRequest() filter_request = filter_request.setConstraint(query) try: filter_request = etree.tostring(filter_request, encoding='unicode') except LookupError: # Python2.7 without lxml uses 'utf-8' instead. 
filter_request = etree.tostring(filter_request, encoding='utf-8') xml = owsutil.wfs_build_getfeature_request( 'dov-pub:Boringen', filter=filter_request, location=Within(Box(151650, 214675, 151750, 214775)), geometry_column='geom') assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml( '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' 'service="WFS" version="1.1.0" ' 'xsi:schemaLocation="http://www.opengis.net/wfs ' 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query ' 'typeName="dov-pub:Boringen"> <ogc:Filter> <ogc:And> ' '<ogc:PropertyIsEqualTo> ' '<ogc:PropertyName>gemeente</ogc:PropertyName> ' '<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> ' '<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> ' '<gml:Envelope xmlns:gml="http://www.opengis.net/gml" ' 'srsDimension="2" ' 'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> ' '<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> ' '<gml:upperCorner>151750.000 214775.000</gml:upperCorner> ' '</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> ' '</wfs:Query> </wfs:GetFeature>') def test_wfs_build_getfeature_request_bbox_filter_propertyname(self): """Test the owsutil.wfs_build_getfeature_request method with an attribute filter, a box, a geometry_column and a list of propertynames. Test whether the XML of the WFS GetFeature call is generated correctly. """ query = PropertyIsEqualTo(propertyname='gemeente', literal='Herstappe') filter_request = FilterRequest() filter_request = filter_request.setConstraint(query) try: filter_request = etree.tostring(filter_request, encoding='unicode') except LookupError: # Python2.7 without lxml uses 'utf-8' instead. filter_request = etree.tostring(filter_request, encoding='utf-8') xml = owsutil.wfs_build_getfeature_request( 'dov-pub:Boringen', filter=filter_request, location=Within(Box(151650, 214675, 151750, 214775)), geometry_column='geom', propertyname=['fiche', 'diepte_tot_m']) assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml( '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' 'service="WFS" version="1.1.0" ' 'xsi:schemaLocation="http://www.opengis.net/wfs ' 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query ' 'typeName="dov-pub:Boringen"> ' '<wfs:PropertyName>fiche</wfs:PropertyName> ' '<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter> ' '<ogc:And> <ogc:PropertyIsEqualTo> ' '<ogc:PropertyName>gemeente</ogc:PropertyName> ' '<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> ' '<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> ' '<gml:Envelope xmlns:gml="http://www.opengis.net/gml" ' 'srsDimension="2" ' 'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> ' '<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> ' '<gml:upperCorner>151750.000 214775.000</gml:upperCorner> ' '</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> ' '</wfs:Query> </wfs:GetFeature>')
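clean_xml only normalises presentation, so the assertions above compare XML independently of namespace prefixes and whitespace. A small worked example of that normalisation (input chosen for illustration; the import assumes the module is reachable as tests.test_util_owsutil):

from tests.test_util_owsutil import clean_xml

raw = ('<gml:Envelope xmlns:gml="http://www.opengis.net/gml"> '
       '<gml:lowerCorner>1 2</gml:lowerCorner> </gml:Envelope>')
# The namespace declaration and prefixes are stripped and inter-tag whitespace collapsed.
assert clean_xml(raw) == '<Envelope><lowerCorner>1 2</lowerCorner></Envelope>'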
2.421875
2
manage.py
jessekl/twiliochallenge
0
11946
# -*- coding: utf-8 -*-
"""
    manage
    ~~~~~~

    Flask-Script Manager
"""

import os

from flask.ext.script import Manager
from flask.ext.migrate import MigrateCommand

from fbone import create_app
from fbone.extensions import db
from fbone.utils import PROJECT_PATH, MALE
from fbone.modules.user import User, ADMIN, ACTIVE
from fbone.modules.movies import Movie
from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand


app = create_app()
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config', required=False)

manager.add_command('create_user', CreateUserCommand())
manager.add_command('delete_user', DeleteUserCommand())
manager.add_command('list_users', ListUsersCommand())
manager.add_command('db', MigrateCommand)


@manager.command
def initdb():
    """Init/reset database."""

    db.drop_all()
    db.create_all()

    admin = User(
        name=u'admin',
        fullname=u'<NAME>',
        email=u'<EMAIL>',
        password=u'<PASSWORD>',
        role_code=ADMIN,
        status_code=ACTIVE,
        gender_code=MALE,
        bio=u'FSU Grad. Go Noles!')
    db.session.add(admin)
    db.session.commit()


@manager.command
def tests():
    """Run the tests."""

    import pytest
    exit_code = pytest.main([os.path.join(PROJECT_PATH, 'tests'), '--verbose'])
    return exit_code


if __name__ == "__main__":
    manager.run()
2.1875
2
llvmsqlite_util/benchmarking/micro/aggregate.py
KowalskiThomas/LLVMSQLite
0
11947
<reponame>KowalskiThomas/LLVMSQLite
import os

sql_files = [x for x in os.listdir(".") if x.endswith("sql")]
sql_files = list(sorted(sql_files, key = lambda x : int(x.split('.')[0])))

result = ""
for i, f in enumerate(sql_files):
    i = i + 1
    i = f.replace(".sql", "")
    with open(f) as sql:
        result += f"--- Query {i}\n"
        result += sql.read().strip()
        result += "\n\n\n"

result = result.strip()

with open("output.txt", 'w') as f:
    f.write(result)
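The sort key above orders the files by their leading number rather than lexicographically, so 2.sql comes before 10.sql. A short illustration of that behaviour (file names invented for the example):

files = ["10.sql", "2.sql", "1.sql"]
print(sorted(files))                                      # ['1.sql', '10.sql', '2.sql'] (lexicographic)
print(sorted(files, key=lambda x: int(x.split('.')[0])))  # ['1.sql', '2.sql', '10.sql'] (numeric)

# In output.txt each file then contributes a block headed by its own number,
# e.g. "--- Query 1" followed by the stripped contents of 1.sql.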
2.46875
2
demos/crane/main.py
Starli8ht/KivyMD
0
11948
""" MDCrane demo ============= .. seealso:: `Material Design spec, Crane <https://material.io/design/material-studies/crane.html#>` Crane is a travel app that helps users find and book travel, lodging, and restaurant options that match their input preferences. """ import os import sys from pathlib import Path from kivy.lang import Builder from kivymd.app import MDApp if getattr(sys, "frozen", False): # bundle mode with PyInstaller os.environ["CRANE_ROOT"] = sys._MEIPASS else: os.environ["CRANE_ROOT"] = str(Path(__file__).parent) KV_DIR = f"{os.path.dirname(__file__)}/libs/kv/" for kv_file in os.listdir(KV_DIR): with open(os.path.join(KV_DIR, kv_file), encoding="utf-8") as kv: Builder.load_string(kv.read()) KV = """ #:import FadeTransition kivy.uix.screenmanager.FadeTransition #:import CraneRootScreen libs.baseclass.root_screen.CraneRootScreen ScreenManager: transition: FadeTransition() CraneRootScreen: name: "crane root screen" """ class MDCrane(MDApp): def __init__(self, **kwargs): super().__init__(**kwargs) self.title = "Crane" self.icon = f"{os.environ['CRANE_ROOT']}/assets/images/logo.png" self.theme_cls.primary_palette = "Gray" self.theme_cls.primary_hue = "100" def build(self): FONT_PATH = f"{os.environ['CRANE_ROOT']}/assets/fonts/" self.theme_cls.font_styles.update( { "H1": [FONT_PATH + "Raleway-Light", 96, False, -1.5], "H2": [FONT_PATH + "Raleway-Regular", 60, False, -0.5], "H3": [FONT_PATH + "Raleway-SemiBold", 48, False, 0], "H4": [FONT_PATH + "Raleway-SemiBold", 34, False, 0.25], "H5": [FONT_PATH + "Raleway-SemiBold", 24, False, 0], "H6": [FONT_PATH + "Raleway-SemiBold", 20, False, 0.15], "Subtitle1": [ FONT_PATH + "Raleway-Medium", 16, False, 0.15, ], "Subtitle2": [ FONT_PATH + "Raleway-SemiBold", 14, False, 0.1, ], "Body1": [FONT_PATH + "Raleway-SemiBold", 16, False, 0.5], "Body2": [FONT_PATH + "Raleway-Regular", 14, False, 0.25], "Button": [FONT_PATH + "Raleway-SemiBold", 14, True, 1.25], "Caption": [ FONT_PATH + "Raleway-Medium", 12, False, 0.4, ], "Overline": [ FONT_PATH + "Raleway-SemiBold", 12, True, 1.5, ], } ) return Builder.load_string(KV) MDCrane().run()
2.28125
2
tasks/lgutil/graph_net.py
HimmelStein/lg-flask
0
11949
<gh_stars>0 # -*- coding: utf-8 -*- from nltk.parse import DependencyGraph from collections import defaultdict import random import sys import copy from json import dumps from pprint import pprint try: from .lg_graph import LgGraph except: sys.path.append("/Users/tdong/git/lg-flask/tasks/lgutil") from .lg_graph import LgGraph class GraphNet(DependencyGraph): """ {'address': 1, 'ctag': 'PRO', 'deps': defaultdict(list, {'remove-link-verb':[..]}), 'feats': '3|Sg|Masc|Nom', 'head': 2, 'lemma': 'er', --> 'lemma' : <sentence of the ldg> 'tag': 'PPER', 'word': 'Er' --> 'ldg': <graph> } tag, ctag, and feats are not used! """ def __init__(self, ldg=None): DependencyGraph.__init__(self) self.nodes = defaultdict(lambda: {'address': None, 'ldg': 0, 'gid': 1, #has the same value of the gid of nodes in ldg. 'lemma': None, 'head': None, 'deps': defaultdict(int), 'remaining_ops': defaultdict(list), #list(LgGraph.operator_dic.keys()), 'ctag': None, 'tag': None, 'feats': None, }) self.git_list = [1] self.nodes[0].update( {'address': 0, 'head': -1, 'ldg': 'TOP', 'gid': 1, #has the same value of the gid of nodes in ldg. 'remaining_ops': defaultdict(list), } ) if isinstance(ldg, LgGraph): self.nodes[0]['ldg'] = ldg if isinstance(ldg, GraphNet): self.nodes = ldg self.git_list = ldg.get_git_list() def get_next_gid(self): gid = random.randint(2,99) while gid in self.git_list: gid = random.randint(2, 99) self.git_list.append(gid) return gid def get_git_list(self): return list(self.nodes.keys()) def set_gid(self, gid): for node in self.nodes.values(): node['gid'] = gid if isinstance(node['ldg'], LgGraph): node['ldg'].set_gid(gid) def set_head(self, gid, address=1): self.nodes[address]['head'] = gid def set_key_address_same_as_gid(self, address, newGid): if address in self.nodes.keys(): self.nodes[newGid] = copy.deepcopy(self.nodes[address]) self.nodes[newGid]['address'] = newGid del self.nodes[address] def to_json(self): dic = {} for nodeId in self.nodes.keys(): dic[nodeId] = self.nodes[nodeId] if isinstance(dic[nodeId]['ldg'], LgGraph): dic[nodeId]['ldg'] = dic[nodeId]['ldg'].ldg2json() pprint(dic) return dic def _remove_node(self, address): del self.nodes[address] def gen_ldg_in_net(self): for node in self.nodes.values(): if isinstance(node['ldg'], LgGraph): yield node def fork_ldg(self, ldg=None): """ if ldg == None if ldg != None :param ldg: :return: """ if isinstance(ldg, LgGraph): gid = ldg.get_gid() newGid = self.get_next_gid() cpLdg = copy.deepcopy(ldg) cpLdg.set_gid(newGid) self.nodes[newGid]['ldg'] = cpLdg self.nodes[newGid]['address']= newGid self.nodes[newGid]['head'] = gid self.nodes[newGid]['gid'] = newGid # has the same value of the gid of nodes in ldg. self.nodes[newGid]['remaining_ops'] = list(LgGraph.operator_dic.keys()) self.nodes[gid]['deps'].update({'fork'+str(newGid): newGid}) else: newGid = self.get_next_gid() self.nodes[newGid].update( {'address': newGid, 'head': 0, 'ldg': None, 'gid': newGid, # has the same value of the gid of nodes in ldg. 
'remaining_ops': [] } ) self.nodes[0]['deps'].update({'fork'+str(newGid): newGid}) return newGid def change_to_ER_graph(self): """ change the ldg into an ER graph :return: """ for node in self.nodes.values(): lgGraph = node['ldg'] if lgGraph: erGraph = lgGraph.get_ER_graph() node['ldg'] = erGraph def gen_ER_graph(self, ldg): fork_gid = self.fork_ldg(ldg = ldg) for graphNode in list(self.gen_ldg_in_net()): print('in gen_ER_graph') lgGraph = graphNode['ldg'] erGraph = lgGraph.get_ER_graph() print('** ergraph') newGraphNet = GraphNet(ldg = erGraph) return newGraphNet #newGraphNet.to_json() #newGraphNet.remove_by_address(0) #newGid = self.get_next_gid() #newGraphNet.set_gid(newGid) #gid = int(lgGraph.get_gid()) #newGraphNet.set_key_address_same_as_gid(1, newGid) #newGraphNet.set_head(gid, address=newGid) #self.nodes.update(newGraphNet.nodes) #applied = True #return applied def apply_graph_operation(self, operator): """ apply operator to all nodes with non-null 'ldg' key of self, except the TOP node for node in self.nodes.values(): if node.applicatable(operator){ newNode = node.apply(operator) newGid = self.get_next_gid() newNode.set_gid(newGid) gid = node.get_gid() newNodeInNet = GraphNet(ldg = new_node) newNodeInNet['head'] = gid self.nodes[gid]['deps'].append(newGid) self.nodes[newGid] = newNodeInNet } :param operator: :return: """ def remove_operator_from_node(node, operator): if operator in node['remaining_ops']: index = node['remaining_ops'].index(operator) del node['remaining_ops'][index] return node applied = False for graphNode in list(self.gen_ldg_in_net()): lgGraph = graphNode['ldg'] if operator in graphNode['remaining_ops'] and lgGraph.is_applicable(operator): graphNode = remove_operator_from_node(graphNode, operator) newGraph = lgGraph.apply_operator(operator) newGraphNet = GraphNet(ldg = newGraph) newGraphNet.remove_by_address(0) newGid = self.get_next_gid() newGraphNet.set_gid(newGid) gid = int(lgGraph.get_gid()) newGraphNet.set_key_address_same_as_gid(1, newGid) newGraphNet.set_head(gid, address=newGid) self.nodes[gid]['deps'][operator].append(newGid) self.nodes.update(newGraphNet.nodes) applied = True else: graphNode = remove_operator_from_node(graphNode, operator) return applied def apply_all_graph_operators(self): """ this function shall generate all possible graphs while True: applied = False for operator in LgGraph.operator_dic.keys(): applied = applied or self.apply_graph_operation(operator) if not applied: break """ self.gen_ER_graph() while True: applied = False for operator in LgGraph.operator_dic.keys(): applied = applied or self.apply_graph_operation(operator) if not applied: break if __name__ == '__main__': LgGraph0 = LgGraph() LgGraph0.set_sample_snt_ldg_from_db(lan='de', table='pons', num=0) GraphNet0 = GraphNet(ldg = LgGraph0) GraphNet0.apply_graph_operation('remove-link-verb') pprint(GraphNet0.to_json())
2.09375
2
utils/transformations/char_level/char_dces_substitute.py
Yzx835/AISafety
0
11950
# !/usr/bin/env python # coding=UTF-8 """ @Author: <NAME> @LastEditors: <NAME> @Description: @Date: 2021-09-24 @LastEditTime: 2022-04-17 源自OpenAttack的DCESSubstitute """ import random from typing import NoReturn, List, Any, Optional import numpy as np from utils.transformations.base import CharSubstitute from utils.assets import fetch from utils.misc import DEFAULTS __all__ = [ "CharacterDCESSubstitute", ] class CharacterDCESSubstitute(CharSubstitute): """ """ __name__ = "CharacterDCESSubstitute" def __init__( self, threshold: float, random_one: bool = False, **kwargs: Any ) -> NoReturn: """ """ super().__init__(**kwargs) self.threshold = threshold dces_dict = fetch("dces") self.descs = dces_dict["descs"] self.neigh = dces_dict["neigh"] self.random_one = random_one def _get_candidates( self, word: str, pos_tag: Optional[str] = None, num: Optional[int] = None, ) -> List[str]: """ """ candidate_words = [] if self.random_one: i = DEFAULTS.RNG.integers(0, len(word)) repl_letters = self._apply_dces(word[i], self.threshold) if len(repl_letters) > 0: repl_letter = random.choice(repl_letters) candidate_word = word[:i] + repl_letter + word[i + 1 :] candidate_words.append(candidate_word) else: for i in range(len(word)): for repl_letter in self._apply_dces(word[i], self.threshold): candidate_word = word[:i] + repl_letter + word[i + 1 :] candidate_words.append(candidate_word) if num: candidate_words = candidate_words[:num] return candidate_words def _apply_dces(self, char: str, threshold: float) -> List[str]: """ """ c = get_hex_string(char) if c in self.descs: description = self.descs[c]["description"] else: return [] tokens = description.split(" ") case = "unknown" identifiers = [] for token in tokens: if len(token) == 1: identifiers.append(token) elif token == "SMALL": case = "SMALL" elif token == "CAPITAL": case = "CAPITAL" matches = [] match_ids = [] for i in identifiers: for idx, val in self.descs.items(): desc_toks = val["description"].split(" ") if ( i in desc_toks and not np.any(np.in1d(desc_toks, _disallowed)) and not np.any(np.in1d(idx, _disallowed_codes)) and not int(idx, 16) > 30000 ): desc_toks = np.array(desc_toks) case_descriptor = desc_toks[ (desc_toks == "SMALL") | (desc_toks == "CAPITAL") ] if len(case_descriptor) > 1: case_descriptor = case_descriptor[0] elif len(case_descriptor) == 0: case = "unknown" if case == "unknown" or case == case_descriptor: match_ids.append(idx) matches.append(val["vec"]) if len(matches) == 0: return [] match_vecs = np.stack(matches) Y = match_vecs self.neigh.fit(Y) X = self.descs[c]["vec"].reshape(1, -1) if Y.shape[0] > threshold: dists, idxs = self.neigh.kneighbors(X, threshold, return_distance=True) else: dists, idxs = self.neigh.kneighbors(X, Y.shape[0], return_distance=True) probs = dists.flatten() charcodes = [match_ids[idx] for idx in idxs.flatten()] chars = [] for idx, charcode in enumerate(charcodes): if probs[idx] < threshold: chars.append(chr(int(charcode, 16))) return chars @property def deterministic(self) -> bool: return not self.random_one def extra_repr_keys(self) -> List[str]: return super().extra_repr_keys() + [ "threshold", "random_one", ] _disallowed = [ "TAG", "MALAYALAM", "BAMUM", "HIRAGANA", "RUNIC", "TAI", "SUNDANESE", "BATAK", "LEPCHA", "CHAM", "TELUGU", "DEVANGARAI", "BUGINESE", "MYANMAR", "LINEAR", "SYLOTI", "PHAGS-PA", "CHEROKEE", "CANADIAN", "YI", "LYCIAN", "HANGUL", "KATAKANA", "JAVANESE", "ARABIC", "KANNADA", "BUHID", "TAGBANWA", "DESERET", "REJANG", "BOPOMOFO", "PERMIC", "OSAGE", "TAGALOG", "MEETEI", "CARIAN", "UGARITIC", 
"ORIYA", "ELBASAN", "CYPRIOT", "HANUNOO", "GUJARATI", "LYDIAN", "MONGOLIAN", "AVESTAN", "MEROITIC", "KHAROSHTHI", "HUNGARIAN", "KHUDAWADI", "ETHIOPIC", "PERSIAN", "OSMANYA", "ELBASAN", "TIBETAN", "BENGALI", "TURKIC", "THROWING", "HANIFI", "BRAHMI", "KAITHI", "LIMBU", "LAO", "CHAKMA", "DEVANAGARI", "ITALIC", "CJK", "MEDEFAIDRIN", "DIAMOND", "SAURASHTRA", "ADLAM", "DUPLOYAN", ] _disallowed_codes = [ "1F1A4", "A7AF", ] def get_hex_string(ch: str) -> str: return "{:04x}".format(ord(ch)).upper()
2.59375
3
piptools/repositories/base.py
LaudateCorpus1/pip-tools
2
11951
<filename>piptools/repositories/base.py
import optparse
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from typing import Iterator, Optional, Set

from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.index import PyPI
from pip._internal.network.session import PipSession
from pip._internal.req import InstallRequirement


class BaseRepository(metaclass=ABCMeta):
    DEFAULT_INDEX_URL = PyPI.simple_url

    def clear_caches(self) -> None:
        """Should clear any caches used by the implementation."""

    @abstractmethod
    def find_best_match(
        self, ireq: InstallRequirement, prereleases: Optional[bool]
    ) -> InstallRequirement:
        """
        Returns a pinned InstallRequirement object that indicates the best match
        for the given InstallRequirement according to the external repository.
        """

    @abstractmethod
    def get_dependencies(self, ireq: InstallRequirement) -> Set[InstallRequirement]:
        """
        Given a pinned, URL, or editable InstallRequirement, returns a set of
        dependencies (also InstallRequirements, but not necessarily pinned).
        They indicate the secondary dependencies for the given requirement.
        """

    @abstractmethod
    def get_hashes(self, ireq: InstallRequirement) -> Set[str]:
        """
        Given a pinned InstallRequirement, returns a set of hashes that represent
        all of the files for a given requirement. It is not acceptable for an
        editable or unpinned requirement to be passed to this function.
        """

    @abstractmethod
    @contextmanager
    def allow_all_wheels(self) -> Iterator[None]:
        """
        Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.
        """

    @abstractmethod
    def copy_ireq_dependencies(
        self, source: InstallRequirement, dest: InstallRequirement
    ) -> None:
        """
        Notifies the repository that `dest` is a copy of `source`, and so it
        has the same dependencies. Otherwise, once we prepare an ireq to assign
        it its name, we would lose track of those dependencies on combining
        that ireq with others.
        """

    @property
    @abstractmethod
    def options(self) -> optparse.Values:
        """Returns parsed pip options"""

    @property
    @abstractmethod
    def session(self) -> PipSession:
        """Returns a session to make requests"""

    @property
    @abstractmethod
    def finder(self) -> PackageFinder:
        """Returns a package finder to interact with simple repository API (PEP 503)"""
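BaseRepository is an abstract base class, so a concrete repository (such as pip-tools' own PyPIRepository) must implement every abstractmethod before it can be instantiated. A stub illustrating the required surface, with placeholder bodies rather than real resolution logic (the import assumes piptools is installed):

import optparse
from contextlib import contextmanager
from typing import Iterator, Optional, Set

from piptools.repositories.base import BaseRepository


class NullRepository(BaseRepository):
    """Satisfies the abstract interface; every query comes back empty."""

    def find_best_match(self, ireq, prereleases: Optional[bool]):
        return ireq  # pretend the requirement is already pinned

    def get_dependencies(self, ireq) -> Set:
        return set()

    def get_hashes(self, ireq) -> Set[str]:
        return set()

    @contextmanager
    def allow_all_wheels(self) -> Iterator[None]:
        yield

    def copy_ireq_dependencies(self, source, dest) -> None:
        pass

    @property
    def options(self) -> optparse.Values:
        return optparse.Values()

    @property
    def session(self):
        raise NotImplementedError("stub has no network session")

    @property
    def finder(self):
        raise NotImplementedError("stub has no package finder")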
2.109375
2
tfx/orchestration/portable/execution_publish_utils.py
johnPertoft/tfx
0
11952
# Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Portable library for registering and publishing executions.""" import copy import os from typing import List, Mapping, MutableMapping, Optional, Sequence, cast from absl import logging from tfx import types from tfx.orchestration import metadata from tfx.orchestration.portable.mlmd import execution_lib from tfx.proto.orchestration import execution_result_pb2 from ml_metadata.proto import metadata_store_pb2 def _check_validity(new_artifact: metadata_store_pb2.Artifact, original_artifact: types.Artifact, has_multiple_artifacts: bool) -> None: """Check the validity of new artifact against the original artifact.""" if new_artifact.type_id != original_artifact.type_id: raise RuntimeError('Executor output should not change artifact type.') if has_multiple_artifacts: # If there are multiple artifacts in the executor output, their URIs should # be a direct sub-dir of the system generated URI. if os.path.dirname(new_artifact.uri) != original_artifact.uri: raise RuntimeError( 'When there are multiple artifacts to publish, their URIs ' 'should be direct sub-directories of the URI of the system generated ' 'artifact.') else: # If there is only one output artifact, its URI should not be changed if new_artifact.uri != original_artifact.uri: # TODO(b/175426744): Data Binder will modify the uri. logging.warning( 'When there is one artifact to publish, the URI of it should be ' 'identical to the URI of system generated artifact.') def publish_cached_execution( metadata_handler: metadata.Metadata, contexts: Sequence[metadata_store_pb2.Context], execution_id: int, output_artifacts: Optional[MutableMapping[str, Sequence[types.Artifact]]] = None, ) -> None: """Marks an existing execution as using cached outputs from a previous execution. Args: metadata_handler: A handler to access MLMD. contexts: MLMD contexts to associated with the execution. execution_id: The id of the execution. output_artifacts: Output artifacts of the execution. Each artifact will be linked with the execution through an event with type OUTPUT. """ [execution] = metadata_handler.store.get_executions_by_id([execution_id]) execution.last_known_state = metadata_store_pb2.Execution.CACHED execution_lib.put_execution( metadata_handler, execution, contexts, input_artifacts=None, output_artifacts=output_artifacts) def _set_execution_result_if_not_empty( executor_output: Optional[execution_result_pb2.ExecutorOutput], execution: metadata_store_pb2.Execution) -> bool: """Sets execution result as a custom property of the execution.""" if executor_output and (executor_output.execution_result.result_message or executor_output.execution_result.metadata_details or executor_output.execution_result.code): # TODO(b/190001754): Consider either switching to base64 encoding or using # a proto descriptor pool to circumvent TypeError which may be raised when # converting embedded `Any` protos. 
try: execution_lib.set_execution_result(executor_output.execution_result, execution) except TypeError: logging.exception( 'Skipped setting execution_result as custom property of the ' 'execution due to error') def publish_succeeded_execution( metadata_handler: metadata.Metadata, execution_id: int, contexts: Sequence[metadata_store_pb2.Context], output_artifacts: Optional[MutableMapping[str, Sequence[types.Artifact]]] = None, executor_output: Optional[execution_result_pb2.ExecutorOutput] = None ) -> Optional[MutableMapping[str, List[types.Artifact]]]: """Marks an existing execution as success. Also publishes the output artifacts produced by the execution. This method will also merge the executor produced info into system generated output artifacts. The `last_know_state` of the execution will be changed to `COMPLETE` and the output artifacts will be marked as `LIVE`. Args: metadata_handler: A handler to access MLMD. execution_id: The id of the execution to mark successful. contexts: MLMD contexts to associated with the execution. output_artifacts: Output artifacts skeleton of the execution, generated by the system. Each artifact will be linked with the execution through an event with type OUTPUT. executor_output: Executor outputs. `executor_output.output_artifacts` will be used to update system-generated output artifacts passed in through `output_artifacts` arg. There are three contraints to the update: 1. The keys in `executor_output.output_artifacts` are expected to be a subset of the system-generated output artifacts dict. 2. An update to a certain key should contains all the artifacts under that key. 3. An update to an artifact should not change the type of the artifact. Returns: The maybe updated output_artifacts, note that only outputs whose key are in executor_output will be updated and others will be untouched. That said, it can be partially updated. Raises: RuntimeError: if the executor output to a output channel is partial. """ output_artifacts = copy.deepcopy(output_artifacts) or {} output_artifacts = cast(MutableMapping[str, List[types.Artifact]], output_artifacts) if executor_output: if not set(executor_output.output_artifacts.keys()).issubset( output_artifacts.keys()): raise RuntimeError( 'Executor output %s contains more keys than output skeleton %s.' % (executor_output, output_artifacts)) for key, artifact_list in output_artifacts.items(): if key not in executor_output.output_artifacts: continue updated_artifact_list = executor_output.output_artifacts[key].artifacts # We assume the original output dict must include at least one output # artifact and all artifacts in the list share the same type. original_artifact = artifact_list[0] # Update the artifact list with what's in the executor output artifact_list.clear() # TODO(b/175426744): revisit this: # 1) Whether multiple output is needed or not after TFX componets # are upgraded. # 2) If multiple output are needed and is a common practice, should we # use driver instead to create the list of output artifact instead # of letting executor to create them. for proto_artifact in updated_artifact_list: _check_validity(proto_artifact, original_artifact, len(updated_artifact_list) > 1) python_artifact = types.Artifact(original_artifact.artifact_type) python_artifact.set_mlmd_artifact(proto_artifact) artifact_list.append(python_artifact) # Marks output artifacts as LIVE. 
for artifact_list in output_artifacts.values(): for artifact in artifact_list: artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE [execution] = metadata_handler.store.get_executions_by_id([execution_id]) execution.last_known_state = metadata_store_pb2.Execution.COMPLETE _set_execution_result_if_not_empty(executor_output, execution) execution_lib.put_execution( metadata_handler, execution, contexts, output_artifacts=output_artifacts) return output_artifacts def publish_failed_execution( metadata_handler: metadata.Metadata, contexts: Sequence[metadata_store_pb2.Context], execution_id: int, executor_output: Optional[execution_result_pb2.ExecutorOutput] = None ) -> None: """Marks an existing execution as failed. Args: metadata_handler: A handler to access MLMD. contexts: MLMD contexts to associated with the execution. execution_id: The id of the execution. executor_output: The output of executor. """ [execution] = metadata_handler.store.get_executions_by_id([execution_id]) execution.last_known_state = metadata_store_pb2.Execution.FAILED _set_execution_result_if_not_empty(executor_output, execution) execution_lib.put_execution(metadata_handler, execution, contexts) def publish_internal_execution( metadata_handler: metadata.Metadata, contexts: Sequence[metadata_store_pb2.Context], execution_id: int, output_artifacts: Optional[MutableMapping[str, Sequence[types.Artifact]]] = None ) -> None: """Marks an exeisting execution as as success and links its output to an INTERNAL_OUTPUT event. Args: metadata_handler: A handler to access MLMD. contexts: MLMD contexts to associated with the execution. execution_id: The id of the execution. output_artifacts: Output artifacts of the execution. Each artifact will be linked with the execution through an event with type INTERNAL_OUTPUT. """ [execution] = metadata_handler.store.get_executions_by_id([execution_id]) execution.last_known_state = metadata_store_pb2.Execution.COMPLETE execution_lib.put_execution( metadata_handler, execution, contexts, output_artifacts=output_artifacts, output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT) def register_execution( metadata_handler: metadata.Metadata, execution_type: metadata_store_pb2.ExecutionType, contexts: Sequence[metadata_store_pb2.Context], input_artifacts: Optional[MutableMapping[str, Sequence[types.Artifact]]] = None, exec_properties: Optional[Mapping[str, types.Property]] = None, ) -> metadata_store_pb2.Execution: """Registers a new execution in MLMD. Along with the execution: - the input artifacts will be linked to the execution. - the contexts will be linked to both the execution and its input artifacts. Args: metadata_handler: A handler to access MLMD. execution_type: The type of the execution. contexts: MLMD contexts to associated with the execution. input_artifacts: Input artifacts of the execution. Each artifact will be linked with the execution through an event. exec_properties: Execution properties. Will be attached to the execution. Returns: An MLMD execution that is registered in MLMD, with id populated. """ execution = execution_lib.prepare_execution( metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING, exec_properties) return execution_lib.put_execution( metadata_handler, execution, contexts, input_artifacts=input_artifacts)
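A typical call sequence for the helpers above is register_execution() to create a RUNNING execution, running the component, then exactly one of publish_succeeded_execution() or publish_failed_execution(). A compact sketch of that flow, written as a function so the MLMD handle, contexts, artifacts and the component callable stay caller-provided (the helper name and its parameters are illustrative, not a TFX API):

from tfx.orchestration.portable import execution_publish_utils


def run_and_publish(metadata_handler, execution_type, contexts,
                    input_artifacts, output_artifacts, run_component):
    execution = execution_publish_utils.register_execution(
        metadata_handler, execution_type, contexts,
        input_artifacts=input_artifacts)
    try:
        executor_output = run_component()  # caller-supplied callable
    except Exception:
        execution_publish_utils.publish_failed_execution(
            metadata_handler, contexts, execution.id)
        raise
    return execution_publish_utils.publish_succeeded_execution(
        metadata_handler, execution.id, contexts,
        output_artifacts=output_artifacts,
        executor_output=executor_output)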
1.695313
2
src/dctm/datasets.py
spotify-research/dctm
11
11953
<reponame>spotify-research/dctm<filename>src/dctm/datasets.py<gh_stars>10-100 # # Copyright 2020 Spotify AB # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Datasets utilities. If you use nltk you may need the following: nltk.download('words') nltk.download('punkt') nltk.download('wordnet') """ import os import nltk import numpy as np import pandas as pd from nltk import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS, CountVectorizer from sklearn.utils import Bunch ENGLISH_WORDS = set(nltk.corpus.words.words()) STEMMER = SnowballStemmer('english') class LemmaTokenizer: def __init__(self, stem=False): self.wnl = WordNetLemmatizer() if stem: self.stemmer = SnowballStemmer('english') else: self.stemmer = Bunch(stem=lambda x: x) def __call__(self, doc): return [ self.wnl.lemmatize(self.stemmer.stem(t)) for t in word_tokenize(doc) if t.lower() in ENGLISH_WORDS ] def get_neurips(filename: str): """Get NeurIPS dataset. Args: filename (str): Location of the file for NeurIPS dataset. """ df = pd.read_csv(filename, header=0, index_col=0) year = np.array([x.split('_')[0] for x in df.columns]) # preprocess df = df.loc[df.index.dropna()] df = df.loc[~df.index.isin(ENGLISH_STOP_WORDS)] df.index = [STEMMER.stem(x) for x in df.index.tolist()] # merge same words together df = df.groupby(level=0).sum() vocabulary = df.sum(axis=1) return df, year, vocabulary def get_sotu(path: str, stem=False): df = {} for filename in sorted(os.listdir(path)): fn = os.path.join(path, filename) df[filename] = ' '.join( [x.decode("utf-8") for x in open(fn, 'rb').readlines()]) df = pd.Series(df) df.index = df.index.str.split('.txt').map(lambda x: x[0]) df = pd.DataFrame(df, columns=['text']) df['years'] = df.index.str.split('_').map(lambda x: int(x[1])) df['author'] = df.index.str.split('_').map(lambda x: x[0]) stopwords_english = LemmaTokenizer(stem=stem)( ' '.join(list(ENGLISH_STOP_WORDS))) vect = CountVectorizer( max_df=0.9, min_df=50, stop_words=stopwords_english, tokenizer=LemmaTokenizer(stem=stem)) corpus = vect.fit_transform(df.text) vocabulary = np.array(vect.get_feature_names()) keep = np.array(corpus.sum(axis=1) > 0).flatten() corpus = corpus[keep] df = df.loc[keep] return df, corpus, vocabulary import json def get_doj(filename: str = 'data/doj.json', stem=True, min_counts=50): df = [] with open(filename, 'r') as f: for line in f: df.append(json.loads(line)) df = pd.DataFrame(df).set_index('id') df.index = range(df.shape[0]) df['text'] = df.title + ' ' + df.contents days = pd.to_datetime( df.date.str.split('T').map(lambda x: x[0]).str.split('-').map( lambda x: '-'.join(x[:-1])), format='%Y-%m') df['days'] = days df['time_delta'] = (df.days - df.days.min()).dt.days stop_words = LemmaTokenizer(stem=stem)( ' '.join(list(ENGLISH_STOP_WORDS))) vectorizer = CountVectorizer( max_df=0.85, min_df=min_counts, stop_words=stop_words, tokenizer=LemmaTokenizer(stem=stem)) corpus = vectorizer.fit_transform(df.text) vocabulary = 
np.array(vectorizer.get_feature_names()) keep = np.array(corpus.sum(axis=1) > 0).flatten() corpus = corpus[keep] df = df.loc[keep] return df, corpus, vocabulary def train_test_split(X, index_points, train_size=0.75, return_sorted=True): unique_index_points = np.unique(index_points) train_idx = np.random.choice( unique_index_points, int(len(unique_index_points) * train_size), replace=False) tr_idx = np.array([x in train_idx for x in index_points.flatten()]) index_tr = index_points[tr_idx] X_tr = X[tr_idx] test_idx = np.unique(list(set(unique_index_points) - set(train_idx))) ts_idx = np.array([x in test_idx for x in index_points.flatten()]) index_ts = index_points[ts_idx] X_ts = X[ts_idx] idx = np.argsort(index_tr, axis=0).flatten() X_tr_sorted = X_tr[idx] index_tr_sorted = index_tr[idx] idx = np.argsort(index_ts, axis=0).flatten() X_ts_sorted = X_ts[idx] index_ts_sorted = index_ts[idx] return_list = [X_tr, X_ts, index_tr, index_ts] if return_sorted: return_list += [ X_tr_sorted, X_ts_sorted, index_tr_sorted, index_ts_sorted ] return return_list def print_to_file_for_gdtm(df, vocabulary, corpus, filename='test', path='.'): """Utility function to save datasets for gDTM. Args: df ([type]): [description] vocabulary ([type]): [description] corpus ([type]): [description] filename (str, optional): [description]. Defaults to 'test'. """ with open(os.path.join(path, '{}_corpus.txt'.format(filename)), 'w') as f: n_times = df.years.unique().size f.writelines('{}\n'.format(n_times)) for name, group in df.groupby('years')[0]: n_docs = group.shape[0] f.writelines('{}\n{}\n'.format(name.timestamp(), n_docs)) idx = group.index.values # np.array([df.index.get_loc(x) for x in group.index]) for c in corpus[idx]: d = c.todok() f.writelines( str(len(d)) + ' ' + ' '.join( '{}:{}'.format(x[1], int(v)) for x, v in d.items()) + '\n') with open(os.path.join(path, '{}_lexicon.txt'.format(filename)), 'w') as f: f.writelines('\n'.join(vocabulary))
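train_test_split above splits on unique index points (for example timestamps) rather than on rows, so every document sharing an index value lands on the same side of the split. A small sketch with synthetic data; the import path assumes src/ is on the path as the package root, and importing the module pulls in the nltk corpora mentioned in its docstring:

import numpy as np
from dctm.datasets import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(10, 3))                         # 10 documents, 3 features
index_points = np.repeat(np.arange(5), 2)[:, None]   # 5 unique time indices, 2 docs each

X_tr, X_ts, idx_tr, idx_ts = train_test_split(
    X, index_points, train_size=0.6, return_sorted=False)
# Every unique index value appears in exactly one of idx_tr / idx_ts.
assert set(np.unique(idx_tr)).isdisjoint(np.unique(idx_ts))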
2.5
2
torchreid/optim/sam.py
opencv/deep-person-reid
1
11954
# Copyright 2020 Google Research # SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2020-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # ''' Imported from: https://github.com/google-research/sam ''' import torch class SAM(torch.optim.Optimizer): def __init__(self, params, base_optimizer, rho=0.05, adaptive=True, **kwargs): assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}" self.base_optimizer = base_optimizer defaults = dict(rho=rho, adaptive=adaptive, **self.base_optimizer.defaults) super().__init__(params, defaults) self.rho = rho self.adaptive = adaptive self.param_groups = self.base_optimizer.param_groups @torch.no_grad() def first_step(self, zero_grad=False): if self._has_overflow(self.param_groups): if zero_grad: self.zero_grad() return True grad_norm = self._grad_norm() for group in self.param_groups: scale = self.rho / (grad_norm + 1e-12) for p in group["params"]: if p.grad is None: continue self.state[p]["old_p"] = p.data.clone() e_w = (torch.pow(p, 2) if self.adaptive else 1.0) * p.grad * scale.to(p) p.add_(e_w) # climb to the local maximum "w + e(w)" if zero_grad: self.zero_grad() return False @torch.no_grad() def second_step(self, zero_grad=False): if self._has_overflow(self.param_groups): if zero_grad: self.zero_grad() return for group in self.param_groups: for p in group["params"]: if p.grad is None: continue p.data = self.state[p]["old_p"] # get back to "w" from "w + e(w)" self.base_optimizer.step() # do the actual "sharpness-aware" update if zero_grad: self.zero_grad() @torch.no_grad() def step(self): raise NotImplementedError("SAM doesn't work like the other optimizers," " you should first call `first_step` and the `second_step`;") def _grad_norm(self): shared_device = self.param_groups[0]["params"][0].device # put everything on the same device, in case of model parallelism norm = torch.norm( torch.stack([ ((torch.abs(p) if self.adaptive else 1.0) * p.grad).norm(p=2).to(shared_device) for group in self.param_groups for p in group["params"] if p.grad is not None ]), p=2 ) return norm @staticmethod def _has_overflow(params): ''' Check whether the gradient overflow occurred in model parameters ''' def _has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False for group in params: for p in group["params"]: if p.grad is not None and _has_inf_or_nan(p.grad.data): return True return False
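SAM's own step() raises on purpose: the intended protocol is first_step() after the first backward pass (perturb the weights towards w + e(w)), a second forward/backward pass at the perturbed point, then second_step() (restore the weights and let the wrapped base optimizer apply the update). A minimal sketch of one training iteration under that protocol; the model, data and import path are illustrative only:

import torch
from torchreid.optim.sam import SAM  # import path per the file header

model = torch.nn.Linear(4, 2)
base = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
optimizer = SAM(model.parameters(), base, rho=0.05, adaptive=True)
criterion = torch.nn.CrossEntropyLoss()

x, y = torch.randn(8, 4), torch.randint(0, 2, (8,))

# First forward/backward at w, then climb to w + e(w).
criterion(model(x), y).backward()
overflow = optimizer.first_step(zero_grad=True)

if not overflow:
    # Second forward/backward at the perturbed weights, then the real update.
    criterion(model(x), y).backward()
    optimizer.second_step(zero_grad=True)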
1.96875
2
src/konfiger_stream.py
konfiger/konfiger-python
4
11955
<reponame>konfiger/konfiger-python """ The MIT License Copyright 2020 <NAME> <<EMAIL>>. """ import os.path from .konfiger_util import type_of, is_string, is_char, is_bool, escape_string, un_escape_string def file_stream(file_path, delimiter = '=', separator = '\n', err_tolerance = False): return KonfigerStream(file_path, delimiter, separator, err_tolerance, True) def string_stream(raw_string, delimiter = '=', separator = '\n', err_tolerance = False): return KonfigerStream(raw_string, delimiter, separator, err_tolerance, False) def validate_file_existence(file_path): if not file_path: raise TypeError("The file path cannot be None") if not is_string(file_path): raise TypeError("Invalid argument expecting str found " + str(type(file_path))) if not os.path.isfile(file_path): raise FileNotFoundError("The file does not exists: " + file_path) class KonfigerStream: def __init__(self, stream_obj, delimiter, separator, err_tolerance, is_file): self.stream_obj = stream_obj self.delimiter = delimiter self.separator = separator self.err_tolerance = err_tolerance self.is_file = is_file self.trimming_key = True self.trimming_value = True self.comment_prefix = "//" self.continuation_char = "\\" self.is_first = 0 if is_file: validate_file_existence(stream_obj) else: if not is_string(stream_obj): raise TypeError("Invalid argument expecting str found " + str(type(stream_obj))) if not is_bool(err_tolerance): raise TypeError("Invalid argument for err_tolerance expecting bool found " + str(type(err_tolerance))) if delimiter and not separator: raise TypeError("Invalid length of argument, separator or delimiter parameter is missing") if not is_char(self.delimiter): raise TypeError("Invalid argument for delimiter expecting char found " + str(type(self.delimiter))) if not is_char(self.separator): raise TypeError("Invalid argument for separator expecting char found " + str(type(self.separator))) self.read_position = 0 self.has_next_ = False self.done_reading_ = False def is_trimming_key(self): return self.trimming_key def set_trimming_key(self, trimming_key): if not is_bool(trimming_key): raise TypeError("Invalid argument, expecting a bool found " + str(type(trimming_key))) self.trimming_key = trimming_key def is_trimming_value(self): return self.trimming_value def set_trimming_value(self, trimming_value): if not is_bool(trimming_value): raise TypeError("Invalid argument, expecting a bool found " + str(type(trimming_value))) self.trimming_value = trimming_value def get_comment_prefix(self): return self.comment_prefix def set_comment_prefix(self, comment_prefix): if not is_string(comment_prefix): raise TypeError("Invalid argument for comment prefix expecting str found " + str(type(comment_prefix))) self.comment_prefix = comment_prefix def get_continuation_char(self): return self.continuation_char def set_continuation_char(self, continuation_char): if not is_char(continuation_char): raise TypeError("Invalid argument for continuation char expecting char found " + str(type(continuation_char))) self.continuation_char = continuation_char def is_error_tolerant(self): return self.err_tolerance def error_tolerance(self, err_tolerance): if not is_bool(err_tolerance): raise TypeError("Invalid argument for err_tolerance expecting char found " + str(type(err_tolerance))) self.err_tolerance = err_tolerance def has_next(self): if not self.done_reading_: comment_size = len(self.comment_prefix) sub_count = 0 if self.is_file: with open(self.stream_obj, "r") as f: byte = f.read(1) f.seek(self.read_position) if not byte: 
self.done_reading() return self.has_next_ while byte: byte = f.read(1) while sub_count < comment_size and byte == self.comment_prefix[sub_count]: sub_count += 1 f.seek(self.read_position+sub_count) byte = f.read(1) self.is_first |= 1 if sub_count == comment_size: self.read_position += 1 while byte and byte != self.separator: self.read_position += 1 f.seek(self.read_position) byte = f.read(1) return self.has_next() if byte.strip() == '': self.read_position += 1 f.seek(self.read_position) continue self.has_next_ = True return self.has_next_ self.has_next_ = False return self.has_next_ else: while self.read_position < len(self.stream_obj): while sub_count < comment_size and self.stream_obj[sub_count+self.read_position] == self.comment_prefix[sub_count]: sub_count += 1 if sub_count == comment_size: self.read_position += 1 while self.read_position < len(self.stream_obj) and self.stream_obj[self.read_position] != self.separator: self.read_position += 1 self.read_position += 1 return self.has_next() if self.stream_obj[self.read_position].strip() == "": self.read_position += 1 continue self.has_next_ = True return self.has_next_ self.has_next_ = False return self.has_next_ return self.has_next_ def next(self): if self.done_reading_: raise BufferError("You cannot read beyound the stream length, always use has_next() to verify the Stream still has an entry") key = "" value = "" parse_key = True prev_char = None prev_prev_char = None i = '\0' line = 1 column = 0 if self.is_file: with open(self.stream_obj, "r") as f: while True: byte = f.read(1) f.seek(self.read_position) if not byte: if key != "": if parse_key == True and self.err_tolerance == False: raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column)) self.done_reading() break self.read_position += 1 char_ = f.read(1) column += 1 if char_ == '\n': line += 1 column = 0 if not parse_key and prev_char == self.continuation_char and prev_prev_char != '\\': if value[len(value)-1] == '\r': value = value[:-2] else: value = value[:-1] while char_.strip() == "": f.seek(self.read_position) self.read_position += 1 char_ = f.read(1) self.read_position -= 1 continue if char_ == self.separator and prev_char != '^': if len(key) == 0 and value == "": continue if parse_key == True and self.err_tolerance == False: raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column)) break if char_ == self.delimiter and parse_key: if value != "" and self.err_tolerance != False: raise LookupError("The input is imporperly sepreated near Line " + str(line) + ":" + str(column)+". 
Check the separator") parse_key = False continue if parse_key == True: key += char_ else: value += char_ prev_prev_char = prev_prev_char if char_ == '\r' else prev_char prev_char = ('\0' if prev_char != '\\' else '\\') if char_ == '\r' else char_ else: for self.read_position in range(self.read_position, len(self.stream_obj)+1): if self.read_position == len(self.stream_obj): if key != "": if parse_key == True and self.err_tolerance == False: raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column)) self.done_reading() break character = self.stream_obj[self.read_position] column += 1 if character == '\n': line += 1 column = 0 if not parse_key and prev_char == self.continuation_char and prev_prev_char != '\\': if value[len(value)-1] == '\r': value = value[:-2] else: value = value[:-1] while character.strip() == "": self.read_position += 1 character = self.stream_obj[self.read_position] self.read_position -= 1 continue if character == self.separator and prev_char != '^' and not parse_key: if key == "" and value =="": continue if parse_key == True and self.err_tolerance == False: raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column)) break if character == self.delimiter and parse_key: if value != "" and self.err_tolerance == False: raise LookupError("The input is imporperly sepreated near Line " + str(line) + ":" + str(column)+". Check the separator") parse_key = False continue if parse_key: key += character else: value += character prev_prev_char = prev_prev_char if character == '\r' else prev_char prev_char = ('\0' if prev_char != '\\' else '\\') if character == '\r' else character self.read_position += 1 return ( key.strip() if self.trimming_key else key, un_escape_string(value, self.separator).strip() if self.trimming_value else un_escape_string(value, self.separator) ) def done_reading(self): self.has_next_ = False self.done_reading_ = True
2.65625
3
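A minimal usage sketch for the stream API in the konfiger entry above, assuming its definitions are in scope; the sample key/value string is illustrative and relies on the default '=' delimiter and '\n' separator shown in the code:

stream = string_stream("name=konfiger\nversion=1.0")  # default delimiter '=' and separator '\n'
while stream.has_next():
    key, value = stream.next()
    print(key, value)
# file_stream("path/to/file.conf") builds the same kind of iterator over a file on disk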
matematik.py
Drummersbrother/math_for_school
0
11956
<filename>matematik.py import math import numpy as np import collections import scipy.stats as sst import matplotlib.pyplot as plt def plot(*args, **kwargs): plt.plot(*args, **kwargs) plt.show() def linregshow(x, y, col: str="r"): linregresult = sst.linregress(list(zip(x, y))) plot(x, y, col, x, [(val * linregresult.slope) + linregresult.intercept for val in x]) return linregresult def list_or_starargs(func): """This is a decorator to specify that a function either takes iterable input in the form of an iterable or a list of passed arguments. If other arguments are needed, the function will need to use kwargs. This passes the list as the first argument.""" def decorated(*args, **kwargs): if isinstance(args[0], collections.Iterable): data = args[0] # We make generators into lists data = [val for val in data] else: data = args return func(data, **kwargs) return decorated @list_or_starargs def spridning(data): """Returns the size of the range of values in the data.""" return max(data) - min(data) @list_or_starargs def medel(data): """Returns the arithmetic mean.""" return sum(data) / len(data) @list_or_starargs def median(data): """Returns the median.""" # We sort the data data = sorted(data) length = len(data) if length % 2 == 0: return medel(data[length // 2], data[(length // 2) - 1]) else: return data[int(length // 2)] @list_or_starargs def kvartiler(data): """Returns the three quartiles of the data in order: lower, median, higher.""" # We sort the data data = sorted(data) # We divide the data into two lists length = len(data) if length % 2 == 1: low_list = data[:(length // 2)] high_list = data[((length // 2) + 1):] else: low_list = data[:int(length / 2)] high_list = data[int(length / 2):] # We return the three quartiles return median(low_list), median(data), median(high_list) def standardav(data, stick=False): """Returns the standard deviation of the input data, which has to be an iterable. stick specifies if it should be treated like non-total set of values (divide by n-1 instead of n).""" div_by = len(data) if (not stick) else (len(data) - 1) medelv = medel(data) return math.sqrt(sum([(val-medelv)**2 for val in data]) / div_by) def normal_d(x, u, o): """Returns the value of a normal/standard distribution at the value x. u is Mu, and o is the standard deviation.""" return (1 / (o * math.sqrt(2*math.pi))) * (math.e ** (-(((x-u)**2) / (2 * (o**2)))))
3.734375
4
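The matematik.py helpers above are small statistics utilities; a short usage sketch follows (values checked by hand; note the list_or_starargs decorator relies on collections.Iterable, which is only available on Python versions before 3.10):

data = [2, 4, 4, 4, 5, 5, 7, 9]
print(medel(data))       # 5.0 (arithmetic mean; the decorator also accepts medel(2, 4, ...))
print(kvartiler(data))   # (4.0, 4.5, 6.0) -> lower quartile, median, upper quartile
print(standardav(data))  # 2.0 (population standard deviation; pass stick=True for the n-1 form)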
docly/ioutils/__init__.py
autosoft-dev/docly
29
11957
import os from pathlib import Path import requests import shutil import sys from distutils.version import LooseVersion import time from tqdm import tqdm from docly.parser import parser as py_parser from docly.tokenizers import tokenize_code_string from docly import __version__ # from c2nl.objects import Code UPDATE_CHECK_URL = "http://3.80.2.138:8584/vercheck/check-version/" # UPDATE_CHECK_URL = "http://127.0.0.1:5000/vercheck/check-version/" interaction_cache = lambda : Path(Path.home() / ".docly" / "interaction_cache") CACHE_DIR = (Path().home() / ".docly" / "file_cache") cache_exists = lambda : CACHE_DIR.exists() make_cache_dir = lambda : os.mkdir(str(CACHE_DIR)) def _compare_installed_version_with_latest(v1, v2): try: current_version = LooseVersion(v1) latest_version = LooseVersion(v2) assert current_version == latest_version return True except AssertionError: return False def look_for_update(): with requests.sessions.Session() as s: try: r = s.get(UPDATE_CHECK_URL, timeout=2) r.raise_for_status() if not _compare_installed_version_with_latest(__version__, r.text): i_c = interaction_cache() return True return False except Exception: i_c = interaction_cache() if not i_c.exists(): os.mkdir(i_c) if not (i_c / "icache.txt").exists(): with open((i_c / "icache.txt"), "w") as f: f.write(str(int(time.time())) + "\n") else: with open((i_c / "icache.txt"), "a") as f: f.write(str(int(time.time())) + "\n") return False def is_dir(base_path): if isinstance(base_path, Path): return base_path.is_dir() elif isinstance(base_path, str): return Path(base_path).is_dir() else: return False def is_python_file(file_path): if isinstance(file_path, Path): return file_path.suffix == ".py" elif isinstance(file_path, str): return Path(file_path).suffix == ".py" else: return False def is_ipynb_notebook(file_path): if isinstance(file_path, Path): return file_path.suffix == ".ipynb" elif isinstance(file_path, str): return Path(file_path).suffix == ".ipynb" else: return False def download_from_url(url, dst): """ @param: url to download file @param: dst place to put the file """ file_size = int(requests.head(url).headers["Content-Length"]) if os.path.exists(dst): first_byte = os.path.getsize(dst) else: first_byte = 0 if first_byte >= file_size: return file_size header = {"Range": "bytes=%s-%s" % (first_byte, file_size)} pbar = tqdm( total=file_size, initial=first_byte, unit='B', unit_scale=True, desc=dst.split('/')[-1]) req = requests.get(url, headers=header, stream=True) with(open(dst, 'ab')) as f: for chunk in req.iter_content(chunk_size=1024): if chunk: f.write(chunk) pbar.update(1024) pbar.close() return file_size def check_out_path(target_path: Path): """" This function recursively yields all contents of a pathlib.Path object """ yield target_path for file in target_path.iterdir(): if file.is_dir(): yield from check_out_path(file) else: yield file.absolute() def process_file(file_path: Path, ts_lib_path: str, use_old=False): result, parser_obj = py_parser.parse(file_path, ts_lib_path) func_and_params = parser_obj.get_all_function_names_with_params() if result: for func_name, data in py_parser.get_func_body_and_docstr(parser_obj): # print(py_toeknizer.tokenize_code_string(func_body)) # code.tokens = tokenizer.tokenize(func_body).data # code.text = func_body (func_body, docstr), start, end = data ret_start = (start[0]+1, start[1]) params = func_and_params[func_name] code_str = [tokenize_code_string(func_body)] if use_old else func_body yield code_str, params, ret_start, func_name, docstr.strip() def 
query_yes_no(question, default="yes"): """Ask a yes/no question and return the user's answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes", "no", or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no". """ valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError("invalid default answer: '{}'".format(default)) while True: print(question + prompt) choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: print("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
2.359375
2
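A brief sketch of how the docly path helpers above compose, assuming they are importable and the script runs from a project root (the "." starting directory is an assumption):

from pathlib import Path

# walk the tree, keep only Python source files, and ask before printing them
py_files = [p for p in check_out_path(Path(".")) if not is_dir(p) and is_python_file(p)]
if query_yes_no("List {} Python files?".format(len(py_files))):
    for p in py_files:
        print(p)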
java/version.bzl
symonk/selenium
0
11958
SE_VERSION = "4.2.1"
0.96875
1
scripts/preprocess_for_prediction.py
jmueller95/deepgrind
0
11959
<filename>scripts/preprocess_for_prediction.py import pandas as pd import utils def check_msms_model_name(converter): def wrapper(*args, **kwargs): if kwargs['style'] not in ["pdeep", "prosit"]: raise Exception("MSMS model must be 'pdeep' or 'prosit'") converter(*args, **kwargs) return wrapper @check_msms_model_name def _convert_for_msms(comet_df, style, output): if style == "prosit": res = pd.DataFrame( {"modified_sequence": comet_df.Peptide.apply( lambda pep: utils.find_modifications(pep[2:-2], style="prosit")).values, "collision_energy": snakemake.params['collision_energy'], "precursor_charge": comet_df.apply(lambda row: row.Charge1 + 2 * row.Charge2 + 3 * row.Charge3, axis=1)}) res.dropna(inplace=True) res.to_csv(output, sep=",", header=True, index=False) else: res = pd.DataFrame( comet_df.Peptide.apply(lambda pep: utils.find_modifications(pep[2:-2], style="pdeep")).to_list(), columns=["peptide", "modification"]) # The charge is one-hot encoded in the comet df, so we can resolve this into 1,2 or 3 by multiplying 1,2 and 3 # with the entries of Charge1, Charge2 and Charge3 res["charge"] = comet_df.apply(lambda row: row.Charge1 + 2 * row.Charge2 + 3 * row.Charge3, axis=1) res.dropna(inplace=True) res.to_csv(output, sep="\t", header=True, index=False) @check_msms_model_name def _convert_for_rt(comet_df, style, output): if style == "prosit": res = pd.DataFrame( {"modified_sequence": comet_df.Peptide.apply(lambda pep: utils.find_modifications(pep[2:-2], style="prosit")).values}) res.dropna(inplace=True) res.to_csv(output, sep=",", header=True, index=False) else: raise Exception("Not implemented. Right now, the only accepted RT Model is 'prosit'.") def main(): # Parse the input file: comet_df = pd.read_csv(snakemake.input[0], sep="\t", header=0, usecols=["Peptide", "Charge1", "Charge2", "Charge3"], index_col=False) # Determine if MSMS and RT prediction will be performed jointly or separately if "msms_model" in dict(snakemake.params) and "rt_model" in dict(snakemake.params): _convert_for_msms(comet_df, style=snakemake.params['msms_model'].lower(), output=snakemake.output['msms_prediction_input']) _convert_for_rt(comet_df, style=snakemake.params['rt_model'].lower(), output=snakemake.output['rt_prediction_input']) else: # If only one model was supplied, the prediction will be joint # Only convert the input for msms in that case _convert_for_msms(comet_df, style=snakemake.params['model'].lower(), output=snakemake.output['prediction_input']) if __name__ == '__main__': main()
2.578125
3
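The comment in _convert_for_msms above notes that the precursor charge is one-hot encoded across Charge1/Charge2/Charge3; a self-contained toy sketch of that resolution step (the three-row frame is made up, not real Comet output):

import pandas as pd

df = pd.DataFrame({"Charge1": [1, 0, 0], "Charge2": [0, 1, 0], "Charge3": [0, 0, 1]})
charge = df.apply(lambda row: row.Charge1 + 2 * row.Charge2 + 3 * row.Charge3, axis=1)
print(charge.tolist())  # [1, 2, 3]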
GettingStarted/gettingstarted.py
rohitp934/roadtoadatascientist
0
11960
#importing necessary modules from sklearn.linear_model import Perceptron from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score import numpy as np # Data and labels Xtrain = [[182, 80, 34], [176, 70, 33], [161, 60, 28], [154, 55, 27], [166, 63, 30], [189, 90, 36], [175, 63, 28], [177, 71, 30], [159, 52, 27], [171, 72, 32], [181, 85, 34]] Ytrain = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male'] Xval = [[163, 62, 28], [182, 80, 35], [150, 50, 24], [160, 57, 27], [175, 62, 30], [183, 67, 32], [177, 64, 29], [164, 62, 29], [157, 53, 23], [170, 73, 32], [169, 59, 29]] Yval = ['female', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'female'] # initializing the ML models knn = KNeighborsClassifier() perceptron = Perceptron() # Fitting the models knn.fit(Xtrain, Ytrain) perceptron.fit(Xtrain, Ytrain) # Testing using our input data pred_knn = knn.predict(Xval) acc_knn = accuracy_score(Yval, pred_knn) * 100 print(f'Accuracy for knn: {acc_knn}') pred_perceptron = perceptron.predict(Xval) acc_perceptron = accuracy_score(Yval, pred_perceptron) * 100 print(f'Accuracy for perceptron: {acc_perceptron}') # The best classifier out of the two models index = np.argmax([acc_knn, acc_perceptron]) #argmax function assigns the index of the maximum value to the variable classifiers = {0: 'KNN', 1:'PER'} print(f'Best gender classifier is {classifiers[index]}')
3.203125
3
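A possible follow-up to the classifier comparison above, assuming the script has just run so knn, perceptron, and index are still in scope; the new measurement is made up:

best_model = knn if index == 0 else perceptron
print(best_model.predict([[172, 68, 31]]))  # prints the predicted gender label for the new sample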
libs/optimizers.py
bxtkezhan/AILabs
0
11961
<gh_stars>0 import numpy as np class SGD: def __init__(self, lr=0.01, momentum=0.0, decay=0.0, nesterov=False, maximum=None, minimum=None): self.lr = lr self.momentum = momentum self.decay = decay self.nesterov = nesterov self.idx = None self.maximum = maximum or lr self.minimum = minimum or 0.0 if self.maximum <= self.minimum: raise TypeError('maximum must be greater than minimum') def __call__(self, sample_size, batch_size, status='begin'): if status == 'begin': self.idx = np.arange(sample_size) elif status == 'time': self.idx = np.random.permutation(self.idx) self.lr = self.lr - self.decay self.lr = min(self.lr, self.maximum) self.lr = max(self.lr, self.minimum) elif status == 'epoch': pass return self.idx, self.lr
2.296875
2
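A minimal training-loop sketch for the SGD index/learning-rate scheduler above, assuming the class is in scope; the sample_size, batch_size, and decay values are illustrative:

opt = SGD(lr=0.1, decay=0.01, minimum=0.01)
idx, lr = opt(sample_size=100, batch_size=10, status='begin')     # initial index order and lr
for epoch in range(3):
    idx, lr = opt(sample_size=100, batch_size=10, status='time')  # reshuffle indices, decay lr
    # ...slice idx into batch_size chunks and update the model with learning rate lr...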
pennylane/transforms/qcut.py
therooler/pennylane
0
11962
# Copyright 2022 Xanadu Quantum Technologies Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Functions for performing quantum circuit cutting. """ import copy import inspect import string import uuid import warnings from collections.abc import Sequence as SequenceType from dataclasses import InitVar, dataclass from functools import partial from itertools import compress, product from pathlib import Path from typing import Any, Callable, ClassVar, Dict, List, Optional, Sequence, Tuple, Union from networkx import MultiDiGraph, has_path, weakly_connected_components import pennylane as qml from pennylane import apply, expval from pennylane import numpy as np from pennylane.grouping import string_to_pauli_word from pennylane.measurements import Expectation, MeasurementProcess, Sample from pennylane.operation import Operation, Operator, Tensor from pennylane.ops.qubit.non_parametric_ops import WireCut from pennylane.tape import QuantumTape from pennylane.wires import Wires from .batch_transform import batch_transform class MeasureNode(Operation): """Placeholder node for measurement operations""" num_wires = 1 grad_method = None def __init__(self, *params, wires=None, do_queue=True, id=None): id = id or str(uuid.uuid4()) super().__init__(*params, wires=wires, do_queue=do_queue, id=id) class PrepareNode(Operation): """Placeholder node for state preparations""" num_wires = 1 grad_method = None def __init__(self, *params, wires=None, do_queue=True, id=None): id = id or str(uuid.uuid4()) super().__init__(*params, wires=wires, do_queue=do_queue, id=id) def replace_wire_cut_node(node: WireCut, graph: MultiDiGraph): """ Replace a :class:`~.WireCut` node in the graph with a :class:`~.MeasureNode` and :class:`~.PrepareNode`. .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: node (WireCut): the :class:`~.WireCut` node to be replaced with a :class:`~.MeasureNode` and :class:`~.PrepareNode` graph (nx.MultiDiGraph): the graph containing the node to be replaced **Example** Consider the following circuit with a manually-placed wire cut: .. 
code-block:: python wire_cut = qml.WireCut(wires=0) with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.apply(wire_cut) qml.RY(0.5, wires=0) qml.expval(qml.PauliZ(0)) We can find the circuit graph and remove the wire cut node using: >>> graph = qml.transforms.qcut.tape_to_graph(tape) >>> qml.transforms.qcut.replace_wire_cut_node(wire_cut, graph) """ predecessors = graph.pred[node] successors = graph.succ[node] predecessor_on_wire = {} for op, data in predecessors.items(): for d in data.values(): wire = d["wire"] predecessor_on_wire[wire] = op successor_on_wire = {} for op, data in successors.items(): for d in data.values(): wire = d["wire"] successor_on_wire[wire] = op order = graph.nodes[node]["order"] graph.remove_node(node) for wire in node.wires: predecessor = predecessor_on_wire.get(wire, None) successor = successor_on_wire.get(wire, None) meas = MeasureNode(wires=wire) prep = PrepareNode(wires=wire) # We are introducing a degeneracy in the order of the measure and prepare nodes # here but the order can be inferred as MeasureNode always precedes # the corresponding PrepareNode graph.add_node(meas, order=order) graph.add_node(prep, order=order) graph.add_edge(meas, prep, wire=wire) if predecessor is not None: graph.add_edge(predecessor, meas, wire=wire) if successor is not None: graph.add_edge(prep, successor, wire=wire) def replace_wire_cut_nodes(graph: MultiDiGraph): """ Replace each :class:`~.WireCut` node in the graph with a :class:`~.MeasureNode` and :class:`~.PrepareNode`. .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: graph (nx.MultiDiGraph): The graph containing the :class:`~.WireCut` nodes to be replaced **Example** Consider the following circuit with manually-placed wire cuts: .. code-block:: python wire_cut_0 = qml.WireCut(wires=0) wire_cut_1 = qml.WireCut(wires=1) multi_wire_cut = qml.WireCut(wires=[0, 1]) with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.apply(wire_cut_0) qml.RY(0.5, wires=0) qml.apply(wire_cut_1) qml.CNOT(wires=[0, 1]) qml.apply(multi_wire_cut) qml.RZ(0.6, wires=1) qml.expval(qml.PauliZ(0)) We can find the circuit graph and remove all the wire cut nodes using: >>> graph = qml.transforms.qcut.tape_to_graph(tape) >>> qml.transforms.qcut.replace_wire_cut_nodes(graph) """ for op in list(graph.nodes): if isinstance(op, WireCut): replace_wire_cut_node(op, graph) def _add_operator_node(graph: MultiDiGraph, op: Operator, order: int, wire_latest_node: dict): """ Helper function to add operators as nodes during tape to graph conversion. """ graph.add_node(op, order=order) for wire in op.wires: if wire_latest_node[wire] is not None: parent_op = wire_latest_node[wire] graph.add_edge(parent_op, op, wire=wire) wire_latest_node[wire] = op def tape_to_graph(tape: QuantumTape) -> MultiDiGraph: """ Converts a quantum tape to a directed multigraph. .. note:: This operation is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: tape (QuantumTape): tape to be converted into a directed multigraph Returns: nx.MultiDiGraph: a directed multigraph that captures the circuit structure of the input tape **Example** Consider the following tape: .. 
code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.RY(0.9, wires=0) qml.CNOT(wires=[0, 1]) qml.expval(qml.PauliZ(1)) Its corresponding circuit graph can be found using >>> qml.transforms.qcut.tape_to_graph(tape) <networkx.classes.multidigraph.MultiDiGraph at 0x7fe41cbd7210> """ graph = MultiDiGraph() wire_latest_node = {w: None for w in tape.wires} for order, op in enumerate(tape.operations): _add_operator_node(graph, op, order, wire_latest_node) order += 1 # pylint: disable=undefined-loop-variable for m in tape.measurements: obs = getattr(m, "obs", None) if obs is not None and isinstance(obs, Tensor): if m.return_type is Sample: raise ValueError( "Sampling from tensor products of observables " "is not supported in circuit cutting" ) for o in obs.obs: m_ = MeasurementProcess(m.return_type, obs=o) _add_operator_node(graph, m_, order, wire_latest_node) elif m.return_type is Sample and obs is None: for w in m.wires: s_ = qml.sample(qml.Projector([1], wires=w)) _add_operator_node(graph, s_, order, wire_latest_node) else: _add_operator_node(graph, m, order, wire_latest_node) order += 1 return graph # pylint: disable=too-many-branches def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]: """ Fragments a graph into a collection of subgraphs as well as returning the communication (`quotient <https://en.wikipedia.org/wiki/Quotient_graph>`__) graph. The input ``graph`` is fragmented by disconnecting each :class:`~.MeasureNode` and :class:`~.PrepareNode` pair and finding the resultant disconnected subgraph fragments. Each node of the communication graph represents a subgraph fragment and the edges denote the flow of qubits between fragments due to the removed :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. .. note:: This operation is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: graph (nx.MultiDiGraph): directed multigraph containing measure and prepare nodes at cut locations Returns: Tuple[Tuple[nx.MultiDiGraph], nx.MultiDiGraph]: the subgraphs of the cut graph and the communication graph. **Example** Consider the following circuit with manually-placed wire cuts: .. 
code-block:: python wire_cut_0 = qml.WireCut(wires=0) wire_cut_1 = qml.WireCut(wires=1) multi_wire_cut = qml.WireCut(wires=[0, 1]) with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.apply(wire_cut_0) qml.RY(0.5, wires=0) qml.apply(wire_cut_1) qml.CNOT(wires=[0, 1]) qml.apply(multi_wire_cut) qml.RZ(0.6, wires=1) qml.expval(qml.PauliZ(0)) We can find the corresponding graph, remove all the wire cut nodes, and find the subgraphs and communication graph by using: >>> graph = qml.transforms.qcut.tape_to_graph(tape) >>> qml.transforms.qcut.replace_wire_cut_nodes(graph) >>> qml.transforms.qcut.fragment_graph(graph) ((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>, <networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>, <networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>, <networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>), <networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>) """ graph_copy = graph.copy() cut_edges = [] measure_nodes = [n for n in graph.nodes if isinstance(n, MeasurementProcess)] for node1, node2, wire_key in graph.edges: if isinstance(node1, MeasureNode): assert isinstance(node2, PrepareNode) cut_edges.append((node1, node2, wire_key)) graph_copy.remove_edge(node1, node2, key=wire_key) subgraph_nodes = weakly_connected_components(graph_copy) subgraphs = tuple(MultiDiGraph(graph_copy.subgraph(n)) for n in subgraph_nodes) communication_graph = MultiDiGraph() communication_graph.add_nodes_from(range(len(subgraphs))) for node1, node2, _ in cut_edges: for i, subgraph in enumerate(subgraphs): if subgraph.has_node(node1): start_fragment = i if subgraph.has_node(node2): end_fragment = i if start_fragment != end_fragment: communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2)) else: # The MeasureNode and PrepareNode pair live in the same fragment and did not result # in a disconnection. We can therefore remove these nodes. Note that we do not need # to worry about adding back an edge between the predecessor to node1 and the successor # to node2 because our next step is to convert the fragment circuit graphs to tapes, # a process that does not depend on edge connections in the subgraph. subgraphs[start_fragment].remove_node(node1) subgraphs[end_fragment].remove_node(node2) terminal_indices = [i for i, s in enumerate(subgraphs) for n in measure_nodes if s.has_node(n)] subgraphs_connected_to_measurements = [] subgraphs_indices_to_remove = [] prepare_nodes_removed = [] for i, s in enumerate(subgraphs): if any(has_path(communication_graph, i, t) for t in terminal_indices): subgraphs_connected_to_measurements.append(s) else: subgraphs_indices_to_remove.append(i) prepare_nodes_removed.extend([n for n in s.nodes if isinstance(n, PrepareNode)]) measure_nodes_to_remove = [ m for p in prepare_nodes_removed for m, p_, _ in cut_edges if p is p_ ] communication_graph.remove_nodes_from(subgraphs_indices_to_remove) for m in measure_nodes_to_remove: for s in subgraphs_connected_to_measurements: if s.has_node(m): s.remove_node(m) return subgraphs_connected_to_measurements, communication_graph def _find_new_wire(wires: Wires) -> int: """Finds a new wire label that is not in ``wires``.""" ctr = 0 while ctr in wires: ctr += 1 return ctr # pylint: disable=protected-access def graph_to_tape(graph: MultiDiGraph) -> QuantumTape: """ Converts a directed multigraph to the corresponding :class:`~.QuantumTape`. 
To account for the possibility of needing to perform mid-circuit measurements, if any operations follow a :class:`MeasureNode` operation on a given wire then these operations are mapped to a new wire. .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: graph (nx.MultiDiGraph): directed multigraph to be converted to a tape Returns: QuantumTape: the quantum tape corresponding to the input graph **Example** Consider the following circuit: .. code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.4, wires=0) qml.RY(0.5, wires=1) qml.CNOT(wires=[0, 1]) qml.transforms.qcut.MeasureNode(wires=1) qml.transforms.qcut.PrepareNode(wires=1) qml.CNOT(wires=[1, 0]) qml.expval(qml.PauliZ(0)) This circuit contains operations that follow a :class:`~.MeasureNode`. These operations will subsequently act on wire ``2`` instead of wire ``1``: >>> graph = qml.transforms.qcut.tape_to_graph(tape) >>> tape = qml.transforms.qcut.graph_to_tape(graph) >>> print(tape.draw()) 0: ──RX(0.4)──────╭C───────────────╭X──┤ ⟨Z⟩ 1: ──RY(0.5)──────╰X──MeasureNode──│───┤ 2: ──PrepareNode───────────────────╰C──┤ """ wires = Wires.all_wires([n.wires for n in graph.nodes]) ordered_ops = sorted( [(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0] ) wire_map = {w: w for w in wires} reverse_wire_map = {v: k for k, v in wire_map.items()} copy_ops = [copy.copy(op) for _, op in ordered_ops if not isinstance(op, MeasurementProcess)] copy_meas = [copy.copy(op) for _, op in ordered_ops if isinstance(op, MeasurementProcess)] observables = [] with QuantumTape() as tape: for op in copy_ops: new_wires = Wires([wire_map[w] for w in op.wires]) # TODO: find a better way to update operation wires op._wires = new_wires apply(op) if isinstance(op, MeasureNode): assert len(op.wires) == 1 measured_wire = op.wires[0] new_wire = _find_new_wire(wires) wires += new_wire original_wire = reverse_wire_map[measured_wire] wire_map[original_wire] = new_wire reverse_wire_map[new_wire] = original_wire if copy_meas: return_types = set(meas.return_type for meas in copy_meas) if len(return_types) > 1: raise ValueError( "Only a single return type can be used for measurement " "nodes in graph_to_tape" ) return_type = return_types.pop() if return_type not in {Sample, Expectation}: raise ValueError( "Invalid return type. Only expectation value and sampling measurements " "are supported in graph_to_tape" ) for meas in copy_meas: obs = meas.obs obs._wires = Wires([wire_map[w] for w in obs.wires]) observables.append(obs) if return_type is Sample: apply(meas) if return_type is Expectation: if len(observables) > 1: qml.expval(Tensor(*observables)) else: qml.expval(obs) return tape def _get_measurements( group: Sequence[Operator], measurements: Sequence[MeasurementProcess] ) -> List[MeasurementProcess]: """Pairs each observable in ``group`` with the circuit ``measurements``. Only a single measurement of an expectation value is currently supported in ``measurements``. 
Args: group (Sequence[Operator]): a collection of observables measurements (Sequence[MeasurementProcess]): measurements from the circuit Returns: List[MeasurementProcess]: the expectation values of ``g @ obs``, where ``g`` is iterated over ``group`` and ``obs`` is the observable composing the single measurement in ``measurements`` """ if len(group) == 0: # This ensures the measurements of the original tape are carried over to the # following tape configurations in the absence of any MeasureNodes in the fragment return measurements n_measurements = len(measurements) if n_measurements > 1: raise ValueError( "The circuit cutting workflow only supports circuits with a single output " "measurement" ) if n_measurements == 0: return [expval(g) for g in group] measurement = measurements[0] if measurement.return_type is not Expectation: raise ValueError( "The circuit cutting workflow only supports circuits with expectation " "value measurements" ) obs = measurement.obs return [expval(copy.copy(obs) @ g) for g in group] def _prep_zero_state(wire): qml.Identity(wire) def _prep_one_state(wire): qml.PauliX(wire) def _prep_plus_state(wire): qml.Hadamard(wire) def _prep_minus_state(wire): qml.PauliX(wire) qml.Hadamard(wire) def _prep_iplus_state(wire): qml.Hadamard(wire) qml.S(wires=wire) def _prep_iminus_state(wire): qml.PauliX(wire) qml.Hadamard(wire) qml.S(wires=wire) PREPARE_SETTINGS = [_prep_zero_state, _prep_one_state, _prep_plus_state, _prep_iplus_state] def expand_fragment_tape( tape: QuantumTape, ) -> Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]: """ Expands a fragment tape into a sequence of tapes for each configuration of the contained :class:`MeasureNode` and :class:`PrepareNode` operations. .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: tape (QuantumTape): the fragment tape containing :class:`MeasureNode` and :class:`PrepareNode` operations to be expanded Returns: Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]: the tapes corresponding to each configuration and the order of preparation nodes and measurement nodes used in the expansion **Example** Consider the following circuit, which contains a :class:`~.MeasureNode` and :class:`~.PrepareNode` operation: .. code-block:: python with qml.tape.QuantumTape() as tape: qml.transforms.qcut.PrepareNode(wires=0) qml.RX(0.5, wires=0) qml.transforms.qcut.MeasureNode(wires=0) We can expand over the measurement and preparation nodes using: >>> tapes, prep, meas = qml.transforms.qcut.expand_fragment_tape(tape) >>> for t in tapes: ... 
print(qml.drawer.tape_text(t, decimals=1)) 0: ──I──RX(0.5)─┤ <I> <Z> 0: ──I──RX(0.5)─┤ <X> 0: ──I──RX(0.5)─┤ <Y> 0: ──X──RX(0.5)─┤ <I> <Z> 0: ──X──RX(0.5)─┤ <X> 0: ──X──RX(0.5)─┤ <Y> 0: ──H──RX(0.5)─┤ <I> <Z> 0: ──H──RX(0.5)─┤ <X> 0: ──H──RX(0.5)─┤ <Y> 0: ──H──S──RX(0.5)─┤ <I> <Z> 0: ──H──S──RX(0.5)─┤ <X> 0: ──H──S──RX(0.5)─┤ <Y> """ prepare_nodes = [o for o in tape.operations if isinstance(o, PrepareNode)] measure_nodes = [o for o in tape.operations if isinstance(o, MeasureNode)] wire_map = {mn.wires[0]: i for i, mn in enumerate(measure_nodes)} n_meas = len(measure_nodes) if n_meas >= 1: measure_combinations = qml.grouping.partition_pauli_group(len(measure_nodes)) else: measure_combinations = [[""]] tapes = [] for prepare_settings in product(range(len(PREPARE_SETTINGS)), repeat=len(prepare_nodes)): for measure_group in measure_combinations: if n_meas >= 1: group = [ string_to_pauli_word(paulis, wire_map=wire_map) for paulis in measure_group ] else: group = [] prepare_mapping = { n: PREPARE_SETTINGS[s] for n, s in zip(prepare_nodes, prepare_settings) } with QuantumTape() as tape_: for op in tape.operations: if isinstance(op, PrepareNode): w = op.wires[0] prepare_mapping[op](w) elif not isinstance(op, MeasureNode): apply(op) with qml.tape.stop_recording(): measurements = _get_measurements(group, tape.measurements) for meas in measurements: apply(meas) tapes.append(tape_) return tapes, prepare_nodes, measure_nodes MC_STATES = [ _prep_zero_state, _prep_one_state, _prep_plus_state, _prep_minus_state, _prep_iplus_state, _prep_iminus_state, _prep_zero_state, _prep_one_state, ] def _identity(wire): qml.sample(qml.Identity(wires=wire)) def _pauliX(wire): qml.sample(qml.PauliX(wires=wire)) def _pauliY(wire): qml.sample(qml.PauliY(wires=wire)) def _pauliZ(wire): qml.sample(qml.PauliZ(wires=wire)) MC_MEASUREMENTS = [ _identity, _identity, _pauliX, _pauliX, _pauliY, _pauliY, _pauliZ, _pauliZ, ] def expand_fragment_tapes_mc( tapes: Sequence[QuantumTape], communication_graph: MultiDiGraph, shots: int ) -> Tuple[List[QuantumTape], np.ndarray]: """ Expands fragment tapes into a sequence of random configurations of the contained pairs of :class:`MeasureNode` and :class:`PrepareNode` operations. For each pair, a measurement is sampled from the Pauli basis and a state preparation is sampled from the corresponding pair of eigenstates. A settings array is also given which tracks the configuration pairs. Since each of the 4 measurements has 2 possible eigenvectors, all configurations can be uniquely identified by 8 values. The number of rows is determined by the number of cuts and the number of columns is determined by the number of shots. .. note:: This function is designed for use as part of the sampling-based circuit cutting workflow. Check out the :func:`~.cut_circuit_mc` transform for more details. Args: tapes (Sequence[QuantumTape]): the fragment tapes containing :class:`MeasureNode` and :class:`PrepareNode` operations to be expanded communication_graph (nx.MultiDiGraph): the communication (quotient) graph of the fragmented full graph shots (int): number of shots Returns: Tuple[List[QuantumTape], np.ndarray]: the tapes corresponding to each configuration and the settings that track each configuration pair **Example** Consider the following circuit that contains a sample measurement: .. 
code-block:: python with qml.tape.QuantumTape() as tape: qml.Hadamard(wires=0) qml.CNOT(wires=[0, 1]) qml.WireCut(wires=1) qml.CNOT(wires=[1, 2]) qml.sample(wires=[0, 1, 2]) We can generate the fragment tapes using the following workflow: >>> g = qml.transforms.qcut.tape_to_graph(tape) >>> qml.transforms.qcut.replace_wire_cut_nodes(g) >>> subgraphs, communication_graph = qml.transforms.qcut.fragment_graph(g) >>> tapes = [qml.transforms.qcut.graph_to_tape(sg) for sg in subgraphs] We can then expand over the measurement and preparation nodes to generate random configurations using: .. code-block:: python >>> configs, settings = qml.transforms.qcut.expand_fragment_tapes_mc(tapes, communication_graph, 3) >>> print(settings) [[1 6 2]] >>> for i, (c1, c2) in enumerate(zip(configs[0], configs[1])): ... print(f"config {i}:") ... print(c1.draw()) ... print("") ... print(c2.draw()) ... print("") ... config 0: 0: ──H─╭C─┤ Sample[|1⟩⟨1|] 1: ────╰X─┤ Sample[I] 1: ──X─╭C─┤ Sample[|1⟩⟨1|] 2: ────╰X─┤ Sample[|1⟩⟨1|] config 1: 0: ──H─╭C─┤ Sample[|1⟩⟨1|] 1: ────╰X─┤ Sample[Z] 1: ──I─╭C─┤ Sample[|1⟩⟨1|] 2: ────╰X─┤ Sample[|1⟩⟨1|] config 2: 0: ──H─╭C─┤ Sample[|1⟩⟨1|] 1: ────╰X─┤ Sample[X] 1: ──H─╭C─┤ Sample[|1⟩⟨1|] 2: ────╰X─┤ Sample[|1⟩⟨1|] """ pairs = [e[-1] for e in communication_graph.edges.data("pair")] settings = np.random.choice(range(8), size=(len(pairs), shots), replace=True) meas_settings = {pair[0].id: setting for pair, setting in zip(pairs, settings)} prep_settings = {pair[1].id: setting for pair, setting in zip(pairs, settings)} all_configs = [] for tape in tapes: frag_config = [] for shot in range(shots): with qml.tape.QuantumTape() as new_tape: for op in tape.operations: w = op.wires[0] if isinstance(op, PrepareNode): MC_STATES[prep_settings[op.id][shot]](w) elif not isinstance(op, MeasureNode): qml.apply(op) for meas in tape.measurements: qml.apply(meas) for op in tape.operations: meas_w = op.wires[0] if isinstance(op, MeasureNode): MC_MEASUREMENTS[meas_settings[op.id][shot]](meas_w) frag_config.append(new_tape) all_configs.append(frag_config) return all_configs, settings def _reshape_results(results: Sequence, shots: int) -> List[List]: """ Helper function to reshape ``results`` into a two-dimensional nested list whose number of rows is determined by the number of shots and whose number of columns is determined by the number of cuts. """ results = [qml.math.flatten(r) for r in results] results = [results[i : i + shots] for i in range(0, len(results), shots)] results = list(map(list, zip(*results))) # calculate list-based transpose return results def qcut_processing_fn_sample( results: Sequence, communication_graph: MultiDiGraph, shots: int ) -> List: """ Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>` transform. This removes superfluous mid-circuit measurement samples from fragment circuit outputs. .. note:: This function is designed for use as part of the sampling-based circuit cutting workflow. Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details. 
Args: results (Sequence): a collection of sample-based execution results generated from the random expansion of circuit fragments over measurement and preparation node configurations communication_graph (nx.MultiDiGraph): the communication graph determining connectivity between circuit fragments shots (int): the number of shots Returns: List[tensor_like]: the sampled output for all terminal measurements over the number of shots given """ res0 = results[0] results = _reshape_results(results, shots) out_degrees = [d for _, d in communication_graph.out_degree] samples = [] for result in results: sample = [] for fragment_result, out_degree in zip(result, out_degrees): sample.append(fragment_result[: -out_degree or None]) samples.append(np.hstack(sample)) return [qml.math.convert_like(np.array(samples), res0)] def qcut_processing_fn_mc( results: Sequence, communication_graph: MultiDiGraph, settings: np.ndarray, shots: int, classical_processing_fn: callable, ): """ Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>` transform. This takes a user-specified classical function to act on bitstrings and generates an expectation value. .. note:: This function is designed for use as part of the sampling-based circuit cutting workflow. Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details. Args: results (Sequence): a collection of sample-based execution results generated from the random expansion of circuit fragments over measurement and preparation node configurations communication_graph (nx.MultiDiGraph): the communication graph determining connectivity between circuit fragments settings (np.ndarray): Each element is one of 8 unique values that tracks the specific measurement and preparation operations over all configurations. The number of rows is determined by the number of cuts and the number of columns is determined by the number of shots. shots (int): the number of shots classical_processing_fn (callable): A classical postprocessing function to be applied to the reconstructed bitstrings. The expected input is a bitstring; a flat array of length ``wires`` and the output should be a single number within the interval :math:`[-1, 1]`. Returns: float or tensor_like: the expectation value calculated in accordance to Eq. (35) of `Peng et al. 
<https://arxiv.org/abs/1904.00102>`__ """ res0 = results[0] results = _reshape_results(results, shots) out_degrees = [d for _, d in communication_graph.out_degree] evals = (0.5, 0.5, 0.5, -0.5, 0.5, -0.5, 0.5, -0.5) expvals = [] for result, setting in zip(results, settings.T): sample_terminal = [] sample_mid = [] for fragment_result, out_degree in zip(result, out_degrees): sample_terminal.append(fragment_result[: -out_degree or None]) sample_mid.append(fragment_result[-out_degree or len(fragment_result) :]) sample_terminal = np.hstack(sample_terminal) sample_mid = np.hstack(sample_mid) assert set(sample_terminal).issubset({np.array(0), np.array(1)}) assert set(sample_mid).issubset({np.array(-1), np.array(1)}) # following Eq.(35) of Peng et.al: https://arxiv.org/abs/1904.00102 f = classical_processing_fn(sample_terminal) if not -1 <= f <= 1: raise ValueError( "The classical processing function supplied must " "give output in the interval [-1, 1]" ) sigma_s = np.prod(sample_mid) t_s = f * sigma_s c_s = np.prod([evals[s] for s in setting]) K = len(sample_mid) expvals.append(8**K * c_s * t_s) return qml.math.convert_like(np.mean(expvals), res0) @batch_transform def cut_circuit_mc( tape: QuantumTape, classical_processing_fn: Optional[callable] = None, auto_cutter: Union[bool, Callable] = False, max_depth: int = 1, shots: Optional[int] = None, device_wires: Optional[Wires] = None, **kwargs, ) -> Tuple[Tuple[QuantumTape], Callable]: """ Cut up a circuit containing sample measurements into smaller fragments using a Monte Carlo method. Following the approach of `Peng et al. <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.125.150504>`__, strategic placement of :class:`~.WireCut` operations can allow a quantum circuit to be split into disconnected circuit fragments. A circuit containing sample measurements can be cut and processed using Monte Carlo (MC) methods. This transform employs MC methods to allow for sampled measurement outcomes to be recombined to full bitstrings and, if a classical processing function is supplied, an expectation value will be evaluated. Args: tape (QuantumTape): the tape of the full circuit to be cut classical_processing_fn (callable): A classical postprocessing function to be applied to the reconstructed bitstrings. The expected input is a bitstring; a flat array of length ``wires``, and the output should be a single number within the interval :math:`[-1, 1]`. If not supplied, the transform will output samples. auto_cutter (Union[bool, Callable]): Toggle for enabling automatic cutting with the default :func:`~.kahypar_cut` partition method. Can also pass a graph partitioning function that takes an input graph and returns a list of edges to be cut based on a given set of constraints and objective. The default :func:`~.kahypar_cut` function requires KaHyPar to be installed using ``pip install kahypar`` for Linux and Mac users or visiting the instructions `here <https://kahypar.org>`__ to compile from source for Windows users. max_depth (int): The maximum depth used to expand the circuit while searching for wire cuts. Only applicable when transforming a QNode. shots (int): Number of shots. When transforming a QNode, this argument is set by the device's ``shots`` value or at QNode call time (if provided). Required when transforming a tape. device_wires (Wires): Wires of the device that the cut circuits are to be run on. When transforming a QNode, this argument is optional and will be set to the QNode's device wires. Required when transforming a tape. 
kwargs: Additional keyword arguments to be passed to a callable ``auto_cutter`` argument. For the default KaHyPar cutter, please refer to the docstring of functions :func:`~.find_and_place_cuts` and :func:`~.kahypar_cut` for the available arguments. Returns: Callable: Function which accepts the same arguments as the QNode. When called, this function will sample from the partitioned circuit fragments and combine the results using a Monte Carlo method. **Example** The following :math:`3`-qubit circuit contains a :class:`~.WireCut` operation and a :func:`~.sample` measurement. When decorated with ``@qml.cut_circuit_mc``, we can cut the circuit into two :math:`2`-qubit fragments: .. code-block:: python dev = qml.device("default.qubit", wires=2, shots=1000) @qml.cut_circuit_mc @qml.qnode(dev) def circuit(x): qml.RX(0.89, wires=0) qml.RY(0.5, wires=1) qml.RX(1.3, wires=2) qml.CNOT(wires=[0, 1]) qml.WireCut(wires=1) qml.CNOT(wires=[1, 2]) qml.RX(x, wires=0) qml.RY(0.7, wires=1) qml.RX(2.3, wires=2) return qml.sample(wires=[0, 2]) we can then execute the circuit as usual by calling the QNode: >>> x = 0.3 >>> circuit(x) tensor([[1, 1], [0, 1], [0, 1], ..., [0, 1], [0, 1], [0, 1]], requires_grad=True) Furthermore, the number of shots can be temporarily altered when calling the qnode: >>> results = circuit(x, shots=123) >>> results.shape (123, 2) Alternatively, if the optimal wire-cut placement is unknown for an arbitrary circuit, the ``auto_cutter`` option can be enabled to make attempts in finding such a optimal cut. The following examples shows this capability on the same circuit as above but with the :class:`~.WireCut` removed: .. code-block:: python @qml.cut_circuit_mc(auto_cutter=True) @qml.qnode(dev) def circuit(x): qml.RX(0.89, wires=0) qml.RY(0.5, wires=1) qml.RX(1.3, wires=2) qml.CNOT(wires=[0, 1]) qml.CNOT(wires=[1, 2]) qml.RX(x, wires=0) qml.RY(0.7, wires=1) qml.RX(2.3, wires=2) return qml.sample(wires=[0, 2]) >>> results = circuit(x, shots=123) >>> results.shape (123, 2) .. UsageDetails:: Manually placing :class:`~.WireCut` operations and decorating the QNode with the ``cut_circuit_mc()`` batch transform is the suggested entrypoint into sampling-based circuit cutting using the Monte Carlo method. However, advanced users also have the option to work directly with a :class:`~.QuantumTape` and manipulate the tape to perform circuit cutting using the below functionality: .. autosummary:: :toctree: ~transforms.qcut.tape_to_graph ~transforms.qcut.find_and_place_cuts ~transforms.qcut.replace_wire_cut_nodes ~transforms.qcut.fragment_graph ~transforms.qcut.graph_to_tape ~transforms.qcut.remap_tape_wires ~transforms.qcut.expand_fragment_tapes_mc ~transforms.qcut.qcut_processing_fn_sample ~transforms.qcut.qcut_processing_fn_mc The following shows how these elementary steps are combined as part of the ``cut_circuit_mc()`` transform. Consider the circuit below: .. code-block:: python np.random.seed(42) with qml.tape.QuantumTape() as tape: qml.Hadamard(wires=0) qml.CNOT(wires=[0, 1]) qml.PauliX(wires=1) qml.WireCut(wires=1) qml.CNOT(wires=[1, 2]) qml.sample(wires=[0, 1, 2]) >>> print(tape.draw()) 0: ──H─╭C───────────┤ ╭Sample 1: ────╰X──X──//─╭C─┤ ├Sample 2: ──────────────╰X─┤ ╰Sample To cut the circuit, we first convert it to its graph representation: >>> graph = qml.transforms.qcut.tape_to_graph(tape) If, however, the optimal location of the :class:`~.WireCut` is unknown, we can use :func:`~.find_and_place_cuts` to make attempts in automatically finding such a cut given the device constraints. 
Using the same circuit as above but with the :class:`~.WireCut` removed, a slightly different cut with identical cost can be discovered and placed into the circuit with automatic cutting: .. code-block:: python with qml.tape.QuantumTape() as uncut_tape: qml.Hadamard(wires=0) qml.CNOT(wires=[0, 1]) qml.PauliX(wires=1) qml.CNOT(wires=[1, 2]) qml.sample(wires=[0, 1, 2]) >>> cut_graph = qml.transforms.qcut.find_and_place_cuts( ... graph=qml.transforms.qcut.tape_to_graph(uncut_tape), ... cut_strategy=qml.transforms.qcut.CutStrategy(max_free_wires=2), ... ) >>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw()) 0: ──H─╭C───────────┤ Sample[|1⟩⟨1|] 1: ────╰X──//──X─╭C─┤ Sample[|1⟩⟨1|] 2: ──────────────╰X─┤ Sample[|1⟩⟨1|] Our next step, using the original manual cut placement, is to remove the :class:`~.WireCut` nodes in the graph and replace with :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. >>> qml.transforms.qcut.replace_wire_cut_nodes(graph) The :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs are placeholder operations that allow us to cut the circuit graph and then randomly select measurement and preparation configurations at cut locations. First, the :func:`~.fragment_graph` function pulls apart the graph into disconnected components as well as returning the `communication_graph <https://en.wikipedia.org/wiki/Quotient_graph>`__ detailing the connectivity between the components. >>> fragments, communication_graph = qml.transforms.qcut.fragment_graph(graph) We now convert the ``fragments`` back to :class:`~.QuantumTape` objects >>> fragment_tapes = [qml.transforms.qcut.graph_to_tape(f) for f in fragments] The circuit fragments can now be visualized: >>> print(fragment_tapes[0].draw()) 0: ──H─╭C─────────────────┤ Sample[|1⟩⟨1|] 1: ────╰X──X──MeasureNode─┤ >>> print(fragment_tapes[1].draw()) 1: ──PrepareNode─╭C─┤ Sample[|1⟩⟨1|] 2: ──────────────╰X─┤ Sample[|1⟩⟨1|] Additionally, we must remap the tape wires to match those available on our device. >>> dev = qml.device("default.qubit", wires=2, shots=1) >>> fragment_tapes = [ ... qml.transforms.qcut.remap_tape_wires(t, dev.wires) for t in fragment_tapes ... ] Note that the number of shots on the device is set to :math:`1` here since we will only require one execution per fragment configuration. In the following steps we introduce a shots value that will determine the number of fragment configurations. When using the ``cut_circuit_mc()`` decorator with a QNode, this shots value is automatically inferred from the provided device. Next, each circuit fragment is randomly expanded over :class:`~.MeasureNode` and :class:`~.PrepareNode` configurations. For each pair, a measurement is sampled from the Pauli basis and a state preparation is sampled from the corresponding pair of eigenstates. A settings array is also given which tracks the configuration pairs. Since each of the 4 measurements has 2 possible eigenvectors, all configurations can be uniquely identified by 8 values. The number of rows is determined by the number of cuts and the number of columns is determined by the number of shots. >>> shots = 3 >>> configurations, settings = qml.transforms.qcut.expand_fragment_tapes_mc( ... fragment_tapes, communication_graph, shots=shots ... ) >>> tapes = tuple(tape for c in configurations for tape in c) >>> settings tensor([[6, 3, 4]], requires_grad=True) Each configuration is drawn below: >>> for t in tapes: ... print(t.draw()) ... print("") .. 
code-block:: 0: ──H─╭C────┤ Sample[|1⟩⟨1|] 1: ────╰X──X─┤ Sample[Z] 0: ──H─╭C────┤ Sample[|1⟩⟨1|] 1: ────╰X──X─┤ Sample[X] 0: ──H─╭C────┤ Sample[|1⟩⟨1|] 1: ────╰X──X─┤ Sample[Y] 0: ──I─╭C─┤ Sample[|1⟩⟨1|] 1: ────╰X─┤ Sample[|1⟩⟨1|] 0: ──X──S─╭C─┤ Sample[|1⟩⟨1|] 1: ───────╰X─┤ Sample[|1⟩⟨1|] 0: ──H─╭C─┤ Sample[|1⟩⟨1|] 1: ────╰X─┤ Sample[|1⟩⟨1|] The last step is to execute the tapes and postprocess the results using :func:`~.qcut_processing_fn_sample`, which processes the results to approximate the original full circuit output bitstrings. >>> results = qml.execute(tapes, dev, gradient_fn=None) >>> qml.transforms.qcut.qcut_processing_fn_sample( ... results, ... communication_graph, ... shots=shots, ... ) [array([[0., 0., 0.], [1., 0., 0.], [1., 0., 0.]])] Alternatively, it is possible to calculate an expectation value if a classical processing function is provided that will accept the reconstructed circuit bitstrings and return a value in the interval :math:`[-1, 1]`: .. code-block:: def fn(x): if x[0] == 0: return 1 if x[0] == 1: return -1 >>> qml.transforms.qcut.qcut_processing_fn_mc( ... results, ... communication_graph, ... settings, ... shots, ... fn ... ) array(-4.) Using the Monte Carlo approach of [Peng et. al](https://arxiv.org/abs/1904.00102), the `cut_circuit_mc` transform also supports returning sample-based expectation values of observables that are diagonal in the computational basis, as shown below for a `ZZ` measurement on wires `0` and `2`: .. code-block:: dev = qml.device("default.qubit", wires=2, shots=10000) def observable(bitstring): return (-1) ** np.sum(bitstring) @qml.cut_circuit_mc(classical_processing_fn=observable) @qml.qnode(dev) def circuit(x): qml.RX(0.89, wires=0) qml.RY(0.5, wires=1) qml.RX(1.3, wires=2) qml.CNOT(wires=[0, 1]) qml.WireCut(wires=1) qml.CNOT(wires=[1, 2]) qml.RX(x, wires=0) qml.RY(0.7, wires=1) qml.RX(2.3, wires=2) return qml.sample(wires=[0, 2]) We can now approximate the expectation value of the observable using >>> circuit(x) tensor(-0.776, requires_grad=True) """ # pylint: disable=unused-argument, too-many-arguments if len(tape.measurements) != 1: raise ValueError( "The Monte Carlo circuit cutting workflow only supports circuits " "with a single output measurement" ) if not all(m.return_type is Sample for m in tape.measurements): raise ValueError( "The Monte Carlo circuit cutting workflow only supports circuits " "with sampling-based measurements" ) for meas in tape.measurements: if meas.obs is not None: raise ValueError( "The Monte Carlo circuit cutting workflow only " "supports measurements in the computational basis. Please only specify " "wires to be sampled within qml.sample(), do not pass observables." 
) g = tape_to_graph(tape) if auto_cutter is True or callable(auto_cutter): cut_strategy = kwargs.pop("cut_strategy", None) or CutStrategy( max_free_wires=len(device_wires) ) g = find_and_place_cuts( graph=g, cut_method=auto_cutter if callable(auto_cutter) else kahypar_cut, cut_strategy=cut_strategy, **kwargs, ) replace_wire_cut_nodes(g) fragments, communication_graph = fragment_graph(g) fragment_tapes = [graph_to_tape(f) for f in fragments] fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes] configurations, settings = expand_fragment_tapes_mc( fragment_tapes, communication_graph, shots=shots ) tapes = tuple(tape for c in configurations for tape in c) if classical_processing_fn: return tapes, partial( qcut_processing_fn_mc, communication_graph=communication_graph, settings=settings, shots=shots, classical_processing_fn=classical_processing_fn, ) return tapes, partial( qcut_processing_fn_sample, communication_graph=communication_graph, shots=shots ) @cut_circuit_mc.custom_qnode_wrapper def qnode_execution_wrapper_mc(self, qnode, targs, tkwargs): """Here, we overwrite the QNode execution wrapper in order to replace execution variables""" transform_max_diff = tkwargs.pop("max_diff", None) tkwargs.setdefault("device_wires", qnode.device.wires) if "shots" in inspect.signature(qnode.func).parameters: raise ValueError( "Detected 'shots' as an argument of the quantum function to transform. " "The 'shots' argument name is reserved for overriding the number of shots " "taken by the device." ) def _wrapper(*args, **kwargs): if tkwargs.get("shots", False): raise ValueError( "Cannot provide a 'shots' value directly to the cut_circuit_mc " "decorator when transforming a QNode. Please provide the number of shots in " "the device or when calling the QNode." ) shots = kwargs.pop("shots", False) shots = shots or qnode.device.shots if shots is None: raise ValueError( "A shots value must be provided in the device " "or when calling the QNode to be cut" ) qnode.construct(args, kwargs) tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs, shots=shots) interface = qnode.interface execute_kwargs = getattr(qnode, "execute_kwargs", {}).copy() max_diff = execute_kwargs.pop("max_diff", 2) max_diff = transform_max_diff or max_diff gradient_fn = getattr(qnode, "gradient_fn", qnode.diff_method) gradient_kwargs = getattr(qnode, "gradient_kwargs", {}) if interface is None or not self.differentiable: gradient_fn = None execute_kwargs["cache"] = False res = qml.execute( tapes, device=qnode.device, gradient_fn=gradient_fn, interface=interface, max_diff=max_diff, override_shots=1, gradient_kwargs=gradient_kwargs, **execute_kwargs, ) out = processing_fn(res) if isinstance(out, list) and len(out) == 1: return out[0] return out return _wrapper def _get_symbol(i): """Finds the i-th ASCII symbol. Works for lowercase and uppercase letters, allowing i up to 51.""" if i >= len(string.ascii_letters): raise ValueError( "Set the use_opt_einsum argument to True when applying more than " f"{len(string.ascii_letters)} wire cuts to a circuit" ) return string.ascii_letters[i] # pylint: disable=too-many-branches def contract_tensors( tensors: Sequence, communication_graph: MultiDiGraph, prepare_nodes: Sequence[Sequence[PrepareNode]], measure_nodes: Sequence[Sequence[MeasureNode]], use_opt_einsum: bool = False, ): r"""Contract tensors according to the edges specified in the communication graph. .. note:: This function is designed for use as part of the circuit cutting workflow. 
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Consider the three tensors :math:`T^{(1)}`, :math:`T^{(2)}`, and :math:`T^{(3)}`, along with their contraction equation .. math:: \sum_{ijklmn} T^{(1)}_{ij,km} T^{(2)}_{kl,in} T^{(3)}_{mn,jl} Each tensor is the result of the tomography of a circuit fragment and has some indices corresponding to state preparations (marked by the indices before the comma) and some indices corresponding to measurements (marked by the indices after the comma). An equivalent representation of the contraction equation is to use a directed multigraph known as the communication/quotient graph. In the communication graph, each tensor is assigned a node and edges are added between nodes to mark a contraction along an index. The communication graph resulting from the above contraction equation is a complete directed graph. In the communication graph provided by :func:`fragment_graph`, edges are composed of :class:`PrepareNode` and :class:`MeasureNode` pairs. To correctly map back to the contraction equation, we must keep track of the order of preparation and measurement indices in each tensor. This order is specified in the ``prepare_nodes`` and ``measure_nodes`` arguments. Args: tensors (Sequence): the tensors to be contracted communication_graph (nx.MultiDiGraph): the communication graph determining connectivity between the tensors prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of preparation indices in each tensor measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of measurement indices in each tensor use_opt_einsum (bool): Determines whether to use the `opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful for faster tensor contractions of large networks but must be installed separately using, e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a differentiable contraction. Returns: float or tensor_like: the result of contracting the tensor network **Example** We first set up the tensors and their corresponding :class:`~.PrepareNode` and :class:`~.MeasureNode` orderings: .. code-block:: python from pennylane.transforms import qcut import networkx as nx import numpy as np tensors = [np.arange(4), np.arange(4, 8)] prep = [[], [qcut.PrepareNode(wires=0)]] meas = [[qcut.MeasureNode(wires=0)], []] The communication graph describing edges in the tensor network must also be constructed: .. code-block:: python graph = nx.MultiDiGraph([(0, 1, {"pair": (meas[0][0], prep[1][0])})]) The network can then be contracted using: >>> qml.transforms.qcut.contract_tensors(tensors, graph, prep, meas) 38 """ # pylint: disable=import-outside-toplevel if use_opt_einsum: try: from opt_einsum import contract, get_symbol except ImportError as e: raise ImportError( "The opt_einsum package is required when use_opt_einsum is set to " "True in the contract_tensors function. 
This package can be " "installed using:\npip install opt_einsum" ) from e else: contract = qml.math.einsum get_symbol = _get_symbol ctr = 0 tensor_indxs = [""] * len(communication_graph.nodes) meas_map = {} for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)): predecessors = communication_graph.pred[node] for p in prep: for _, pred_edges in predecessors.items(): for pred_edge in pred_edges.values(): meas_op, prep_op = pred_edge["pair"] if p.id is prep_op.id: symb = get_symbol(ctr) ctr += 1 tensor_indxs[i] += symb meas_map[meas_op] = symb for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)): successors = communication_graph.succ[node] for m in meas: for _, succ_edges in successors.items(): for succ_edge in succ_edges.values(): meas_op, _ = succ_edge["pair"] if m.id is meas_op.id: symb = meas_map[meas_op] tensor_indxs[i] += symb eqn = ",".join(tensor_indxs) kwargs = {} if use_opt_einsum else {"like": tensors[0]} return contract(eqn, *tensors, **kwargs) CHANGE_OF_BASIS = qml.math.array( [[1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 2.0, 0.0], [-1.0, -1.0, 0.0, 2.0], [1.0, -1.0, 0.0, 0.0]] ) def _process_tensor(results, n_prep: int, n_meas: int): """Convert a flat slice of an individual circuit fragment's execution results into a tensor. This function performs the following steps: 1. Reshapes ``results`` into the intermediate shape ``(4,) * n_prep + (4**n_meas,)`` 2. Shuffles the final axis to follow the standard product over measurement settings. E.g., for ``n_meas = 2`` the standard product is: II, IX, IY, IZ, XI, ..., ZY, ZZ while the input order will be the result of ``qml.grouping.partition_pauli_group(2)``, i.e., II, IZ, ZI, ZZ, ..., YY. 3. Reshapes into the final target shape ``(4,) * (n_prep + n_meas)`` 4. Performs a change of basis for the preparation indices (the first ``n_prep`` indices) from the |0>, |1>, |+>, |+i> basis to the I, X, Y, Z basis using ``CHANGE_OF_BASIS``. Args: results (tensor_like): the input execution results n_prep (int): the number of preparation nodes in the corresponding circuit fragment n_meas (int): the number of measurement nodes in the corresponding circuit fragment Returns: tensor_like: the corresponding fragment tensor """ n = n_prep + n_meas dim_meas = 4**n_meas # Step 1 intermediate_shape = (4,) * n_prep + (dim_meas,) intermediate_tensor = qml.math.reshape(results, intermediate_shape) # Step 2 grouped = qml.grouping.partition_pauli_group(n_meas) grouped_flat = [term for group in grouped for term in group] order = qml.math.argsort(grouped_flat) if qml.math.get_interface(intermediate_tensor) == "tensorflow": # TensorFlow does not support slicing intermediate_tensor = qml.math.gather(intermediate_tensor, order, axis=-1) else: sl = [slice(None)] * n_prep + [order] intermediate_tensor = intermediate_tensor[tuple(sl)] # Step 3 final_shape = (4,) * n final_tensor = qml.math.reshape(intermediate_tensor, final_shape) # Step 4 change_of_basis = qml.math.convert_like(CHANGE_OF_BASIS, intermediate_tensor) for i in range(n_prep): axes = [[1], [i]] final_tensor = qml.math.tensordot(change_of_basis, final_tensor, axes=axes) axes = list(reversed(range(n_prep))) + list(range(n_prep, n)) # Use transpose to reorder indices. We must do this because tensordot returns a tensor whose # indices are ordered according to the uncontracted indices of the first tensor, followed # by the uncontracted indices of the second tensor. For example, calculating C_kj T_ij returns # a tensor T'_ki rather than T'_ik. 
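# (Added illustration; the NumPy arrays below are stand-ins, not part of the original
# implementation.) tensordot always places the uncontracted axes of its *first*
# argument in front of the result:
#
#     >>> import numpy as np
#     >>> A = np.ones((2, 3)); B = np.ones((5, 3, 7))
#     >>> np.tensordot(A, B, axes=[[1], [1]]).shape
#     (2, 5, 7)
#
# Looping the change-of-basis contraction over the n_prep preparation axes therefore
# leaves those axes reversed at the front, which the transpose below undoes.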
final_tensor = qml.math.transpose(final_tensor, axes=axes) final_tensor *= qml.math.power(2, -(n_meas + n_prep) / 2) return final_tensor def _to_tensors( results, prepare_nodes: Sequence[Sequence[PrepareNode]], measure_nodes: Sequence[Sequence[MeasureNode]], ) -> List: """Process a flat list of execution results from all circuit fragments into the corresponding tensors. This function slices ``results`` according to the expected size of fragment tensors derived from the ``prepare_nodes`` and ``measure_nodes`` and then passes onto ``_process_tensor`` for further transformation. Args: results (tensor_like): A collection of execution results, provided as a flat tensor, corresponding to the expansion of circuit fragments in the communication graph over measurement and preparation node configurations. These results are processed into tensors by this function. prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence whose length is equal to the number of circuit fragments, with each element used here to determine the number of preparation nodes in a given fragment measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence whose length is equal to the number of circuit fragments, with each element used here to determine the number of measurement nodes in a given fragment Returns: List[tensor_like]: the tensors for each circuit fragment in the communication graph """ ctr = 0 tensors = [] for p, m in zip(prepare_nodes, measure_nodes): n_prep = len(p) n_meas = len(m) n = n_prep + n_meas dim = 4**n results_slice = results[ctr : dim + ctr] tensors.append(_process_tensor(results_slice, n_prep, n_meas)) ctr += dim if results.shape[0] != ctr: raise ValueError(f"The results argument should be a flat list of length {ctr}") return tensors def qcut_processing_fn( results: Sequence[Sequence], communication_graph: MultiDiGraph, prepare_nodes: Sequence[Sequence[PrepareNode]], measure_nodes: Sequence[Sequence[MeasureNode]], use_opt_einsum: bool = False, ): """Processing function for the :func:`cut_circuit() <pennylane.cut_circuit>` transform. .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: results (Sequence[Sequence]): A collection of execution results generated from the expansion of circuit fragments over measurement and preparation node configurations. These results are processed into tensors and then contracted. communication_graph (nx.MultiDiGraph): the communication graph determining connectivity between circuit fragments prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of preparation indices in each tensor measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of measurement indices in each tensor use_opt_einsum (bool): Determines whether to use the `opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful for faster tensor contractions of large networks but must be installed separately using, e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a differentiable contraction. 
Returns: float or tensor_like: the output of the original uncut circuit arising from contracting the tensor network of circuit fragments """ flat_results = qml.math.concatenate(results) tensors = _to_tensors(flat_results, prepare_nodes, measure_nodes) result = contract_tensors( tensors, communication_graph, prepare_nodes, measure_nodes, use_opt_einsum ) return result @batch_transform def cut_circuit( tape: QuantumTape, auto_cutter: Union[bool, Callable] = False, use_opt_einsum: bool = False, device_wires: Optional[Wires] = None, max_depth: int = 1, **kwargs, ) -> Tuple[Tuple[QuantumTape], Callable]: """ Cut up a quantum circuit into smaller circuit fragments. Following the approach outlined in Theorem 2 of `Peng et al. <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.125.150504>`__, strategic placement of :class:`~.WireCut` operations can allow a quantum circuit to be split into disconnected circuit fragments. Each circuit fragment is then executed multiple times by varying the state preparations and measurements at incoming and outgoing cut locations, respectively, resulting in a process tensor describing the action of the fragment. The process tensors are then contracted to provide the result of the original uncut circuit. .. note:: Only circuits that return a single expectation value are supported. Args: tape (QuantumTape): the tape of the full circuit to be cut auto_cutter (Union[bool, Callable]): Toggle for enabling automatic cutting with the default :func:`~.kahypar_cut` partition method. Can also pass a graph partitioning function that takes an input graph and returns a list of edges to be cut based on a given set of constraints and objective. The default :func:`~.kahypar_cut` function requires KaHyPar to be installed using ``pip install kahypar`` for Linux and Mac users or visiting the instructions `here <https://kahypar.org>`__ to compile from source for Windows users. use_opt_einsum (bool): Determines whether to use the `opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful for faster tensor contractions of large networks but must be installed separately using, e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a differentiable contraction. device_wires (Wires): Wires of the device that the cut circuits are to be run on. When transforming a QNode, this argument is optional and will be set to the QNode's device wires. Required when transforming a tape. max_depth (int): The maximum depth used to expand the circuit while searching for wire cuts. Only applicable when transforming a QNode. kwargs: Additional keyword arguments to be passed to a callable ``auto_cutter`` argument. For the default KaHyPar cutter, please refer to the docstring of functions :func:`~.find_and_place_cuts` and :func:`~.kahypar_cut` for the available arguments. Returns: Callable: Function which accepts the same arguments as the QNode. When called, this function will perform a process tomography of the partitioned circuit fragments and combine the results via tensor contractions. **Example** The following :math:`3`-qubit circuit contains a :class:`~.WireCut` operation. When decorated with ``@qml.cut_circuit``, we can cut the circuit into two :math:`2`-qubit fragments: .. 
code-block:: python dev = qml.device("default.qubit", wires=2) @qml.cut_circuit @qml.qnode(dev) def circuit(x): qml.RX(x, wires=0) qml.RY(0.9, wires=1) qml.RX(0.3, wires=2) qml.CZ(wires=[0, 1]) qml.RY(-0.4, wires=0) qml.WireCut(wires=1) qml.CZ(wires=[1, 2]) return qml.expval(qml.grouping.string_to_pauli_word("ZZZ")) Executing ``circuit`` will run multiple configurations of the :math:`2`-qubit fragments which are then postprocessed to give the result of the original circuit: >>> x = np.array(0.531, requires_grad=True) >>> circuit(x) 0.47165198882111165 Futhermore, the output of the cut circuit is also differentiable: >>> qml.grad(circuit)(x) -0.276982865449393 Alternatively, if the optimal wire-cut placement is unknown for an arbitrary circuit, the ``auto_cutter`` option can be enabled to make attempts in finding such an optimal cut. The following examples shows this capability on the same circuit as above but with the :class:`~.WireCut` removed: .. code-block:: python @qml.cut_circuit(auto_cutter=True) @qml.qnode(dev) def circuit(x): qml.RX(x, wires=0) qml.RY(0.9, wires=1) qml.RX(0.3, wires=2) qml.CZ(wires=[0, 1]) qml.RY(-0.4, wires=0) qml.CZ(wires=[1, 2]) return qml.expval(qml.grouping.string_to_pauli_word("ZZZ")) >>> x = np.array(0.531, requires_grad=True) >>> circuit(x) 0.47165198882111165 >>> qml.grad(circuit)(x) -0.276982865449393 .. UsageDetails:: Manually placing :class:`~.WireCut` operations and decorating the QNode with the ``cut_circuit()`` batch transform is the suggested entrypoint into circuit cutting. However, advanced users also have the option to work directly with a :class:`~.QuantumTape` and manipulate the tape to perform circuit cutting using the below functionality: .. autosummary:: :toctree: ~transforms.qcut.tape_to_graph ~transforms.qcut.find_and_place_cuts ~transforms.qcut.replace_wire_cut_nodes ~transforms.qcut.fragment_graph ~transforms.qcut.graph_to_tape ~transforms.qcut.remap_tape_wires ~transforms.qcut.expand_fragment_tape ~transforms.qcut.qcut_processing_fn ~transforms.qcut.CutStrategy The following shows how these elementary steps are combined as part of the ``cut_circuit()`` transform. Consider the circuit below: .. code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.531, wires=0) qml.RY(0.9, wires=1) qml.RX(0.3, wires=2) qml.CZ(wires=[0, 1]) qml.RY(-0.4, wires=0) qml.WireCut(wires=1) qml.CZ(wires=[1, 2]) qml.expval(qml.grouping.string_to_pauli_word("ZZZ")) >>> print(tape.draw()) 0: ──RX(0.531)──╭C──RY(-0.4)──────╭┤ ⟨Z ⊗ Z ⊗ Z⟩ 1: ──RY(0.9)────╰Z──//────────╭C──├┤ ⟨Z ⊗ Z ⊗ Z⟩ 2: ──RX(0.3)──────────────────╰Z──╰┤ ⟨Z ⊗ Z ⊗ Z⟩ To cut the circuit, we first convert it to its graph representation: >>> graph = qml.transforms.qcut.tape_to_graph(tape) .. figure:: ../../_static/qcut_graph.svg :align: center :width: 60% :target: javascript:void(0); If, however, the optimal location of the :class:`~.WireCut` is unknown, we can use :func:`~.find_and_place_cuts` to make attempts in automatically finding such a cut given the device constraints. Using the same circuit as above but with the :class:`~.WireCut` removed, the same (optimal) cut can be recovered with automatic cutting: .. 
code-block:: python with qml.tape.QuantumTape() as uncut_tape: qml.RX(0.531, wires=0) qml.RY(0.9, wires=1) qml.RX(0.3, wires=2) qml.CZ(wires=[0, 1]) qml.RY(-0.4, wires=0) qml.CZ(wires=[1, 2]) qml.expval(qml.grouping.string_to_pauli_word("ZZZ")) >>> cut_graph = qml.transforms.qcut.find_and_place_cuts( graph = qml.transforms.qcut.tape_to_graph(uncut_tape), cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2), ) >>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw()) 0: ──RX─╭C──RY────┤ ╭<Z@Z@Z> 1: ──RY─╰Z──//─╭C─┤ ├<Z@Z@Z> 2: ──RX────────╰Z─┤ ╰<Z@Z@Z> Our next step is to remove the :class:`~.WireCut` nodes in the graph and replace with :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. >>> qml.transforms.qcut.replace_wire_cut_nodes(graph) The :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs are placeholder operations that allow us to cut the circuit graph and then iterate over measurement and preparation configurations at cut locations. First, the :func:`~.fragment_graph` function pulls apart the graph into disconnected components as well as returning the `communication_graph <https://en.wikipedia.org/wiki/Quotient_graph>`__ detailing the connectivity between the components. >>> fragments, communication_graph = qml.transforms.qcut.fragment_graph(graph) We now convert the ``fragments`` back to :class:`~.QuantumTape` objects >>> fragment_tapes = [qml.transforms.qcut.graph_to_tape(f) for f in fragments] The circuit fragments can now be visualized: >>> print(fragment_tapes[0].draw()) 0: ──RX(0.531)──╭C──RY(-0.4)─────┤ ⟨Z⟩ 1: ──RY(0.9)────╰Z──MeasureNode──┤ >>> print(fragment_tapes[1].draw()) 2: ──RX(0.3)──────╭Z──╭┤ ⟨Z ⊗ Z⟩ 1: ──PrepareNode──╰C──╰┤ ⟨Z ⊗ Z⟩ Additionally, we must remap the tape wires to match those available on our device. >>> dev = qml.device("default.qubit", wires=2) >>> fragment_tapes = [ ... qml.transforms.qcut.remap_tape_wires(t, dev.wires) for t in fragment_tapes ... ] Next, each circuit fragment is expanded over :class:`~.MeasureNode` and :class:`~.PrepareNode` configurations and a flat list of tapes is created: .. code-block:: expanded = [qml.transforms.qcut.expand_fragment_tape(t) for t in fragment_tapes] configurations = [] prepare_nodes = [] measure_nodes = [] for tapes, p, m in expanded: configurations.append(tapes) prepare_nodes.append(p) measure_nodes.append(m) tapes = tuple(tape for c in configurations for tape in c) Each configuration is drawn below: >>> for t in tapes: ... print(t.draw()) .. code-block:: 0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ I⟩ ╭┤ ⟨Z ⊗ Z⟩ 1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ I⟩ ╰┤ ⟨Z ⊗ Z⟩ 0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ X⟩ 1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ X⟩ 0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ Y⟩ 1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ Y⟩ 0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩ 1: ──I────────╰C──╰┤ ⟨Z ⊗ Z⟩ 0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩ 1: ──X────────╰C──╰┤ ⟨Z ⊗ Z⟩ 0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩ 1: ──H────────╰C──╰┤ ⟨Z ⊗ Z⟩ 0: ──RX(0.3)─────╭Z──╭┤ ⟨Z ⊗ Z⟩ 1: ──H────────S──╰C──╰┤ ⟨Z ⊗ Z⟩ The last step is to execute the tapes and postprocess the results using :func:`~.qcut_processing_fn`, which processes the results to the original full circuit output via a tensor network contraction >>> results = qml.execute(tapes, dev, gradient_fn=None) >>> qml.transforms.qcut.qcut_processing_fn( ... results, ... communication_graph, ... prepare_nodes, ... measure_nodes, ... 
) 0.47165198882111165 """ # pylint: disable=unused-argument if len(tape.measurements) != 1: raise ValueError( "The circuit cutting workflow only supports circuits with a single output " "measurement" ) if not all(m.return_type is Expectation for m in tape.measurements): raise ValueError( "The circuit cutting workflow only supports circuits with expectation " "value measurements" ) if use_opt_einsum: try: import opt_einsum # pylint: disable=import-outside-toplevel,unused-import except ImportError as e: raise ImportError( "The opt_einsum package is required when use_opt_einsum is set to " "True in the cut_circuit function. This package can be " "installed using:\npip install opt_einsum" ) from e g = tape_to_graph(tape) if auto_cutter is True or callable(auto_cutter): cut_strategy = kwargs.pop("cut_strategy", None) or CutStrategy( max_free_wires=len(device_wires) ) g = find_and_place_cuts( graph=g, cut_method=auto_cutter if callable(auto_cutter) else kahypar_cut, cut_strategy=cut_strategy, **kwargs, ) replace_wire_cut_nodes(g) fragments, communication_graph = fragment_graph(g) fragment_tapes = [graph_to_tape(f) for f in fragments] fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes] expanded = [expand_fragment_tape(t) for t in fragment_tapes] configurations = [] prepare_nodes = [] measure_nodes = [] for tapes, p, m in expanded: configurations.append(tapes) prepare_nodes.append(p) measure_nodes.append(m) tapes = tuple(tape for c in configurations for tape in c) return tapes, partial( qcut_processing_fn, communication_graph=communication_graph, prepare_nodes=prepare_nodes, measure_nodes=measure_nodes, use_opt_einsum=use_opt_einsum, ) @cut_circuit.custom_qnode_wrapper def qnode_execution_wrapper(self, qnode, targs, tkwargs): """Here, we overwrite the QNode execution wrapper in order to access the device wires.""" # pylint: disable=function-redefined tkwargs.setdefault("device_wires", qnode.device.wires) return self.default_qnode_wrapper(qnode, targs, tkwargs) def _qcut_expand_fn( tape: QuantumTape, max_depth: int = 1, auto_cutter: Union[bool, Callable] = False, ): """Expansion function for circuit cutting. Expands operations until reaching a depth that includes :class:`~.WireCut` operations. """ for op in tape.operations: if isinstance(op, WireCut): return tape if max_depth > 0: return _qcut_expand_fn(tape.expand(), max_depth=max_depth - 1, auto_cutter=auto_cutter) if not (auto_cutter is True or callable(auto_cutter)): raise ValueError( "No WireCut operations found in the circuit. Consider increasing the max_depth value if" " operations or nested tapes contain WireCut operations." 
) return tape def _cut_circuit_expand( tape: QuantumTape, use_opt_einsum: bool = False, device_wires: Optional[Wires] = None, max_depth: int = 1, auto_cutter: Union[bool, Callable] = False, **kwargs, ): """Main entry point for expanding operations until reaching a depth that includes :class:`~.WireCut` operations.""" # pylint: disable=unused-argument return _qcut_expand_fn(tape, max_depth, auto_cutter) def _cut_circuit_mc_expand( tape: QuantumTape, classical_processing_fn: Optional[callable] = None, max_depth: int = 1, shots: Optional[int] = None, device_wires: Optional[Wires] = None, auto_cutter: Union[bool, Callable] = False, **kwargs, ): """Main entry point for expanding operations in sample-based tapes until reaching a depth that includes :class:`~.WireCut` operations.""" # pylint: disable=unused-argument, too-many-arguments return _qcut_expand_fn(tape, max_depth, auto_cutter) cut_circuit.expand_fn = _cut_circuit_expand cut_circuit_mc.expand_fn = _cut_circuit_mc_expand def remap_tape_wires(tape: QuantumTape, wires: Sequence) -> QuantumTape: """Map the wires of a tape to a new set of wires. Given an :math:`n`-wire ``tape``, this function returns a new :class:`~.QuantumTape` with operations and measurements acting on the first :math:`n` wires provided in the ``wires`` argument. The input ``tape`` is left unmodified. .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: tape (QuantumTape): the quantum tape whose wires should be remapped wires (Sequence): the new set of wires to map to Returns: QuantumTape: A remapped copy of the input tape Raises: ValueError: if the number of wires in ``tape`` exceeds ``len(wires)`` **Example** Consider the following circuit that operates on wires ``[2, 3]``: .. code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.5, wires=2) qml.RY(0.6, wires=3) qml.CNOT(wires=[2, 3]) qml.expval(qml.PauliZ(2) @ qml.PauliZ(3)) We can map from wires ``[2, 3]`` to ``[0, 1]`` using: >>> new_wires = [0, 1] >>> new_tape = qml.transforms.qcut.remap_tape_wires(tape, new_wires) >>> print(new_tape.draw()) 0: ──RX(0.5)──╭C──╭┤ ⟨Z ⊗ Z⟩ 1: ──RY(0.6)──╰X──╰┤ ⟨Z ⊗ Z⟩ """ if len(tape.wires) > len(wires): raise ValueError( f"Attempting to run a {len(tape.wires)}-wire circuit on a " f"{len(wires)}-wire device. Consider increasing the number of wires in " f"your device." ) wire_map = dict(zip(tape.wires, wires)) copy_ops = [copy.copy(op) for op in tape.operations] copy_meas = [copy.copy(op) for op in tape.measurements] with QuantumTape() as new_tape: for op in copy_ops: new_wires = Wires([wire_map[w] for w in op.wires]) op._wires = new_wires apply(op) for meas in copy_meas: obs = meas.obs if isinstance(obs, Tensor): for obs in obs.obs: new_wires = Wires([wire_map[w] for w in obs.wires]) obs._wires = new_wires else: new_wires = Wires([wire_map[w] for w in obs.wires]) obs._wires = new_wires apply(meas) return new_tape @dataclass() class CutStrategy: """ A circuit-cutting distribution policy for executing (large) circuits on available (comparably smaller) devices. .. note:: This class is part of a work-in-progress feature to support automatic cut placement in the circuit cutting workflow. Currently only manual placement of cuts is supported, check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details. Args: devices (Union[qml.Device, Sequence[qml.Device]]): Single, or Sequence of, device(s). 
Optional only when ``max_free_wires`` is provided. max_free_wires (int): Number of wires for the largest available device. Optional only when ``devices`` is provided where it defaults to the maximum number of wires among ``devices``. min_free_wires (int): Number of wires for the smallest available device, or, equivalently, the smallest max fragment-wire-size that the partitioning is allowed to explore. When provided, this parameter will be used to derive an upper-bound to the range of explored number of fragments. Optional, defaults to 2 which corresponds to attempting the most granular partitioning of max 2-wire fragments. num_fragments_probed (Union[int, Sequence[int]]): Single, or 2-Sequence of, number(s) specifying the potential (range of) number of fragments for the partitioner to attempt. Optional, defaults to probing all valid strategies derivable from the circuit and devices. When provided, has precedence over all other arguments affecting partitioning exploration, such as ``max_free_wires``, ``min_free_wires``, or ``exhaustive``. max_free_gates (int): Maximum allowed circuit depth for the deepest available device. Optional, defaults to unlimited depth. min_free_gates (int): Maximum allowed circuit depth for the shallowest available device. Optional, defaults to ``max_free_gates``. imbalance_tolerance (float): The global maximum allowed imbalance for all partition trials. Optional, defaults to unlimited imbalance. Used only if there's a known hard balancing constraint on the partitioning problem. trials_per_probe (int): Number of repeated partitioning trials for a random automatic cutting method to attempt per set of partitioning parameters. For a deterministic cutting method, this can be set to 1. Defaults to 4. **Example** The following cut strategy specifies that a circuit should be cut into between ``2`` to ``5`` fragments, with each fragment having at most ``6`` wires and at least ``4`` wires: >>> cut_strategy = qml.transforms.CutStrategy( ... max_free_wires=6, ... min_free_wires=4, ... num_fragments_probed=(2, 5), ... ) """ # pylint: disable=too-many-arguments, too-many-instance-attributes #: Initialization argument only, used to derive ``max_free_wires`` and ``min_free_wires``. devices: InitVar[Union[qml.Device, Sequence[qml.Device]]] = None #: Number of wires for the largest available device. max_free_wires: int = None #: Number of wires for the smallest available device. min_free_wires: int = None #: The potential (range of) number of fragments for the partitioner to attempt. num_fragments_probed: Union[int, Sequence[int]] = None #: Maximum allowed circuit depth for the deepest available device. max_free_gates: int = None #: Maximum allowed circuit depth for the shallowest available device. min_free_gates: int = None #: The global maximum allowed imbalance for all partition trials. imbalance_tolerance: float = None #: Number of trials to repeat for per set of partition parameters probed. trials_per_probe: int = 4 #: Class attribute, threshold for warning about too many fragments. HIGH_NUM_FRAGMENTS: ClassVar[int] = 20 #: Class attribute, threshold for warning about too many partition attempts. 
HIGH_PARTITION_ATTEMPTS: ClassVar[int] = 20 def __post_init__( self, devices, ): """Deriving cutting constraints from given devices and parameters.""" self.max_free_wires = self.max_free_wires if isinstance(self.num_fragments_probed, int): self.num_fragments_probed = [self.num_fragments_probed] if isinstance(self.num_fragments_probed, (list, tuple)): self.num_fragments_probed = sorted(self.num_fragments_probed) self.k_lower = self.num_fragments_probed[0] self.k_upper = self.num_fragments_probed[-1] if self.k_lower <= 0: raise ValueError("`num_fragments_probed` must be positive int(s)") else: self.k_lower, self.k_upper = None, None if devices is None and self.max_free_wires is None: raise ValueError("One of arguments `devices` and max_free_wires` must be provided.") if isinstance(devices, qml.Device): devices = (devices,) if devices is not None: if not isinstance(devices, SequenceType) or any( (not isinstance(d, qml.Device) for d in devices) ): raise ValueError( "Argument `devices` must be a list or tuple containing elements of type " "`qml.Device`" ) device_wire_sizes = [len(d.wires) for d in devices] self.max_free_wires = self.max_free_wires or max(device_wire_sizes) self.min_free_wires = self.min_free_wires or min(device_wire_sizes) if (self.imbalance_tolerance is not None) and not ( isinstance(self.imbalance_tolerance, (float, int)) and self.imbalance_tolerance >= 0 ): raise ValueError( "The overall `imbalance_tolerance` is expected to be a non-negative number, " f"got {type(self.imbalance_tolerance)} with value {self.imbalance_tolerance}." ) self.min_free_wires = self.min_free_wires or 1 def get_cut_kwargs( self, tape_dag: MultiDiGraph, max_wires_by_fragment: Sequence[int] = None, max_gates_by_fragment: Sequence[int] = None, exhaustive: bool = True, ) -> List[Dict[str, Any]]: """Derive the complete set of arguments, based on a given circuit, for passing to a graph partitioner. Args: tape_dag (nx.MultiDiGraph): Graph representing a tape, typically the output of :func:`tape_to_graph`. max_wires_by_fragment (Sequence[int]): User-predetermined list of wire limits by fragment. If supplied, the number of fragments will be derived from it and exploration of other choices will not be made. max_gates_by_fragment (Sequence[int]): User-predetermined list of gate limits by fragment. If supplied, the number of fragments will be derived from it and exploration of other choices will not be made. exhaustive (bool): Toggle for an exhaustive search which will attempt all potentially valid numbers of fragments into which the circuit is partitioned. If ``True``, for a circuit with N gates, N - 1 attempts will be made with ``num_fragments`` ranging from [2, N], i.e. from bi-partitioning to complete partitioning where each fragment has exactly a single gate. Defaults to ``True``. Returns: List[Dict[str, Any]]: A list of minimal kwargs being passed to a graph partitioner method. **Example** Deriving kwargs for a given circuit and feeding them to a custom partitioner, along with extra parameters specified using ``extra_kwargs``: >>> cut_strategy = qcut.CutStrategy(devices=dev) >>> cut_kwargs = cut_strategy.get_cut_kwargs(tape_dag) >>> cut_trials = [ ... my_partition_fn(tape_dag, **kwargs, **extra_kwargs) for kwargs in cut_kwargs ... 
] """ tape_wires = set(w for _, _, w in tape_dag.edges.data("wire")) num_tape_wires = len(tape_wires) num_tape_gates = sum(not isinstance(n, WireCut) for n in tape_dag.nodes) self._validate_input(max_wires_by_fragment, max_gates_by_fragment) probed_cuts = self._infer_probed_cuts( num_tape_wires=num_tape_wires, num_tape_gates=num_tape_gates, max_wires_by_fragment=max_wires_by_fragment, max_gates_by_fragment=max_gates_by_fragment, exhaustive=exhaustive, ) return probed_cuts @staticmethod def _infer_imbalance( k, num_wires, num_gates, free_wires, free_gates, imbalance_tolerance=None ) -> float: """Helper function for determining best imbalance limit.""" avg_fragment_wires = (num_wires - 1) // k + 1 avg_fragment_gates = (num_gates - 1) // k + 1 if free_wires < avg_fragment_wires: raise ValueError( "`free_wires` should be no less than the average number of wires per fragment. " f"Got {free_wires} >= {avg_fragment_wires} ." ) if free_gates < avg_fragment_gates: raise ValueError( "`free_gates` should be no less than the average number of gates per fragment. " f"Got {free_gates} >= {avg_fragment_gates} ." ) if free_gates > num_gates - k: # Case where gate depth not limited (`-k` since each fragments has to have >= 1 gates): free_gates = num_gates # A small adjustment is added to the imbalance factor to prevents small ks from resulting # in extremely unbalanced fragments. It will heuristically force the smallest fragment size # to be >= 3 if the average fragment size is greater than 5. In other words, tiny fragments # are only allowed when average fragmeng size is small in the first place. balancing_adjustment = 2 if avg_fragment_gates > 5 else 0 free_gates = free_gates - (k - 1 + balancing_adjustment) gate_imbalance = free_gates / avg_fragment_gates - 1 imbalance = max(gate_imbalance, 0.1 / avg_fragment_gates) # numerical stability if imbalance_tolerance is not None: imbalance = min(imbalance, imbalance_tolerance) return imbalance @staticmethod def _validate_input( max_wires_by_fragment, max_gates_by_fragment, ): """Helper parameter checker.""" if max_wires_by_fragment is not None: if not isinstance(max_wires_by_fragment, (list, tuple)): raise ValueError( "`max_wires_by_fragment` is expected to be a list or tuple, but got " f"{type(max_gates_by_fragment)}." ) if any(not (isinstance(i, int) and i > 0) for i in max_wires_by_fragment): raise ValueError( "`max_wires_by_fragment` is expected to contain positive integers only." ) if max_gates_by_fragment is not None: if not isinstance(max_gates_by_fragment, (list, tuple)): raise ValueError( "`max_gates_by_fragment` is expected to be a list or tuple, but got " f"{type(max_gates_by_fragment)}." ) if any(not (isinstance(i, int) and i > 0) for i in max_gates_by_fragment): raise ValueError( "`max_gates_by_fragment` is expected to contain positive integers only." ) if max_wires_by_fragment is not None and max_gates_by_fragment is not None: if len(max_wires_by_fragment) != len(max_gates_by_fragment): raise ValueError( "The lengths of `max_wires_by_fragment` and `max_gates_by_fragment` should be " f"equal, but got {len(max_wires_by_fragment)} and {len(max_gates_by_fragment)}." ) def _infer_probed_cuts( self, num_tape_wires, num_tape_gates, max_wires_by_fragment=None, max_gates_by_fragment=None, exhaustive=True, ) -> List[Dict[str, Any]]: """ Helper function for deriving the minimal set of best default partitioning constraints for the graph partitioner. Args: num_tape_wires (int): Number of wires in the circuit tape to be partitioned. 
num_tape_gates (int): Number of gates in the circuit tape to be partitioned. max_wires_by_fragment (Sequence[int]): User-predetermined list of wire limits by fragment. If supplied, the number of fragments will be derived from it and exploration of other choices will not be made. max_gates_by_fragment (Sequence[int]): User-predetermined list of gate limits by fragment. If supplied, the number of fragments will be derived from it and exploration of other choices will not be made. exhaustive (bool): Toggle for an exhaustive search which will attempt all potentially valid numbers of fragments into which the circuit is partitioned. If ``True``, ``num_tape_gates - 1`` attempts will be made with ``num_fragments`` ranging from [2, ``num_tape_gates``], i.e. from bi-partitioning to complete partitioning where each fragment has exactly a single gate. Defaults to ``True``. Returns: List[Dict[str, Any]]: A list of minimal set of kwargs being passed to a graph partitioner method. """ # Assumes unlimited width/depth if not supplied. max_free_wires = self.max_free_wires or num_tape_wires max_free_gates = self.max_free_gates or num_tape_gates # Assumes same number of wires/gates across all devices if min_free_* not provided. min_free_wires = self.min_free_wires or max_free_wires min_free_gates = self.min_free_gates or max_free_gates # The lower bound of k corresponds to executing each fragment on the largest available # device. k_lb = 1 + max( (num_tape_wires - 1) // max_free_wires, # wire limited (num_tape_gates - 1) // max_free_gates, # gate limited ) # The upper bound of k corresponds to executing each fragment on the smallest available # device. k_ub = 1 + max( (num_tape_wires - 1) // min_free_wires, # wire limited (num_tape_gates - 1) // min_free_gates, # gate limited ) if exhaustive: k_lb = max(2, k_lb) k_ub = num_tape_gates # The global imbalance tolerance, if not given, defaults to a very loose upper bound: imbalance_tolerance = k_ub if self.imbalance_tolerance is None else self.imbalance_tolerance probed_cuts = [] if max_gates_by_fragment is None and max_wires_by_fragment is None: # k_lower, when supplied by a user, can be higher than k_lb if the the desired k is known: k_lower = self.k_lower if self.k_lower is not None else k_lb # k_upper, when supplied by a user, can be higher than k_ub to encourage exploration: k_upper = self.k_upper if self.k_upper is not None else k_ub if k_lower < k_lb: warnings.warn( f"The provided `k_lower={k_lower}` is less than the lowest allowed value, " f"will override and set `k_lower={k_lb}`." ) k_lower = k_lb if k_lower > self.HIGH_NUM_FRAGMENTS: warnings.warn( f"The attempted number of fragments seems high with lower bound at {k_lower}." ) # Prepare the list of ks to explore: ks = list(range(k_lower, k_upper + 1)) if len(ks) > self.HIGH_PARTITION_ATTEMPTS: warnings.warn(f"The numer of partition attempts seems high ({len(ks)}).") else: # When the by-fragment wire and/or gate limits are supplied, derive k and imbalance and # return a single partition config. 
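# (Added note.) In this branch the per-fragment wire and/or gate limits fully determine
# the partitioning, so only a single candidate k -- the length of the supplied limit
# list -- is probed below.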
ks = [len(max_wires_by_fragment or max_gates_by_fragment)] for k in ks: imbalance = self._infer_imbalance( k, num_tape_wires, num_tape_gates, max_free_wires if max_wires_by_fragment is None else max(max_wires_by_fragment), max_free_gates if max_gates_by_fragment is None else max(max_gates_by_fragment), imbalance_tolerance, ) cut_kwargs = { "num_fragments": k, "imbalance": imbalance, } if max_wires_by_fragment is not None: cut_kwargs["max_wires_by_fragment"] = max_wires_by_fragment if max_gates_by_fragment is not None: cut_kwargs["max_gates_by_fragment"] = max_gates_by_fragment probed_cuts.append(cut_kwargs) return probed_cuts def _graph_to_hmetis( graph: MultiDiGraph, hyperwire_weight: int = 0, edge_weights: Sequence[int] = None, ) -> Tuple[List[int], List[int], List[Union[int, float]]]: """Converts a ``MultiDiGraph`` into the `hMETIS hypergraph input format <http://glaros.dtc.umn.edu/gkhome/fetch/sw/hmetis/manual.pdf>`__ conforming to KaHyPar's calling signature. Args: graph (MultiDiGraph): The original (tape-converted) graph to be cut. hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires. Defaults to 0 which leads to no such insertion. If greater than 0, hyperedges will be appended with the provided weight, to encourage the resulting fragments to cluster gates on the same wire together. edge_weights (Sequence[int]): Weights for regular edges in the graph. Defaults to ``None``, which leads to unit-weighted edges. Returns: Tuple[List,List,List]: The 3 lists representing an (optionally weighted) hypergraph: - Flattened list of adjacent node indices. - List of starting indices for edges in the above adjacent-nodes-list. - Optional list of edge weights. ``None`` if ``hyperwire_weight`` is equal to 0. """ nodes = list(graph.nodes) edges = graph.edges(data="wire") wires = {w for _, _, w in edges} adj_nodes = [nodes.index(v) for ops in graph.edges(keys=False) for v in ops] edge_splits = qml.math.cumsum([0] + [len(e) for e in graph.edges(keys=False)]).tolist() edge_weights = ( edge_weights if edge_weights is not None and len(edges) == len(edge_weights) else None ) if hyperwire_weight: hyperwires = {w: set() for w in wires} num_wires = len(hyperwires) for v0, v1, wire in edges: hyperwires[wire].update([nodes.index(v0), nodes.index(v1)]) for wire, nodes_on_wire in hyperwires.items(): nwv = len(nodes_on_wire) edge_splits.append(nwv + edge_splits[-1]) adj_nodes = adj_nodes + list(nodes_on_wire) assert len(edge_splits) == len(edges) + num_wires + 1 if isinstance(hyperwire_weight, (int, float)): # assumes original edges having unit weights by default: edge_weights = edge_weights or ([1] * len(edges)) wire_weights = [hyperwire_weight] * num_wires edge_weights = edge_weights + wire_weights return adj_nodes, edge_splits, edge_weights def kahypar_cut( graph: MultiDiGraph, num_fragments: int, imbalance: int = None, edge_weights: List[Union[int, float]] = None, node_weights: List[Union[int, float]] = None, fragment_weights: List[Union[int, float]] = None, hyperwire_weight: int = 1, seed: int = None, config_path: Union[str, Path] = None, trial: int = None, verbose: bool = False, ) -> List[Tuple[Operation, Operation, Any]]: """Calls `KaHyPar <https://kahypar.org/>`__ to partition a graph. .. warning:: Requires KaHyPar to be installed separately. For Linux and Mac users, KaHyPar can be installed using ``pip install kahypar``. Windows users can follow the instructions `here <https://kahypar.org>`__ to compile from source. 
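A lightweight availability probe (a minimal sketch, not part of this function's API;
how to fall back is left to the caller) can be used before requesting automatic cuts:

.. code-block:: python

    # Hedged sketch: check for the optional KaHyPar dependency up front and
    # fall back to manually placed WireCut operations if it is missing.
    import importlib.util

    HAS_KAHYPAR = importlib.util.find_spec("kahypar") is not None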
Args: graph (nx.MultiDiGraph): The graph to be partitioned. num_fragments (int): Desired number of fragments. imbalance (int): Imbalance factor of the partitioning. Defaults to KaHyPar's determination. edge_weights (List[Union[int, float]]): Weights for edges. Defaults to unit-weighted edges. node_weights (List[Union[int, float]]): Weights for nodes. Defaults to unit-weighted nodes. fragment_weights (List[Union[int, float]]): Maximum size constraints by fragment. Defaults to no such constraints, with ``imbalance`` the only parameter affecting fragment sizes. hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires. Setting it to 0 leads to no such insertion. If greater than 0, hyperedges will be appended with the provided weight, to encourage the resulting fragments to cluster gates on the same wire together. Defaults to 1. seed (int): KaHyPar's seed. Defaults to the seed in the config file which defaults to -1, i.e. unfixed seed. config_path (str): KaHyPar's ``.ini`` config file path. Defaults to its SEA20 paper config. trial (int): trial id for summary label creation. Defaults to ``None``. verbose (bool): Flag for printing KaHyPar's output summary. Defaults to ``False``. Returns: List[Union[int, Any]]: List of cut edges. **Example** Consider the following 2-wire circuit with one CNOT gate connecting the wires: .. code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.432, wires=0) qml.RY(0.543, wires="a") qml.CNOT(wires=[0, "a"]) qml.RZ(0.240, wires=0) qml.RZ(0.133, wires="a") qml.RX(0.432, wires=0) qml.RY(0.543, wires="a") qml.expval(qml.PauliZ(wires=[0])) We can let KaHyPar automatically find the optimal edges to place cuts: >>> graph = qml.transforms.qcut.tape_to_graph(tape) >>> cut_edges = qml.transforms.qcut.kahypar_cut( graph=graph, num_fragments=2, ) >>> cut_edges [(CNOT(wires=[0, 'a']), RZ(0.24, wires=[0]), 0)] """ # pylint: disable=too-many-arguments, import-outside-toplevel try: import kahypar except ImportError as e: raise ImportError( "KaHyPar must be installed to use this method for automatic " "cut placement. Try pip install kahypar or visit " "https://kahypar.org/ for installation instructions." 
) from e adjacent_nodes, edge_splits, edge_weights = _graph_to_hmetis( graph=graph, hyperwire_weight=hyperwire_weight, edge_weights=edge_weights ) trial = 0 if trial is None else trial ne = len(edge_splits) - 1 nv = max(adjacent_nodes) + 1 if edge_weights is not None or node_weights is not None: edge_weights = edge_weights or [1] * ne node_weights = node_weights or [1] * nv hypergraph = kahypar.Hypergraph( nv, ne, edge_splits, adjacent_nodes, num_fragments, edge_weights, node_weights, ) else: hypergraph = kahypar.Hypergraph(nv, ne, edge_splits, adjacent_nodes, num_fragments) context = kahypar.Context() config_path = config_path or str(Path(__file__).parent / "_cut_kKaHyPar_sea20.ini") context.loadINIconfiguration(config_path) context.setK(num_fragments) if isinstance(imbalance, float): context.setEpsilon(imbalance) if isinstance(fragment_weights, SequenceType) and (len(fragment_weights) == num_fragments): context.setCustomTargetBlockWeights(fragment_weights) if not verbose: context.suppressOutput(True) # KaHyPar fixes seed to 42 by default, need to manually sample seed to randomize: kahypar_seed = np.random.default_rng(seed).choice(2**15) context.setSeed(kahypar_seed) kahypar.partition(hypergraph, context) cut_edge_mask = [hypergraph.connectivity(e) > 1 for e in hypergraph.edges()] # compress() ignores the extra hyperwires at the end if there is any. cut_edges = list(compress(graph.edges, cut_edge_mask)) if verbose: fragment_sizes = [hypergraph.blockSize(p) for p in range(num_fragments)] print(len(fragment_sizes), fragment_sizes) return cut_edges def place_wire_cuts( graph: MultiDiGraph, cut_edges: Sequence[Tuple[Operation, Operation, Any]] ) -> MultiDiGraph: """Inserts a :class:`~.WireCut` node for each provided cut edge into a circuit graph. Args: graph (nx.MultiDiGraph): The original (tape-converted) graph to be cut. cut_edges (Sequence[Tuple[Operation, Operation, Any]]): List of ``MultiDiGraph`` edges to be replaced with a :class:`~.WireCut` node. Each 3-tuple represents the source node, the target node, and the wire key of the (multi)edge. Returns: MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted. **Example** Consider the following 2-wire circuit with one CNOT gate connecting the wires: .. 
code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.432, wires=0) qml.RY(0.543, wires="a") qml.CNOT(wires=[0, "a"]) qml.expval(qml.PauliZ(wires=[0])) >>> print(tape.draw()) 0: ──RX(0.432)──╭C──┤ ⟨Z⟩ a: ──RY(0.543)──╰X──┤ If we know we want to place a :class:`~.WireCut` node between nodes ``RY(0.543, wires=["a"])`` and ``CNOT(wires=[0, 'a'])`` after the tape is constructed, we can first find the edge in the graph: >>> graph = qml.transforms.qcut.tape_to_graph(tape) >>> op0, op1 = tape.operations[1], tape.operations[2] >>> cut_edges = [e for e in graph.edges if e[0] is op0 and e[1] is op1] >>> cut_edges [(RY(0.543, wires=['a']), CNOT(wires=[0, 'a']), 0)] Then feed it to this function for placement: >>> cut_graph = qml.transforms.qcut.place_wire_cuts(graph=graph, cut_edges=cut_edges) >>> cut_graph <networkx.classes.multidigraph.MultiDiGraph at 0x7f7251ac1220> And visualize the cut by converting back to a tape: >>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw()) 0: ──RX(0.432)──────╭C──┤ ⟨Z⟩ a: ──RY(0.543)──//──╰X──┤ """ cut_graph = graph.copy() for op0, op1, wire_key in cut_edges: # Get info: order = cut_graph.nodes[op0]["order"] + 1 wire = cut_graph.edges[(op0, op1, wire_key)]["wire"] # Apply cut: cut_graph.remove_edge(op0, op1, wire_key) # Increment order for all subsequent gates: for op, o in cut_graph.nodes(data="order"): if o >= order: cut_graph.nodes[op]["order"] += 1 # Add WireCut wire_cut = WireCut(wires=wire) cut_graph.add_node(wire_cut, order=order) cut_graph.add_edge(op0, wire_cut, wire=wire) cut_graph.add_edge(wire_cut, op1, wire=wire) return cut_graph def _remove_existing_cuts(graph: MultiDiGraph) -> MultiDiGraph: """Removes all existing, manually or automatically placed, cuts from a circuit graph, be it ``WireCut``s or ``MeasureNode``-``PrepareNode`` pairs. Args: graph (MultiDiGraph): The original (tape-converted) graph to be cut. Returns: (MultiDiGraph): Copy of the input graph with all its existing cuts removed. """ uncut_graph = graph.copy() for op in list(graph.nodes): if isinstance(op, WireCut): uncut_graph.remove_node(op) elif isinstance(op, MeasureNode): for op1 in graph.neighbors(op): if isinstance(op1, PrepareNode): uncut_graph.remove_node(op) uncut_graph.remove_node(op1) if len([n for n in uncut_graph.nodes if isinstance(n, (MeasureNode, PrepareNode))]) > 0: warnings.warn( "The circuit contains `MeasureNode` or `PrepareNode` operations that are " "not paired up correctly. Please check.", UserWarning, ) return uncut_graph def find_and_place_cuts( graph: MultiDiGraph, cut_method: Callable = kahypar_cut, cut_strategy: CutStrategy = None, replace_wire_cuts=False, local_measurement=False, **kwargs, ) -> MultiDiGraph: """Automatically finds and places optimal :class:`~.WireCut` nodes into a given tape-converted graph using a customizable graph partitioning function. Preserves existing placed cuts. Args: graph (MultiDiGraph): The original (tape-converted) graph to be cut. cut_method (Callable): A graph partitioning function that takes an input graph and returns a list of edges to be cut based on a given set of constraints and objective. Defaults to :func:`kahypar_cut` which requires KaHyPar to be installed using ``pip install kahypar`` for Linux and Mac users or visiting the instructions `here <https://kahypar.org>`__ to compile from source for Windows users. cut_strategy (CutStrategy): Strategy for optimizing cutting parameters based on device constraints. 
Defaults to ``None`` in which case ``kwargs`` must be fully specified for passing to the ``cut_method``. replace_wire_cuts (bool): Whether to replace :class:`~.WireCut` nodes with :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. Defaults to ``False``. local_measurement (bool): Whether to use the local-measurement circuit-cutting objective, i.e. the maximum node-degree of the communication graph, for cut evaluation. Defaults to ``False`` which assumes global measurement and uses the total number of cuts as the cutting objective. kwargs: Additional keyword arguments to be passed to the callable ``cut_method``. Returns: nx.MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted. **Example** Consider the following 4-wire circuit with a single CNOT gate connecting the top (wires ``[0, 1]``) and bottom (wires ``["a", "b"]``) halves of the circuit. Note there's a :class:`~.WireCut` manually placed into the circuit already. .. code-block:: python with qml.tape.QuantumTape() as tape: qml.RX(0.1, wires=0) qml.RY(0.2, wires=1) qml.RX(0.3, wires="a") qml.RY(0.4, wires="b") qml.CNOT(wires=[0, 1]) qml.WireCut(wires=1) qml.CNOT(wires=["a", "b"]) qml.CNOT(wires=[1, "a"]) qml.CNOT(wires=[0, 1]) qml.CNOT(wires=["a", "b"]) qml.RX(0.5, wires="a") qml.RY(0.6, wires="b") qml.expval(qml.PauliX(wires=[0]) @ qml.PauliY(wires=["a"]) @ qml.PauliZ(wires=["b"])) >>> print(tape.draw()) 0: ──RX(0.1)──╭C──────────╭C───────────╭┤ ⟨X ⊗ Y ⊗ Z⟩ 1: ──RY(0.2)──╰X──//──╭C──╰X───────────│┤ a: ──RX(0.3)──╭C──────╰X──╭C──RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩ b: ──RY(0.4)──╰X──────────╰X──RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩ Since the existing :class:`~.WireCut` doesn't sufficiently fragment the circuit, we can find the remaining cuts using the default KaHyPar partitioner: >>> graph = qml.transforms.qcut.tape_to_graph(tape) >>> cut_graph = qml.transforms.qcut.find_and_place_cuts( graph=graph, num_fragments=2, imbalance=0.5, ) Visualizing the newly-placed cut: >>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw()) 0: ──RX(0.1)──╭C───────────────╭C────────╭┤ ⟨X ⊗ Y ⊗ Z⟩ 1: ──RY(0.2)──╰X──//──╭C───//──╰X────────│┤ a: ──RX(0.3)──╭C──────╰X──╭C────RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩ b: ──RY(0.4)──╰X──────────╰X────RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩ We can then proceed with the usual process of replacing :class:`~.WireCut` nodes with pairs of :class:`~.MeasureNode` and :class:`~.PrepareNode`, and then break the graph into fragments. Or, alternatively, we can directly get such processed graph by passing ``replace_wire_cuts=True``: >>> cut_graph = qml.transforms.qcut.find_and_place_cuts( graph=graph, num_fragments=2, imbalance=0.5, replace_wire_cuts=True, ) >>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph) >>> for t in frags: ... print(qml.transforms.qcut.graph_to_tape(t).draw()) .. code-block:: 0: ──RX(0.1)──────╭C───────────────╭C──┤ ⟨X⟩ 1: ──RY(0.2)──────╰X──MeasureNode──│───┤ 2: ──PrepareNode───────────────────╰X──┤ a: ──RX(0.3)──────╭C──╭X──╭C────────────RX(0.5)──╭┤ ⟨Y ⊗ Z⟩ b: ──RY(0.4)──────╰X──│───╰X────────────RY(0.6)──╰┤ ⟨Y ⊗ Z⟩ 1: ──PrepareNode──────╰C───MeasureNode────────────┤ Alternatively, if all we want to do is to find the optimal way to fit a circuit onto a smaller device, a :class:`~.CutStrategy` can be used to populate the necessary explorations of cutting parameters. 
As an extreme example, if the only device at our disposal is a 2-qubit device, a simple cut strategy is to simply specify the the ``max_free_wires`` argument (or equivalently directly passing a :class:`~.Device` to the ``device`` argument): >>> cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2) >>> print(cut_strategy.get_cut_kwargs(graph)) [{'num_fragments': 2, 'imbalance': 0.5714285714285714}, {'num_fragments': 3, 'imbalance': 1.4}, {'num_fragments': 4, 'imbalance': 1.75}, {'num_fragments': 5, 'imbalance': 2.3333333333333335}, {'num_fragments': 6, 'imbalance': 2.0}, {'num_fragments': 7, 'imbalance': 3.0}, {'num_fragments': 8, 'imbalance': 2.5}, {'num_fragments': 9, 'imbalance': 2.0}, {'num_fragments': 10, 'imbalance': 1.5}, {'num_fragments': 11, 'imbalance': 1.0}, {'num_fragments': 12, 'imbalance': 0.5}, {'num_fragments': 13, 'imbalance': 0.05}, {'num_fragments': 14, 'imbalance': 0.1}] The printed list above shows all the possible cutting configurations one can attempt to perform in order to search for the optimal cut. This is done by directly passing a :class:`~.CutStrategy` to :func:`~.find_and_place_cuts`: >>> cut_graph = qml.transforms.qcut.find_and_place_cuts( graph=graph, cut_strategy=cut_strategy, ) >>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw()) 0: ──RX──//─╭C──//────────╭C──//─────────┤ ╭<X@Y@Z> 1: ──RY──//─╰X──//─╭C──//─╰X─────────────┤ │ a: ──RX──//─╭C──//─╰X──//─╭C──//──RX──//─┤ ├<X@Y@Z> b: ──RY──//─╰X──//────────╰X──//──RY─────┤ ╰<X@Y@Z> As one can tell, quite a few cuts have to be made in order to execute the circuit on solely 2-qubit devices. To verify, let's print the fragments: >>> qml.transforms.qcut.replace_wire_cut_nodes(cut_graph) >>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph) >>> for t in frags: ... print(qml.transforms.qcut.graph_to_tape(t).draw()) .. code-block:: 0: ──RX──MeasureNode─┤ 1: ──RY──MeasureNode─┤ a: ──RX──MeasureNode─┤ b: ──RY──MeasureNode─┤ 0: ──PrepareNode─╭C──MeasureNode─┤ 1: ──PrepareNode─╰X──MeasureNode─┤ a: ──PrepareNode─╭C──MeasureNode─┤ b: ──PrepareNode─╰X──MeasureNode─┤ 1: ──PrepareNode─╭C──MeasureNode─┤ a: ──PrepareNode─╰X──MeasureNode─┤ 0: ──PrepareNode─╭C──MeasureNode─┤ 1: ──PrepareNode─╰X──────────────┤ b: ──PrepareNode─╭X──MeasureNode─┤ a: ──PrepareNode─╰C──MeasureNode─┤ a: ──PrepareNode──RX──MeasureNode─┤ b: ──PrepareNode──RY─┤ <Z> 0: ──PrepareNode─┤ <X> a: ──PrepareNode─┤ <Y> """ cut_graph = _remove_existing_cuts(graph) if isinstance(cut_strategy, CutStrategy): cut_kwargs_probed = cut_strategy.get_cut_kwargs(cut_graph) # Need to reseed if a seed is passed: seed = kwargs.pop("seed", None) seeds = np.random.default_rng(seed).choice(2**15, cut_strategy.trials_per_probe).tolist() cut_edges_probed = { (cut_kwargs["num_fragments"], trial_id): cut_method( cut_graph, **{ **cut_kwargs, **kwargs, "seed": seed, }, # kwargs has higher precedence for colliding keys ) for cut_kwargs in cut_kwargs_probed for trial_id, seed in zip(range(cut_strategy.trials_per_probe), seeds) } valid_cut_edges = {} for (num_partitions, _), cut_edges in cut_edges_probed.items(): # The easiest way to tell if a cut is valid is to just do the fragment graph. 
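# (Added note.) Concretely, _is_valid_cut below accepts a candidate only when the number
# of fragments does not exceed the requested count, every fragment fits within
# cut_strategy.max_free_wires, and the candidate uses fewer cuts than any previously
# recorded candidate sharing the same (num_fragments, max_frag_degree) key.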
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges) num_cuts = sum(isinstance(n, WireCut) for n in cut_graph.nodes) replace_wire_cut_nodes(cut_graph) frags, comm = fragment_graph(cut_graph) max_frag_degree = max(dict(comm.degree()).values()) if _is_valid_cut( fragments=frags, num_cuts=num_cuts, max_frag_degree=max_frag_degree, num_fragments_requested=num_partitions, cut_candidates=valid_cut_edges, max_free_wires=cut_strategy.max_free_wires, ): key = (len(frags), max_frag_degree) valid_cut_edges[key] = cut_edges if len(valid_cut_edges) < 1: raise ValueError( "Unable to find a circuit cutting that satisfies all constraints. " "Are the constraints too strict?" ) cut_edges = _get_optim_cut(valid_cut_edges, local_measurement=local_measurement) else: cut_edges = cut_method(cut_graph, **kwargs) cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges) if replace_wire_cuts: replace_wire_cut_nodes(cut_graph) return cut_graph def _is_valid_cut( fragments, num_cuts, max_frag_degree, num_fragments_requested, cut_candidates, max_free_wires, ): """Helper function for determining if a cut is a valid canditate.""" # pylint: disable=too-many-arguments k = len(fragments) key = (k, max_frag_degree) correct_num_fragments = k <= num_fragments_requested best_candidate_yet = (key not in cut_candidates) or (len(cut_candidates[key]) > num_cuts) # pylint: disable=no-member all_fragments_fit = all( len(graph_to_tape(f).wires) <= max_free_wires for j, f in enumerate(fragments) ) return correct_num_fragments and best_candidate_yet and all_fragments_fit def _get_optim_cut(valid_cut_edges, local_measurement=False): """Picks out the best cut from a dict of valid candidate cuts.""" if local_measurement: min_max_node_degree = min(max_node_degree for _, max_node_degree in valid_cut_edges) optim_cuts = { k: cut_edges for (k, max_node_degree), cut_edges in valid_cut_edges.items() if (max_node_degree == min_max_node_degree) } else: min_cuts = min(len(cut_edges) for cut_edges in valid_cut_edges.values()) optim_cuts = { k: cut_edges for (k, _), cut_edges in valid_cut_edges.items() if (len(cut_edges) == min_cuts) } return optim_cuts[min(optim_cuts)] # choose the lowest num_fragments among best ones.
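# (Added aside, not part of the original module.) The contraction helpers above can be
# smoke-tested with the tiny two-tensor network from the contract_tensors docstring;
# this sketch assumes PrepareNode, MeasureNode and contract_tensors are in scope as
# defined/imported earlier in this module.
if __name__ == "__main__":
    import numpy as np
    from networkx import MultiDiGraph

    tensors = [np.arange(4), np.arange(4, 8)]
    prep = [[], [PrepareNode(wires=0)]]
    meas = [[MeasureNode(wires=0)], []]
    graph = MultiDiGraph([(0, 1, {"pair": (meas[0][0], prep[1][0])})])

    # The expected value 38 is taken from the contract_tensors docstring example.
    assert contract_tensors(tensors, graph, prep, meas) == 38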
1.914063
2
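The candidate-selection rule inside _get_optim_cut above is compact but easy to misread, so here is the same logic as a standalone sketch over a plain dictionary; the candidate data below is invented for illustration.

# Illustrative stand-in for the valid_cut_edges dict built by find_and_place_cuts:
# keys are (num_fragments, max_frag_degree), values are lists of cut edges.
valid_cut_edges = {
    (2, 3): ["e1", "e2", "e3"],
    (3, 2): ["e1", "e2", "e3", "e4"],
    (4, 2): ["e5", "e6"],
}

def pick_best(valid_cut_edges, local_measurement=False):
    """Mirror of _get_optim_cut's selection rule on plain dictionaries."""
    if local_measurement:
        # Minimise the maximum fragment degree in the communication graph.
        best_degree = min(deg for (_, deg) in valid_cut_edges)
        candidates = {
            k: edges
            for (k, deg), edges in valid_cut_edges.items()
            if deg == best_degree
        }
    else:
        # Minimise the total number of wire cuts.
        fewest_cuts = min(len(edges) for edges in valid_cut_edges.values())
        candidates = {
            k: edges
            for (k, _), edges in valid_cut_edges.items()
            if len(edges) == fewest_cuts
        }
    # Break remaining ties by choosing the smallest number of fragments.
    return candidates[min(candidates)]

print(pick_best(valid_cut_edges))                          # ['e5', 'e6']
print(pick_best(valid_cut_edges, local_measurement=True))  # ['e1', 'e2', 'e3', 'e4']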
tools/SDKTool/src/ui/dialog/progress_bar_dialog.py
Passer-D/GameAISDK
1,210
11963
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making GameAISDK available. This source code file is licensed under the GNU General Public License Version 3. For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package. Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. """ from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QWidget, QProgressDialog class ProgressBarDialog(QWidget): def __init__(self, title='', label='', minValue=0, maxValue=100, parent=None): super(ProgressBarDialog, self).__init__(parent) self.process_bar = QProgressDialog(self) self.set_bar_window_title(title) self.set_label_text(label) self.set_min_value(minValue) self.set_max_value(maxValue) self.process_bar.setWindowModality(Qt.WindowModal) self.setGeometry(800, 300, 580, 570) self.process_bar.canceled.connect(self.close_bar) def set_bar_window_title(self, text): self.process_bar.setWindowTitle(text) self.setWindowTitle(text) def set_label_text(self, text): self.process_bar.setLabelText(text) def set_min_value(self, minValue): self.process_bar.setMinimum(minValue) def set_max_value(self, maxvalue): self.process_bar.setMaximum(maxvalue) def set_value(self, value): self.process_bar.setValue(value) def close_bar(self): self.process_bar.close() def reset_bar(self): self.process_bar = None def show(self): self.process_bar.show() def is_valid(self): return bool(self.process_bar)
2.015625
2
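A minimal usage sketch for the ProgressBarDialog wrapper in the record above; the module import path, window title and fake work loop are assumptions, not part of the SDK.

import sys
import time

from PyQt5.QtWidgets import QApplication

# Assumed import path for the class in the record above.
from progress_bar_dialog import ProgressBarDialog

if __name__ == '__main__':
    app = QApplication(sys.argv)
    bar = ProgressBarDialog(title='Export', label='Copying files...', minValue=0, maxValue=100)
    bar.show()
    for i in range(101):
        bar.set_value(i)      # advance the wrapped QProgressDialog
        app.processEvents()   # keep the dialog responsive during the fake work
        time.sleep(0.02)
    bar.close_bar()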
9/main.py
misterwilliam/advent-of-code
0
11964
<gh_stars>0 import itertools import unittest data = """Faerun to Norrath = 129 Faerun to Tristram = 58 Faerun to AlphaCentauri = 13 Faerun to Arbre = 24 Faerun to Snowdin = 60 Faerun to Tambi = 71 Faerun to Straylight = 67 Norrath to Tristram = 142 Norrath to AlphaCentauri = 15 Norrath to Arbre = 135 Norrath to Snowdin = 75 Norrath to Tambi = 82 Norrath to Straylight = 54 Tristram to AlphaCentauri = 118 Tristram to Arbre = 122 Tristram to Snowdin = 103 Tristram to Tambi = 49 Tristram to Straylight = 97 AlphaCentauri to Arbre = 116 AlphaCentauri to Snowdin = 12 AlphaCentauri to Tambi = 18 AlphaCentauri to Straylight = 91 Arbre to Snowdin = 129 Arbre to Tambi = 53 Arbre to Straylight = 40 Snowdin to Tambi = 15 Snowdin to Straylight = 99 Tambi to Straylight = 70""" def GenPaths(cities): for path in _GenPathsRec([], list(cities)): yield path def _GenPathsRec(stack, cities): if len(cities) == 0: yield stack else: for i in xrange(len(cities)): for path in _GenPathsRec(stack + [cities[i]], cities[:i] + cities[i+1:]): yield path def CalcDistance(start, dest, distancePairs): return distancePairs[frozenset((start, dest))] def CalcPathLength(path, distance_pairs): length = 0 for i in xrange(len(path) - 1): length += CalcDistance(path[i], path[i+1], distance_pairs) return length def LoadData(data): distance_pairs = {} cities = set() for line in data.split("\n"): start, _, dest, _, distance = line.split() cities.add(start) cities.add(dest) distance_pairs[frozenset([start, dest])] = int(distance) return cities, distance_pairs # ANSWER -------------------------------- cities, distance_pairs = LoadData(data) longestLength = -1 for path in GenPaths(cities): length = CalcPathLength(path, distance_pairs) longestLength = max(longestLength, length) print longestLength # TESTS --------------------------------- class GenPathsTests(unittest.TestCase): def test_GenPaths(self): self.assertEqual( [path for path in GenPaths("abcd")], [list(permutation) for permutation in itertools.permutations("abcd")]) class CalcPathLengthTests(unittest.TestCase): def test_CalcPathLength(self): distance_pairs = { frozenset(["a", "b"]): 10, frozenset(["b", "c"]): 20 } self.assertEqual(CalcPathLength(["a", "b", "c"], distance_pairs), 30) if __name__ == "__main__": unittest.main()
2.65625
3
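For comparison, the same brute-force route search can be written in Python 3 around itertools.permutations; the three-city distance table below is made up and only the frozenset-keyed format is taken from the file above.

import itertools

# Toy distance table in the same frozenset-keyed format used by LoadData above.
distance_pairs = {
    frozenset(["a", "b"]): 10,
    frozenset(["b", "c"]): 20,
    frozenset(["a", "c"]): 15,
}
cities = {"a", "b", "c"}

def path_length(path, pairs):
    return sum(pairs[frozenset(pair)] for pair in zip(path, path[1:]))

lengths = [path_length(p, distance_pairs) for p in itertools.permutations(cities)]
print(min(lengths), max(lengths))  # shortest and longest full routes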
100-Exercicios/ex039.py
thedennerdev/ExerciciosPython-Iniciante
0
11965
#Exercício Python 39: Faça um programa que leia o ano de nascimento de um jovem e informe, de acordo com a sua idade, se ele ainda vai se alistar ao serviço militar, se é a hora exata de se alistar ou se já passou do tempo do alistamento. Seu programa também deverá mostrar o tempo que falta ou que passou do prazo. import datetime current_year = datetime.datetime.today().year ano_nasc = int(input('Informe o ano de seu nascimento: ')) idade_alistamento = current_year - ano_nasc if idade_alistamento < 18: print('Ainda não está na hora de se alistar') print(f'Sua idade ainda é {idade_alistamento} anos, faltam {18 - idade_alistamento } anos. Aguarde mais um pouco!') elif idade_alistamento == 18: print(f'Sua idade já é {idade_alistamento} anos') print('Você está na idade de se alistar. Não perca tempo!') else: print('Você passou do prazo de alistamento.') print(f'Sua idade é {idade_alistamento} anos, já passou {idade_alistamento - 18} anos. Regularize a situação!')
# Python Exercise 39: Write a program that reads a young person's year of birth and reports, based on their age, whether they still have to enlist for military service, whether this is exactly the year to enlist, or whether the enlistment deadline has already passed. The program must also show how many years are left or how many years past the deadline they are.
import datetime

current_year = datetime.datetime.today().year
ano_nasc = int(input('Enter your year of birth: '))
idade_alistamento = current_year - ano_nasc

if idade_alistamento < 18:
    print('It is not time to enlist yet')
    print(f'You are {idade_alistamento} years old; {18 - idade_alistamento} years to go. Wait a little longer!')
elif idade_alistamento == 18:
    print(f'You are {idade_alistamento} years old')
    print('This is the year to enlist. Do not waste time!')
else:
    print('You are past the enlistment deadline.')
    print(f'You are {idade_alistamento} years old, {idade_alistamento - 18} years past the deadline. Regularize your situation!')
4.09375
4
fish_dashboard/scrapyd/scrapyd_service.py
SylvanasSun/FishFishJump
60
11966
<reponame>SylvanasSun/FishFishJump<gh_stars>10-100 #!/usr/bin/env python # -*- coding: utf-8 -*- from fish_core.utils.common_utils import format_dict_to_str, get_current_date, list_to_str, str_to_list from fish_dashboard.scrapyd.model import ScrapydStatusVO, JobListDO, JobStatus, JobPriority, ProjectListVO, SpiderListVO from fish_dashboard.scrapyd.scrapyd_db import SqlLite3Agent class ScrapydTimeoutException(Exception): pass class ScrapydJobExtInfoSQLSet(): TABLE_NAME = 'scrapyd_job_ext_info' DB_FILE_NAME = 'scrapyd.db' CREATE_TABLE = """CREATE TABLE %s (job_id VARCHAR(32) PRIMARY KEY, args VARCHAR(20), priority INT(1), creation_time DATE, logs_name VARCHAR(128), logs_url VARCHAR(255), project_name VARCHAR(32), project_version VARCHAR(20))""" % TABLE_NAME INSERT = 'INSERT INTO %s VALUES(?,?,?,?,?,?,?,?)' % TABLE_NAME SELECT_BY_ID = 'SELECT * FROM %s WHERE job_id = ?' % TABLE_NAME SELECT_ALL = 'SELECT * FROM %s' % TABLE_NAME DELETE_BY_ID = 'DELETE FROM %s WHERE job_id = ?' % TABLE_NAME DELETE_BY_PROJECT_NAME = 'DELETE FROM %s WHERE project_name = ?' % TABLE_NAME DELETE_BY_PROJECT_VERSION = 'DELETE FROM %s WHERE project_name = ? AND project_version = ?' % TABLE_NAME def open_sqllite(sql_set): agent = SqlLite3Agent(sql_set.DB_FILE_NAME) agent.create_table(sql_set.CREATE_TABLE) return agent sqllite_agent = open_sqllite(ScrapydJobExtInfoSQLSet) def schedule_job(agent, project_name, spider_name, priority=JobPriority.LOW, setting=None, job_id=None, version=None, args={} ): jobid = agent.schedule(project_name, spider_name, priority, setting, job_id, version, args)['jobid'] if version is None: version = agent.get_version_list(project_name)['versions'][-1:] # Save additional information that can't queried by scrapyd api into the database args_str = format_dict_to_str(args, '=') current_date = get_current_date() logs_name, logs_url = agent.get_logs(project_name, spider_name) sqllite_agent.execute(ScrapydJobExtInfoSQLSet.INSERT, (jobid, args_str, priority, current_date, list_to_str(logs_name), list_to_str(logs_url), project_name, version,)) def cancel_job(agent, project_name, job_id): """ cancel a job. If the job is pending, it will be removed. If the job is running, it will be terminated. 
""" prevstate = agent.cancel(project_name, job_id)['prevstate'] if prevstate == 'pending': sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,)) def packing_job_ext_info(job_lsit_DO): """ Packing additional information of the job into the job_list_DO(JobListDO) """ ext_info = sqllite_agent.execute(ScrapydJobExtInfoSQLSet.SELECT_BY_ID, (job_lsit_DO.job_id,)) if ext_info is None or len(ext_info) <= 0: return ext_info = ext_info[0] job_lsit_DO.args = ext_info[1] job_lsit_DO.priority = ext_info[2] job_lsit_DO.creation_time = ext_info[3] job_lsit_DO.logs_name = str_to_list(ext_info[4], ',') job_lsit_DO.logs_url = str_to_list(ext_info[5], ',') def get_scrapyd_status(agent): # record the amount of the project and spider project_list = agent.get_project_list() if project_list['status'] == 'error': raise ScrapydTimeoutException project_list = project_list['projects'] spider_list = [] for p in project_list: s = agent.get_spider_list(project_name=p) spider_list.extend(s['spiders']) # get load status of a scrapyd service load_status_dict = agent.get_load_status() running = load_status_dict['running'] pending = load_status_dict['pending'] finished = load_status_dict['finished'] scrapydStatusVO = ScrapydStatusVO(running=running, pending=pending, finished=finished, project_amount=len(project_list), spider_amount=len(spider_list), job_amount=running + pending + finished ) return scrapydStatusVO def add_version(agent, project_name, version, egg): return agent.add_version(project_name, version, egg)['status'] def delete_project(agent, project_name): status = agent.delete_project(project_name)['status'] if status == 'ok': sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_PROJECT_NAME, (project_name,)) def delete_project_version(agent, project_name, version): status = agent.delete_project_version(project_name, version)['status'] if status == 'ok': sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_PROJECT_VERSION, (project_name, version,)) def get_all_job_list(agent): """ Get all job list by each project name then return three job list on the base of different status(pending,running,finished). 
""" project_list = agent.get_project_list() if project_list['status'] == 'error': raise ScrapydTimeoutException project_list = project_list['projects'] pending_job_list = [] running_job_list = [] finished_job_list = [] for project_name in project_list: job_list = agent.get_job_list(project_name) # Extract latest version project_version = agent.get_version_list(project_name)['versions'][-1:] for pending_job in job_list['pending']: pending_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=pending_job['id'], spider_name=pending_job['spider'], job_status=JobStatus.PENDING )) for running_job in job_list['running']: running_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=running_job['id'], spider_name=running_job['spider'], start_time=running_job['start_time'], job_status=JobStatus.RUNNING )) for finished_job in job_list['finished']: finished_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=finished_job['id'], spider_name=finished_job['spider'], start_time=finished_job['start_time'], end_time=finished_job['end_time'], job_status=JobStatus.FINISHED )) return pending_job_list, running_job_list, finished_job_list def get_all_project_list(agent): project_name_list = agent.get_project_list() if project_name_list['status'] == 'error': raise ScrapydTimeoutException project_name_list = project_name_list['projects'] project_list = [] for project_name in project_name_list: version_list = agent.get_version_list(project_name)['versions'] spider_list = agent.get_spider_list(project_name)['spiders'] job_amounts = get_job_amounts(agent, project_name=project_name) project_list.append(ProjectListVO(project_name=project_name, project_versions=version_list, latest_project_version=version_list[-1:], spider_amount=len(spider_list), spider_names=spider_list, pending_job_amount=job_amounts['pending'], running_job_amount=job_amounts['running'], finished_job_amount=job_amounts['finished'] )) return project_list def get_all_spider_list(agent): project_name_list = agent.get_project_list() if project_name_list['status'] == 'error': raise ScrapydTimeoutException project_name_list = project_name_list['projects'] spider_list = [] for project_name in project_name_list: spider_name_list = agent.get_spider_list(project_name)['spiders'] latest_project_version = agent.get_version_list(project_name)['versions'][-1:] for spider_name in spider_name_list: logs_name, logs_url = agent.get_logs(project_name, spider_name) job_amounts = get_job_amounts(agent, project_name, spider_name) spider_list.append(SpiderListVO(spider_name=spider_name, project_name=project_name, latest_project_version=latest_project_version, logs_name=logs_name, logs_url=logs_url, pending_job_amount=job_amounts['pending'], running_job_amount=job_amounts['running'], finished_job_amount=job_amounts['finished'] )) return spider_list def get_job_amounts(agent, project_name, spider_name=None): """ Get amounts that pending job amount, running job amount, finished job amount. 
""" job_list = agent.get_job_list(project_name) pending_job_list = job_list['pending'] running_job_list = job_list['running'] finished_job_list = job_list['finished'] job_amounts = {} if spider_name is None: job_amounts['pending'] = len(pending_job_list) job_amounts['running'] = len(running_job_list) job_amounts['finished'] = len(finished_job_list) else: job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name]) job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name]) job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name]) return job_amounts def get_logs_info(agent, project_name, spider_name): logs_name, logs_url = agent.get_logs(project_name, spider_name) return {'logs_name': logs_name, 'logs_url': logs_url}
1.992188
2
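The per-spider counting done by get_job_amounts is the main piece of bookkeeping in the file above; here it is as a standalone function over a plain dictionary shaped like scrapyd's job listing, with invented sample data.

def count_jobs(job_list, spider_name=None):
    """Count pending/running/finished jobs, optionally restricted to one spider."""
    counts = {}
    for state in ("pending", "running", "finished"):
        jobs = job_list.get(state, [])
        if spider_name is not None:
            jobs = [j for j in jobs if j["spider"] == spider_name]
        counts[state] = len(jobs)
    return counts

sample = {
    "pending": [{"spider": "quotes"}],
    "running": [{"spider": "quotes"}, {"spider": "books"}],
    "finished": [{"spider": "books"}],
}
print(count_jobs(sample))                       # {'pending': 1, 'running': 2, 'finished': 1}
print(count_jobs(sample, spider_name="books"))  # {'pending': 0, 'running': 1, 'finished': 1}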
apps/shared/storage.py
bensternthal/affiliates
0
11967
<filename>apps/shared/storage.py import os from tempfile import mkstemp from django.conf import settings from django.core.files import locks from django.core.files.move import file_move_safe from django.core.files.storage import FileSystemStorage from django.utils.text import get_valid_filename class OverwritingStorage(FileSystemStorage): """ File storage that allows overwriting of stored files. Modified from http://djangosnippets.org/snippets/2173/ """ def get_available_name(self, name): return name def _save(self, name, content): """ Lifted partially from django/core/files/storage.py """ full_path = self.path(name) directory = os.path.dirname(full_path) if not os.path.exists(directory): os.makedirs(directory) elif not os.path.isdir(directory): raise IOError("%s exists and is not a directory." % directory) # Ensure that content is open content.open() if hasattr(content, 'temporary_file_path'): # Content has a file that we can move. temp_data_location = content.temporary_file_path() file_move_safe(temp_data_location, full_path, allow_overwrite=True) else: # Write the content stream to a temporary file and move it. fd, tmp_path = mkstemp() locks.lock(fd, locks.LOCK_EX) for chunk in content.chunks(): os.write(fd, chunk) locks.unlock(fd) os.close(fd) file_move_safe(tmp_path, full_path, allow_overwrite=True) content.close() if settings.FILE_UPLOAD_PERMISSIONS is not None: os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS) return name
2.375
2
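A short sketch of how a storage class like the one above is typically attached to a Django model field; the model, field and upload path are placeholders.

from django.db import models

# Assumed import path for the storage class above.
from shared.storage import OverwritingStorage

class Banner(models.Model):
    # Re-uploading a file with the same name replaces the stored copy instead of
    # creating a renamed duplicate, because get_available_name() returns the name unchanged.
    image = models.FileField(upload_to='banners/', storage=OverwritingStorage())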
bin/dupeFinder.py
kebman/dupe-finder-py
1
11968
<filename>bin/dupeFinder.py #!/usr/bin/env python2 import os import hashlib import datetime import sqlite3 from sqlite3 import Error def sha256(fname): """Return sha256 hash from input file (fname). :param fname: :return: Sha256 hash digest in hexadecimal""" hash_sha256 = hashlib.sha256() with open(fname, "rb") as f: for chunk in iter(lambda: f.read(65536), b""): hash_sha256.update(chunk) return hash_sha256.hexdigest() def getHRT(timestamp): """Get human readable time from a Python timestamp. :param timestamp: :return: Human readable timestamp (HRT)""" dtval = datetime.datetime.fromtimestamp(timestamp) return dtval.strftime('%Y-%m-%d %H:%M:%S') def getSQLT(timestamp): """Make timestamp for SQLite from Python timestamp, meaning a UNIX epoch INTEGER. :param timestamp: :return: SQLite compatible timestamp in the form of a UNIX epoch INTEGER""" # I know this is a very small function, but now it's clear what SQL needs return int(timestamp) def create_connection(db_file): """Create a database connection to the SQLite database specified by db_file :param db_file: database file :return: Connection object or None""" try: connection = sqlite3.connect(db_file) return connection except Error as e: print(e) return None def check_exists(connection, path): """Check the file path in the SQL filepaths table. :param connection: :param path: :return: path id""" exists = '''SELECT EXISTS(SELECT 1 FROM filepaths WHERE filepath = ?);''' cursor = connection.cursor() cursor.execute(exists, (path,)) return cursor.fetchone() def get_path(connection, path): """Get the file path in the SQL filepaths table. :param connection: :param path: :return: path id""" select = '''SELECT id FROM filepaths WHERE filepath = ?;''' cursor = connection.cursor() cursor.execute(select, (path,)) return cursor.fetchone()[0] def store_path(connection, path): """Store the file path in the SQL filepaths table. :param connection: :param path: :return: path id""" insert = '''INSERT OR IGNORE INTO filepaths(filepath) VALUES(?)''' cursor = connection.cursor() cursor.execute(insert, (path,)) return cursor.lastrowid def store_file(connection, file): """Store the file, hash and relevant file attributes in the SQL files table. :param connection: :param file: :return: Filepath ID""" sql = '''INSERT INTO files(filename, checksum, filesize, btime, ctime, mtime, filepath_id) VALUES(?, ?, ?, ?, ?, ?, ?)''' cursor = connection.cursor() cursor.execute(sql, file) return None # return cursor.lastrowid def main(): path = "." 
# UX (and OS X) spesific path names # homedir = os.path.expanduser('~') db_file = "db/pythonsqlite.db" connection = create_connection(db_file) with connection: os.chdir(path) for entry in os.walk("."): folder = str(entry[0]) for file in entry[2]: filepath = os.getcwd() + folder[1:] #[1:] cuts out the preceding dot # only write if exists exists = check_exists(connection, filepath) if exists[0]: filepath_id = get_path(connection, filepath) # print('Fetched '+ str(filepath_id)) else: filepath_id = store_path(connection, filepath) # print('Written '+ str(filepath_id)) fullpathfile = os.getcwd() + folder[1:] + "/" + file file = file checksum = sha256(fullpathfile) size = os.stat(fullpathfile).st_size bstamp = os.stat(fullpathfile).st_birthtime cstamp = os.stat(fullpathfile).st_ctime mstamp = os.stat(fullpathfile).st_mtime fileInfo = (file, checksum, size, bstamp, cstamp, mstamp, filepath_id) store_file(connection, fileInfo) # test print: # print(str(getSQLT(birthstamp)) + " " + sha256(fullpathfile) + " " + fullpathfile + " " + str(size) + "b") if __name__ == '__main__': main()
3.328125
3
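Once the hashes are stored, duplicate files are one GROUP BY away; this sketch assumes the files table (created elsewhere in the project) has the columns named in store_file above.

import sqlite3

connection = sqlite3.connect("db/pythonsqlite.db")
query = """
    SELECT checksum, COUNT(*) AS copies
    FROM files
    GROUP BY checksum
    HAVING COUNT(*) > 1;
"""
for checksum, copies in connection.execute(query):
    print(copies, checksum)
connection.close()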
Python/factorialIterative.py
Ricardoengithub/Factorial
0
11969
def factorial(n):
    fact = 1
    for i in range(2, n + 1):
        fact *= i
    return fact


def main():
    n = int(input("Enter a number: "))
    if n >= 0:
        print(f"Factorial: {factorial(n)}")
    else:
        print("Factorial is not defined for negative numbers; choose another number.")


if __name__ == "__main__":
    main()
4.1875
4
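A quick self-check of the loop above against the standard library; the module name used in the import is a guess based on the file path.

import math

# Hypothetical module name for the file above.
from factorialIterative import factorial

for n in (0, 1, 5, 10):
    assert factorial(n) == math.factorial(n), n
print("all factorial checks passed")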
code_old/sort.py
benwoo1110/A-List-of-Sorts-v2
6
11970
###################################### # Import and initialize the librarys # ##################################### from code.pygame_objects import * from code.algorithm.bubblesort import bubblesort from code.algorithm.insertionsort import insertionsort from code.algorithm.bogosort import bogosort from code.algorithm.mergesort import mergesort from code.algorithm.quicksort import quicksort from code.algorithm.radixsort import radixsort from code.algorithm.selectionsort import selectionsort from code.algorithm.commonFunc import commonFunc ################# # Setup logging # ################# filename = os.path.basename(__file__).split('.')[0] logger = log.get_logger(filename) logger.info('Loading up {}...'.format(filename)) sort_screen = screen( name = 'sort', surfaceParameters = { 'frame': coord(w=1024, h=768) }, objectsParameters = { 'background': { 'type': 'object', 'frame': { 'image': coord(w=1024, h=768) }, }, 'sort_title': { 'type': 'title', 'frame': { 'image': coord(w=1024, h=135) }, }, 'back': { 'type': 'button', 'frame': { 'box': coord(x=71, y=41, w=112, h=61), 'image': coord(x=71, y=41, w=112, h=61) }, 'runclass': runclass(action='go_back') }, 'info': { 'type': 'button', 'frame': { 'box': coord(x=841, y=40, w=112, h=61), 'image': coord(x=841, y=40, w=112, h=61), }, 'runclass': runclass(action='info') }, 'speed': { 'type': 'text', 'frame': { 'image': coord(x=349, y=630, w=254, h=40), 'text': coord(x=349, y=630, w=254, h=40) }, 'data': text( text = '10', editable = False, suffix = ' sec per move', format = textFormat( fontType=pg_ess.font.futura, fontSize=28, colour=pg_ess.colour.black ) ), 'dataAddSelf': True, }, 'moves': { 'type': 'text', 'frame': { 'image': coord(x=436, y=677, w=112, h=40), 'text': coord(x=436, y=677, w=112, h=40) }, 'data': moves( format = textFormat( fontType=pg_ess.font.futura, fontSize=28, colour=pg_ess.colour.black ) ), 'dataAddSelf': True, }, 'time_taken': { 'type': 'text', 'frame': { 'image': coord(x=768, y=630, w=177, h=40), 'text': coord(x=768, y=630, w=177, h=40) }, 'data': timer( format = textFormat( fontType=pg_ess.font.futura, fontSize=28, colour=pg_ess.colour.black ) ), 'dataAddSelf': True, }, 'list_length': { 'type': 'text', 'frame': { 'image': coord(x=759, y=677, w=112, h=186), 'text': coord(x=759, y=677, w=112, h=186) }, 'data': text( text = '100', editable = False, suffix = ' bars', format = textFormat( fontType=pg_ess.font.futura, fontSize=28, colour=pg_ess.colour.black ) ), 'dataAddSelf': True, }, 'sortbox': { 'type': 'object', 'frame': { 'box': coord(x=52, y=145, w=922, h=430), 'image': coord(x=52, y=145, w=922, h=430) }, 'data': sortbars( bars=10, ), 'dataAddSelf': True, } } ) runSort = { 'Bubble sort': bubblesort.run, 'Insertion sort': insertionsort.run, 'Merge sort': mergesort.run, 'Quick sort': quicksort.run, 'Radix sort': radixsort.run, 'Bogo sort': bogosort.run, 'Selection sort': selectionsort.run } class sort: @staticmethod def run(screen, sortType:str, bars:int, speed:float): # Set data from parent sort_screen.objects.sort_title.switchState(sortType, withDisplay=False) if sort_screen.objects.sortbox.data.bars != int(bars): sort_screen.objects.sortbox.data.bars = int(bars) else: sort_screen.objects.sortbox.data.genBars() sort_screen.objects.speed.data.setText(str(speed), withDisplay=False) sort_screen.objects.list_length.data.setText(str(bars), withDisplay=False) sort_screen.objects.moves.data.reset() sort_screen.objects.time_taken.data.resetTimer() # Display sort screen sort_screen.surface.display() # Buffer time before sort starts 
action_result = commonFunc.waitAction(sort_screen, 0.5) if action_result != None: return action_result sort_result = runSort[sortType](sort_screen, speed) if sort_result != None: return sort_result while True: # Get check for interaction with screen action_result = sort_screen.event.action() # No action if action_result == None: continue # When program is set to close if action_result.contains('outcome','__quit__'): return '__quit__' # Going back if action_result.contains('outcome', 'go_back'): return '__back__' # Load back screen if action_result.contains('outcome', '__back__'): sort_screen.surface.display(withLoad=False)
2.65625
3
catkin_ws/src/tutorials/scripts/number_sub.py
vipulkumbhar/AuE893Spring19_VipulKumbhar
3
11971
<gh_stars>1-10 #!/usr/bin/env python import rospy from std_msgs.msg import Int64 counter = 0 pub = None def callback_number(msg): global counter counter += msg.data new_msg = Int64() new_msg.data = counter pub.publish(new_msg) rospy.loginfo(counter) if __name__ == '__main__': rospy.init_node('number_counter') sub = rospy.Subscriber("/number", Int64, callback_number) pub = rospy.Publisher("/number_count", Int64, queue_size =10) rospy.spin()
2.265625
2
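The subscriber above only does something useful next to a node publishing on /number; this is a tutorial-style counterpart sketched from the standard rospy API, not taken from the same repository.

#!/usr/bin/env python
import rospy
from std_msgs.msg import Int64

if __name__ == '__main__':
    rospy.init_node('number_publisher')
    pub = rospy.Publisher("/number", Int64, queue_size=10)
    rate = rospy.Rate(2)  # publish at 2 Hz
    msg = Int64()
    msg.data = 1
    while not rospy.is_shutdown():
        pub.publish(msg)   # the subscriber accumulates these into /number_count
        rate.sleep()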
setup.py
eddo888/perdy
0
11972
#!/usr/bin/env python import codecs from os import path from setuptools import setup pwd = path.abspath(path.dirname(__file__)) with codecs.open(path.join(pwd, 'README.md'), 'r', encoding='utf8') as input: long_description = input.read() version='1.7' setup( name='Perdy', version=version, license='MIT', long_description=long_description, long_description_content_type="text/markdown", url='https://github.com/eddo888/perdy', download_url='https://github.com/eddo888/perdy/archive/%s.tar.gz'%version, author='<NAME>', author_email='<EMAIL>', packages=[ 'Perdy', ], install_requires=[ 'pytz', 'arrow', 'xmltodict', 'PyYAML', 'jsonpath', 'argcomplete', 'Baubles', ], scripts=[ 'bin/parser.py', 'bin/pyson.py', 'bin/colourize.py', ], )
1.554688
2
utils/slack_send.py
IntelliGrape/pennypincher
0
11973
from tabulate import tabulate
from slack.errors import SlackApiError
import sys
import logging
import slack


class Slackalert:
    """Send the cost report to Slack."""

    def __init__(self, channel=None, slack_token=None):
        self.channel = channel
        self.slack_token = slack_token
        logging.basicConfig(level=logging.WARNING)
        self.logger = logging.getLogger()

    def get_resource_list(self, resource_name, resource_info, resource_header, resource_list, resource_savings):
        """Return all the idle-resource information in a dictionary format."""
        resource_list.insert(0, resource_header)
        resource_info[resource_name] = {}
        resource_info[resource_name]['Resources'] = resource_list
        resource_info[resource_name]['Savings'] = resource_savings
        return resource_info

    def slack_alert(self, resource_info, account_name, total_savings):
        """Create a txt file containing the cost report and send it to the Slack channel."""
        try:
            client = slack.WebClient(token=self.slack_token)
            f = open("/tmp/cost_optimization_report.txt", "w+")
            for res in resource_info.keys():
                # Convert the resource info dictionary to tabular format.
                f.write('\n' + 'Resource: ' + res + '\n')
                resource_table = tabulate(resource_info[res]['Resources'][1:],
                                          headers=resource_info[res]['Resources'][0],
                                          tablefmt="grid", disable_numparse=True)
                f.write('\n' + resource_table + '\n \n' + 'Savings: $' + str(resource_info[res]['Savings']) + '\n')
            f.close()
            response = client.files_upload(
                file='/tmp/cost_optimization_report.txt',
                initial_comment='Cost Optimization Report | ' + account_name + ' | Total Savings: $' + str(total_savings),
                channels=self.channel
            )
            print("Sending the Cost Optimization report to slack " + self.channel)

        except SlackApiError as e:
            # You will get a SlackApiError if "ok" is False.
            assert e.response["ok"] is False
            # e.response["error"] is a string like 'invalid_auth' or 'channel_not_found'.
            assert e.response["error"]
            self.logger.error(f"Slack api error: {e.response['error']} | Error in slack_send.py")
            sys.exit(1)

        except Exception as e:
            self.logger.error(
                "Error on line {} in slack_send.py".format(sys.exc_info()[-1].tb_lineno) + " | Message: " + str(e))
            sys.exit(1)
3.265625
3
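A sketch of how the two methods above compose: build the per-resource dictionary with get_resource_list, then hand it to slack_alert. The import path, token, channel and resource rows are placeholders.

# Assumed import path; a real run also needs a bot token with files:write scope.
from utils.slack_send import Slackalert

alert = Slackalert(channel="#cost-reports", slack_token="xoxb-...")

resource_info = alert.get_resource_list(
    resource_name="EBS Volumes",
    resource_info={},
    resource_header=["Volume ID", "Region", "Size (GiB)", "Monthly Cost ($)"],
    resource_list=[["vol-0abc", "us-east-1", "100", "8.00"]],
    resource_savings=8.00,
)

alert.slack_alert(resource_info, account_name="dev-account", total_savings=8.00)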
src/importer/importer.py
tiefenauer/ip7-python
0
11974
import logging from abc import ABC, abstractmethod from pony.orm import db_session, commit log = logging.getLogger(__name__) class Importer(ABC): def __init__(self, TargetEntity): self.TargetEntity = TargetEntity @db_session def truncate(self): log.info('Truncating target tables...') self.TargetEntity.select().delete(bulk=True) commit() log.info('...done!') @abstractmethod def __iter__(self): """iterate over items to be imported""" return
2.578125
3
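A minimal concrete importer built on the abstract base above; the CSV source and the Pony ORM Job entity mentioned in the comments are assumptions.

import csv

# Assumed import path for the abstract base above.
from importer.importer import Importer

class CsvJobImporter(Importer):
    """Yields rows from a CSV file for import into the target entity."""

    def __init__(self, TargetEntity, csv_path):
        super().__init__(TargetEntity)
        self.csv_path = csv_path

    def __iter__(self):
        with open(self.csv_path, newline='', encoding='utf-8') as f:
            for row in csv.DictReader(f):
                yield row

# Typical use, where Job is a Pony ORM entity defined elsewhere:
# importer = CsvJobImporter(Job, 'jobs.csv')
# importer.truncate()      # empty the target table first
# for row in importer:
#     Job(**row)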
news/pybo/migrations/0006_auto_20211010_0322.py
Smashh712/nrib
0
11975
# Generated by Django 3.2.7 on 2021-10-09 18:22 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('pybo', '0005_auto_20211010_0320'), ] operations = [ migrations.AddField( model_name='issue', name='agree_representor_id', field=models.CharField(default='', max_length=20, null=True), ), migrations.AddField( model_name='issue', name='disagree_representor_id', field=models.CharField(default='', max_length=20, null=True), ), migrations.AlterField( model_name='issue', name='agree_representor', field=models.CharField(default='', max_length=20, null=True), ), migrations.AlterField( model_name='issue', name='disagree_representor', field=models.CharField(default='', max_length=20, null=True), ), ]
1.640625
2
anno_gen/modify_filesprocessed.py
KevinQian97/diva_toolbox
0
11976
import json import os def get_file_index(filesProcessed): new_dict = {} for f in filesProcessed: new_dict[f]={"framerate": 30.0, "selected": {"0": 1, "9000": 0}} return new_dict ref = json.load(open("/home/lijun/downloads/kf1_meta/references/kf1_all.json","r")) files = ref["filesProcessed"] print(len(files)) output = json.load(open("/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output.json","r")) output["filesProcessed"] = files jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output-mod.json" with open(jname,'w') as j: json.dump(output,j,indent=2,ensure_ascii=False) file_dict = get_file_index(files) jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/file-index.json" with open(jname,'w') as j: json.dump(file_dict,j,indent=2,ensure_ascii=False)
2.609375
3
monotone_bipartition/search.py
mvcisback/monotone-bipartition
1
11977
from enum import Enum, auto import funcy as fn import numpy as np from monotone_bipartition import rectangles as mdtr from monotone_bipartition import refine EPS = 1e-4 class SearchResultType(Enum): TRIVIALLY_FALSE = auto() TRIVIALLY_TRUE = auto() NON_TRIVIAL = auto() def diagonal_convex_comb(r): bot, top = np.array(r.bot), np.array(r.top) diag = top - bot return lambda t: bot + t * diag def binsearch(r, oracle, eps=EPS, find_lambda=False): """Binary search over the diagonal of the rectangle. Returns the lower and upper approximation on the diagonal. """ f = diagonal_convex_comb(r) feval = fn.compose(oracle, f) lo, hi = 0, 1 # Early termination via bounds checks if feval(lo): result_type = SearchResultType.TRIVIALLY_TRUE hi = 0 elif not feval(hi): result_type = SearchResultType.TRIVIALLY_FALSE else: result_type = SearchResultType.NON_TRIVIAL mid = lo while hi - lo > eps: mid = lo + (hi - lo) / 2 lo, hi = (lo, mid) if feval(mid) else (mid, hi) if find_lambda: if result_type == SearchResultType.TRIVIALLY_TRUE: return result_type, -1 elif result_type == SearchResultType.TRIVIALLY_FALSE: return result_type, 2 return result_type, (lo+hi)/2 else: return result_type, mdtr.to_rec(zip(f(lo), f(hi))) def line_intersect(func, point, tol, *, percent=False): box_intersect = np.array(point) / max(point) origin = [0]*len(point) rec = mdtr.to_rec(zip(origin, box_intersect)) # Compute bounding rec. return binsearch(rec, func, eps=tol, find_lambda=percent)[1] def lexicographic_opt(func, ordering, tol): dim = len(ordering) assert set(fn.pluck(0, ordering)) == set(range(dim)) tol /= dim # Need to compensate for multiple binsearches. rec = refine.bounding_box( domain=mdtr.unit_rec(dim), oracle=func ) # If polarity is True, set initial value at bounding.top. # O.w. use bounding.bot. base = tuple((rec.top if p else rec.bot)[i] for i, p in sorted(ordering)) res_rec = mdtr.to_rec(zip(base, base)) for idx, polarity in ordering: oracle = func rec = mdtr.to_rec( (0, 1) if i == idx else (p, p) for i, p in enumerate(base) ) result_type, res_cand = binsearch(rec, oracle, eps=tol) if result_type == SearchResultType.NON_TRIVIAL: res_rec = res_cand base = res_rec.bot return res_rec
2.671875
3
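The heart of binsearch above is ordinary bisection along the rectangle's diagonal against a monotone oracle; this standalone version drops the library's rectangle type and works directly on numpy corner vectors, with a made-up threshold oracle.

import numpy as np

def diagonal_binsearch(bot, top, oracle, eps=1e-4):
    """Bisect along the segment bot -> top for a monotone oracle.

    Mirrors the NON_TRIVIAL branch of binsearch above: oracle(bot) is assumed
    False and oracle(top) True, with a single switch-over point in between.
    """
    bot = np.asarray(bot, dtype=float)
    top = np.asarray(top, dtype=float)

    def point(t):
        return bot + t * (top - bot)

    lo, hi = 0.0, 1.0
    while hi - lo > eps:
        mid = lo + (hi - lo) / 2
        lo, hi = (lo, mid) if oracle(point(mid)) else (mid, hi)
    return point(lo), point(hi)

# Threshold oracle on the unit square: true once the coordinates sum to 1 or more.
low_pt, high_pt = diagonal_binsearch([0, 0], [1, 1], lambda p: p.sum() >= 1)
print(low_pt, high_pt)  # both approximations land near (0.5, 0.5)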
api/web/apps/auth/views.py
procool/itstructure
0
11978
from flask import url_for

from flaskcbv.view import View
from flaskcbv.conf import settings

from misc.mixins import HelperMixin
from misc.views import JSONView


class authView(JSONView):
    def helper(self):
        return """Authorization handler
Use "username" and "password" arguments by GET or POST to get a session
"""

    def get(self, *args, **kwargs):
        return self.post(*args, **kwargs)

    def post(self, *args, **kwargs):
        try:
            username = self.get_argument_smart('username')
            passwd = self.get_argument_smart('password')
        except Exception:
            self.abort_error(errno=-1, error="wrong_params", details="set arguments: 'username', 'password'")

        r = settings._BB_CLIENT.login(username, passwd)
        answ = r.as_dict
        del answ["cmd"]
        del answ["token"]
        self.abort_error(**answ)


class sessionView(JSONView):
    def helper(self):
        return """Session check handler
Use "session" argument by GET or POST to check your session
"""

    def get(self, *args, **kwargs):
        return self.post(*args, **kwargs)

    def post(self, *args, **kwargs):
        try:
            session = self.get_argument_smart('session')
        except Exception:
            self.abort_error(errno=-1, error="wrong_params", details="set argument: 'session'")

        r = settings._BB_CLIENT.session(session)
        answ = r.as_dict
        del answ["cmd"]
        del answ["token"]
        self.abort_error(**answ)
2.546875
3
tests/test_heart_forest.py
RainingComputers/pykitml
34
11979
<reponame>RainingComputers/pykitml from pykitml.testing import pktest_graph, pktest_nograph @pktest_graph def test_heart_forest(): import os.path import numpy as np import pykitml as pk from pykitml.datasets import heartdisease # Download the dataset if(not os.path.exists('heartdisease.pkl')): heartdisease.get() # Load heart data set inputs, outputs = heartdisease.load() outputs = pk.onehot(outputs) # Create model ftypes = [ 'continues', 'categorical', 'categorical', 'continues', 'continues', 'categorical', 'categorical', 'continues', 'categorical', 'continues', 'categorical', 'categorical', 'categorical' ] forest_heart_classifier = pk.RandomForest(13, 2, max_depth=8, feature_type=ftypes) # Train forest_heart_classifier.train(inputs, outputs) # Save it pk.save(forest_heart_classifier, 'forest_heart_classifier.pkl') # Print accuracy accuracy = forest_heart_classifier.accuracy(inputs, outputs) print('Accuracy:', accuracy) # Plot confusion matrix forest_heart_classifier.confusion_matrix(inputs, outputs, gnames=['False', 'True']) # Assert accuracy assert (forest_heart_classifier.accuracy(inputs, outputs)) >= 94 @pktest_nograph def test_predict_heart_forest(): import os.path import numpy as np import pykitml as pk # Predict heartdisease for a person with # age sex cp trestbps chol fbs restecg thalach exang oldpeak slope ca thal # 67, 1, 4, 160, 286, 0, 2, 108, 1, 1.5, 2, 3, 3 input_data = np.array([67, 1, 4, 160, 286, 0, 2, 108, 1, 1.5, 2, 3, 3], dtype=float) # Load the model forest_heart_classifier = pk.load('forest_heart_classifier.pkl') # Get output forest_heart_classifier.feed(input_data) model_output = forest_heart_classifier.get_output() # Print result (log of probabilities) print(model_output) if __name__ == '__main__': try: test_heart_forest.__wrapped__() test_predict_heart_forest.__wrapped__() except AssertionError: pass
2.953125
3
pdf_audit.py
marctjones/perception
0
11980
<filename>pdf_audit.py from globals import Globals import os import subprocess import datetime as dt from urllib import \ request as request # urlopen from io import \ StringIO, BytesIO import string import requests import re import csv import threading import utils as utils import time import datetime as datetime import multiprocessing from report import PDFItem from PyPDF2 import PdfFileReader from pdfminer.pdfparser import PDFParser from pdfminer.pdfdocument import PDFDocument from pdfminer.pdfpage import PDFPage from pdfminer.pdfinterp import resolve1 from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.pdfpage import PDFTextExtractionNotAllowed from pdfminer.layout import LAParams # , LTTextBox, LTTextLine from threading import Thread, Event stop_event = Event() global document class PDFAudit: def __init__(self): self.report_folder = '' self.document_folder = '' self.pdf_path = '' self.report_name = '' self.csv_header = [] self.gbl_report_folder = Globals.gbl_report_folder + self.report_folder self.log = self.gbl_report_folder + 'logs\\' self.document_t = PDFDocument self.parser = PDFParser self.url = '' self.line_count = 1 def load_pdf(self, PDFDocument, password): i = 0 while threading.currentThread().is_alive(): i += 1 report_path = self.report_folder + self.report_name print('LOADING: ' + i.__str__()) time.sleep(1) # try: self.document_t = PDFDocument(self.parser) # except Exception as e: # print('PDFDocument(self.parser) FAILED ::::: ' + e.__str__()) if stop_event.is_set(): if i >= 120: # print(self.parser.fp.name + ' FAILED (SEC): ' + i.__str__()) print(' >>> FAIL : PDF LOAD STOP EVENT : 120 SECONDS') row = [self.line_count, 'PDFDocument FAILED TO LOAD - 90 SEC TIMEOUT REACHED FOR: ' + self.url, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ] # self.line_count += 1 # 90 SECOND TIMEOUT or FAILED TO PARSER with open(report_path, 'a', encoding='utf8', newline='') as csv_file: writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) writer.dialect.lineterminator.replace('\n', '') writer.writerow(row) break def thread_monitor(self, process_name, thread): i = 0 while thread.is_alive(): time.sleep(2) i += 2 print(process_name + ' WORKING FOR ' + i.__str__() + ' seconds for: ' + thread.getName()) print('ACTIVE COUNT: ' + str(threading.active_count())) if i == 180: print(thread.getName() + ' KILLED AT 180 SECONDS') report_path = self.report_folder + self.report_name row = [self.line_count, 'PDF THREAD FAILED TO PROCESS - 180 SEC TIMEOUT REACHED FOR: ' + self.url, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ] # self.line_count += 1 # 120 SECOND TIMEOUT with open(report_path, 'a', encoding='utf8', newline='') as csv_file: writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) writer.dialect.lineterminator.replace('\n', '') writer.writerow(row) break print(process_name + ':[COMPLETED IN ' + i.__str__() + ' seconds for: ' + thread.getName() + ']') def pdf_csv(self, csv_to_audit, source_folder, scope): # Define CSV self.csv_header = (['csvline', 'url', 'filename', 'local_path', 'encrypted', 'decrypt_pass', 'istagged', 'pages', 'toc', 'form', 'fields', 'tables', 'word_count', 'char_count', 'words_per_page', 'chars_per_word', 'image_count', '%_img_per_page', 'ocr_risk', 'author', 'creator', 'producer', 'subject', 'title', 'text']) # root_path = os.path.split(source_folder)[0] self.report_folder = os.path.split(source_folder)[0].replace('SPIDER', '') # Set logs self.log = 
os.path.join(self.report_folder, 'logs') if not os.path.exists(self.log): os.makedirs(self.log) self.report_folder = os.path.join( self.report_folder, 'PDF') if not os.path.exists(self.report_folder): os.makedirs(self.report_folder) # os.chdir(self.report_folder) if csv_to_audit.find('internal') >= 0 or scope == 'internal': self.log = os.path.join(self.log, '_pdf_internal_log.txt') self.report_name = csv_to_audit[:-4] + '_a.csv' if csv_to_audit.find('external') >= 0 or scope == 'external': self.log = os.path.join(self.log, '_pdf_external_log.txt') self.report_name = csv_to_audit[:-4] + '_a.csv' self.document_folder = self.report_folder if not os.path.exists(self.document_folder): os.makedirs(self.document_folder) try: write_header = False report_path = self.report_folder + self.report_name if not os.path.exists(report_path): write_header = True os.chdir(self.report_folder) with open(report_path, 'a', encoding='utf8', newline='') as csv_file: writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) if write_header: writer.writerow(self.csv_header) except Exception as e: print('PDF I/O error: ' + e.__str__()) csv_source = os.path.join(source_folder, csv_to_audit) row_count = sum(1 for row in csv.reader(open(csv_source, 'r', encoding='utf8'), delimiter=',')) row_count_i = row_count - 2 with open(csv_source, encoding='utf8') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') # set number of threads thread_count = 1 destination_folder = self.report_name # Get URL for PDF from row[1] # FOR EACH PDF first_line = True for row in csv_reader: pdf_url = row[0] skip = False if first_line: first_line = False print(' ::: START ALL PDF :::') continue elif os.path.exists(destination_folder): with open(destination_folder, encoding='utf8') as completed_urls: completed_urls_reader = csv.reader(completed_urls, delimiter=',') jump = True fl = True skip = False for completed_url in completed_urls_reader: if fl: jump = True fl = False continue if pdf_url in completed_url[1]: msg = (' >>> Remaining PDFs: ' + row_count_i.__str__() + ' out of ' + row_count.__str__() + ' ' + (datetime.datetime.now().__str__()[:-7])) row_count_i -= 1 # self.line_count += 1 utils.logline(self.log, msg) print(msg) fl = False skip = True break # completed_urls.close() try: if skip: skip = False continue self.line_count = csv_reader.line_num self.url = pdf_url thread = Thread(target=self.pdf_thread, args=(pdf_url,)) thread.setDaemon(True) while threading.active_count() > 35: print(' !! 
TAKE 5 !!') time.sleep(5) print('RUN AUDIT FOR :: ' + pdf_url + ' ' + thread.getName()) thread.start() i = 0 thread_monitor = Thread(target=self.thread_monitor, args=('PDF', thread)) thread_monitor.setDaemon(True) thread_monitor.start() time.sleep(5) msg = (' >>> Remaining PDFs: ' + row_count_i.__str__() + ' out of ' + row_count.__str__() + ' ' + (datetime.datetime.now().__str__()[:-7])) row_count_i -= 1 utils.logline(self.log, msg) print(msg) except Exception as e: msg = e.__str__() + ' PDF:01' + '\n' print(msg) utils.logline(self.log, msg) def pdf_thread(self, url): pdf_name = '' exit_call = '' csv_row = [] # save PDF to disk try: pdf_name = BytesIO(url.split("/")[-1].encode('UTF-8')).read().__str__()[2:-1] valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) regex = re.compile(valid_chars) pdf_name = regex.sub('', pdf_name.__str__()) self.pdf_path = self.document_folder + regex.sub('', pdf_name) r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}) with open(self.pdf_path, 'wb') as code: code.write(r.content) code.close() csv_row.insert(0, [self.csv_header[0], self.line_count.__str__()]) csv_row.insert(1, [self.csv_header[1], url if url.__len__() > 0 else 'NULL']) csv_row.insert(2, [self.csv_header[2], pdf_name if pdf_name.__len__() > 0 else 'NULL']) csv_row.insert(3, [self.csv_header[3], self.pdf_path if self.pdf_path.__len__() > 0 else 'NULL']) print(' >>>> PDF START:[' + url + '] ' + self.line_count.__str__() + ' ' + ( datetime.datetime.now().__str__()[:-7])) except Exception as e: csv_row.insert(0, [self.csv_header[0], self.line_count.__str__()]) csv_row.insert(1, [self.csv_header[1], url if url.__len__() > 0 else 'NULL']) csv_row.insert(2, [self.csv_header[2], e.__str__()]) csv_row.insert(3, [self.csv_header[3], self.pdf_path if self.pdf_path.__len__() > 0 else 'NULL']) print(e) pass my_file = os.path.join(self.document_folder + pdf_name) try: fp = open(my_file, 'rb') # self.pdf(fp, csv_row) except Exception as e: print(' PDF LOAD FAILED !!! 
' + self.line_count.__str__() + ' : ' + self.pdf_path) csv_row.pop(3) csv_row.insert(3, [self.csv_header[3], 'PDF FAILED TO OPEN:' + self.pdf_path if self.pdf_path.__len__() > 0 else 'NULL']) # Write results row = [] for i in range(csv_row.__len__()): row.append(csv_row[i][1]) report_path = self.report_folder + self.report_name row_append = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''] index = 4 for ii in row_append: row.insert(index, ii) index += 1 # OPEN FAILED with open(report_path, 'a', encoding='utf8', newline='') as csv_file: writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) writer.dialect.lineterminator.replace('\n', '') writer.writerow(row) return try: self.pdf(fp, csv_row) except Exception as e: print('PDF FAIL') def pdf(self, fp, csv_row): password = '' extracted_text = '' self.parser = PDFParser(fp) self.document_t = PDFDocument pf = PdfFileReader # isEncrypted try: i = 0 try: thread = Thread(target=self.load_pdf, args=(PDFDocument, password)) thread.start() thread.join(timeout=90) except Exception as e: print('PDF I/O error: ' + e.__str__()) row = [self.line_count, 'PDF DOCUMENT OBJECT FAILED TO LOAD - ' + e.__str__() + ': ' + self.url, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ] # self.line_count += 1 report_path = self.report_folder + self.report_name # 90 SECONDS or LOAD FAIL with open(report_path, 'a', encoding='utf8', newline='') as csv_file: writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) writer.dialect.lineterminator.replace('\n', '') writer.writerow(row) stop_event.set() document = PDFDocument document = self.document_t pf = PdfFileReader(BytesIO(open(self.pdf_path, 'rb').read())) # ENCRYPTION if self.parser.doc.encryption is not None: csv_row.insert(4, [self.csv_header[4], 'ENCRYPTED']) csv_row.insert(5, [self.csv_header[5], 'ENCRYPTED']) else: csv_row.insert(4, [self.csv_header[4], 'FALSE']) csv_row.insert(5, [self.csv_header[5], 'NA']) except Exception as e: csv_row.insert(4, [self.csv_header[4], 'FAILED: ' + e.__str__()]) csv_row.insert(5, [self.csv_header[5], 'NA']) exit_call = e.__str__() + ' document failed!!' print(exit_call) pass page_count = 0 # istagged try: pages = PDFPage.get_pages(document) if not document.is_extractable: raise PDFTextExtractionNotAllowed rsrcmgr = PDFResourceManager() laparams = LAParams() page_no = 0 istagged = 'FALSE' try: # document.catalog if document.catalog['MarkInfo']: istagged = 'TRUE' except Exception as e: exit_call = e.__str__() + ' tagged info failed!!' print(exit_call) page_count = resolve1(document.catalog['Pages'])['Count'] csv_row.insert(6, [self.csv_header[6], istagged]) csv_row.insert(7, [self.csv_header[7], page_count]) except Exception as e: csv_row.insert(6, [self.csv_header[6], 'IsTagged: ' + e.__str__()]) csv_row.insert(7, [self.csv_header[7], 'Page Count: ' + e.__str__()]) exit_call = e.__str__() + ' tagged info failed!!' print(exit_call) # TOC try: if pf.outlines: csv_row.insert(8, [self.csv_header[8], 'TRUE']) '''pdf_path_toc = self.document_folder + pdf_name + '_toc.txt' places_list = pf.outlines with open(pdf_path_toc, 'w') as filehandle: filehandle.writelines("%s\n" % place for place in places_list) filehandle.close()''' else: csv_row.insert(8, [self.csv_header[8], 'FALSE']) except Exception as e: csv_row.insert(8, [self.csv_header[8], 'TOC FAILED: ' + e.__str__()]) exit_call = e.__str__() + ' toc info failed!!' 
print(exit_call) # isForm, fields, try: if pf.getFields(): csv_row.insert(9, [self.csv_header[9], 'TRUE']) csv_row.insert(10, [self.csv_header[10], pf.getFields().__len__()]) else: csv_row.insert(9, [self.csv_header[9], 'FALSE']) csv_row.insert(10, [self.csv_header[10], 0]) except Exception as e: csv_row.insert(9, [self.csv_header[9], 'FORMS: ' + e.__str__()]) csv_row.insert(10, [self.csv_header[10], 'FIELDS: ' + e.__str__()]) exit_call = e.__str__() + ' forms failed!!' print(exit_call) # tables csv_row.insert(11, [self.csv_header[11], 'NOT RUN']) write_clip = '' word_count = 0 words_per_page = 0 char_count = 0 chars_per_word = 0 image_count = 0 # TODO: write 3 page sample and word count try: if pf.getNumPages() < 50: for page in range(pf.getNumPages()): p = pf.getPage(page) text_clip = p.extractText().encode('UTF-8') text_clip = BytesIO(text_clip).read().__str__()[2:] count_clip = re.findall(r"[^\W_]+", text_clip, re.MULTILINE) word_count += len(count_clip) char_count += len(text_clip) if page <= 3: write_clip += '[ PAGE ' + (page + 1).__str__() + ' START ] ' write_clip += text_clip.replace('\n', '').replace(',', ' ').replace('"', '') write_clip += '[ PAGE ' + (page + 1).__str__() + ' END ]' else: write_clip = 'OVER 50 PAGES - SAMPLE SKIPPED' except Exception as e: exit_call = e.__str__() + ' :: TEXT sample failed!!' write_clip = exit_call word_count = exit_call char_count = exit_call print(exit_call) # TODO: Words/chars per page try: if not word_count == 0: chars_per_word = char_count / word_count else: chars_per_word = 0 if not page_count == 0: words_per_page = word_count / page_count else: words_per_page = 0 except Exception as e: exit_call = e.__str__() + ' :: WORD METRICS failed!!' chars_per_word = exit_call words_per_page = exit_call print(exit_call) # TODO: Add to row i = 12 try: csv_row.insert(i, [self.csv_header[i], word_count.__str__()]) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'WORD_COUNT: ' + e.__str__()]) i = 13 try: csv_row.insert(i, [self.csv_header[i], char_count.__str__()]) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'CHAR_COUNT: ' + e.__str__()]) i = 14 try: csv_row.insert(i, [self.csv_header[i], words_per_page.__str__()]) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'WPP: ' + e.__str__()]) i = 15 try: csv_row.insert(i, [self.csv_header[i], chars_per_word.__str__()]) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'CPP: ' + e.__str__()]) # TODO: IMAGES i = 16 '''try: pdfImages = Globals.base_folder + 'cli-tools\\pdfimages.exe' img_folder = self.document_folder + 'images\\' # + pdf_name[:-4] + '\\' if not os.path.exists(img_folder): os.makedirs(img_folder) # cmd = pdfImages + ' -list ' + '\"' + pdf_path + '\"' # output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].split(b'\n') # save images to disk cmd = pdfImages + ' -list \"' + self.pdf_path + '\" \"' + ' ' + '\"' # subprocess.Popen(cmd, stdout=subprocess.PIPE) os.chdir(img_folder) image_list = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].split(b'\r\n') # os.remove(img_folder) # image_count = output.count('\n') image_count = image_list.__len__() if image_count > 2: # target = open(pdf_path_image, 'w') # target.write(image_list) # target.close() csv_row.insert(i, [self.csv_header[i], (image_count - 2).__str__()]) elif image_count == 0: csv_row.insert(i, [self.csv_header[i], 0]) else: csv_row.insert(i, [self.csv_header[i], 0]) except Exception as e: csv_row.insert(i, [self.csv_header[i], e.__str__() + ' image info 
failed!!']) exit_call = e.__str__() + ' image info failed!!' print(exit_call)''' # TODO: IMAGES per page i = 17 percent_img_per_page = float try: if not image_count == 0 or page_count == 0: percent_img_per_page = (float(image_count) / float(page_count)) * 100 else: percent_img_per_page = 0 csv_row.insert(i, [self.csv_header[i], percent_img_per_page]) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'IMG: ' + e.__str__()]) # TODO: OCR risk i = 18 try: if words_per_page == 0 or percent_img_per_page > 3000: ocr_risk = 5 elif words_per_page < 15 or percent_img_per_page > 2000: ocr_risk = 4 elif words_per_page < 40 or percent_img_per_page > 1000: ocr_risk = 3 elif words_per_page < 70 or percent_img_per_page > 425: ocr_risk = 2 elif words_per_page < 80 or percent_img_per_page > 200: ocr_risk = 1 else: ocr_risk = 0 csv_row.insert(i, [self.csv_header[i], ocr_risk]) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'OCR: ' + e.__str__()]) # author, creator, producer, subject, title, di = pf try: di = pf.documentInfo except Exception as e: exit_call = e.__str__() + ' :: DOCUMENT INFO LOAD failed!!' print(exit_call) # Document info if di: # Author try: i = 19 if di.author: csv_row.insert(i, [self.csv_header[i], di.author.encode('UTF-8')]) else: csv_row.insert(i, [self.csv_header[i], 'NULL']) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'AUTHOR: ' + e.__str__()]) exit_call = e.__str__() + ' doc info failed!!' print(exit_call) # Creator try: i = 20 if di.creator: csv_row.insert(i, [self.csv_header[i], di.creator.encode('UTF-8')]) else: csv_row.insert(i, [self.csv_header[i], 'NULL']) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'CREATOR: ' + e.__str__()]) print(exit_call) print('#5.1') # Producer try: i = 21 if di.producer: csv_row.insert(i, [self.csv_header[i], di.producer.encode('UTF-8')]) else: csv_row.insert(i, [self.csv_header[i], 'NULL']) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'PRODUCER: ' + e.__str__()]) print(exit_call) # Subject try: i = 22 if di.subject: csv_row.insert(i, [self.csv_header[i], di.subject.encode('UTF-8')]) else: csv_row.insert(i, [self.csv_header[i], 'NULL']) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'SUBJECT: ' + e.__str__()]) print(exit_call) # Title try: i = 23 if di.title: csv_row.insert(i, [self.csv_header[i], di.title.encode('UTF-8')]) else: csv_row.insert(i, [self.csv_header[i], 'NULL']) except Exception as e: csv_row.insert(i, [self.csv_header[i], 'TITLE: ' + e.__str__()]) print(exit_call) # Document clip i = 24 try: csv_row.insert(i, [self.csv_header[i], write_clip]) except Exception as e: csv_row.insert(i, [self.csv_header[i], e.__str__()]) # Write results row = [] for i in range(csv_row.__len__()): row.append(csv_row[i][1]) report_path = self.report_folder + self.report_name # COPLETE WRITE with open(report_path, 'a', encoding='utf8', newline='') as csv_file: writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) writer.dialect.lineterminator.replace('\n', '') writer.writerow(row) # csv_file.close() fp.close() os.remove(self.pdf_path) # Log close msg = (' >>>> PDF complete:[' + self.url + '] ' + self.line_count.__str__() + ' ' + (datetime.datetime.now().__str__()[:-7])) print(msg) utils.logline(self.log, msg)
2.515625
3
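The OCR-risk heuristic buried in the middle of pdf() above is easier to read pulled out on its own; the thresholds are copied from that method, the sample inputs are invented.

def ocr_risk(words_per_page, percent_img_per_page):
    """Score 0 (text-rich) to 5 (likely a pure image scan), using the same
    thresholds as the pdf() method above."""
    if words_per_page == 0 or percent_img_per_page > 3000:
        return 5
    if words_per_page < 15 or percent_img_per_page > 2000:
        return 4
    if words_per_page < 40 or percent_img_per_page > 1000:
        return 3
    if words_per_page < 70 or percent_img_per_page > 425:
        return 2
    if words_per_page < 80 or percent_img_per_page > 200:
        return 1
    return 0

print(ocr_risk(words_per_page=5, percent_img_per_page=150))   # 4
print(ocr_risk(words_per_page=300, percent_img_per_page=20))  # 0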
morepath/tests/test_method_directive.py
DuncanBetts/morepath
0
11981
import morepath from webtest import TestApp as Client def test_implicit_function(): class app(morepath.App): @morepath.dispatch_method() def one(self): return "Default one" @morepath.dispatch_method() def two(self): return "Default two" @app.path(path='') class Model(object): def __init__(self): pass @app.method(app.one) def one_impl(self): return self.two() @app.method(app.two) def two_impl(self): return "The real two" @app.view(model=Model) def default(self, request): return request.app.one() c = Client(app()) response = c.get('/') assert response.body == b'The real two' def test_implicit_function_mounted(): class base(morepath.App): @morepath.dispatch_method() def one(self): return "Default one" @morepath.dispatch_method() def two(self): return "Default two" class alpha(base): pass class beta(base): def __init__(self, id): self.id = id @alpha.mount(path='mounted/{id}', app=beta) def mount_beta(id): return beta(id=id) class AlphaRoot(object): pass class Root(object): def __init__(self, id): self.id = id @alpha.path(path='/', model=AlphaRoot) def get_alpha_root(): return AlphaRoot() @beta.path(path='/', model=Root) def get_root(app): return Root(app.id) @beta.method(base.one) def one_impl(self): return self.two() @beta.method(base.two) def two_impl(self): return "The real two" @alpha.view(model=AlphaRoot) def alpha_default(self, request): return request.app.one() @beta.view(model=Root) def default(self, request): return "View for %s, message: %s" % (self.id, request.app.one()) c = Client(alpha()) response = c.get('/mounted/1') assert response.body == b'View for 1, message: The real two' response = c.get('/') assert response.body == b'Default one'
2.46875
2
edb/edgeql/tracer.py
hyperdrivetech/edgedb
0
11982
# # This source file is part of the EdgeDB open source project. # # Copyright 2015-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import annotations import functools import typing from edb.schema import name as sn from edb.schema import objects as so from edb.edgeql import ast as qlast class Type: def __init__(self, name): self.name = name def get_name(self, schema): return self.name class ObjectType(Type): def __init__(self, name): super().__init__(name) self.pointers = {} def is_pointer(self): return False def getptr(self, schema, name): return self.pointers.get(name) class UnionType: def __init__(self, types): self.types = types class Pointer: def __init__(self, name, *, source=None, target=None): self.name = name self.source = source self.target = target self.pointers = {} def is_pointer(self): return True def getptr(self, schema, name): return self.pointers.get(name) def get_target(self, schema): return self.target def get_name(self, schema): return self.name def trace_refs( qltree: qlast.Base, *, schema, source: typing.Optional[sn.Name] = None, subject: typing.Optional[sn.Name] = None, path_prefix: typing.Optional[sn.Name] = None, module: typing.Optional[str] = None, objects: typing.Dict[str, object], ) -> typing.FrozenSet[sn.Name]: """Return a list of schema item names used in an expression.""" ctx = TracerContext(schema, module, objects, source, subject, path_prefix) trace(qltree, ctx=ctx) return frozenset(ctx.refs) class TracerContext: def __init__(self, schema, module, objects, source, subject, path_prefix): self.schema = schema self.refs = set() self.module = module self.objects = objects self.source = source self.subject = subject self.path_prefix = path_prefix def get_ref_name(self, ref: qlast.ObjectRef) -> sn.Name: if ref.module: return sn.Name(module=ref.module, name=ref.name) elif f'{self.module}::{ref.name}' in self.objects: return sn.Name(module=self.module, name=ref.name) else: return sn.Name(module="std", name=ref.name) @functools.singledispatch def trace(node: qlast.Base, *, ctx: TracerContext) -> typing.Optional[so.Object]: raise NotImplementedError(f"do not know how to trace {node!r}") @trace.register def trace_none(node: type(None), *, ctx: TracerContext) -> None: pass @trace.register def trace_Constant(node: qlast.BaseConstant, *, ctx: TracerContext) -> None: pass @trace.register def trace_Array(node: qlast.Array, *, ctx: TracerContext) -> None: for el in node.elements: trace(el, ctx=ctx) @trace.register def trace_Set(node: qlast.Set, *, ctx: TracerContext) -> None: for el in node.elements: trace(el, ctx=ctx) @trace.register def trace_Tuple(node: qlast.Tuple, *, ctx: TracerContext) -> None: for el in node.elements: trace(el, ctx=ctx) @trace.register def trace_NamedTuple(node: qlast.NamedTuple, *, ctx: TracerContext) -> None: for el in node.elements: trace(el.val, ctx=ctx) @trace.register def trace_BinOp(node: qlast.BinOp, *, ctx: TracerContext) -> None: trace(node.left, ctx=ctx) trace(node.right, ctx=ctx) 
@trace.register def trace_UnaryOp(node: qlast.UnaryOp, *, ctx: TracerContext) -> None: trace(node.operand, ctx=ctx) @trace.register def trace_Detached(node: qlast.DetachedExpr, *, ctx: TracerContext) -> None: trace(node.expr, ctx=ctx) @trace.register def trace_TypeCast(node: qlast.TypeCast, *, ctx: TracerContext) -> None: trace(node.expr, ctx=ctx) if not node.type.subtypes: ctx.refs.add(ctx.get_ref_name(node.type.maintype)) @trace.register def trace_IsOp(node: qlast.IsOp, *, ctx: TracerContext) -> None: trace(node.left, ctx=ctx) if not node.right.subtypes: ctx.refs.add(ctx.get_ref_name(node.right.maintype)) @trace.register def trace_Introspect(node: qlast.Introspect, *, ctx: TracerContext) -> None: if not node.type.subtypes: ctx.refs.add(ctx.get_ref_name(node.type.maintype)) @trace.register def trace_FunctionCall(node: qlast.FunctionCall, *, ctx: TracerContext) -> None: for arg in node.args: trace(arg, ctx=ctx) for arg in node.kwargs.values(): trace(arg, ctx=ctx) @trace.register def trace_Indirection(node: qlast.Indirection, *, ctx: TracerContext) -> None: for indirection in node.indirection: trace(indirection, ctx=ctx) trace(node.arg, ctx=ctx) @trace.register def trace_Index(node: qlast.Index, *, ctx: TracerContext) -> None: trace(node.index, ctx=ctx) @trace.register def trace_Slice(node: qlast.Slice, *, ctx: TracerContext) -> None: trace(node.start, ctx=ctx) trace(node.stop, ctx=ctx) @trace.register def trace_Path(node: qlast.Path, *, ctx: TracerContext) -> typing.Optional[so.Object]: tip = None ptr = None plen = len(node.steps) for i, step in enumerate(node.steps): if isinstance(step, qlast.ObjectRef): refname = ctx.get_ref_name(step) if refname in ctx.objects: ctx.refs.add(refname) tip = ctx.objects[refname] else: tip = ctx.schema.get(refname) elif isinstance(step, qlast.Ptr): if i == 0: # Abbreviated path. if ctx.path_prefix in ctx.objects: tip = ctx.objects[ctx.path_prefix] else: # We can't reason about this path. return if step.type == 'property': lprop = ptr.getptr(ctx.schema, step.ptr.name) if lprop is None: # Invalid link property reference, bail. return if isinstance(lprop, Pointer): ctx.refs.add(f'{lprop.source}@{step.ptr.name}') else: if step.direction == '<': if plen > i + 1 and isinstance(node.steps[i + 1], qlast.TypeIndirection): # A reverse link traversal with a type filter, # process it on the next step. pass else: # otherwise we cannot say anything about the target, # so bail. return else: if tip is None: # We can't reason about this path. return ptr = tip.getptr(ctx.schema, step.ptr.name) if ptr is None: # Invalid pointer reference, bail. return if ptr.source == tip: tip_name = tip.get_name(ctx.schema) ctx.refs.add(f'{tip_name}@{step.<EMAIL>}') tip = ptr.get_target(ctx.schema) elif isinstance(step, qlast.TypeIndirection): tip = _resolve_type_expr(step.type, ctx=ctx) prev_step = node.steps[i - 1] if prev_step.direction == '<': ptr = tip.getptr(ctx.schema, prev_step.ptr.name) if ptr is None: # Invalid pointer reference, bail. return if isinstance(tip, Type): tip_name = tip.get_name(ctx.schema) ctx.refs.add(f'{tip_name}@{prev_step.<EMAIL>}') tip = ptr.get_target(ctx.schema) else: tr = trace(step, ctx=ctx) if tr is not None: tip = tr if isinstance(tip, Pointer): ptr = tip return tip @trace.register def trace_Source(node: qlast.Source, *, ctx: TracerContext) -> so.Object: return ctx.objects[ctx.source] @trace.register def trace_Subject(node: qlast.Subject, *, ctx: TracerContext) -> typing.Optional[so.Object]: # Apparently for some paths (of length 1) ctx.subject may be None. 
if ctx.subject is not None: return ctx.objects[ctx.subject] def _resolve_type_expr( texpr: qlast.TypeExpr, *, ctx: TracerContext ) -> typing.Union[so.Object, UnionType]: if isinstance(texpr, qlast.TypeName): if texpr.subtypes: return Type(name=texpr.maintype.name) else: refname = ctx.get_ref_name(texpr.maintype) obj = ctx.objects.get(refname) if obj is None: obj = ctx.schema.get(refname) else: ctx.refs.add(refname) return obj elif isinstance(texpr, qlast.TypeOp): if texpr.op == '|': return UnionType([ _resolve_type_expr(texpr.left, ctx=ctx), _resolve_type_expr(texpr.right, ctx=ctx), ]) else: raise NotImplementedError( f'unsupported type operation: {texpr.op}') else: raise NotImplementedError( f'unsupported type expression: {texpr!r}' ) @trace.register def trace_TypeIndirection(node: qlast.TypeIndirection, *, ctx: TracerContext) -> None: trace(node.type, ctx=ctx) @trace.register def trace_TypeOf(node: qlast.TypeOf, *, ctx: TracerContext) -> None: trace(node.expr, ctx=ctx) @trace.register def trace_TypeName(node: qlast.TypeName, *, ctx: TracerContext) -> None: if node.subtypes: for st in node.subtypes: trace(st, ctx=ctx) else: fq_name = node.maintype.name if node.maintype.module: fq_name = f'{node.maintype.module}::{fq_name}' ctx.refs.add(fq_name) @trace.register def trace_TypeOp(node: qlast.TypeOp, *, ctx: TracerContext) -> None: trace(node.left, ctx=ctx) trace(node.right, ctx=ctx) @trace.register def trace_IfElse(node: qlast.IfElse, *, ctx: TracerContext) -> None: trace(node.if_expr, ctx=ctx) trace(node.else_expr, ctx=ctx) trace(node.condition, ctx=ctx) @trace.register def trace_Shape(node: qlast.Shape, *, ctx: TracerContext) -> None: if isinstance(node.expr, qlast.Path): tip = trace(node.expr, ctx=ctx) orig_prefix = ctx.path_prefix if tip is not None: ctx.path_prefix = tip.get_name(ctx.schema) else: ctx.path_prefix = None else: trace(node.expr, ctx=ctx) for element in node.elements: trace(element, ctx=ctx) if isinstance(node.expr, qlast.Path): ctx.path_prefix = orig_prefix @trace.register def trace_ShapeElement(node: qlast.ShapeElement, *, ctx: TracerContext) -> None: trace(node.expr, ctx=ctx) for element in node.elements: trace(element, ctx=ctx) trace(node.where, ctx=ctx) for element in node.orderby: trace(element, ctx=ctx) trace(node.offset, ctx=ctx) trace(node.limit, ctx=ctx) trace(node.compexpr, ctx=ctx) @trace.register def trace_Select(node: qlast.SelectQuery, *, ctx: TracerContext) -> None: for alias in node.aliases: if isinstance(alias, qlast.AliasedExpr): trace(alias.expr, ctx=ctx) trace(node.result, ctx=ctx) if node.where is not None: trace(node.where, ctx=ctx) if node.orderby: for expr in node.orderby: trace(expr, ctx=ctx) if node.offset is not None: trace(node.offset, ctx=ctx) if node.limit is not None: trace(node.limit, ctx=ctx) @trace.register def trace_SortExpr(node: qlast.SortExpr, *, ctx: TracerContext) -> None: trace(node.path, ctx=ctx) @trace.register def trace_InsertQuery(node: qlast.InsertQuery, *, ctx: TracerContext) -> None: for alias in node.aliases: if isinstance(alias, qlast.AliasedExpr): trace(alias.expr, ctx=ctx) trace(node.subject, ctx=ctx) for element in node.shape: trace(element, ctx=ctx) @trace.register def trace_UpdateQuery(node: qlast.UpdateQuery, *, ctx: TracerContext) -> None: for alias in node.aliases: if isinstance(alias, qlast.AliasedExpr): trace(alias.expr, ctx=ctx) trace(node.subject, ctx=ctx) for element in node.shape: trace(element, ctx=ctx) trace(node.where, ctx=ctx) @trace.register def trace_DeleteQuery(node: qlast.DeleteQuery, *, ctx: 
TracerContext) -> None: for alias in node.aliases: if isinstance(alias, qlast.AliasedExpr): trace(alias.expr, ctx=ctx) trace(node.subject, ctx=ctx) if node.where is not None: trace(node.where, ctx=ctx) if node.orderby: for expr in node.orderby: trace(expr, ctx=ctx) if node.offset is not None: trace(node.offset, ctx=ctx) if node.limit is not None: trace(node.limit, ctx=ctx) @trace.register def trace_DescribeStmt( node: qlast.DescribeStmt, *, ctx: TracerContext, ) -> None: if node.object: fq_name = node.object.name if node.object.module: fq_name = f'{node.object.module}::{fq_name}' ctx.refs.add(fq_name)
1.867188
2
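The tracer in the record above dispatches on AST node classes via functools.singledispatch. A minimal, self-contained sketch of that dispatch pattern is given below; the node classes here are hypothetical stand-ins, not EdgeDB's real qlast types.

import functools

class Node: pass

class Literal(Node):
    def __init__(self, value):
        self.value = value

class BinOp(Node):
    def __init__(self, left, right):
        self.left, self.right = left, right

@functools.singledispatch
def trace(node, refs):
    # fallback, mirrors the NotImplementedError default in the record
    raise NotImplementedError(f"no tracer for {node!r}")

@trace.register
def _(node: Literal, refs):
    refs.add(node.value)

@trace.register
def _(node: BinOp, refs):
    trace(node.left, refs)
    trace(node.right, refs)

refs = set()
trace(BinOp(Literal("User"), Literal("Group")), refs)
print(refs)  # {'User', 'Group'} (set order may vary)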
libraries/website/docs/snippets/envs/tree_to_list.py
justindujardin/mathy
95
11983
<reponame>justindujardin/mathy<gh_stars>10-100 from typing import List from mathy_core import ExpressionParser, MathExpression parser = ExpressionParser() expression: MathExpression = parser.parse("4 + 2x") nodes: List[MathExpression] = expression.to_list() # len([4,+,2,*,x]) assert len(nodes) == 5
2.75
3
math/0x04-convolutions_and_pooling/test/2-main.py
cbarros7/holbertonschool-machine_learning
1
11984
#!/usr/bin/env python3 import matplotlib.pyplot as plt import numpy as np convolve_grayscale_padding = __import__( '2-convolve_grayscale_padding').convolve_grayscale_padding if __name__ == '__main__': dataset = np.load('../../supervised_learning/data/MNIST.npz') images = dataset['X_train'] print(images.shape) kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]]) images_conv = convolve_grayscale_padding(images, kernel, (2, 4)) print(images_conv.shape) plt.imshow(images[0], cmap='gray') plt.show() plt.imshow(images_conv[0], cmap='gray') plt.show()
3.359375
3
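The test above exercises a padded "valid" convolution, whose output shape is h + 2*ph - kh + 1 by w + 2*pw - kw + 1. A rough NumPy sketch of such a function, inferred only from the call signature in the test (not the graded implementation), could look like this.

import numpy as np

def convolve_grayscale_padding(images, kernel, padding):
    # images: (m, h, w), kernel: (kh, kw), padding: (ph, pw) -- shapes inferred from the test
    m, h, w = images.shape
    kh, kw = kernel.shape
    ph, pw = padding
    padded = np.pad(images, ((0, 0), (ph, ph), (pw, pw)), mode='constant')
    out_h, out_w = h + 2 * ph - kh + 1, w + 2 * pw - kw + 1
    out = np.zeros((m, out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            window = padded[:, i:i + kh, j:j + kw]
            out[:, i, j] = np.sum(window * kernel, axis=(1, 2))
    return out

For 28x28 MNIST images, a 3x3 kernel and padding (2, 4), this yields (m, 30, 34), matching what the test prints.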
code.py
surojitnath/olympic-hero
0
11985
# -------------- #Importing header files import pandas as pd import numpy as np import matplotlib.pyplot as plt #Path of the file data=pd.read_csv(path) data.rename(columns={'Total':'Total_Medals'},inplace =True) data.head(10) #Code starts here # -------------- try: data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer', 'Winter') data['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event']) #print(data['Better_Event']) Total_Count=data['Better_Event'].value_counts() if(Total_Count[0]>Total_Count[1]): better_event='Summer' print(better_event) print(data) else: better_event='Winter' print(better_event) except: print("code Failed") else: print("code passed Successfully") # -------------- #Code starts here top_countries= data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']] top_countries=top_countries[:-1] #print(top_countries) def top_ten(Col): country_list= list((data.nlargest(11,Col)['Country_Name'])) country_list=country_list[1:] print(country_list) return country_list top_10_summer=top_ten('Total_Summer') top_10_winter =top_ten('Total_Winter') top_10 =top_ten('Total_Medals') common=list(set(top_10_summer) & set(top_10_winter) & set(top_10)) print("common",common) # -------------- #Code starts here summer_df =data[data['Country_Name'].isin(top_10_summer)] winter_df =data[data['Country_Name'].isin(top_10_winter)] top_df =data[data['Country_Name'].isin(top_10)] # -------------- #Code starts here summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer'] summer_max_ratio=max(summer_df['Golden_Ratio']) summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name'] winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter'] winter_max_ratio=max(winter_df['Golden_Ratio']) winter_country_gold=summer_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name'] top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals'] top_max_ratio=max(top_df['Golden_Ratio']) top_country_gold=summer_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name'] # -------------- #Code starts here data_1=data[:-1] data_1['Total_Points']=pd.Series(data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total']) print(data_1['Total_Points']) most_points = max(data_1['Total_Points']) print(most_points) best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name'] print(most_points) print(best_country) # -------------- #Code starts here best = pd.DataFrame(data[data['Country_Name']==best_country]) best=best[['Gold_Total','Silver_Total','Bronze_Total']] best.plot.bar() plt.xlabel('United States') plt.ylabel('Medals Tally') # Rotate X-axes labels plt.xticks(rotation=45)
3.453125
3
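The chained np.where used above to derive Better_Event is easier to see on a toy frame; a small illustrative sketch with made-up medal counts (column names kept from the record):

import numpy as np
import pandas as pd

df = pd.DataFrame({'Country_Name': ['A', 'B', 'C'],
                   'Total_Summer': [10, 4, 7],
                   'Total_Winter': [3, 4, 9]})
df['Better_Event'] = np.where(df['Total_Summer'] > df['Total_Winter'], 'Summer', 'Winter')
df['Better_Event'] = np.where(df['Total_Summer'] == df['Total_Winter'], 'Both', df['Better_Event'])
print(df['Better_Event'].tolist())  # ['Summer', 'Both', 'Winter']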
planes/kissSlope/kissSlopeWing2.py
alexpGH/blenderCadCamTools
3
11986
<gh_stars>1-10 import bpy import math import numpy as np #=== add scripts dir to path import sys import os #=== define path of scripts dir libDir=bpy.path.abspath("//../../scripts/") # version1: relative to current file #libDir="/where/you/placed/blenderCadCam/scripts/" #version 2: usa an absolute path if not libDir in sys.path: sys.path.append(libDir) #=== add local dir to path dir = os.path.dirname(bpy.data.filepath) if not dir in sys.path: sys.path.append(dir) #print(sys.path) #=== blender imports only once even if the file change. if we edit outsde, we need to force a reload from importlib import reload #=== import scripts modules import wingLib reload(wingLib) #=================================================================================================== #=== #=================================================================================================== if 0: import ipdb ipdb.set_trace() ipdb.set_trace(context=5) if 1: #=== delete all but camera and lamp to start from a clean scene collection wingLib.deleteAllButNames(['outl','outl2','myWing1','myWing2']) #=================================================================================================== #=== basic geometry definition #=================================================================================================== foilwidth=1.6 chAdditive=0.06 #we add this additive as constant to the chordlength to generate an (towrds tip) increasing over-elliptic ch chordlength=0.17 nSec=41*2 halfSpan=foilwidth/2.0 if 1: #============================================================= #=== prepare profiles #============================================================= f=libDir+'/AG25_resampled.dat' cAG25, leAG25=wingLib.foilImport(f,'auto') f=libDir+'/AG26_resampled.dat' cAG26, leAG26=wingLib.foilImport(f,'auto') f=libDir+'/AG14_resampled.dat' cAG14, leAG14=wingLib.foilImport(f,'auto') #f=libDir+'/AG27_resampled.dat' #cAG27, leAG27=wingLib.foilImport(f,'auto') #=== downsampling of the root profile - we don't nee a too fine resolution for the CAM model nPoints=100 cAG25r, leAG25r=wingLib.foildDataReduceToNpoints(cAG25,nPoints, True) #True: save trailing edge (kep 1st and last point) pAG25r=wingLib.curveBezierFromPoints(cAG25r,'PAG25r',True,True) #=== get & interpolate the outer profile on the root (necessary for morphing) pAG26=wingLib.curveBezierFromPoints(cAG26,'PAG26',True,True) pAG14=wingLib.curveBezierFromPoints(cAG14,'PAG14',True,True) #pAG27=wingLib.curveBezierFromPoints(cAG27,'PAG27',True,True) cAG14r=wingLib.interpolateBezier2on1(pAG25r, pAG14, leAG25r, leAG14, 40) cAG26r=wingLib.interpolateBezier2on1(pAG25r, pAG26, leAG25r, leAG26, 40) #cAG27_=wingLib.interpolateBezier2on1(pAG25, pAG27, leAG25, leAG27, 40) #=== plot for check: if 0: pAG25=wingLib.curveBezierFromPoints(cAG25,'PAG25',True,True) pAG14r=wingLib.curveBezierFromPoints(cAG14_,'PG14r',True,True) pAG26r=wingLib.curveBezierFromPoints(cAG26_,'ProfileAG26r',True,True) #=== clean-up if 1: wingLib.deleteByName('PAG25r') wingLib.deleteByName('PAG14') wingLib.deleteByName('PAG26') # compile the coord dict for easy access cDict={ "AG25": cAG25r, "AG26": cAG26r, "AG14": cAG14r, #"AG27": cAG27_, } #============================================================= #=== prepare base sections settings #============================================================= baseSectionsL=[] baseSectionsL.append({"p":'AG25', "s":0.00*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'}) baseSectionsL.append({"p":'AG25', "s":0.05*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'}) 
baseSectionsL.append({"p":'AG26', "s":0.40*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'}) baseSectionsL.append({"p":'AG14', "s":0.95*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'}) baseSectionsL.append({"p":'AG14', "s":1.00*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'}) #============================================================= #=== chordlength distribution #============================================================= #=== define section-wise ch extension dChL=[] dChL.append({"s": 0.00*halfSpan, "dy": chAdditive}) dChL.append({"s": 0.40*halfSpan, "dy": chAdditive}) dChL.append({"s": 0.95*halfSpan, "dy": chAdditive}) dChL.append({"s": 1.00*halfSpan, "dy": chAdditive}) #=== ellipse parameters a=halfSpan b=(chordlength-chAdditive)/2.0 #=== get/init the wing Data object # for morphed profiles, le is the same wingData=wingLib.WingFromSections(cDict, leAG25r, baseSectionsL, halfSpan, a, b, dChL) if 1: #=== get data for indivudual CAM sections # get basic ellipse arc points in 1st and 2nd quadrant (the unshifted leading edge) and chordlength x,y=wingLib.ellipseParamV(a,b,nSec) ch=np.multiply(y,2.0)# #==adapted chordlength ch=wingLib.chordExtensionLinear(ch, x, dChL) #shellthickness #thickness=1.0 #=== set 2d profile to be used (gives us a function reference used later) func4coords=wingData.coords quality='none' #plot Re(span) if 0: v=8.0# determined from stall velocity, see e.g. https://alexpgh.github.io/foss-toolchain-mpcnc/blenderKissSlope/#wing-loading-and-re v2=9.7 #v3=15.0 #v4=30.0 #v5=45.0 nu=1.52E-05 outFile=bpy.path.abspath("//Fig_ReSpan_fast.png") Re=[] Re.append(np.multiply(ch,v/nu)) Re.append(np.multiply(ch,v2/nu)) #Re.append(np.multiply(ch,v3/nu)) #Re.append(np.multiply(ch,v4/nu)) #Re.append(np.multiply(ch,v5/nu)) numpy_array = np.array(Re) transpose = numpy_array.T #legend=[str(v)+' m/s', str(v2), str(v3),str(v4),str(v5)] legend=[] #n=int(len(Re)/2)+1 n=int(transpose.shape[0]/2)+1 #import ipdb #ipdb.set_trace() #ipdb.set_trace(context=5) #wingLib.plotArray(x[0:n],Re[0:n],'Re(span)',outFile) #wingLib.plotArray(x,Re,'Re(span)',outFile) wingLib.plotArray(x[0:n],transpose[0:n,:],'Re(span)', legend, outFile) import ipdb ipdb.set_trace() ipdb.set_trace(context=5) #=== leading edge shift definition LeShiftL=[] LeShiftL.append(wingLib.LeShift('elliptic',0.04, 0.5, 1.0,foilwidth/2.0)) ysh=wingLib.applyLeShifts(x,y, LeShiftL) #placeSections(x,ysh,ch) sectionNames=wingLib.placeSectionsMinLimited(x,ysh,ch,0.001,func4coords,quality) if 1: wingLib.bridgeListOfEdgeLoopsCloseOuterWithFace(sectionNames,'myWing') #shift to origin bpy.context.object.location[1] = -chordlength/2.0 bpy.context.object.location[2] = 0.0
1.914063
2
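The chord distribution in the wing script is an ellipse plus a constant additive. The sketch below is a simplified stand-in for wingLib.ellipseParamV and chordExtensionLinear, sampling spanwise stations linearly rather than by parameter angle; the numeric values are taken from the script itself.

import numpy as np

halfspan, chordlength, additive, n_sec = 0.8, 0.17, 0.06, 82   # values from the record
a, b = halfspan, (chordlength - additive) / 2.0                 # ellipse semi-axes
x = np.linspace(0.0, a, n_sec)                                  # spanwise stations (simplified sampling)
y = b * np.sqrt(np.clip(1.0 - (x / a) ** 2, 0.0, None))         # quarter-ellipse leading-edge ordinate
ch = 2.0 * y + additive                                         # over-elliptic chord per section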
saleor/order/migrations/0072_django_price_2.py
elwoodxblues/saleor
19
11987
# Generated by Django 2.2.4 on 2019-08-14 09:13 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("order", "0071_order_gift_cards")] operations = [ migrations.RenameField( model_name="order", old_name="shipping_price_gross", new_name="shipping_price_gross_amount", ), migrations.RenameField( model_name="order", old_name="shipping_price_net", new_name="shipping_price_net_amount", ), migrations.RenameField( model_name="order", old_name="total_gross", new_name="total_gross_amount" ), migrations.RenameField( model_name="order", old_name="total_net", new_name="total_net_amount" ), migrations.RenameField( model_name="orderline", old_name="unit_price_gross", new_name="unit_price_gross_amount", ), migrations.RenameField( model_name="orderline", old_name="unit_price_net", new_name="unit_price_net_amount", ), migrations.AddField( model_name="order", name="currency", field=models.CharField( default=settings.DEFAULT_CURRENCY, max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH, ), ), migrations.AddField( model_name="orderline", name="currency", field=models.CharField( default=settings.DEFAULT_CURRENCY, max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH, ), ), ]
1.765625
2
miping/training/features.py
mclgoerg/MiningPersonalityInGerman
1
11988
<filename>miping/training/features.py import numpy as np from sklearn.preprocessing import FunctionTransformer from sklearn.pipeline import Pipeline from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import StandardScaler from ..models.profile import Profile from ..interfaces.helper import Helper from ..interfaces.glove import GloVe from .noGloveValueError import NoGloveValueError class Features: """ Contains all pipeline functions for both LIWC and glove. """ def __init__( self, ): return def featureLIWC( self, profileCol, ): """ Extract LIWC features (namely LIWC categories) from each profile in list as feature. Parameters ---------- profileCol : list, default=None, required List with profiles to generate features for. Returns ------- np.array(outputList) : numpy.array Generated features in numpy format. """ # will contain the LIWC measures for each profile outputList = [] # loop over profileCollection for profile in profileCol: # create row liwc_data = [] # get names of liwc categories for attrName in Profile.liwc_category_list: # get value of current category attr = getattr(profile, attrName) # append to current profile # and convert to float liwc_data.append(np.float(attr)) outputList.append(liwc_data) # create numpy array, as scikit needs this format return np.array(outputList) def createLIWCFeaturePipeline( self, ): """ Create pipeline that can be passed into multiple training procceses this is just a blueprint for calculating the features no features are calculated yet! Returns ------- featurePipeline : Pipeline Pipeline containing feature generation and scaling. """ # Create skicit-learn compatible FunctionTransformers # for usage with other sklearn functions # featureLIWC is the name of the function to be called to # extract features liwc_Trans = FunctionTransformer(self.featureLIWC, validate=False) # Combine feature(s) with FeatureUnion featureTransformer = FeatureUnion([ ('liwc', liwc_Trans), ], n_jobs=-1) # parallelize via multiprocess # combine into a pipeline including scaling featurePipeline = Pipeline([ ('features', featureTransformer), ("stdScaler", StandardScaler()) ]) return featurePipeline def _condenseGloVeVectors( self, vectorList, ): """ For each user a vectorList is passed in with different length. This will be condensed into a single 900 dim vector. """ # convert to np array for mean,max,min functions vectorList = np.array(vectorList) # correct structure from (1,x,300) to (x,300) vectorList = vectorList[0] # for each dimension identify mean,max,min # and save in separate vector meanVector = vectorList.mean(axis=0) maxVector = np.amax(a=vectorList, axis=0) minVector = np.amin(a=vectorList, axis=0) # combine all 300 dim vectors in 900 dim vector returnVector = [] returnVector.extend(meanVector) returnVector.extend(maxVector) returnVector.extend(minVector) # convert to numpy array for scikit returnVector = np.array(returnVector) return returnVector def featureGloVe( self, profileList, ): """ For each profile in profile list generate GloVe features. Each profile contains text and for this text the glove vectors are retrieved and condensed into one single vector for this user. All user vectors are appended into the outputList. The word coverageStatistics and wordCounts for each user are saved in this feature object instance to be retrieved later. Parameters ---------- profileList : list, default=None, required List containing relevant profiles for which to extract features. 
Returns ------- np.array(outputList) : numpy.array Features in correct output format. """ if self.glove is None: raise Exception("GloVe not loaded.") # will contain the GloVe measures for each profile outputList = [] # get index as list, for faster lookup index_as_list = self.glove.get_index_list() # initialize progress bar helper = Helper() numProfiles = len(profileList) helper.printProgressBar( 0, numProfiles, prefix='Progress:', suffix='Complete', length=50 ) # list for saving coverage statistics coverageStatistics = [] # word count, that are included, for profiles wordCounts = [] # loop over profileList for num, profile in enumerate(profileList): # tokenize text in tweets # separated by space tokens = profile.text.split(' ') profile_vectors = [] # for each word lookup glove vector # if no match -> ignore it # first identify set of words not in glove not_in_glove = set(np.setdiff1d(tokens, index_as_list)) # get words in glove, indcluding duplicates # so if words exist n times in text, they will be n times in list in_glove = [word for word in tokens if word not in not_in_glove] if len(in_glove) == 0: # es konnte kein wort in glove gefunden werden # raise Exception eString = ( "Could not find any glove values for given words" ) raise NoGloveValueError(eString) else: # mind. ein Wort wurde gefunden # lookup glove vectors # should return duplicates! glove_values = self.glove.getGloVeByWordList( wordList=in_glove ) converted_vals = np.array(glove_values) # add vectors to list of this profile's vectors profile_vectors.append(converted_vals) # fill coverage statistics as share of tokens (=words) # that exist in glove in comparison to total tokens profile_coverage = len(converted_vals) / len(tokens) # add to global list coverageStatistics.append(profile_coverage) wordCounts.append(len(tokens)) # after all vectors for this profile are retrieved # condense with maximum, minimum, average in 900 dim vector final_vector = self._condenseGloVeVectors(profile_vectors) # add 900 dim to output list outputList.append(final_vector) # Update Progress Bar helper.printProgressBar( num + 1, numProfiles, prefix='Progress:', suffix='Complete', length=50 ) # save coverage statistics in class attribute to be accessible self.coverageStatistics = coverageStatistics self.wordCounts = wordCounts # create numpy array, as scikit needs this format return np.array(outputList) def createGloVeFeaturePipeline( self, glovePath='data/glove/glove.db', dataBaseMode=True, ): """ Create pipeline that can be passed into multiple training procceses this is just a blueprint for calculating the features no features are calculated yet! No parallelization (n_jobs=1) due to GloVe lookup in database. Parameters ---------- glovePath : string, default='data/glove/glove.db' Path to GloVe flat or database file. dataBaseMode : boolean, default=True If True path points to SQLite database file. Returns ------- featurePipeline : Pipeline Pipeline containing feature generation. 
""" glove = GloVe( filePath=glovePath, dataBaseMode=dataBaseMode, ) self.glove = glove # Create skicit-learn compatible FunctionTransformers # for usage with other sklearn functions # featureGloVe is the name of the function to be called to # extract features glove_Trans = FunctionTransformer(self.featureGloVe, validate=False) # Combine feature(s) with FeatureUnion featureTransformer = FeatureUnion([ ('glove', glove_Trans), ], n_jobs=1) # no parallelization # combine into a pipeline, no scaling since GloVe is scaled featurePipeline = Pipeline([ ('features', featureTransformer) ]) return featurePipeline
2.84375
3
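The pipeline construction above follows a standard scikit-learn pattern: wrap a plain feature-extraction function in FunctionTransformer, combine transformers in a FeatureUnion, then scale. A compact, self-contained version of the same pattern, with a dummy length feature standing in for the LIWC/GloVe extractors:

import numpy as np
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler

def text_lengths(texts):
    # stand-in for featureLIWC/featureGloVe: one numeric feature per sample
    return np.array([[len(t)] for t in texts])

pipeline = Pipeline([
    ('features', FeatureUnion([('lengths', FunctionTransformer(text_lengths, validate=False))])),
    ('scale', StandardScaler()),
])
X = pipeline.fit_transform(['short', 'a somewhat longer text', 'mid length'])
print(X.shape)  # (3, 1)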
tests/unit/test_serializers.py
launchpadrecruits/placebo
1
11989
<reponame>launchpadrecruits/placebo # Copyright (c) 2015 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import unittest import json from placebo.serializer import serialize, deserialize, utc date_sample = { "LoginProfile": { "UserName": "baz", "CreateDate": datetime.datetime(2015, 1, 4, 9, 1, 2, 0, tzinfo=utc), } } date_json = """{"LoginProfile": {"CreateDate": {"__class__": "datetime", "day": 4, "hour": 9, "microsecond": 0, "minute": 1, "month": 1, "second": 2, "year": 2015}, "UserName": "baz"}}""" class TestSerializers(unittest.TestCase): def test_datetime_to_json(self): result = json.dumps(date_sample, default=serialize, sort_keys=True) self.assertEqual(result, date_json) def test_datetime_from_json(self): response = json.loads(date_json, object_hook=deserialize) self.assertEqual(response, date_sample)
2.140625
2
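The test relies on json's default= hook for encoding and object_hook= for decoding. A minimal sketch of a datetime round-trip written against those same hooks (a simplified serializer, not placebo's actual one):

import datetime
import json

def serialize(obj):
    if isinstance(obj, datetime.datetime):
        return {"__class__": "datetime", "value": obj.isoformat()}
    raise TypeError(f"not serializable: {obj!r}")

def deserialize(d):
    if d.get("__class__") == "datetime":
        return datetime.datetime.fromisoformat(d["value"])
    return d

original = {"CreateDate": datetime.datetime(2015, 1, 4, 9, 1, 2)}
text = json.dumps(original, default=serialize)
assert json.loads(text, object_hook=deserialize) == original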
Chapter11/web_03.py
vabyte/Modern-Python-Standard-Library-Cookbook
84
11990
<filename>Chapter11/web_03.py import urllib.request import urllib.parse import json def http_request(url, query=None, method=None, headers={}, data=None): """Perform an HTTP request and return the associated response.""" parts = vars(urllib.parse.urlparse(url)) if query: parts['query'] = urllib.parse.urlencode(query) url = urllib.parse.ParseResult(**parts).geturl() r = urllib.request.Request(url=url, method=method, headers=headers, data=data) with urllib.request.urlopen(r) as resp: msg, resp = resp.info(), resp.read() if msg.get_content_type() == 'application/json': resp = json.loads(resp.decode('utf-8')) return msg, resp if __name__ == '__main__': msg, resp = http_request( 'https://httpbin.org/get', query={ 'a': 'Hello', 'b': 'World' } ) print(msg.get_content_type(), resp) msg, resp = http_request('https://httpbin.org/bytes/16') print(msg.get_content_type(), resp) msg, resp = http_request('https://httpbin.org/post', method='POST', data='This is my posted data!'.encode('ascii'), headers={'Content-Type': 'text/plain'}) print(msg.get_content_type(), resp)
3.609375
4
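The recipe above rebuilds the URL by dumping urlparse's fields into a dict and reconstructing a ParseResult. Because ParseResult is a named tuple, an equivalent and slightly terser idiom is _replace, sketched here as an alternative rather than a change to the recipe:

import urllib.parse

url = 'https://httpbin.org/get'
query = {'a': 'Hello', 'b': 'World'}
# _replace returns a new ParseResult with only the query component swapped in
parts = urllib.parse.urlparse(url)._replace(query=urllib.parse.urlencode(query))
print(parts.geturl())  # https://httpbin.org/get?a=Hello&b=World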
src/oci/dns/models/external_master.py
Manny27nyc/oci-python-sdk
249
11991
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ExternalMaster(object): """ An external master name server used as the source of zone data. """ def __init__(self, **kwargs): """ Initializes a new ExternalMaster object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param address: The value to assign to the address property of this ExternalMaster. :type address: str :param port: The value to assign to the port property of this ExternalMaster. :type port: int :param tsig_key_id: The value to assign to the tsig_key_id property of this ExternalMaster. :type tsig_key_id: str """ self.swagger_types = { 'address': 'str', 'port': 'int', 'tsig_key_id': 'str' } self.attribute_map = { 'address': 'address', 'port': 'port', 'tsig_key_id': 'tsigKeyId' } self._address = None self._port = None self._tsig_key_id = None @property def address(self): """ **[Required]** Gets the address of this ExternalMaster. The server's IP address (IPv4 or IPv6). :return: The address of this ExternalMaster. :rtype: str """ return self._address @address.setter def address(self, address): """ Sets the address of this ExternalMaster. The server's IP address (IPv4 or IPv6). :param address: The address of this ExternalMaster. :type: str """ self._address = address @property def port(self): """ Gets the port of this ExternalMaster. The server's port. Port value must be a value of 53, otherwise omit the port value. :return: The port of this ExternalMaster. :rtype: int """ return self._port @port.setter def port(self, port): """ Sets the port of this ExternalMaster. The server's port. Port value must be a value of 53, otherwise omit the port value. :param port: The port of this ExternalMaster. :type: int """ self._port = port @property def tsig_key_id(self): """ Gets the tsig_key_id of this ExternalMaster. The OCID of the TSIG key. :return: The tsig_key_id of this ExternalMaster. :rtype: str """ return self._tsig_key_id @tsig_key_id.setter def tsig_key_id(self, tsig_key_id): """ Sets the tsig_key_id of this ExternalMaster. The OCID of the TSIG key. :param tsig_key_id: The tsig_key_id of this ExternalMaster. :type: str """ self._tsig_key_id = tsig_key_id def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
2.234375
2
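Because of the init_model_state_from_kwargs decorator, the model can be populated directly from keyword arguments. A short usage sketch; the import path is assumed from the file location, and both the address and the OCID are placeholders.

from oci.dns.models import ExternalMaster  # assumed public import path for this module

master = ExternalMaster(
    address="203.0.113.10",                    # placeholder IPv4 address
    tsig_key_id="ocid1.tsigkey.oc1..example",  # placeholder OCID
)
print(master.address, master.tsig_key_id)
print(master)  # __repr__ delegates to formatted_flat_dict for a readable dump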
cinder/tests/unit/backup/fake_service_with_verify.py
puremudassir/cinder
0
11992
<filename>cinder/tests/unit/backup/fake_service_with_verify.py<gh_stars>0 # Copyright (C) 2014 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.backup import driver from cinder.tests.unit.backup import fake_service class FakeBackupServiceWithVerify(driver.BackupDriverWithVerify, fake_service.FakeBackupService): def verify(self, backup): pass
2.125
2
src/fasttick.py
JevinJ/Bittrex-Notify
12
11993
<filename>src/fasttick.py import config import misc def heartbeat(): """ Processes data from Bittrex into a simpler dictionary, calls the save function on it, deletes the oldest saved dictionary(if it's out of lookback range), and finally creates a list of the best coins to be used in tkinter listboxes. :return: A list containing triples of (coin name, increase rate, volume) """ data = misc.retrieve_data() # Processing for saving latest data from Bittrex API latest_data = {} for d in data.get('result', {}): name = d.get('Market', {}).get('MarketCurrencyLong', '') last_price = d.get('Summary', {}).get('Last', 0.0) last_vol = d.get('Summary', {}).get('BaseVolume', 0.0) base_currency = d.get('Market', {}).get('BaseCurrency', '') if base_currency == 'BTC' and last_price >= \ config.FASTTICK_MIN_PRICE and last_vol >= config.FASTTICK_MIN_VOL: latest_data[name] = {'Summary': d['Summary']} # Processing all data within 9 ticks + latest and returning # rate for output in GUI prev_data = list(misc.open_pickles('fasttick_history', config.FASTTICK_LOOKBACK)) prev_data.append(latest_data) ticker_data = [] if prev_data: for name in latest_data: prev_changes = [] for i in range(len(prev_data)-1): old_price = float(prev_data[i].get(name, {}).get('Summary', {}).get('Last', 0.0)) new_price = float(prev_data[i+1].get(name, {}).get('Summary', {}).get('Last', 0.0)) if old_price != 0: change = (((new_price - old_price) / old_price) * 100) prev_changes.append(change) if prev_changes: volume = float(latest_data.get(name, {}).get('Summary', {}).get('BaseVolume', 0.0)) average_rate = float((sum(prev_changes) / len(prev_changes))) if average_rate >= config.FASTTICK_MIN_RATE: ticker_data.append((name, average_rate, volume)) misc.save_pickle(latest_data, 'fasttick_history') misc.delete_ancient_pickles('fasttick_history', config.FASTTICK_LOOKBACK) return ticker_data
3.171875
3
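The rate computed above is the mean of tick-over-tick percentage changes. Isolated from the Bittrex plumbing, that calculation is just the following; the prices are illustrative values.

def average_change(prices):
    # prices: consecutive 'Last' prices for one coin, oldest first
    changes = [((new - old) / old) * 100.0
               for old, new in zip(prices, prices[1:]) if old != 0]
    return sum(changes) / len(changes) if changes else 0.0

print(average_change([0.010, 0.011, 0.0121]))  # ~10.0 (two consecutive +10% ticks)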
src/mlb_statsapi/model/api/game.py
power-edge/mlb_statsapi_etl
0
11994
<gh_stars>0 """ created by nikos at 4/26/21 """ import datetime from ..base import MLBStatsAPIEndpointModel from mlb_statsapi.utils.stats_api_object import configure_api YMDTHMS = '%Y-%m-%dT%H:%M:%SZ' YYYYMMDD_HHMMSS = '%Y%m%d_%H%M%S' MMDDYYYY_HHMMSS = '%m%d%Y_%H%M%S' class GameModel(MLBStatsAPIEndpointModel): date_formats = { 'updatedSince': YMDTHMS, 'timecode': YYYYMMDD_HHMMSS, 'startTimecode': MMDDYYYY_HHMMSS, 'endTimecode': MMDDYYYY_HHMMSS } @configure_api def liveGameV1(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def liveGameDiffPatchV1(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def liveTimestampv11(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def currentGameStats(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def getGameContextMetrics(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def getWinProbability(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def boxscore(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def content(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def colorFeed(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def colorTimestamps(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def linescore(self, **kwargs): return self.get_api_file_object(**kwargs) @configure_api def playByPlay(self, **kwargs): return self.get_api_file_object(**kwargs) @property def _methods(self) -> dict: return {m.__name__: m for m in ( self.liveGameV1, self.liveGameDiffPatchV1, self.liveTimestampv11, self.currentGameStats, self.getGameContextMetrics, self.getWinProbability, self.boxscore, self.content, self.colorFeed, self.colorTimestamps, self.linescore, self.playByPlay )} @property def now_timestamp(self): return datetime.datetime.now().strftime(YYYYMMDD_HHMMSS)
2.0625
2
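The three format strings at the top of the record control how timestamps are rendered for the different endpoints. A quick standalone check of what each format produces for an arbitrary example time:

import datetime

YMDTHMS = '%Y-%m-%dT%H:%M:%SZ'
YYYYMMDD_HHMMSS = '%Y%m%d_%H%M%S'
MMDDYYYY_HHMMSS = '%m%d%Y_%H%M%S'

t = datetime.datetime(2021, 4, 26, 19, 5, 0)
print(t.strftime(YMDTHMS))          # 2021-04-26T19:05:00Z
print(t.strftime(YYYYMMDD_HHMMSS))  # 20210426_190500
print(t.strftime(MMDDYYYY_HHMMSS))  # 04262021_190500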
scripts/build_folding_map.py
tsieprawski/md4c
475
11995
#!/usr/bin/env python3 import os import sys import textwrap self_path = os.path.dirname(os.path.realpath(__file__)); f = open(self_path + "/unicode/CaseFolding.txt", "r") status_list = [ "C", "F" ] folding_list = [ dict(), dict(), dict() ] # Filter the foldings for "full" folding. for line in f: comment_off = line.find("#") if comment_off >= 0: line = line[:comment_off] line = line.strip() if not line: continue raw_codepoint, status, raw_mapping, ignored_tail = line.split(";", 3) if not status.strip() in status_list: continue codepoint = int(raw_codepoint.strip(), 16) mapping = [int(it, 16) for it in raw_mapping.strip().split(" ")] mapping_len = len(mapping) if mapping_len in range(1, 4): folding_list[mapping_len-1][codepoint] = mapping else: assert(False) f.close() # If we assume that (index0 ... index-1) makes a range (as defined below), # check that the newly provided index is compatible with the range too; i.e. # verify that the range can be extended without breaking its properties. # # Currently, we can handle ranges which: # # (1) either form consecutive sequence of codepoints and which map that range # to other consecutive range of codepoints (of the same length); # # (2) or a consecutive sequence of codepoints with step 2 where each codepoint # CP is mapped to the codepoint CP+1 # (e.g. 0x1234 -> 0x1235; 0x1236 -> 0x1237; 0x1238 -> 0x1239; ...). # # Note: When the codepoints in the range are mapped to multiple codepoints, # only the 1st mapped codepoint is considered. All the other ones have to be # shared by all the mappings covered by the range. def is_range_compatible(folding, codepoint_list, index0, index): N = index - index0 codepoint0 = codepoint_list[index0] codepoint1 = codepoint_list[index0+1] codepointN = codepoint_list[index] mapping0 = folding[codepoint0] mapping1 = folding[codepoint1] mappingN = folding[codepointN] # Check the range type (1): if codepoint1 - codepoint0 == 1 and codepointN - codepoint0 == N \ and mapping1[0] - mapping0[0] == 1 and mapping1[1:] == mapping0[1:] \ and mappingN[0] - mapping0[0] == N and mappingN[1:] == mapping0[1:]: return True # Check the range type (2): if codepoint1 - codepoint0 == 2 and codepointN - codepoint0 == 2 * N \ and mapping0[0] - codepoint0 == 1 \ and mapping1[0] - codepoint1 == 1 and mapping1[1:] == mapping0[1:] \ and mappingN[0] - codepointN == 1 and mappingN[1:] == mapping0[1:]: return True return False def mapping_str(list, mapping): return ",".join("0x{:04x}".format(x) for x in mapping) for mapping_len in range(1, 4): folding = folding_list[mapping_len-1] codepoint_list = list(folding) index0 = 0 count = len(folding) records = list() data_records = list() while index0 < count: index1 = index0 + 1 while index1 < count and is_range_compatible(folding, codepoint_list, index0, index1): index1 += 1 if index1 - index0 > 2: # Range of codepoints records.append("R(0x{:04x},0x{:04x})".format(codepoint_list[index0], codepoint_list[index1-1])) data_records.append(mapping_str(data_records, folding[codepoint_list[index0]])) data_records.append(mapping_str(data_records, folding[codepoint_list[index1-1]])) index0 = index1 else: # Single codepoint records.append("S(0x{:04x})".format(codepoint_list[index0])) data_records.append(mapping_str(data_records, folding[codepoint_list[index0]])) index0 += 1 sys.stdout.write("static const unsigned FOLD_MAP_{}[] = {{\n".format(mapping_len)) sys.stdout.write("\n".join(textwrap.wrap(", ".join(records), 110, initial_indent = " ", subsequent_indent=" "))) sys.stdout.write("\n};\n") 
sys.stdout.write("static const unsigned FOLD_MAP_{}_DATA[] = {{\n".format(mapping_len)) sys.stdout.write("\n".join(textwrap.wrap(", ".join(data_records), 110, initial_indent = " ", subsequent_indent=" "))) sys.stdout.write("\n};\n")
2.75
3
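The script's first loop parses CaseFolding.txt records of the form "codepoint; status; mapping; # name", keeping only the C and F statuses. A trimmed sketch of just that parsing step, fed with two sample lines instead of the real Unicode data file:

sample = [
    "0041; C; 0061; # LATIN CAPITAL LETTER A",
    "00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S",
]
folding = {}
for line in sample:
    line = line.split("#", 1)[0].strip()   # drop trailing comment
    if not line:
        continue
    raw_cp, status, raw_map = [f.strip() for f in line.split(";")[:3]]
    if status in ("C", "F"):
        folding[int(raw_cp, 16)] = [int(x, 16) for x in raw_map.split()]
print(folding)  # {65: [97], 223: [115, 115]}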
app/api/v1/models/user_model.py
munniomer/Send-IT-Api-v1
0
11996
<gh_stars>0 users = [] class UserModel(object): """Class user models.""" def __init__(self): self.db = users def add_user(self, fname, lname, email, phone, password, confirm_password, city): """ Method for saving user to the dictionary """ payload = { "userId": len(self.db)+1, "fname": fname, "lname": lname, "email": email, "phone": phone, "password": password, "confirm_password": confirm_password, "city": city, } self.db.append(payload) return self.db def check_email(self, email): """Method for checking if user email exist""" user = [user for user in users if user['email'] == email] if user: return True return False def check_user(self, userId): """Method for checking if user exist""" user = [user for user in users if user['userId'] == userId] if user: return True return False
3.609375
4
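Since the model keeps users in a module-level list, it can be exercised directly. A brief usage sketch; the import path is assumed from the file location and all field values are placeholders.

from app.api.v1.models.user_model import UserModel  # import path assumed from the file location

model = UserModel()
model.add_user("Jane", "Doe", "jane@example.com", "0712345678",
               "secret", "secret", "Nairobi")
print(model.check_email("jane@example.com"))  # True
print(model.check_user(1))                    # True -- userId starts at 1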
Codigo/pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py
JoaquinRodriguez2006/RoboCup_Junior_Material
0
11997
from controller import Robot from controller import Motor from controller import PositionSensor from controller import Robot, DistanceSensor, GPS, Camera, Receiver, Emitter import cv2 import numpy as np import math import time robot = Robot() timeStep = 32 tile_size = 0.12 speed = 6.28 media_baldoza = 0.06 estado = 1 start = 0 global r global g global b r = 0 g = 0 b = 0 # start = robot.getTime() # Camera initialization camera = robot.getDevice("camera3") camera.enable(timeStep) # Colour sensor initialization colour_sensor = robot.getDevice("colour_sensor") colour_sensor.enable(timeStep) # Distance sensor initialization distancia_sensor1 = robot.getDevice("distance sensor1") distancia_sensor1.enable(timeStep) # Motor initialization ruedaIzquierda = robot.getDevice("wheel1 motor") ruedaDerecha = robot.getDevice("wheel2 motor") ruedaIzquierda.setPosition(float('inf')) ruedaDerecha.setPosition(float('inf')) rIzq_encoder = ruedaIzquierda.getPositionSensor() rDer_encoder = ruedaDerecha.getPositionSensor() rIzq_encoder.enable(timeStep) rDer_encoder.enable(timeStep) # Functions def leer_sensores(): global r global g global b # Color sensor image = colour_sensor.getImage() r = colour_sensor.imageGetRed(image, 1, 0, 0) g = colour_sensor.imageGetGreen(image, 1, 0, 0) b = colour_sensor.imageGetBlue(image, 1, 0, 0) # azul: r=65 g=65 b=252 # rojo: r=252 g=65 b=65 # print("r: " + str(r) + " g: " + str(g) + " b: " + str(b)) """ # Camara image = camera.getImage() imagen = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4)) frame = cv2.cvtColor(imagen, cv2.COLOR_BGRA2BGR) cv2.imshow("frame", frame) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale cv2.imshow("grayScale", frame) cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY) # Threshold cv2.imshow("thresh", frame) cv2.waitKey(1) # Sensor de Distancia print("Distancia: " + str(distancia_sensor1.getValue())) """ def avanzar(vel): ruedaIzquierda.setVelocity(vel) ruedaDerecha.setVelocity(vel) def retroceder(vel): ruedaIzquierda.setVelocity(-vel) ruedaDerecha.setVelocity(-vel) def girar_der(vel): ruedaIzquierda.setVelocity(-vel) ruedaDerecha.setVelocity(vel) def girar_izq(vel): ruedaIzquierda.setVelocity(vel) ruedaDerecha.setVelocity(-vel) gyro = robot.getDevice("gyro") gyro.enable(timeStep) def rotar(angulo): global angulo_actual global tiempo_anterior # iniciar_rotacion if angulo > 0: girar_der(0.5) else: girar_izq(0.5) # Mientras no llego al angulo solicitado sigo girando if (abs(abs(angulo) - angulo_actual) > 1): tiempo_actual = robot.getTime() # print("Inicio rotacion angulo", angulo, "Angulo actual:",angulo_actual) tiempo_transcurrido = tiempo_actual - \ tiempo_anterior # tiempo que paso en cada timestep # rad/seg * mseg * 1000 radsIntimestep = abs(gyro.getValues()[1]) * tiempo_transcurrido degsIntimestep = radsIntimestep * 180 / math.pi # print("rads: " + str(radsIntimestep) + # " | degs: " + str(degsIntimestep)) angulo_actual += degsIntimestep # Si se pasa de 360 grados se ajusta la rotacion empezando desde 0 grados angulo_actual = angulo_actual % 360 # Si es mas bajo que 0 grados, le resta ese valor a 360 if angulo_actual < 0: angulo_actual += 360 tiempo_anterior = tiempo_actual # print("Angulo actual:", angulo_actual) return False #print("Rotacion finalizada.") angulo_actual = 0 return True def delay(ms): initTime = robot.getTime() # Store starting time (in seconds) while robot.step(timeStep) != -1: print("delay") if (robot.getTime() - initTime) * 1000.0 > ms: # If time elapsed (converted into ms) is greater 
than value passed in avanzar(0) break def rotar_enclavado(angulo): while robot.step(timeStep) != -1: leer_sensores() # print("r: " + str(r) + " g: " + str(g) + " b: " + str(b)) if rotar(angulo) == True: # If time elapsed (converted into ms) is greater than value passed in avanzar(0) break def avance(tipo_avance): start = rDer_encoder.getValue() velocidad = 0 avance = 0 if tipo_avance == "medio": velocidad = 3 avance = 2.9 elif tipo_avance == "largo": avance = 5.9 velocidad = 5.96 elif tipo_avance == "esquina": avance = 4.1 velocidad = 6.28 while robot.step(timeStep) != -1: avanzar(velocidad) leer_sensores() tipo_pizza() # print("r: " + str(r) + " g: " + str(g) + " b: " + str(b)) if rDer_encoder.getValue() >= start + avance: avanzar(0) break def retroceso(tipo_retroceso): start = rDer_encoder.getValue() velocidad = 0 retroceso = 0 if tipo_retroceso == "medio": velocidad = 6.28 retroceso = 2.9 elif tipo_retroceso == "largo": retroceso = 5.9 velocidad = 5.96 elif tipo_retroceso == "esquina": retroceso = 4.1 velocidad = 6.28 elif tipo_retroceso == "poquito": retroceso = 1.9 velocidad = 6.28 while robot.step(timeStep) != -1: retroceder(velocidad) leer_sensores() # print("r: " + str(r) + " g: " + str(g) + " b: " + str(b)) if start - retroceso >= rDer_encoder.getValue(): avanzar(0) break def tipo_pizza(): #print("valores(1): r:" + str(r) + " , g:" + str(g) + " , b:" + str(b)) if 255 >= r >= 240 and 60 <= b <= 75 and 60 <= g <= 75: print("(Red)pasaje zona 3 a 1") elif 150 >= r >= 100 and 210 <= b <= 230 and 60 <= g <= 75: print("(Vaiolet)pasaje zona 2 a 3") elif 60 <= r <= 75 and 255 >= b >= 245 and 60 <= g <= 75: print("(Blue)pasaje zona 1 a 2") elif 200 <= r <= 220 and 110 >= b >= 100 and 175 <= g <= 180: print("Entered swamp") return "swamp" elif 250 >= r >= 230 and 250 >= b >= 235 and 250 >= g >= 235: print("Found Checkpoint") elif r == 233 and b == 233 and g == 233: print("Azulejo normal") elif 30 <= r <= 50 : print("un agujero negro we") retroceso("medio") rotar_enclavado(90) else: return "prueba" angulo_actual = 0 tiempo_anterior = robot.getTime() contador = 0 while robot.step(timeStep) != -1: avance("medio")
3
3
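The rotar() routine integrates the gyro's angular velocity over each timestep to track heading. Stripped of the Webots objects and using a fixed timestep instead of the wall-clock delta, the accumulation is just this; the gyro reading is an assumed sample value.

import math

angle_deg = 0.0
timestep_s = 0.032            # 32 ms control loop, as in the script
gyro_rad_per_s = 0.5          # sample |gyro.getValues()[1]| reading (assumed)

for _ in range(10):           # ten control ticks
    angle_deg += abs(gyro_rad_per_s) * timestep_s * 180.0 / math.pi
    angle_deg %= 360.0        # keep heading in [0, 360)
print(round(angle_deg, 2))    # 9.17 degrees after ten ticks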
drip/migrations/0002_querysetrule_rule_type.py
RentFreeMedia/django-drip-campaigns
46
11998
# Generated by Django 3.0.7 on 2020-11-25 13:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('drip', '0001_initial'), ] operations = [ migrations.AddField( model_name='querysetrule', name='rule_type', field=models.CharField(choices=[('or', 'Or'), ('and', 'And')], default='and', max_length=3), ), ]
1.71875
2
venv/lib/python3.6/site-packages/cligj/__init__.py
booklover98/A-_pathfinding
0
11999
<filename>venv/lib/python3.6/site-packages/cligj/__init__.py # cligj # Shared arguments and options. import click from .features import normalize_feature_inputs # Arguments. # Multiple input files. files_in_arg = click.argument( 'files', nargs=-1, type=click.Path(resolve_path=True), required=True, metavar="INPUTS...") # Multiple files, last of which is an output file. files_inout_arg = click.argument( 'files', nargs=-1, type=click.Path(resolve_path=True), required=True, metavar="INPUTS... OUTPUT") # Features from files, command line args, or stdin. # Returns the input data as an iterable of GeoJSON Feature-like # dictionaries. features_in_arg = click.argument( 'features', nargs=-1, callback=normalize_feature_inputs, metavar="FEATURES...") # Options. verbose_opt = click.option( '--verbose', '-v', count=True, help="Increase verbosity.") quiet_opt = click.option( '--quiet', '-q', count=True, help="Decrease verbosity.") # Format driver option. format_opt = click.option( '-f', '--format', '--driver', 'driver', default='GTiff', help="Output format driver") # JSON formatting options. indent_opt = click.option( '--indent', type=int, default=None, help="Indentation level for JSON output") compact_opt = click.option( '--compact/--not-compact', default=False, help="Use compact separators (',', ':').") # Coordinate precision option. precision_opt = click.option( '--precision', type=int, default=-1, help="Decimal precision of coordinates.") # Geographic (default), projected, or Mercator switch. projection_geographic_opt = click.option( '--geographic', 'projection', flag_value='geographic', default=True, help="Output in geographic coordinates (the default).") projection_projected_opt = click.option( '--projected', 'projection', flag_value='projected', help="Output in dataset's own, projected coordinates.") projection_mercator_opt = click.option( '--mercator', 'projection', flag_value='mercator', help="Output in Web Mercator coordinates.") # Feature collection or feature sequence switch. sequence_opt = click.option( '--sequence/--no-sequence', default=False, help="Write a LF-delimited sequence of texts containing individual " "objects or write a single JSON text containing a feature " "collection object (the default).") use_rs_opt = click.option( '--rs/--no-rs', 'use_rs', default=False, help="Use RS (0x1E) as a prefix for individual texts in a sequence " "as per http://tools.ietf.org/html/draft-ietf-json-text-sequence-13 " "(default is False).") # GeoJSON output mode option. def geojson_type_collection_opt(default=False): return click.option( '--collection', 'geojson_type', flag_value='collection', default=default, help="Output as GeoJSON feature collection(s).") def geojson_type_feature_opt(default=False): return click.option( '--feature', 'geojson_type', flag_value='feature', default=default, help="Output as GeoJSON feature(s).") def geojson_type_bbox_opt(default=False): return click.option( '--bbox', 'geojson_type', flag_value='bbox', default=default, help="Output as GeoJSON bounding box array(s).")
2.640625
3
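These shared arguments and options are meant to be stacked onto Click commands. A minimal sketch of a command consuming a few of them; the command name and behaviour are made up for illustration.

import click
import cligj

@click.command()
@cligj.files_in_arg      # positional INPUTS...
@cligj.verbose_opt
@cligj.indent_opt
def info(files, verbose, indent):
    """Toy command: echo back the input paths it was given."""
    for path in files:
        click.echo(path)

if __name__ == '__main__':
    info()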