blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
281
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
6
116
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
313 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
18.2k
668M
star_events_count
int64
0
102k
fork_events_count
int64
0
38.2k
gha_license_id
stringclasses
17 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
107 values
src_encoding
stringclasses
20 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.02M
extension
stringclasses
78 values
content
stringlengths
2
6.02M
authors
listlengths
1
1
author
stringlengths
0
175
6171becb207e276db36bc6653ed1900ae7e3400d
a5424e248051f1cf2542aa82f040f7173aa8f8d8
/ExamenUII.2/ExamenU22/Examen/admin.py
6f540ae7584dda48774c6e956324debaa0392758
[]
no_license
luisfbarajas/PWeb
dd23a744357e5b961b3a1c23756bf60cc6741b08
e514d0f5710e33c718de8e27a06321643a271e17
refs/heads/master
2020-03-28T07:11:33.107934
2018-12-06T07:56:17
2018-12-06T07:56:17
147,886,217
0
0
null
null
null
null
UTF-8
Python
false
false
164
py
from django.contrib import admin from .models import Personal, Activities # Register your models here. admin.site.register(Personal) admin.site.register(Activities)
[ "luisf.barajas.btz@gamil.com" ]
luisf.barajas.btz@gamil.com
82b311c67a57cc924a2e56dcce6820aeb0307776
3de806956985605b4f5042879e11d88a859871dd
/notebooks/pdf_structure_env/bin/dumppdf.py
367ccdc95d1ee1fbd5d2f91f771c7cd4cfbb0e42
[]
no_license
qiyuyang16/CS4300_microGoogle
cb25e9f5b0e547f71873cc06e345b7776d1a24a6
e6ac8561a8e20b23bb858dbf4ed745e28bf60b94
refs/heads/master
2023-04-21T05:59:51.299146
2021-05-11T19:23:48
2021-05-11T19:23:48
354,361,818
0
0
null
2021-04-28T15:10:01
2021-04-03T18:12:04
Jupyter Notebook
UTF-8
Python
false
false
12,939
py
#!/home/vince/Documents/Code/microgoogle/CS4300_microGoogle/streamlit_testing/pdf_structure_env/bin/python """Extract pdf structure in XML format""" import logging import os.path import re import sys import warnings from argparse import ArgumentParser import pdfminer from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines, PDFXRefFallback, \ PDFNoValidXRefWarning from pdfminer.pdfpage import PDFPage from pdfminer.pdfparser import PDFParser from pdfminer.pdftypes import PDFObjectNotFound, PDFValueError from pdfminer.pdftypes import PDFStream, PDFObjRef, resolve1, stream_value from pdfminer.psparser import PSKeyword, PSLiteral, LIT from pdfminer.utils import isnumber logging.basicConfig() ESC_PAT = re.compile(r'[\000-\037&<>()"\042\047\134\177-\377]') def e(s): if isinstance(s, bytes): s = str(s, 'latin-1') return ESC_PAT.sub(lambda m: '&#%d;' % ord(m.group(0)), s) def dumpxml(out, obj, codec=None): if obj is None: out.write('<null />') return if isinstance(obj, dict): out.write('<dict size="%d">\n' % len(obj)) for (k, v) in obj.items(): out.write('<key>%s</key>\n' % k) out.write('<value>') dumpxml(out, v) out.write('</value>\n') out.write('</dict>') return if isinstance(obj, list): out.write('<list size="%d">\n' % len(obj)) for v in obj: dumpxml(out, v) out.write('\n') out.write('</list>') return if isinstance(obj, ((str,), bytes)): out.write('<string size="%d">%s</string>' % (len(obj), e(obj))) return if isinstance(obj, PDFStream): if codec == 'raw': out.write(obj.get_rawdata()) elif codec == 'binary': out.write(obj.get_data()) else: out.write('<stream>\n<props>\n') dumpxml(out, obj.attrs) out.write('\n</props>\n') if codec == 'text': data = obj.get_data() out.write('<data size="%d">%s</data>\n' % (len(data), e(data))) out.write('</stream>') return if isinstance(obj, PDFObjRef): out.write('<ref id="%d" />' % obj.objid) return if isinstance(obj, PSKeyword): out.write('<keyword>%s</keyword>' % obj.name) return if isinstance(obj, PSLiteral): 
out.write('<literal>%s</literal>' % obj.name) return if isnumber(obj): out.write('<number>%s</number>' % obj) return raise TypeError(obj) def dumptrailers(out, doc, show_fallback_xref=False): for xref in doc.xrefs: if not isinstance(xref, PDFXRefFallback) or show_fallback_xref: out.write('<trailer>\n') dumpxml(out, xref.trailer) out.write('\n</trailer>\n\n') no_xrefs = all(isinstance(xref, PDFXRefFallback) for xref in doc.xrefs) if no_xrefs and not show_fallback_xref: msg = 'This PDF does not have an xref. Use --show-fallback-xref if ' \ 'you want to display the content of a fallback xref that ' \ 'contains all objects.' warnings.warn(msg, PDFNoValidXRefWarning) return def dumpallobjs(out, doc, codec=None, show_fallback_xref=False): visited = set() out.write('<pdf>') for xref in doc.xrefs: for objid in xref.get_objids(): if objid in visited: continue visited.add(objid) try: obj = doc.getobj(objid) if obj is None: continue out.write('<object id="%d">\n' % objid) dumpxml(out, obj, codec=codec) out.write('\n</object>\n\n') except PDFObjectNotFound as e: print('not found: %r' % e) dumptrailers(out, doc, show_fallback_xref) out.write('</pdf>') return def dumpoutline(outfp, fname, objids, pagenos, password='', dumpall=False, codec=None, extractdir=None): fp = open(fname, 'rb') parser = PDFParser(fp) doc = PDFDocument(parser, password) pages = {page.pageid: pageno for (pageno, page) in enumerate(PDFPage.create_pages(doc), 1)} def resolve_dest(dest): if isinstance(dest, str): dest = resolve1(doc.get_dest(dest)) elif isinstance(dest, PSLiteral): dest = resolve1(doc.get_dest(dest.name)) if isinstance(dest, dict): dest = dest['D'] if isinstance(dest, PDFObjRef): dest = dest.resolve() return dest try: outlines = doc.get_outlines() outfp.write('<outlines>\n') for (level, title, dest, a, se) in outlines: pageno = None if dest: dest = resolve_dest(dest) pageno = pages[dest[0].objid] elif a: action = a if isinstance(action, dict): subtype = action.get('S') if subtype and 
repr(subtype) == '/\'GoTo\'' and action.get( 'D'): dest = resolve_dest(action['D']) pageno = pages[dest[0].objid] s = e(title).encode('utf-8', 'xmlcharrefreplace') outfp.write('<outline level="{!r}" title="{}">\n'.format(level, s)) if dest is not None: outfp.write('<dest>') dumpxml(outfp, dest) outfp.write('</dest>\n') if pageno is not None: outfp.write('<pageno>%r</pageno>\n' % pageno) outfp.write('</outline>\n') outfp.write('</outlines>\n') except PDFNoOutlines: pass parser.close() fp.close() return LITERAL_FILESPEC = LIT('Filespec') LITERAL_EMBEDDEDFILE = LIT('EmbeddedFile') def extractembedded(outfp, fname, objids, pagenos, password='', dumpall=False, codec=None, extractdir=None): def extract1(objid, obj): filename = os.path.basename(obj.get('UF') or obj.get('F').decode()) fileref = obj['EF'].get('UF') or obj['EF'].get('F') fileobj = doc.getobj(fileref.objid) if not isinstance(fileobj, PDFStream): error_msg = 'unable to process PDF: reference for %r is not a ' \ 'PDFStream' % filename raise PDFValueError(error_msg) if fileobj.get('Type') is not LITERAL_EMBEDDEDFILE: raise PDFValueError( 'unable to process PDF: reference for %r ' 'is not an EmbeddedFile' % (filename)) path = os.path.join(extractdir, '%.6d-%s' % (objid, filename)) if os.path.exists(path): raise IOError('file exists: %r' % path) print('extracting: %r' % path) os.makedirs(os.path.dirname(path), exist_ok=True) out = open(path, 'wb') out.write(fileobj.get_data()) out.close() return with open(fname, 'rb') as fp: parser = PDFParser(fp) doc = PDFDocument(parser, password) extracted_objids = set() for xref in doc.xrefs: for objid in xref.get_objids(): obj = doc.getobj(objid) if objid not in extracted_objids and isinstance(obj, dict) \ and obj.get('Type') is LITERAL_FILESPEC: extracted_objids.add(objid) extract1(objid, obj) return def dumppdf(outfp, fname, objids, pagenos, password='', dumpall=False, codec=None, extractdir=None, show_fallback_xref=False): fp = open(fname, 'rb') parser = PDFParser(fp) doc 
= PDFDocument(parser, password) if objids: for objid in objids: obj = doc.getobj(objid) dumpxml(outfp, obj, codec=codec) if pagenos: for (pageno, page) in enumerate(PDFPage.create_pages(doc)): if pageno in pagenos: if codec: for obj in page.contents: obj = stream_value(obj) dumpxml(outfp, obj, codec=codec) else: dumpxml(outfp, page.attrs) if dumpall: dumpallobjs(outfp, doc, codec, show_fallback_xref) if (not objids) and (not pagenos) and (not dumpall): dumptrailers(outfp, doc, show_fallback_xref) fp.close() if codec not in ('raw', 'binary'): outfp.write('\n') return def create_parser(): parser = ArgumentParser(description=__doc__, add_help=True) parser.add_argument('files', type=str, default=None, nargs='+', help='One or more paths to PDF files.') parser.add_argument( "--version", "-v", action="version", version="pdfminer.six v{}".format(pdfminer.__version__)) parser.add_argument( '--debug', '-d', default=False, action='store_true', help='Use debug logging level.') procedure_parser = parser.add_mutually_exclusive_group() procedure_parser.add_argument( '--extract-toc', '-T', default=False, action='store_true', help='Extract structure of outline') procedure_parser.add_argument( '--extract-embedded', '-E', type=str, help='Extract embedded files') parse_params = parser.add_argument_group( 'Parser', description='Used during PDF parsing') parse_params.add_argument( '--page-numbers', type=int, default=None, nargs='+', help='A space-seperated list of page numbers to parse.') parse_params.add_argument( '--pagenos', '-p', type=str, help='A comma-separated list of page numbers to parse. 
Included for ' 'legacy applications, use --page-numbers for more idiomatic ' 'argument entry.') parse_params.add_argument( '--objects', '-i', type=str, help='Comma separated list of object numbers to extract') parse_params.add_argument( '--all', '-a', default=False, action='store_true', help='If the structure of all objects should be extracted') parse_params.add_argument( '--show-fallback-xref', action='store_true', help='Additionally show the fallback xref. Use this if the PDF ' 'has zero or only invalid xref\'s. This setting is ignored if ' '--extract-toc or --extract-embedded is used.') parse_params.add_argument( '--password', '-P', type=str, default='', help='The password to use for decrypting PDF file.') output_params = parser.add_argument_group( 'Output', description='Used during output generation.') output_params.add_argument( '--outfile', '-o', type=str, default='-', help='Path to file where output is written. Or "-" (default) to ' 'write to stdout.') codec_parser = output_params.add_mutually_exclusive_group() codec_parser.add_argument( '--raw-stream', '-r', default=False, action='store_true', help='Write stream objects without encoding') codec_parser.add_argument( '--binary-stream', '-b', default=False, action='store_true', help='Write stream objects with binary encoding') codec_parser.add_argument( '--text-stream', '-t', default=False, action='store_true', help='Write stream objects as plain text') return parser def main(argv=None): parser = create_parser() args = parser.parse_args(args=argv) if args.debug: logging.getLogger().setLevel(logging.DEBUG) if args.outfile == '-': outfp = sys.stdout else: outfp = open(args.outfile, 'w') if args.objects: objids = [int(x) for x in args.objects.split(',')] else: objids = [] if args.page_numbers: pagenos = {x - 1 for x in args.page_numbers} elif args.pagenos: pagenos = {int(x) - 1 for x in args.pagenos.split(',')} else: pagenos = set() password = args.password if args.raw_stream: codec = 'raw' elif 
args.binary_stream: codec = 'binary' elif args.text_stream: codec = 'text' else: codec = None for fname in args.files: if args.extract_toc: dumpoutline( outfp, fname, objids, pagenos, password=password, dumpall=args.all, codec=codec, extractdir=None ) elif args.extract_embedded: extractembedded( outfp, fname, objids, pagenos, password=password, dumpall=args.all, codec=codec, extractdir=args.extract_embedded ) else: dumppdf( outfp, fname, objids, pagenos, password=password, dumpall=args.all, codec=codec, extractdir=None, show_fallback_xref=args.show_fallback_xref ) outfp.close() if __name__ == '__main__': sys.exit(main())
[ "vince@bartle.io" ]
vince@bartle.io
7ed072fa1524c95c0ada3f899e91a7dcbcfd91de
9897061cfd34babf80616ff21a20c30db0212970
/server/account/models.py
a01557b1bd74e7b11b8ff7b13401a7a631636ebe
[ "MIT" ]
permissive
Samhaina/mahjong-portal
f310553c5df13e122f3e89d05a9867d0f122d4f1
4cdbd8bd61655584c25a437b3d5cab053507b2f4
refs/heads/master
2020-03-16T22:10:20.864718
2018-10-11T00:45:22
2018-10-11T00:45:22
133,029,373
0
0
null
2018-05-11T11:05:41
2018-05-11T11:05:41
null
UTF-8
Python
false
false
287
py
from django.db import models from django.contrib.auth.models import AbstractUser from tournament.models import Tournament class User(AbstractUser): is_tournament_manager = models.BooleanField(default=False) managed_tournaments = models.ManyToManyField(Tournament, blank=True)
[ "lisikhin@gmail.com" ]
lisikhin@gmail.com
aedb32df31a1c5530d48f12e8a44775169b392d2
9d9e6e909e3f94ca62eef648c9e0b43473fa132e
/anagrams.py
495d404764901a8b900808bf62c07565468dba8e
[]
no_license
Adi1729/Coding_100
a73fe21344c5da42cd72a829018fdf593299e4b9
61bd8838b1c989b0199d486f0a7269f8386ae924
refs/heads/master
2021-06-29T04:28:05.570383
2020-08-28T07:30:48
2020-08-28T07:30:48
142,202,161
0
0
null
2018-08-01T08:40:46
2018-07-24T19:10:38
Python
UTF-8
Python
false
false
1,107
py
s= 'cbaebabacd' a = 'abc' s="ababababab" a="aab" a ='acb' q= [i for i in a] q.sort() a = ''.join(q) a_orig =a b= 'b' l=[] i=0 i=1 pcounter = collections.Counter(a) scounter = collections.Counter(s[:len(a)]) for i in range(0,len(s)-len(a)+1): scounter = collections.Counter(s[i:len(a)+i]) if pcounter==scounter: l.append(i) c='a' a_orig ='abc' a=a_orig p=a #Method 1 : Using Counters result = [] pcounter = collections.Counter(p) scounter = collections.Counter(s[:len(p) - 1]) begin = 0 for i in range(len(p) - 1, len(s)) : scounter[s[i]] += 1 if scounter == pcounter : print(scounter) result.append(begin) scounter[s[begin]] -= 1 if scounter[s[begin]] == 0 : del(scounter[s[begin]]) begin += 1 return result Input: ["eat", "tea", "tan", "ate", "nat", "bat"], Output: [ ["ate","eat","tea"], ["nat","tan"], ["bat"] ] a='ate' l = [i for i in a] l.sort() q = ''.join(l)
[ "adiyadav.1729@gmail.com" ]
adiyadav.1729@gmail.com
cf3c79f12e13974f1a2fc914f299f7f6b79b0301
49692b22e38afe5fc6cf7144cd830e11a9a43d47
/cse101/asgn4/random_generator.py
7d6633063b576641eb97f343be08a0df60dec5bf
[]
no_license
juliofuentesUI/cse
1b0080cabe8301c3149d9b0ab498f17a7ca7a7e8
4e86df10ee9c23bdfeeafb61106d664aa0d136ff
refs/heads/main
2023-08-21T19:27:03.697081
2021-10-04T07:09:44
2021-10-04T07:09:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
474
py
import random actors = [] with open("cleaned_movielist.txt", 'r') as movie_list: for line, cast in enumerate(movie_list.readlines()): actor_list=cast.split() for i in range(1,len(actor_list)): actors.append(actor_list[i]) with open("random.txt",'w') as output: for i in range(0,10): rand1=random.randint(0,len(actors)-1) rand2=random.randint(0,len(actors)-1) output.write(actors[rand1]+" "+ actors[rand2]+"\n")
[ "zliu259@ussc.edu" ]
zliu259@ussc.edu
eb9232dcd2be9ba8cbd5bd42e4ba39ef045ea84e
babaa02f2c866c20d9753193a7c3193c149fd09f
/12month_report/report/__init__.py
1982030fa17f97418fa32dfb22dc26a17de0fb2f
[]
no_license
solbutec/kmart
5a03a4743bc8e935669e4aadea732d402dd59867
b73316e6c0ce79bbc463d7198f5c9fe181f70531
refs/heads/master
2022-12-05T19:19:29.393040
2020-08-24T13:50:14
2020-08-24T13:50:14
290,629,983
0
1
null
2020-08-26T23:55:19
2020-08-26T23:55:18
null
UTF-8
Python
false
false
50
py
# -*- coding: utf-8 -*- from . import twelvemonth
[ "kyawzinoo@asiamatrixsoftware.com" ]
kyawzinoo@asiamatrixsoftware.com
eb640e26c331adf4b564538d8301b8d9647bf62b
eb1292224689b4d792ce4f3b51a3af539be710d8
/blog/views.py
19d1328fddb9027dc7d6b102c19386301a84bf42
[]
no_license
esmeraldalopez/first-django-blog
0eab2d6107508a68694d86171a8ccde6fcb93fe3
e5f0b4588e9ead77cd10ae0293315bbb06355d56
refs/heads/master
2021-01-24T01:21:17.750872
2019-06-15T23:35:29
2019-06-15T23:35:29
122,801,868
0
0
null
null
null
null
UTF-8
Python
false
false
1,646
py
from django.shortcuts import render, get_object_or_404, redirect from django.utils import timezone from .models import Post #incluyo el modelo definido en models, .models indica que buscara en el mismo directorio from .forms import PostForm # Create your views here. def post_list(request): posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date') #QuerySet return render(request, 'blog/post_list.html', {'posts':posts}) #post_detail recibe pk, que es lo que recibira en la url, que bien puede ser el numero de blog, para este caso def post_detail(request, pk): #QuerySet para buscar el post post = get_object_or_404(Post, pk=pk) return render(request, 'blog/post_detail.html', {'post': post}) def post_new(request): if request.method == 'POST': form = PostForm(request.POST) if form.is_valid(): post = form.save(commit=False) post.author = request.user post.published_date = timezone.now() post.save() return redirect('post_detail', pk=post.pk) else: form = PostForm() return render(request, 'blog/post_edit.html', {'form':form}) def post_edit(request, pk): post = get_object_or_404(Post, pk=pk) if request.method == "POST": form = PostForm(request.POST, instance=post) if form.is_valid(): post = form.save(commit=False) post.author = request.user post.save() return redirect('post_detail', pk=post.pk) else: form = PostForm(instance=post) return render(request, 'blog/post_edit.html', {'form': form})
[ "esmeralda.lopez.e@gmail.com" ]
esmeralda.lopez.e@gmail.com
2edffd3663b2abd45fd38a442dd7b5736f510500
03dd72639c7a39ef7cf17d0dc7fa4276cd47c93a
/tipranksCloudlet/data_processor.py
6e14cdc88570cf04b705e04470bfc9af686b3af2
[]
permissive
karthiknayak02/portfolioManager
dbc7554ded3cdb9489d43c9329c677e54f745c1f
fc3c335778c78535b8e263e7a62a5e2d7a1ec7c9
refs/heads/master
2023-08-10T16:06:51.810322
2021-10-03T21:38:09
2021-10-03T21:38:09
351,266,417
0
0
MIT
2021-10-03T20:29:40
2021-03-25T00:53:15
Python
UTF-8
Python
false
false
1,797
py
from common import * """ Get price targets of symbol https://www.tipranks.com/api/stocks/getData/?name=NET """ def get_price_targets_consensus(symbol: str): request_url = "https://www.tipranks.com/api/stocks/getData/" query_params = {'name': symbol} response = get_call(request_url=request_url, query_params=query_params) value = None schema = { "ticker": value, "companyName": value, "ptConsensus": [ [0], { "priceTarget": value, "high": value, "low": value }], "latestRankedConsensus": { "rating": value, "nB": value, "nH": value, "nS": value } } price_targets = parse_response(schema, response) print(price_targets) return price_targets """ Get basic stock details # https://market.tipranks.com/api/details/getstockdetailsasync?id=AMZN """ def get_stock_details(symbol: str): request_url = "https://market.tipranks.com/api/details/getstockdetailsasync" query_params = {'id': symbol} response = get_call(request_url=request_url, query_params=query_params) value = None schema = [ [0], { "ticker": "AMZN", "price": value, "pe": value, "eps": value, "marketCap": value, "yLow": value, "yHigh": value, "nextEarningDate": value, "range52Weeks": value, "low52Weeks": value, "high52Weeks": value } ] stock_details = parse_response(schema, response) print(stock_details) return stock_details if __name__ == '__main__': get_price_targets_consensus("AMZN") get_stock_details("AMZN")
[ "nayakkarthik02@gmail.com" ]
nayakkarthik02@gmail.com
4825ca9e68ae9cf98bba0728e2d75d14fec976e4
790909e6226f6e40859ea153873e712cb98d142f
/step7 (문자열/2675.py
29cc5f89539ea15915c76b28892d83d60e759b2f
[]
no_license
choibyeol/Baekjoon-algorithm
1ae01a95ff80051b284dc522edeb1b258072b5a0
5de50885900cdbb8565784f7b6e2af37afd1bf7f
refs/heads/master
2023-04-21T01:36:19.289748
2021-05-14T12:14:18
2021-05-14T12:14:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
183
py
T = int(input()) for i in range(T): S = input() lenS = int(S[0]) for i in range(2, len(S)): for j in range(0, lenS): print(S[i], end = "") print()
[ "honey333888@naver.com" ]
honey333888@naver.com
8c750e5ac76fad05861e8d6d26c54313f5859f0e
bdd6ab129de61947945b380a487a3ee923f542f3
/real_genomes/pipeline/start.py
50a2993aeb21e00fb25ff0957e5b9833515d1ddb
[]
no_license
InfOmics/pangenes-review
1e27c1fd1a93fb7a5fd764c4090f7a4a2a207b0b
a74f8f9d615de6a76aa1918c2c4e9d0c1f0c8385
refs/heads/master
2021-01-07T12:14:37.891227
2020-06-01T14:20:27
2020-06-01T14:20:27
241,686,261
2
0
null
null
null
null
UTF-8
Python
false
false
7,892
py
from pathlib import Path from modules.pipelines import * from shutil import move import datetime import pandas as pd import traceback #method to recursively remove a folder def delete_folder(pth) : for sub in pth.iterdir() : if sub.is_dir() : delete_folder(sub) else : sub.unlink() pth.rmdir() def convert_time(timestring): if len(timestring.split(':'))>2: if "." in timestring: pt =datetime.datetime.strptime(timestring,'%H:%M:%S.%f') else: pt =datetime.datetime.strptime(timestring,'%H:%M:%S') else: if "." in timestring: pt =datetime.datetime.strptime(timestring,'%M:%S.%f') else: pt =datetime.datetime.strptime(timestring,'%M:%S') return pt if Path('log_running').exists(): Path('log_running').unlink() if Path('gene_families').exists(): delete_folder(Path('gene_families')) if Path('softwares_data').exists(): delete_folder(Path('softwares_data')) if Path('execution_stats').exists(): delete_folder(Path('execution_stats')) #foler containing the data for the execution of the pangenome softwares pth_software_data = Path('softwares_data') pth_software_data.mkdir(exist_ok=True) #gene families folder where we have the .clus files pth_gene_families = Path('gene_families') pth_gene_families.mkdir(exist_ok=True) #temporary folder pth_tmp = Path('tmp') pth_tmp.mkdir(exist_ok=True) #execution stats folder stats_folder = Path('execution_stats') stats_folder.mkdir(exist_ok=True) datasets = { 'leaf':Path('datasets','leaf'), 'root':Path('datasets','root') } #backtranslate from aa to dna datasets #backtranslate protein genes of the genomes into DNA def backtranslate(path): aa_codon_table = { #from translation table 11 NCBI 'F':'TTT' , 'Y':'TAT' , 'C':'TGT' , 'W':'TGG' , 'H':'CAT' , 'L':'CTC' , 'P':'CCG' , 'Q':'CAG' , 'I':'ATC' , 'T':'ACC' , 'N':'AAC' , 'S':'AGC' , 'M':'ATG' , 'K':'AAG' , 'R':'AGG' , 'D':'GAC' , 'V':'GTG' , 'A':'GCG' , 'E':'GAG' , 'G':'GGG' , '*':'TAG' } #path = Path(path,'genome','genome','DB') Path(path,'dna').mkdir(parents=True, exist_ok=True) 
Path(path,'protein').mkdir(parents=True, exist_ok=True) for filepath in sorted(path.glob('*_aa.fasta')): to_fasta = list() for sequence in SeqIO.parse(filepath,'fasta'): aa_seq = list() for char in sequence.seq: aa_seq.append(aa_codon_table[char]) sequence.seq = Seq(''.join(aa_seq), IUPAC.ambiguous_dna ) to_fasta.append(sequence) outfile = filepath.stem.replace('_aa','_dna')+'.fasta' SeqIO.write(to_fasta,Path(path,'dna',outfile),'fasta') #new_path = Path(path,'protein',filepath.name.replace('.fa','.fasta')) #move(filepath, new_path) #per ora copio per poter rilanciare il programma copy(filepath, Path(path,'protein')) #print(outfile) ##### #da commentare se non devo rigenerare i datasets for dtset_type, pth in datasets.items(): for data in pth.glob('*'): print('backtranslate:', data) backtranslate(data) for dtset_type, pth in datasets.items(): print('@vb@', dtset_type, pth) for data in pth.glob('*'): print('@vb@',data) #software execution data ( memory, elapsed_time ) stats= list() software_data = Path(pth_software_data, dtset_type, data.stem) software_data.mkdir(parents=True,exist_ok=True) print('@vb@',software_data) gene_families = Path(pth_gene_families, dtset_type, data.stem) gene_families.mkdir(parents=True, exist_ok=True) print('@vb@',gene_families) Path(stats_folder,dtset_type,data.stem).mkdir(parents=True, exist_ok=True) #le statistiche di esecuzione poi vengono salvate su un file quindi non devo cambiare nulla, #mi basta definire dove salvare il file nell' open() #PANDELOS try: pandelos_stat = pandelos(data, gene_families, software_data) if pandelos_stat != None: stats += pandelos_stat else: #need to remove pandelos empty clus file when it gets halted before completing (only good result files are kept) p = Path(gene_families,'pandelos_families.clus') if p.exists(): p.unlink() except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc()) print() #PANX try: panx_stat = panx(data,gene_families,software_data) if panx_stat != 
None: stats += panx_stat except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc()) print() #PANSEQ try: panseq_stat = panseq(data,gene_families,software_data) if panseq_stat != None: stats += panseq_stat except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc()) print() #GET_HOMOLOGUES try: gethomologues_stat = gethomologues(data,gene_families,software_data) if gethomologues_stat != None: stats += gethomologues_stat except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc()) print() #PGAP pgap_stat = pgap(data,gene_families,software_data) if pgap_stat != None: stats += pgap_stat print() #PANGET try: panget_stat = panget(data,gene_families,software_data) if panget_stat != None: stats += panget_stat except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc()) print() #ROARY try: roary_stat = roary(data,gene_families,software_data) if roary_stat != None: stats += roary_stat except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc()) print() try: #BLAST all vs all , input for panoct and micropan print('Running BLAST...') blast_stat = call_program([data,software_data], 'blast') print('...BLAST done!') path_blastall = Path(software_data, 'blastDB','blastall.out') if blast_stat != None: stats += blast_stat #PANOCT panoct_stat = panoct(data,gene_families,software_data) if panoct_stat != None: stats += panoct_stat #MICROPAN micropan_stat = micropan(data,gene_families,software_data) if micropan_stat != None: stats += micropan_stat else: print('MESSAGE: BLAST was terminated, panoct and micropan will not be executed') except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc()) try: #cambiare il tempo in numero di secondi if len(stats)>0: #if i have at least one result (no results if all softwares take more than 2h to compute) print('saving resources 
used for:') print('dataset type:',dtset_type) print('dataset name:',data.stem) ###SISTEMARE L'OUTPUT IN MODO CHE I FILE VENGANO GENERATI DIRETTAMENTE NELLA CARTELLA CORRETTA #idea -> salvare come file csv in modo da poter utilizzare i dataframe per fare la media di #tutti i risultati stat_dict = dict() blast_ram = int() blast_time = int() for s in stats: d = s.split(' ')[1:] if d[0] != 'blast': #ram and time will be added to panoct and micropan ram and time stat_dict[d[0]]= dict() if d[0] == 'blast': blast_ram = int(d[1]) pt = convert_time(d[2]) blast_time = pt.second+pt.minute*60+pt.hour*3600 elif d[0] == 'panoct' or d[0] == 'micropan': stat_dict[d[0]]['ram'] = max(blast_ram, int(d[1])) pt = convert_time(d[2]) software_time = pt.second+pt.minute*60+pt.hour*3600 stat_dict[d[0]]['time'] = blast_time + software_time else: stat_dict[d[0]]['ram'] = int(d[1]) pt = convert_time(d[2]) software_time = pt.second+pt.minute*60+pt.hour*3600 stat_dict[d[0]]['time'] = software_time df_stats = pd.DataFrame(stat_dict) df_stats.to_csv(Path(stats_folder,dtset_type,data.stem,'running_data.csv'), sep='\t') except Exception as e: print('@vb@',"type error: " + str(e)) print('@vb@',traceback.format_exc())
[ "vincenzo.bonnici@gmail.com" ]
vincenzo.bonnici@gmail.com
9aa7d125ff93dd331e5aade2f70114255f9f0ff6
e9a2b904da10ec9f38fb2a36093e8e84bf902d9d
/_400/manage.py
f735a2a142b938431f35228cdbfeea4eb9b258b0
[]
no_license
fhim50/jobsearchengine
8d848bc84078a1836599eca50a2c7a5a2304814e
c7aae927542fc0749539de417d716cd32d06d75f
refs/heads/master
2020-06-03T20:43:55.524245
2013-01-22T08:27:31
2013-01-22T08:27:31
7,016,723
1
0
null
null
null
null
UTF-8
Python
false
false
257
py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "_400.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
[ "kwaw@kwaw-HP-625.(none)" ]
kwaw@kwaw-HP-625.(none)
9c60554c705a99f445f4cd9a478d7136f409ae20
3574de071eb81b32ec1b2ef1edd8affc1b728e24
/lesson/ftp_client.py
abd63fe1e62360076d922e7235204494601b03cf
[]
no_license
dpochernin/Homework_6
54376cfed1ae5fe4d3cdd3b114a0c7e5b7707f6c
502ee83b01cff38c0fdfbe11ae9f059758b4403d
refs/heads/master
2020-08-24T12:13:22.438742
2019-10-22T13:41:55
2019-10-22T13:41:55
216,823,296
0
0
null
null
null
null
UTF-8
Python
false
false
439
py
from ftplib import FTP with FTP(host='127.0.0.1', user='user', passwd='12345') as ftp: print(ftp.retrlines('LIST')) out = '..\\files_for_test\\index.png' with open(out, 'wb') as f: ftp.retrbinary('RETR index.png', f.write) path = '..\\files_for_test\\firm.txt' with open(path, 'rb') as file_2: print(file_2.name) ftp.storbinary('STOR firm.txt', file_2, 1024) print(ftp.retrlines('LIST'))
[ "d.pochernin@gmail.com" ]
d.pochernin@gmail.com
d8d67f00d13100bc57dbb65f9bea4894dce778a7
c6a51702a01c341c41a0d9df54f6106111a6cac5
/part1/train.py
9212f4f41bdeb5c47f86caf4f51a450eb7f80085
[]
no_license
eacamilla/assignment_2
3b5ac15cd2654827783ece73875580b9e0c90676
ce4b310075193fcea24f9f364a0821d7f29c1c76
refs/heads/master
2020-09-14T18:01:35.188856
2019-11-21T15:42:44
2019-11-21T15:42:44
223,207,359
0
0
null
null
null
null
UTF-8
Python
false
false
5,946
py
################################################################################ # MIT License # # Copyright (c) 2019 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to conditions. # # Author: Deep Learning Course | Fall 2019 # Date Created: 2019-09-06 ################################################################################ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import time from datetime import datetime import numpy as np import torch from torch.utils.data import DataLoader from part1.dataset import PalindromeDataset from part1.vanilla_rnn import VanillaRNN from part1.lstm import LSTM # You may want to look into tensorboard for logging # from torch.utils.tensorboard import SummaryWriter ################################################################################ def train(config): assert config.model_type in ('RNN', 'LSTM') # Initialize the device which to run the model on #device = torch.device(config.device) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # Initialize the model that we are going to use if config.model_type is 'RNN': model = VanillaRNN(seq_length = config.input_length, input_dim= config.input_dim, num_hidden = config.num_hidden, num_classes = config.num_classes, batch_size = config.batch_size, device= device) if config.model_type is 'LSTM': model = LSTM(seq_length = config.input_length, input_dim= config.input_dim, num_hidden = config.num_hidden, num_classes = config.num_classes, batch_size = config.batch_size, device= device) # Initialize the dataset and data loader 
(note the +1) dataset = PalindromeDataset(config.input_length+1) data_loader = DataLoader(dataset, config.batch_size, num_workers=1) # Setup the loss and optimizer criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.RMSprop(model.parameters(), config.learning_rate) #optimizer = torch.optim.Adam(model.parameters(), config.learning_rate) Accuracy = [] for step, (batch_inputs, batch_targets) in enumerate(data_loader): # Only for time measurement of step through network t1 = time.time() y = model.forward(batch_inputs.to(device)) loss = criterion(y, batch_targets.to(device)) loss.backward() ############################################################################ # QUESTION: what happens here and why? # limits the size of the parameter updates by scaling the gradients down # Should be placed after loss.backward() but before optimizer.step() ############################################################################ torch.nn.utils.clip_grad_norm(model.parameters(), max_norm=config.max_norm) ############################################################################ optimizer.step() loss = loss.item() acc_in = np.argmax(y.cpu().detach().numpy(), axis=1) == batch_targets.cpu().detach().numpy() accuracy = np.sum(acc_in)/ batch_targets.shape[0] Accuracy.append(accuracy) # Just for time measurement t2 = time.time() examples_per_second = config.batch_size/float(t2-t1) if step % 10 == 0: print("[{}] Train Step {:04d}/{:04d}, Batch Size = {}, Examples/Sec = {:.2f}, " "Accuracy = {:.2f}, Loss = {:.3f}".format( datetime.now().strftime("%Y-%m-%d %H:%M"), step, config.train_steps, config.batch_size, examples_per_second, accuracy, loss )) if step == config.train_steps: # If you receive a PyTorch data-loader error, check this bug report: # https://github.com/pytorch/pytorch/pull/9655 break print('Done training. 
:)') ################################################################################ ################################################################################ if __name__ == "__main__": # Parse training configuration parser = argparse.ArgumentParser() # Model params parser.add_argument('--model_type', type=str, default="RNN", help="Model type, should be 'RNN' or 'LSTM'") #parser.add_argument('--model_type', type=str, default="LSTM", help="Model type, should be 'RNN' or 'LSTM'") parser.add_argument('--input_length', type=int, default=10, help='Length of an input sequence') #adjust input length for different palindrome lengths parser.add_argument('--input_dim', type=int, default=1, help='Dimensionality of input sequence') parser.add_argument('--num_classes', type=int, default=10, help='Dimensionality of output sequence') parser.add_argument('--num_hidden', type=int, default=128, help='Number of hidden units in the model') parser.add_argument('--batch_size', type=int, default=128, help='Number of examples to process in a batch') parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate') parser.add_argument('--train_steps', type=int, default=10000, help='Number of training steps') parser.add_argument('--max_norm', type=float, default=10.0) parser.add_argument('--device', type=str, default="cuda:0", help="Training device 'cpu' or 'cuda:0'") config = parser.parse_args() #Loop over palindrome length and different seeds fixme # Train the model train(config) #Create plot fixme
[ "eacamillawerner@gmail.com" ]
eacamillawerner@gmail.com
6631cd057d686d0a0d7c910975132247c9c16828
4e30c855c253cc1d972d29e83edb9d5ef662d30a
/approval/models/returns.py
fc4920552b9ab0a32ad1d864ac946c3732809dab
[ "MIT" ]
permissive
rajeshr188/django-onex
8b531fc2f519d004d1da64f87b10ffacbd0f2719
0a190ca9bcf96cf44f7773686205f2c1f83f3769
refs/heads/master
2023-08-21T22:36:43.898564
2023-08-15T12:08:24
2023-08-15T12:08:24
163,012,755
2
0
NOASSERTION
2023-07-22T09:47:28
2018-12-24T17:46:35
Python
UTF-8
Python
false
false
3,919
py
from django.contrib.contenttypes.fields import GenericRelation from django.db import models, transaction from django.db.models import Sum from django.urls import reverse from approval.models import ApprovalLine from contact.models import Customer from dea.models import Journal, JournalTypes from product.models import StockLot """ When an approval voucher is created, the stock items that are being approved for release to a contact should be recorded in the database or inventory management system, along with the contact's information. When the approved stock items are released to the contact, they should be recorded as being moved out of the approval area and into the possession of the contact. If the contact returns some or all of the approved stock items, those items should be recorded as being returned to the approval area. When the approval is complete and all approved stock items have been returned, the approval should be closed. If any stock items were approved for release but not returned, those items should be flagged for invoicing. When the invoice is created, the stock items that were approved but not returned should be included on the invoice, along with the appropriate billing information. If any changes are made to the approval, return, or invoice, those changes should be recorded in the database or inventory management system, along with a timestamp and the user who made the changes. """ # Create your models here. 
class Return(models.Model): created_at = models.DateTimeField(auto_now_add=True, editable=False) updated_at = models.DateTimeField(auto_now=True, editable=False) created_by = models.ForeignKey( "users.CustomUser", on_delete=models.CASCADE, null=True, blank=True ) contact = models.ForeignKey( Customer, related_name="approval_returns", on_delete=models.CASCADE ) total_wt = models.DecimalField(max_digits=10, decimal_places=3, default=0) total_qty = models.IntegerField(default=0) posted = models.BooleanField(default=False) def __str__(self): return f"Return #{self.id}" def get_absolute_url(self): return reverse("approval:approval_return_detail", args=(self.pk,)) def get_total_qty(self): return self.returnitem_set.aggregate(t=Sum("quantity"))["t"] def get_total_wt(self): return self.returnitem_set.aggregate(t=Sum("weight"))["t"] class ReturnItem(models.Model): return_obj = models.ForeignKey(Return, on_delete=models.CASCADE) line_item = models.ForeignKey( ApprovalLine, on_delete=models.CASCADE, related_name="return_items" ) quantity = models.IntegerField(default=0) weight = models.DecimalField(max_digits=10, decimal_places=3, default=0.0) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) journal = GenericRelation(Journal, related_query_name="approval_returnitem") def __str__(self): return f"{self.quantity} x {self.line_item.product}" def get_absolute_url(self): return reverse("approval:approval_returnitem_detail", args=(self.pk,)) def get_hx_edit_url(self): kwargs = {"return_pk": self.return_obj.id, "pk": self.pk} return reverse("approval:approval_returnitem_update", kwargs=kwargs) def create_journal(self): return Journal.objects.create( journal_type=JournalTypes.SJ, desc="Approval Return", content_object=self, ) def get_journal(self): return self.journal.first() @transaction.atomic def post(self, journal): self.line_item.product.transact(self.weight, self.quantity, journal, "AR") self.line_item.update_status() 
@transaction.atomic def unpost(self, journal): self.line_item.product.transact(self.weight, self.quantity, journal, "A") self.line_item.update_status()
[ "rajeshrathodh@gmail.com" ]
rajeshrathodh@gmail.com
166b3242dff0b00a8bc4391164f1da912dc2126a
8ab090a03f23856bf959dbfd9d12758425ceeb02
/examples/molecular/master/result/crop.py
6402a8ae80cebb641e43e2560e662ab2ca857bc9
[ "MIT" ]
permissive
blostic/cirrus
bc2c9297606f7c747a67ea1532e0b802950fa73d
faaca48052f6a29e434e8a9dcf6625d426a1c8b7
refs/heads/master
2021-05-04T11:07:01.907627
2017-09-09T07:34:26
2017-09-09T07:34:26
51,220,056
0
0
null
null
null
null
UTF-8
Python
false
false
530
py
from PIL import Image for i in range(1, 10): img = Image.open("image-000000"+ str(i) + ".png") area = (300, 430, 400, 530) cropped_img = img.crop(area) # cropped_img.show() resized = cropped_img.resize((200, 200), Image.ANTIALIAS) resized.save("res"+ str(i) + ".png") for i in range(10, 13): img = Image.open("image-00000"+ str(i) + ".png") area = (300, 430, 400, 530) cropped_img = img.crop(area) # cropped_img.show() resized = cropped_img.resize((200, 200), Image.ANTIALIAS) resized.save("res"+ str(i) + ".png")
[ "piotr.skibiak@gmail.com" ]
piotr.skibiak@gmail.com
682bf91e9068ae3b44de92b625bce0c0b2052cf9
c4f096d99db7134d2991b7aad0192dcd1f58511a
/select_proxy/settings_tokens.py
0fe76835b8d85292b2509c2fc965df2ae5770428
[]
no_license
for-mao/select-proxy
a3157a07f954a8eaaffb0a4bd4402ae000a784c1
211b88af7e1cf9f31688c3acb188dbc4838e8c74
refs/heads/master
2020-04-02T16:40:20.437075
2018-10-25T06:48:02
2018-10-25T06:48:02
154,623,342
0
0
null
null
null
null
UTF-8
Python
false
false
170
py
DIGITALOCEAN_API_TOKEN = { 'jiang': '', 'sun': '' } LINODE_API_TOKEN = { 'jiang': '', 'sun': '' } VULTR_API_TOKEN = { 'jiang': '', 'sun': '' }
[ "15501852282@163.com" ]
15501852282@163.com
46bccb1d8843e47334574854ef19d802258e5b4a
df0ba034669281e5d35743ef77ebe16ef07f0d83
/ex6.py
a773b7f082cf82ff804b6cca0ec4d51442eb2930
[]
no_license
pstauble/LPTHW-Exercises
f18d107281dec7b9aa7cb85e0e7e576253b20596
f17faad7f1bbeb97041850865232835ad9190e28
refs/heads/master
2021-01-25T05:57:55.423730
2014-11-04T16:30:54
2014-11-04T16:30:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
636
py
# This variable uses %d to add a number into the string" x = "There are %d types of people." %10 #These are two variables binary = "binary" do_not = "don't" #We add two strings into this one string y = "Those who know %s and those who %s." % (binary, do_not) #We print the two statements wich contain the strings and numbers print x print y #we are adding the above strings into these strings print "I said: %r." % x print "I also said: '%s'." % y hilarious = False joke_evaluation = "Isn't that joke so funny?! %r" print joke_evaluation % hilarious w = "this is the left side of..." e = "a string with a right side." print w + e
[ "patrick.stauble@gmail.com" ]
patrick.stauble@gmail.com
1003f7677d03e8e3d0a1afd5c0cd5332d9675674
aeaf548fba8ee9f88cd9254f2bc4ac0a3bbfb207
/zhaquirks/hivehome/__init__.py
68a13be23af5bd6731cf553a3e6f1c6ad1c07794
[ "Apache-2.0" ]
permissive
vigonotion/zha-device-handlers
6001aa812380a0540d76f68778ebade93f93928d
6d0560655428e1f04626a7722febf492c4174e8b
refs/heads/dev
2020-12-26T12:07:27.192810
2020-01-31T17:57:29
2020-01-31T17:57:29
237,504,327
1
0
Apache-2.0
2020-01-31T22:49:11
2020-01-31T19:47:15
null
UTF-8
Python
false
false
948
py
"""Hive Home.""" import asyncio from zigpy.quirks import CustomCluster from zigpy.zcl.clusters.security import IasZone from ..const import CLUSTER_COMMAND, OFF, ZONE_STATE HIVEHOME = "HiveHome.com" class MotionCluster(CustomCluster, IasZone): """Motion cluster.""" cluster_id = IasZone.cluster_id def __init__(self, *args, **kwargs): """Init.""" super().__init__(*args, **kwargs) self._timer_handle = None def handle_cluster_request(self, tsn, command_id, args): """Handle the cluster command.""" if command_id == 0: if self._timer_handle: self._timer_handle.cancel() loop = asyncio.get_event_loop() self._timer_handle = loop.call_later(30, self._turn_off) def _turn_off(self): self._timer_handle = None self.listener_event(CLUSTER_COMMAND, 999, 0, [0, 0, 0, 0]) self._update_attribute(ZONE_STATE, OFF)
[ "noreply@github.com" ]
noreply@github.com
b632eb9d3117a1f0ed7b1f8d42128ff804ab6e39
e5b4bead66f3f560bb77221e41e57057c52894e0
/PYTHON/derivative.py
1c01571d62ada2f1e4354fbb4a45322d870909e9
[]
no_license
lafionium/DMI
ce45f40a88ba52180b86daac1abedb4c54200209
ea81832c69c3531466a4db8eb9289a02e45c54a4
refs/heads/master
2021-05-16T06:52:43.741331
2018-01-13T15:50:09
2018-01-13T15:50:09
103,494,626
0
0
null
null
null
null
UTF-8
Python
false
false
854
py
## -*- coding: utf-8 -*- import numpy as np import matplotlib.pyplot as plt def mans_sinuss(x): k = 0 a = (-1)**0*x**1/(1) S = a while k<= 500: k = k + 1 R = (-1) * x**2 /(2*k*(2*k+1)) a = a * R S = S + a return S a = 0 b = 3 * np.pi x = np.arange(a,b,0.05) y = mans_sinuss(x) plt.plot(x,y) plt.grid() #plt.show() n = len(x) y_prim = [] for i in range(n-1): #print i, x[i], y[i], delta_y = y[i+1] - y[i] delta_x = x[i+1] - x[i] #y_prim = delta_y / delta_x #print y_prim y_prim.append(delta_y / delta_x) #plt.plot(x[:n-1],y_prim) #plt.show() n = len(x) y_prim2 = [] for i in range(n-2): delta_y_prim = y_prim[i+1] - y_prim[i] delta_x = x[i+1] - x[i] y_prim2.append(delta_y_prim / delta_x) plt.plot(x[:n-1],y_prim) plt.plot(x[:n-2],y_prim2) plt.show()
[ "jelizavetak@inbox.lv" ]
jelizavetak@inbox.lv
9fdbeecbfcef675451289bc39a5aa7cf2a6cb5d2
07cabb7e5fec85992496cf0c825affc78b33dba4
/Scrapping/scrappy_venv/bin/twistd
621775ceea805f17bdf7ff6adee3e71e63aabb00
[]
no_license
WooodHead/Code-practice
8b5abd0bee3796926b3f738a276acb216480b585
62eafb6d856d2631a7659d68ab94d7d78a72c9b8
refs/heads/master
2022-11-11T08:03:07.070167
2020-06-27T07:50:45
2020-06-27T07:50:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
469
#!/Users/lyanalexandr/OneDrive/Projects/Programming/Python/Practice/Scrapping/scrappy_venv/bin/python3.8 # EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==20.3.0','console_scripts','twistd' __requires__ = 'Twisted==20.3.0' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('Twisted==20.3.0', 'console_scripts', 'twistd')() )
[ "alexlyan@yahoo.com" ]
alexlyan@yahoo.com
b4e8b03b8387462c961ea36f580a145007ada11a
38b68b2202726bcdea32271448fea22554db6121
/BOJ/Silver/1992.py
3b0a539d575b9951914cdb95f3dbd52b1b69e1cd
[]
no_license
Soohee410/Algorithm-in-Python
42c4f02342dc922e44ee07e3a0e1d6c0a559e0bb
fbc859c092d86174387fe3dc11f16b616e6fdfab
refs/heads/master
2023-05-06T13:07:19.179143
2021-05-14T14:32:44
2021-05-14T14:32:44
336,232,129
4
0
null
null
null
null
UTF-8
Python
false
false
495
py
def QuadTree(n, cp, x, y): if n == 1: return cp[x][y] cp1 = QuadTree(n // 2, cp, x, y) cp2 = QuadTree(n // 2, cp, x, y + n // 2) cp3 = QuadTree(n // 2, cp, x + n // 2, y) cp4 = QuadTree(n // 2, cp, x + n // 2, y + n // 2) if cp1 == cp2 == cp3 == cp4 and len(cp1) == 1: return cp1 return '('+cp1+cp2+cp3+cp4+')' if __name__ == "__main__": n = int(input()) arr = [list(input().rstrip()) for _ in range(n)] print(QuadTree(n, arr, 0, 0))
[ "ggohee0410@gmail.com" ]
ggohee0410@gmail.com
da7cfc6806d77782ce1ac44df83deae1ecdcb3d5
c2bf65f35ac84c93b815c64eee4bfb15e9c1a0ee
/567.字符串的排列.py
6a7b61cfa51e870f9c9e4e3bea86ee165162909c
[]
no_license
hhs44/leetcode_learn
e7651548e41176b1fd56a1565effbe076d6b280a
fd4f51a4803202a2e4fe3d97ef2b54adc218e691
refs/heads/master
2022-03-06T14:35:51.891389
2022-02-09T14:55:13
2022-02-09T14:55:13
250,731,211
2
0
null
null
null
null
UTF-8
Python
false
false
1,359
py
# # @lc app=leetcode.cn id=567 lang=python3 # # [567] 字符串的排列 # # @lc code=start from collections import Counter class Solution: def checkInclusion(self, s1: str, s2: str) -> bool: # for x,y in enumerate(s2) : # if y in s1: # if x + len(s1) <= len(s2): # t = s2[x:x+len(s1)] # if Counter(t) == Counter(s1): # return True # return False # # 2222 # r1 = Counter(s1) # r2 = Counter() # l1, l2 = len(s1), len(s2) # temp = 0 # x = y = 0 # while y < l2 : # r2[s2[y]] += 1 # if r2[s2[y]] == r1[s2[y]]: # temp += 1 # if temp == len(r1): # return True # y += 1 # if y - x + 1 > l1: # if r1[s2[x]] == r2[s2[x]]: # temp -= 1 # r2[s2[x]] -= 1 # if r2[s2[x]] == 0: # del r2[s2[x]] # x += 1 # return False count_dict = Counter(s1) m = len(s1) i = 0 j = m - 1 while j < len(s2): if Counter(s2[i:j+1]) == count_dict: return True i += 1 j += 1 return False # @lc code=end
[ "1159986871@qq.com" ]
1159986871@qq.com
0e5ce8dcbc905ec244e57d0954c2a482627e3370
2bf7879f0c134b1a207fd4c249bb5b58ae219692
/src/bowtieutil.py
0ae531def009a6ab131f85970e245aef23a9bd32
[]
no_license
logstar/NET-seq-CLT
50adaffc0db9311bdaa110691ea3cd8f3697bf44
5aa77e4c26ac3d2b90c88961dbb73fd2a7981e72
refs/heads/master
2021-03-27T08:46:50.690982
2018-09-20T16:58:09
2018-09-20T16:58:09
92,326,581
0
0
null
null
null
null
UTF-8
Python
false
false
2,422
py
import collections # aln_lcoord_b0: 0 based coordinate of the alignment start (left most bp of # alignment) # num_alt_aln: number of alternative alignment positions of the same read # mm_desc: mismatch description (comma sep) BowtieRecord = collections.namedtuple('BowtieRecord', ['seqid', 'strand', 'refid', 'aln_lcoord_b0', 'seq', 'qscore', 'num_alt_aln', 'mm_desc']) def iterate_bowtie_out_file(bt_fn): bt_file = open(bt_fn, 'r') for line in bt_file: fields = line.strip('\n').split('\t') if len(fields) != 8: raise ValueError("Number of fields not equal to 8: %s" % line) rec = BowtieRecord(fields[0], fields[1], fields[2], int(fields[3]), fields[4], fields[5], int(fields[6]), fields[7]) if rec.strand not in ('+', '-'): raise ValueError("Strand not +/-: %s" % line) yield rec bt_file.close() # Treat genome as circular class BowtieRecordCounter(object): """docstring for BowtieRecordCounter""" def __init__(self, ref_length): super(BowtieRecordCounter, self).__init__() self.ref_length = ref_length self.align_count_dict = {} def insert_bt_rec_ntup(self, bt_rec_ntup): if bt_rec_ntup.aln_lcoord_b0 >= self.ref_length: raise ValueError("Alignment start >= ref length. %s" % bt_rec_ntup._asdict()) if bt_rec_ntup.strand == '+': tx_start_pos_b0 = bt_rec_ntup.aln_lcoord_b0 else: tx_start_pos_b0 = bt_rec_ntup.aln_lcoord_b0 + len(bt_rec_ntup.seq) - 1 if tx_start_pos_b0 >= self.ref_length: tx_start_pos_b0 -= self.ref_length if tx_start_pos_b0 not in self.align_count_dict: self.align_count_dict[tx_start_pos_b0] = {'+' : 0, '-' : 0} self.align_count_dict[tx_start_pos_b0][bt_rec_ntup.strand] += 1 def output_count_table(self, output_fn): output_file = open(output_fn, 'w') for key in sorted(self.align_count_dict.keys()): output_file.write("%d\t%d\t%d\n" % (key, self.align_count_dict[key]['+'], self.align_count_dict[key]['-'])) output_file.close()
[ "y.will.zhang@gmail.com" ]
y.will.zhang@gmail.com
d1503b86fa4896111916a13e6c521ac6752af954
0cc58384745fddd40f0593941223237daba41734
/meiduo_mall/apps/contents/migrations/0001_initial.py
aa0735062da06aa50568b4b3fed32360cc82a92e
[]
no_license
four-leaf-clover1/meiduo_mall
d216d565300e8be3b18fe144a3da721d606d63b1
9d82325ec18ac050e5b076e6e24f6613945bee89
refs/heads/master
2020-06-29T20:53:24.630099
2019-08-09T09:49:27
2019-08-09T09:49:27
200,621,909
0
0
null
null
null
null
UTF-8
Python
false
false
2,487
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2019-07-26 04:58 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Content', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')), ('title', models.CharField(max_length=100, verbose_name='标题')), ('url', models.CharField(max_length=300, verbose_name='内容链接')), ('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='图片')), ('text', models.TextField(blank=True, null=True, verbose_name='内容')), ('sequence', models.IntegerField(verbose_name='排序')), ('status', models.BooleanField(default=True, verbose_name='是否展示')), ], options={ 'verbose_name': '广告内容', 'verbose_name_plural': '广告内容', 'db_table': 'tb_content', }, ), migrations.CreateModel( name='ContentCategory', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')), ('name', models.CharField(max_length=50, verbose_name='名称')), ('key', models.CharField(max_length=50, verbose_name='类别键名')), ], options={ 'verbose_name': '广告内容类别', 'verbose_name_plural': '广告内容类别', 'db_table': 'tb_content_category', }, ), migrations.AddField( model_name='content', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contents.ContentCategory', verbose_name='类别'), ), ]
[ "1173349074@qq.com" ]
1173349074@qq.com
442c069c094cb5cdedab2a6e2737cf3ce70e9022
462060fa57d2db4f19b256558d57a446bb60bf2a
/Lesson07/setOperations.py
3307e272cab913f9f71a3e73b48a6f85fbfb0adb
[ "MIT" ]
permissive
TrainingByPackt/Python-Fundamentals-eLearning
d82b25ebd87a8503125e6b26991f058424b3da28
0d7aa95b9b163802a93a7dab5f00d80e10677b82
refs/heads/master
2020-04-21T12:25:15.223114
2019-02-07T11:40:18
2019-02-07T11:40:18
169,561,372
3
2
null
null
null
null
UTF-8
Python
false
false
372
py
a = {1, 2, 3} b = {3, 4, 5} # A union B print('a union b') print(a.union(b)) print(a | b) # A intersection B print('a intersection b') print(a.intersection(b)) print(a & b) # A equals to B print(a == b) # Subsets and supersets print('subsets and supersets') print(a.issubset(b)) print(b.issuperset(a)) b.update(a) print(b) print(a.issubset(b)) print(b.issuperset(a))
[ "madhunikitac@packtpub.com" ]
madhunikitac@packtpub.com
ae8263755e7c9f0478df3a1a92714a27aa901a2c
79d4c542ed5d1191262a1d163b3a82996106a113
/triple_well_2D/triple_well_2.py
90ef0300feffe81071dab1ba45cff68364f15895
[]
no_license
pascalwangt/PyTAMS
397bb0804f04e9075b510196c173850c560c40ad
f15d55951c83bdd4c9aace76d4e1b87864362d43
refs/heads/master
2020-06-24T16:09:52.549525
2019-08-06T22:06:13
2019-08-06T22:06:13
199,011,484
2
0
null
null
null
null
UTF-8
Python
false
false
7,521
py
# -*- coding: utf-8 -*- """ Created on Tue May 7 23:45:49 2019 @author: pasca """ import numpy as np import sympy as sp from sympy import exp, tanh, cosh from sympy.abc import x,y import h5py import sys sys.path.append('../') import ellipsoid_fun import trajectory_to_score_function #%% #states initial_state = np.array([-5.7715972293490533928661534446291625499725341796875, 5.4694526174595439355376248779551373591090168702066875994205474853515625e-09]) saddle_state = np.array([0, -2.93429453258859905540045787120106979273259639739990234375e-02]) #saddle_state = np.array([0, 18]) target_state = np.array([5.7715972293490533928661534446291625499725341796875, 5.4694526174595439355376248779551373591090168702066875994205474853515625e-09]) #%% #general confinement alpha = 0.1 beta = 0.05 yc = 0 #stable minima x_min = 6 depth = 10 y_decay = 2 x_decay = 2 #metastable minimum y_intermediate = 20/1.5 depth_intermediate = 20/1.5 y_decay_intermediate = 3 x_decay_intermediate = 5 #barrier y_barrier = 15/1.5 y_decay_barrier = 1 x_decay_barrier = 2 barrier = 20 def potential(x,y): return 4.8+alpha*x**2+beta*(y-yc)**2+barrier*(1+np.tanh(-(y-y_barrier)/y_decay_barrier))*np.exp(-(x/x_decay_barrier)**2)-depth_intermediate*np.exp(-(x/x_decay_intermediate)**2-((y-y_intermediate)/y_decay_intermediate)**2)-depth*np.exp(-((x-x_min)/x_decay)**2-(y/y_decay)**2)-depth*np.exp(-((x+x_min)/x_decay)**2-(y/y_decay)**2) def force(v): x,y=v return np.array([-2*alpha*x+barrier*(1+np.tanh(-(y-y_barrier)/y_decay_barrier))*2*x/x_decay_barrier**2*np.exp(-(x/x_decay_barrier)**2)-2*x/x_decay_intermediate**2*depth_intermediate*np.exp(-(x/x_decay_intermediate)**2-((y-y_intermediate)/y_decay_intermediate)**2)-2*(x-x_min)/x_decay**2*depth*np.exp(-((x-x_min)/x_decay)**2-(y/y_decay)**2)-2*(x+x_min)/x_decay**2*depth*np.exp(-((x+x_min)/x_decay)**2-(y/y_decay)**2), 
-2*beta*(y-yc)+barrier/y_decay_barrier*np.exp(-(x/x_decay_barrier)**2)/np.cosh(-(y-y_barrier)/y_decay_barrier)**2-2*(y-y_intermediate)/y_decay_intermediate**2*depth_intermediate*np.exp(-(x/x_decay_intermediate)**2-((y-y_intermediate)/y_decay_intermediate)**2)-2*y/y_decay**2*depth*np.exp(-((x-x_min)/x_decay)**2-(y/y_decay)**2)-2*y/y_decay**2*depth*np.exp(-((x+x_min)/x_decay)**2-(y/y_decay)**2)]) #sympy force matrix force_matrix = sp.Matrix([-2*alpha*x+barrier*(1+tanh(-(y-y_barrier)/y_decay_barrier))*2*x/x_decay_barrier**2*exp(-(x/x_decay_barrier)**2)-2*x/x_decay_intermediate**2*depth_intermediate*exp(-(x/x_decay_intermediate)**2-((y-y_intermediate)/y_decay_intermediate)**2)-2*(x-x_min)/x_decay**2*depth*exp(-((x-x_min)/x_decay)**2-(y/y_decay)**2)-2*(x+x_min)/x_decay**2*depth*exp(-((x+x_min)/x_decay)**2-(y/y_decay)**2), -2*beta*(y-yc)+barrier/y_decay_barrier*exp(-(x/x_decay_barrier)**2)/cosh(-(y-y_barrier)/y_decay_barrier)**2-2*(y-y_intermediate)/y_decay_intermediate**2*depth_intermediate*exp(-(x/x_decay_intermediate)**2-((y-y_intermediate)/y_decay_intermediate)**2)-2*y/y_decay**2*depth*exp(-((x-x_min)/x_decay)**2-(y/y_decay)**2)-2*y/y_decay**2*depth*exp(-((x+x_min)/x_decay)**2-(y/y_decay)**2)]) noise_matrix = None #%% #score functions def score_function_linear(v): score = np.sum((target_state-initial_state)*(v-initial_state)) / np.linalg.norm(target_state-initial_state)**2 if score >=0: return score else: return 1e-5 def score_function_linear_simple(v): return v[0]/target_state[0] def score_function_norm(v): x,y=v return 1/2*np.sqrt((x+1)**2+1/2*y**2) def score_function_circle_maker(param = 4): """ param: decay rate of the exponentials """ dist = np.linalg.norm(target_state-initial_state) eta = np.linalg.norm(saddle_state-initial_state)/dist def score_function(v): return eta - eta*np.exp(-param*(np.linalg.norm(v-initial_state)/dist)**2)+(1-eta)*np.exp(-param*(np.linalg.norm(v-target_state)/dist)**2) return score_function def score_function_ellipsoid_maker(param = 
0.05, sigma=1.5): """ param: decay rate of the exponentials """ eta = np.linalg.norm(target_state-saddle_state)/np.linalg.norm(target_state-initial_state) covariance_matrix_start, quad_form_initial, spectral_radius, level, bound = ellipsoid_fun.ingredients_score_function(force_matrix, initial_state, sigma, noise_matrix=noise_matrix) covariance_matrix_target, quad_form_target, spectral_radius, level, bound = ellipsoid_fun.ingredients_score_function(force_matrix, target_state, sigma, noise_matrix=noise_matrix) def score_function(v): return eta - eta*np.exp(-param*quad_form_initial(v))+(1-eta)*np.exp(-param*quad_form_target(v)) return score_function def score_function_custom_maker(filename='trajectory.hdf5', decay=4): """ param: trajectory file with key "filled_path" decay """ with h5py.File(filename, 'r') as file: filled_path = file['filled_path'][:] file.close() score_function = trajectory_to_score_function.score_function_maker(filled_path.T, decay) return score_function def threshold_simexp_param(param, level): dist = np.linalg.norm(target_state-initial_state) eta = np.linalg.norm(saddle_state-initial_state)/dist return (1-eta)*(1-np.exp(-level*param)) #%% #tests check_ellipsoid_array = 0 potential_well_plot_3D = 0 potential_well_plot = 0 if check_ellipsoid_array: import matplotlib.pyplot as plt #ell = ellipsoid_fun.get_ellipsoid_array(target_state, quad_form, level, bound) plt.scatter(ell.T[0], ell.T[1]) CS = ellipsoid_fun.draw_ellipsoid_2D(force_matrix, target_state, noise = sigma) foo = ellipsoid_fun.check_ellipsoid(ell, score_function_simexp_ell_param, threshold=threshold_simexp, tolerance=1e-3) score_level = ellipsoid_fun.get_levelset_array(target_state, score_function_simexp_ell, level = 1-threshold_simexp, bound=2*bound, tolerance = 1e-3) plt.scatter(score_level.T[0], score_level.T[1], alpha = 0.5) print(foo) if potential_well_plot_3D: import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = fig.add_subplot(111, 
projection='3d') x,y = np.linspace(-12,12,100), np.linspace(-10,25,100) xx, yy = np.meshgrid(x, y) zz = potential(xx,yy) im = ax.plot_surface(xx,yy,zz, cmap = 'RdBu_r') ax.set_xlabel('x') ax.tick_params(axis='x', which='major', pad=0) ax.set_ylabel('y') ax.set_zlabel('V(x,y)', labelpad = 10) clb = fig.colorbar(im, fraction=0.03, pad=-0.1) clb.ax.set_title('V(x,y)', fontsize = 16) ax.set_facecolor('white') #plt.savefig('2D_simple_double_well_ax3D.png') if potential_well_plot: import matplotlib.pyplot as plt fig = plt.figure() x,y = np.linspace(-15,15,100), np.linspace(-15,25,100) xx, yy = np.meshgrid(x, y) pot = potential(xx,yy) im = plt.contourf(xx, yy, pot, 100, cmap = 'RdBu_r') #plt.contour(xx, yy, pot, 30) plt.xlabel('x') plt.ylabel('y') plt.grid() cbar = fig.colorbar(im,) cbar.ax.set_title('$V(x,y)$', pad = 15) plt.scatter(initial_state[0],initial_state[1], marker = 'o', label = 'start', color = 'black', s=40) plt.scatter(target_state[0], target_state[1], marker = 'x', label = 'target', color = 'black', s=40) plt.legend(loc = 'lower right') plt.savefig('../../figures/potential.png', bbox_inches = 'tight') #plt.savefig('2D_simple_double_well.png')
[ "pascal.wang@ens-lyon.fr" ]
pascal.wang@ens-lyon.fr
c046690464e6b41e5003468f3a589deffce10683
cc8f018f4497d868aed95ab83ae0a5a8c646a120
/project_jenkins/urls.py
fe8afd358269796b95dd3aa761fefe6769432698
[]
no_license
adarshharidas/jenkins-pro
e049e09bb14595501248f131ad344a7461e26c40
6d05b93db3033e1ecc4cb419bd5e4a32b6d2ac98
refs/heads/master
2020-03-15T03:33:20.219427
2018-06-12T10:04:17
2018-06-12T10:04:17
131,944,972
0
0
null
null
null
null
UTF-8
Python
false
false
838
py
"""project_jenkins URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from job import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('job.urls')) ]
[ "pymonk@ex.com" ]
pymonk@ex.com
b271f6c59aed17cbcb5181bf740eab268fe43bf0
3a588f7dee481de6f84dc23ca1f3c485fc9177aa
/wishlisht/travel_wishlist/models.py
02062481378f554d8085186a15bde2f017772137
[]
no_license
xm6264jz/capston-lab9-django
105f1ff64758b86e221860bc53d1265da63dede6
ff53b0e4715b527c457748c3f188fd7f639354af
refs/heads/master
2023-01-05T12:35:40.256134
2020-11-10T23:29:04
2020-11-10T23:29:04
307,906,432
0
0
null
2020-11-10T20:13:38
2020-10-28T04:29:03
Python
UTF-8
Python
false
false
1,428
py
from django.db import models from django.contrib.auth.models import User from django.core.files.storage import default_storage class Place(models.Model): user = models.ForeignKey('auth.User', null=False, on_delete=models.CASCADE) name = models.CharField(max_length = 200) visited = models.BooleanField(default = False) notes = models.TextField(blank=True, null=True) date_visited = models.DateField(blank=True, null=True) photo = models.ImageField(upload_to='user_images/', blank=True, null=True) def save(self, *args, **kwargs): # get reference to previous version of this Place old_place = Place.objects.filter(pk=self.pk).first() if old_place and old_place.photo: if old_place.photo != self.photo: self.delete_photo(old_place.photo) super().save(*args, **kwargs) def delete(self, *args, **kwargs): if self.photo: self.delete_photo(self.photo) super().delete(*args, **kwargs) def delete_photo(self, photo): if default_storage.exists(photo.name): default_storage.delete(photo.name) def _str_(self): photo_str = self.photo.url if self.photo else 'no photo' notes_str = self.notes[100:] if self.notes else 'no notes' return f'{self.pk}: {self.name} visited? {self.visited} on {self.date_visited}. Notes: {notes_str}. Photo {photo_str}'
[ "ahmed.abdinoor3@gmail.com" ]
ahmed.abdinoor3@gmail.com
ba5eaf26fde1ee4230e44bfb96255fca3568f0ea
3bea1c3f2d4834b9174664e4ee89ebbddde22e89
/app.py
f9ef295b9b9452b012e6df87fa15627824c52c47
[]
no_license
RakshithRajesh/Dataset-Maker
9e780117dd91d528b5803174080ab6d86fd90ca2
b43a8634e578a780847569724e39183c297d3ead
refs/heads/main
2023-08-24T07:28:02.235951
2021-10-02T14:20:21
2021-10-02T14:20:21
412,806,631
1
0
null
null
null
null
UTF-8
Python
false
false
1,643
py
from requests_html import HTMLSession from random import randint import time import os item = "car" s = HTMLSession() r = s.get(f"https://www.google.com/search?q={item}&hl=en&tbm=isch") r.html.render(timeout=10, scrolldown=2500) time.sleep(4) images = r.html.find('img[jsname="Q4LuWd"]') image_url_list = [] try: for image in images: if "data" not in image.attrs["src"]: image_url_list.append(image.attrs["src"]) except: pass for i, link in enumerate(image_url_list): response = s.get(link) newpath = f"{item}" if not os.path.exists(newpath): os.makedirs(newpath) with open(f"{newpath}/{i}.png", "wb") as w: w.write(response.content) print(link) print(len(image_url_list)) next_link = r.html.find('a[jslog="11106"]') for link in next_link: if link: if len(image_url_list) < 1001: response = s.get(f"https://www.google.com/{link.attrs['href']}") r.html.render(timeout=10, scrolldown=2500) time.sleep(4) images = r.html.find('img[jsname="Q4LuWd"]') try: for image in images: if "data" not in image.attrs["src"]: image_url_list.append(image.attrs["src"]) except: pass for link in image_url_list: response = s.get(link) newpath = f"{item}" with open(f"{newpath}/{randint(10,10000)}.png", "wb") as w: w.write(response.content) print(link) print(len(image_url_list)) else: break
[ "noreply@github.com" ]
noreply@github.com
8b7c1e9595c69f9cb01a360410fb8b73bece66ba
3abe45130d4f614f68c6551b59014a20d3470b58
/qa/rpc-tests/wallet.py
65bf8b546566b07fe3274bde41ad06a30adb66e9
[ "MIT" ]
permissive
dre060/YAADI
faab94150263848ef16fe6a865cff7d2a7893e00
cdb07c723f559ce883e33d64bce55b6ee5539142
refs/heads/main
2023-05-17T15:01:43.672809
2021-06-06T04:23:41
2021-06-06T04:23:41
374,243,648
0
0
null
null
null
null
UTF-8
Python
false
false
3,942
py
#!/usr/bin/env python2 # Copyright (c) 2014 The Bitcoin Core developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Exercise the wallet. Ported from wallet.sh. # Does the following: # a) creates 3 nodes, with an empty chain (no blocks). # b) node0 mines a block # c) node1 mines 32 blocks, so now node 0 has 60001eca, node 1 has 4250eca, node2 has none. # d) node0 sends 601 yaadi to node2, in two transactions (301 yaadi, then 300 yaadi). # e) node0 mines a block, collects the fee on the second transaction # f) node1 mines 16 blocks, to mature node0's just-mined block # g) check that node0 has 100-21, node2 has 21 # h) node0 should now have 2 unspent outputs; send these to node2 via raw tx broadcast by node1 # i) have node1 mine a block # j) check balances - node0 should have 0, node2 should have 100 # from test_framework import BitcoinTestFramework from util import * class WalletTest (BitcoinTestFramework): def setup_chain(self): print("Initializing test directory "+self.options.tmpdir) initialize_chain_clean(self.options.tmpdir, 3) def setup_network(self, split=False): self.nodes = start_nodes(3, self.options.tmpdir) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.is_network_split=False self.sync_all() def run_test (self): print "Mining blocks..." self.nodes[0].setgenerate(True, 1) self.sync_all() self.nodes[1].setgenerate(True, 32) self.sync_all() assert_equal(self.nodes[0].getbalance(), 60001) assert_equal(self.nodes[1].getbalance(), 4250) assert_equal(self.nodes[2].getbalance(), 0) # Send 601 BTC from 0 to 2 using sendtoaddress call. # Second transaction will be child of first, and will require a fee self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 351) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 350) # Have node0 mine a block, thus he will collect his own fee. 
self.nodes[0].setgenerate(True, 1) self.sync_all() # Have node1 generate 100 blocks (so node0 can recover the fee) self.nodes[1].setgenerate(True, 16) self.sync_all() # node0 should end up with 100 btc in block rewards plus fees, but # minus the 21 plus fees sent to node2 assert_greater_than(self.nodes[0].getbalance(), 59549) assert_equal(self.nodes[2].getbalance(), 701) # Node0 should have two unspent outputs. # Create a couple of transactions to send them to node2, submit them through # node1, and make sure both node0 and node2 pick them up properly: node0utxos = self.nodes[0].listunspent(1) assert_equal(len(node0utxos), 2) # create both transactions txns_to_send = [] for utxo in node0utxos: inputs = [] outputs = {} inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]}) outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx)) # Have node 1 (miner) send the transactions self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True) self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True) # Have node1 mine a block to confirm transactions: self.nodes[1].setgenerate(True, 1) self.sync_all() assert_equal(self.nodes[0].getbalance(), 0) assert_greater_than(self.nodes[2].getbalance(), 60250) assert_greater_than(self.nodes[2].getbalance("from1"), 59549) if __name__ == '__main__': WalletTest ().main ()
[ "ipedrero84@gmail.com" ]
ipedrero84@gmail.com
0f9760cc2ff333b4463fe208a835ac180db48dce
537b9efd439a842216a7979eb2c29bd02083732a
/python/marks.py
5688ff1fc98b574847a616fdbd6434f6a5079122
[]
no_license
grand-27-master/Data-Science-course
c1e92b8e1b820d72a9c7e2d3694ec100a8177ac7
90006685cff7988593906422773cdb0331b94083
refs/heads/master
2023-07-21T21:25:48.687018
2021-09-03T10:09:16
2021-09-03T10:09:16
394,889,532
4
0
null
null
null
null
UTF-8
Python
false
false
262
py
p=int(input("enter marks of physics=")) m=int(input("enter marks of maths=")) c=int(input("enter marks of chemistry=")) avg_marks=(p+m+c)/3 print("avg marks=",avg_marks) if avg_marks>98: print("you have been awarded the scholarship") else:print("sorry")
[ "gajjarv2001@gmail.com" ]
gajjarv2001@gmail.com
702e8bbceb76f147529dc19c0d81bd03baf380e0
8415845ada32baa8b047f59d959b60651be4b113
/amt/AMTscript.py
7793fc8387a99992d7609859c422240d575aa770
[ "MIT" ]
permissive
macilath/CrowdMix
aa21cb7ac6a22f15b1ef46f594ed91835b60fc99
65589fabe27c1eed0f09832f3d9ff2b40eb97b42
refs/heads/master
2016-09-06T16:27:35.554171
2014-03-31T01:43:18
2014-03-31T01:43:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,706
py
# This is the main AMT script that should be executed and will do the following: # * Create a HIT # * Wait for the HIT to become reviewable # * Process assignments once the HIT is reviewable # * Check whether their input code is one that we gave them. # * If yes, pay them, if not, reject them. # NOTE: Should this script close before all assignments are reviewed, please run 'AMTpay.py' # NOTE: Fill in your AWS keys in 'ACCESS_KEY' and 'SECRET_KEY' from boto.mturk.connection import MTurkConnection, HIT from boto.mturk.question import SimpleField,QuestionContent,Question,QuestionForm,Overview,AnswerSpecification,SelectionAnswer,FormattedContent,FreeTextAnswer import time ACCESS_ID = '' SECRET_KEY = '' HOST = 'mechanicalturk.amazonaws.com' # this mtc is used for creating the HIT mtc = MTurkConnection(aws_access_key_id=ACCESS_ID, aws_secret_access_key=SECRET_KEY, host=HOST) title = 'CrowdMix: Remix a Classical Composition' description = ('Help remix a classical music composition ' 'by choosing the next sound bits! 
Simple, easy, and fast!') keywords = 'music, create, easy, fast' max_assignments = 50 # acceptable codes that will ge the turker paid payCodes = ['CG6H5', 'X38T1', 'S1W59', 'D2K9K', 'DCURP', 'KJHCY', 'KSSIZ', 'YYLMB', '47NQK', 'WILIM'] #-------------- WAIT FUNCTION ---------------------------- # wait timer function since time.sleep() was giving issues def wait(time_lapse): time_start = time.time() time_end = (time_start + time_lapse) while time_end > time.time(): pass # this mtc is used for data retrieval mtc2 = MTurkConnection(aws_access_key_id=ACCESS_ID, aws_secret_access_key=SECRET_KEY, host=HOST) #--------------- GET ALL REVIEWABLE HITS FUNCTION ---------- def get_all_reviewable_hits(mtc2): page_size = 50 hits = mtc2.get_reviewable_hits(page_size=page_size) print "Total results to fetch %s " % hits.TotalNumResults print "Request hits page %i" % 1 total_pages = float(hits.TotalNumResults)/page_size int_total= int(total_pages) if(total_pages-int_total>0): total_pages = int_total+1 else: total_pages = int_total pn = 1 while pn < total_pages: pn = pn + 1 print "Request hits page %i" % pn temp_hits = mtc2.get_reviewable_hits(page_size=page_size,page_number=pn) hits.extend(temp_hits) return hits #--------------- BUILD OVERVIEW ------------------- overview = Overview() overview.append_field('Title', 'CrowdMix: Remix a Classical Composition') overview.append(FormattedContent('<a target="_blank"' ' href="http://allekant.com/cgi-bin/welcome.py">' ' CrowdMix Homepage</a>')) overview.append(FormattedContent('Please visit the link above in order to complete this HIT.\n' 'When completed, you will be given a code to input below.')) overview.append(FormattedContent('On the webpage linked above, you will be given five random ' 'sound bits to listen to. 
Then you will choose two clips, using ' 'the radio buttons, that will be combined and added with other ' 'clips to make a new song')) overview.append(FormattedContent('When you have selected two clips using the radio buttons, ' 'click the Load My New Choice button to confirm your selection.')) overview.append(FormattedContent('When you are satisfied with your choices and you have ' 'already clicked the Load My New Choice button, click the ' 'Submit My Decision and Get My Code! button.')) overview.append(FormattedContent('After clicking the Submit button, you will be given a code ' 'to input in to the box below. Once you have put the code in ' 'box, submit the hit and wait for approval.')) #--------------- BUILD QUESTION 1 ------------------- qc1 = QuestionContent() qc1.append_field('Title','Enter your code in the box below.') fta1 = FreeTextAnswer(); q1 = Question(identifier='code', content=qc1, answer_spec=AnswerSpecification(fta1), is_required=True) #--------------- BUILD THE QUESTION FORM ------------------- question_form = QuestionForm() question_form.append(overview) question_form.append(q1) #--------------- CREATE THE HIT ------------------- mtc.create_hit(questions=question_form, max_assignments = max_assignments, title = title, description = description, keywords = keywords, duration = 60*5, reward = 0.25) #--------------- WAIT FOR ASSIGNMENTS TO COMPLETE ---------- #-------------------- AND REVIEW ASSIGNMENTS --------------- hits = get_all_reviewable_hits(mtc2) hitReviewed = False; # this busy loops until we have processed the one reviewable HIT while True: if not hits: print "Waiting for reviewable hits..." 
hits = get_all_reviewable_hits(mtc2) else: for hit in hits: # for every hit that's reviewable, review turker answers assignments = mtc2.get_assignments(hit.HITId) for assignment in assignments: # get individual turker assignments print "Answers of the worker %s" % assignment.WorkerId for question_form_answer in assignment.answers[0]: for key in question_form_answer.fields: # get individual turker answers to assignments print "%s" % (key) if key.upper() in payCodes: # if they used the right code, approve/pay them print "%s: Accepted and paid!" % assignment.WorkerId mtc2.approve_assignment(assignment.AssignmentId) else: # if they used the wrong code, reject them print "%s: Rejected and not paid!" % assignment.WorkerId mtc2.reject_assignment(assignment.AssignmentId, feedback = 'Invalid code.') print "--------------------" # the hit stays enabled in case a turker is rejected, however they should have been approved # this should hopefully never happen ##mtc2.disable_hit(hit.HITId) hitReviewed = True; if hitReviewed: # since I know that I only submit one HIT, I quit after one HIT is reviewed break; else: # wait 30 seconds so that Amazon does not get mad wait(30) print "All assignments have been reviewed!\n" print "Program has been terminated!"
[ "clayton.crawford@tamu.edu" ]
clayton.crawford@tamu.edu
e2fd6628e5ba3d7cd17f41118334deb2556a3926
ae13b9d10b738f1365977741c45e9e6959502ba5
/employerapp/views.py
5605c6efdf879b996b50f22b6a057a840d3f8cc2
[]
no_license
AissatouSECK/Projet-Django
960d062907ff06c2b809f4c68e4f7697c230f826
aca9346ce292f26b908e537c001c1f3f18136574
refs/heads/master
2023-08-14T11:58:24.842259
2021-09-16T21:51:07
2021-09-16T21:51:07
407,295,517
0
0
null
null
null
null
UTF-8
Python
false
false
1,301
py
from django.shortcuts import render, redirect from .models import Employer, Departement from .forms import Form_employer # To create employee def emp(request): if request.method == "POST": form = Form_employer(request.POST) if form.is_valid(): try: form.save() return redirect("/showemp") except: pass else: form = Form_employer() return render(request, "index.html", {'form':form}) # To show employee details def showemp(request): employees = Employer.objects.all() return render(request, "main.html", {'employees':employees}) # To delete employee details def deleteEmp(request, pk): employee = Employer.objects.get(pk=pk) employee.delete() return redirect("/showemp") # To edit employee details def editemp(request, pk): employee = Employer.objects.get(pk=pk) return render(request, "edit.html", {'employee':employee}) # To update employee details def updateEmp(request, pk): employee = Employer.objects.get(pk=pk) form = Form_employer(request.POST, instance= employee) if form.is_valid(): form.save() return redirect("/showemp") return render(request, "main.html", {'employee': employee})
[ "sstseck@gmail.com" ]
sstseck@gmail.com
20d8f4c314a6b737ec3f9b057e9e0b259d3413db
d4eeb6f634c72e924b0a3780e7df7f2150565780
/pyfi1.36.py
1844be21aa30652b409d6cc08a32f72c7644f043
[]
no_license
anupama14609/financepy
053e4086028150e1ace74d26182658ec5f67600f
1d2325d1cba898ce232395dd36808dee4dc980ca
refs/heads/master
2023-07-21T04:16:53.980529
2021-08-29T02:08:37
2021-08-29T02:08:37
399,757,610
0
0
null
null
null
null
UTF-8
Python
false
false
525
py
import pandas as pd import matplotlib.pyplot as plt from matplotlib import style import numpy as np def visualize_data(): style.use('ggplot') df = pd.read_csv('sp500_joined_closes.csv') df_corr = df.corr() data = df_corr.values fig = plt.figure() ax = fig.add_subplot(1,1,1) column_labels = df_corr.columns row_labels = df_corr.index ax.set_xticklabels(column_labels) ax.set_yticklabels(row_labels) plt.xticks(rotation=90) plt.plot() plt.show() visualize_data()
[ "anupamarao14609@gmail.com" ]
anupamarao14609@gmail.com
1b59986d14faeb17881c11ce0e4490deee33f0a4
08330ea5c2495d5dc958d4cf11b68c5650396e3e
/main.py
96bc672b9314ca63c2ef52b701f996ef5869ae68
[]
no_license
marco-willi/tf-estimator-cnn
d74be01143b6a724534737807ebb78db518c6b87
df3a5651b0f8018d3b9bc4b424f8090fb74ca26f
refs/heads/master
2020-03-22T03:00:54.073040
2018-07-17T08:52:16
2018-07-17T08:52:16
139,408,220
4
0
null
null
null
null
UTF-8
Python
false
false
9,526
py
""" Estimator API for CNNs using popular implementations """ import os import random import tensorflow as tf import numpy as np from estimator import model_fn ################################# # Parameters ################################# flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string( 'root_path', '', "Images root path - must contain directories with class specific images") flags.DEFINE_string( 'model_save_path', '', "Path in which to save graphs, models and summaries") flags.DEFINE_string( 'model', 'small_cnn', "Model name") flags.DEFINE_integer( 'max_epoch', 10, "Max epoch to train model") flags.DEFINE_integer( 'batch_size', 64, "Batch size for model training") flags.DEFINE_integer( 'image_size', 50, "Image size (width/height) for model input") flags.DEFINE_integer( 'num_gpus', 0, "Number of GPUs for model training") flags.DEFINE_integer( 'num_cpus', 2, "Numer of CPUs (for pre-processing)") flags.DEFINE_float('train_fraction', 0.8, "training set fraction") flags.DEFINE_bool( 'color_augmentation', True, "Whether to randomly adjust colors during model training") flags.DEFINE_float( 'weight_decay', 0, 'Applies weight decay if supported by specific model') flags.DEFINE_list( 'image_means', [0, 0, 0], 'image means (leave at default for automatic mode)') flags.DEFINE_list( 'image_stdevs', [1, 1, 1], 'image stdevs (leave at default for automatic mode)') # #DEBUG # FLAGS.root_path = '/host/data_hdd/ctc/ss/images/' # FLAGS.model_save_path = '/host/data_hdd/ctc/ss/runs/species/resnet18_test/' # FLAGS.model = 'ResNet18' # FLAGS.num_gpus = 1 # FLAGS.num_cpus = 4 # FLAGS.weight_decay = 0.0001 ################################# # Define Dataset ################################# # get all class directories classes = os.listdir(FLAGS.root_path) n_classes = len(classes) # find all images image_paths = dict() for cl in classes: image_names = os.listdir(os.path.join(FLAGS.root_path, cl)) image_paths[cl] = [os.path.join(FLAGS.root_path, cl, x) for x in image_names] # 
Map classes to numerics classes_to_num_map = {k: i for i, k in enumerate(classes)} num_to_class_map = {v: k for k, v in classes_to_num_map.items()} # Create lists of image paths and labels label_list = list() image_path_list = list() for k, v in image_paths.items(): label_list += [classes_to_num_map[k] for i in range(0, len(v))] image_path_list += v # randomly shuffle input to ensure good mixing when model training indices = [i for i in range(0, len(label_list))] random.seed(123) random.shuffle(indices) image_path_list = [image_path_list[i] for i in indices] label_list = [label_list[i] for i in indices] n_records = len(label_list) # Create training and test set train_fraction = FLAGS.train_fraction n_train = int(round(n_records * train_fraction, 0)) n_test = n_records - n_train train_files = image_path_list[0: n_train] train_labels = label_list[0: n_train] test_files = image_path_list[n_train:] test_labels = label_list[n_train:] ################################# # Dataset Iterator ################################# # Standardize a single image def _standardize_images(image, means, stdevs): """ Standardize images """ with tf.name_scope("image_standardization"): means = tf.expand_dims(tf.expand_dims(means, 0), 0) means = tf.cast(means, tf.float32) stdevs = tf.expand_dims(tf.expand_dims(stdevs, 0), 0) stdevs = tf.cast(stdevs, tf.float32) image = image - means image = tf.divide(image, stdevs) return image # data augmentation def _image_augmentation(image): """ Apply some random image augmentation """ with tf.name_scope("image_augmentation"): image = tf.image.random_flip_left_right(image) image = tf.image.random_brightness(image, max_delta=0.2) image = tf.image.random_contrast(image, lower=0.9, upper=1) image = tf.image.random_hue(image, max_delta=0.02) image = tf.image.random_saturation(image, lower=0.8, upper=1.2) return image # parse a single image def _parse_function(filename, label, augmentation=True): image_string = tf.read_file(filename) image = 
tf.image.decode_jpeg(image_string, channels=3) # randomly crop image from plus 10% width/height if augmentation: image = tf.image.resize_images( image, [int(FLAGS.image_size*1.1), int(FLAGS.image_size*1.1)]) image = tf.random_crop(image, [FLAGS.image_size, FLAGS.image_size, 3]) else: image = tf.image.resize_images( image, [FLAGS.image_size, FLAGS.image_size]) image = tf.divide(image, 255.0) if augmentation: image = _image_augmentation(image) image = _standardize_images(image, FLAGS.image_means, FLAGS.image_stdevs) return {'images': image, 'labels': label} def dataset_iterator(filenames, labels, is_train, augmentation=True): dataset = tf.data.Dataset.from_tensor_slices((filenames, labels)) if is_train: dataset = dataset.shuffle(buffer_size=300) dataset = dataset.apply( tf.contrib.data.map_and_batch( lambda x, y: _parse_function(x, y, augmentation), batch_size=FLAGS.batch_size, num_parallel_batches=1, drop_remainder=False)) if is_train: dataset = dataset.repeat(1) else: dataset = dataset.repeat(1) dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) return dataset # Create callable iterator functions def train_iterator(): return dataset_iterator(train_files, train_labels, True, FLAGS.color_augmentation) def test_iterator(): return dataset_iterator(test_files, test_labels, False, False) def original_iterator(): return dataset_iterator(train_files, train_labels, False, False) ################################# # Image Statistics for Preprocessing ################################# # Calculate image means and stdevs of training images for RGB channels # for image standardization if (FLAGS.image_means == [0, 0, 0]) and (FLAGS.image_stdevs == [1, 1, 1]): with tf.Session() as sess: original_batch_size = FLAGS.batch_size FLAGS.batch_size = np.min([500, n_train]) dataset = original_iterator() iterator = dataset.make_one_shot_iterator() feature_dict = iterator.get_next() features = sess.run(feature_dict) image_batch = features['images'] means_batch = 
np.mean(image_batch, axis=(0, 1, 2)) stdev_batch = np.std(image_batch, axis=(0, 1, 2)) FLAGS.batch_size = original_batch_size image_means = [round(float(x), 6) for x in list(means_batch)] image_stdevs = [round(float(x), 4) for x in list(stdev_batch)] FLAGS.image_means = image_means FLAGS.image_stdevs = image_stdevs ################################# # Configure Estimator ################################# n_batches_per_epoch_train = int(round(n_train / FLAGS.batch_size)) # Configurations config_sess = tf.ConfigProto(allow_soft_placement=True) config_sess.gpu_options.per_process_gpu_memory_fraction = 0.8 config_sess.gpu_options.allow_growth = True def distribution_gpus(num_gpus): if num_gpus == 0: return tf.contrib.distribute.OneDeviceStrategy(device='/cpu:0') elif num_gpus == 1: return tf.contrib.distribute.OneDeviceStrategy(device='/gpu:0') elif num_gpus > 1: return tf.contrib.distribute.MirroredStrategy(num_gpus=num_gpus) else: return None # Config estimator est_config = tf.estimator.RunConfig() est_config = est_config.replace( keep_checkpoint_max=3, save_checkpoints_steps=n_batches_per_epoch_train, session_config=config_sess, save_checkpoints_secs=None, save_summary_steps=n_batches_per_epoch_train, model_dir=FLAGS.model_save_path, train_distribute=distribution_gpus(FLAGS.num_gpus)) # Model Parameters params = dict() params['label'] = ['labels'] params['n_classes'] = [n_classes] params['weight_decay'] = FLAGS.weight_decay params['momentum'] = 0.9 params['model'] = FLAGS.model params['reuse'] = False params['class_mapping_clean'] = {'labels': num_to_class_map} # create estimator estimator = tf.estimator.Estimator(model_fn=model_fn, params=params, model_dir=FLAGS.model_save_path, config=est_config ) ################################# # Train and Evaluate ################################# def main(args): """ Main - called by command line """ # Print flags for f in flags.FLAGS: print("Flag %s - %s" % (f, FLAGS[f].value)) eval_loss = list() for epoch in range(1, 
FLAGS.max_epoch + 1): print("Starting with epoch %s" % epoch) # Train for one epoch estimator.train(input_fn=train_iterator) # Evaluate eval_res = estimator.evaluate(input_fn=test_iterator) print("Evaluation results:") for k, v in eval_res.items(): print(" Res for %s - %s" % (k, v)) eval_loss.append(eval_res['loss']) # Predict preds = estimator.predict(input_fn=test_iterator) for i, pred in enumerate(preds): print(pred) if i > 10: break if __name__ == '__main__': tf.app.run()
[ "will5448@umn.edu" ]
will5448@umn.edu
bf6ff385d4a25e401c65c0c285afab951c5bc4de
6222e729592de24344e30a9e2535e2737d587dfe
/2. Market Data/beta_hedging.py
ee229644320206fafa6d9a5c77108e1cbdf70bc0
[ "Apache-2.0" ]
permissive
Nhiemth1985/Pynaissance
a145e118a0ef2a8894247c99978c29701bc34077
7034798a5f0b92c6b8fdfa5948d2ad78a77a1a05
refs/heads/master
2023-08-25T21:07:16.865197
2021-10-19T15:29:44
2021-10-19T15:29:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
354
py
# Basic Setup: Import Libraries import numpy as np from statsmodels import regression import statsmodels.api as sm import matplotlib.pyplot as plt import math start = '2018-01-01' end = '2019-01-01' asset = get_pricing('AMD', fields='price', start_date=start, end_date=end) benchmark = get_pricing('SPY', fields='price', start_date=start, end_date=end)
[ "noreply@github.com" ]
noreply@github.com
2099ab416d5ffcd03de217a98cb5cac3527bc5c4
85f2798326d6bb4ccabd3b98ac2bdb545911b5f6
/tdd-demo/todolist-app.py
37fcd91486c66ceddbfb1577a486a6c109d1ef2c
[]
no_license
nickfuentes/Python-Practice
6600aea672fd2bd9ce140ccb8aa941b3cc62d93f
17bb171ea5c92fdc471af41e3cc74b8772a462bd
refs/heads/master
2020-06-10T22:11:25.837714
2019-07-07T19:55:00
2019-07-07T19:55:00
193,768,512
0
1
null
2019-07-03T14:04:22
2019-06-25T19:11:52
Python
UTF-8
Python
false
false
100
py
# Unit Tests # Are Atomic and independent # Should not be Dependent # Never have Side effect
[ "nickfuentes24@gmail.com" ]
nickfuentes24@gmail.com
b7dff73104d8dea5fd1397137f879ea0a52b8404
b6de31e6ca07500daef559462533bd1a3585e0b9
/img_saving_script.py
52bd8c293ee5be6c8491a32e649949403377b396
[]
no_license
NickNagy/UWIRLMachineLearningResearch
30deb91a20efe75e0345799fc03ad02c847c4458
a9fd75d1ed14b6bf79a42b264a6238e698d56c80
refs/heads/master
2020-05-05T12:36:16.394862
2019-04-07T23:41:17
2019-04-07T23:41:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,275
py
""" Quick Python script for saving .png files from the crop_imag.mat images. """ import numpy as np from scipy import io from scipy.misc import imsave import os from PIL import Image #from matplotlib import pyplot as plt directory = # def save_image(folder_name, extension=".jpg"): try: img = np.array((io.loadmat('crop_image.mat', appendmat=False))['dxImage']['img'][0][0]) img_compressed = (img*255.0/np.max(img)).astype('uint8') rgb_img = np.asarray(Image.fromarray(img_compressed).convert('RGB')) imsave(folder_name + extension, rgb_img) except FileNotFoundError: try: #print("Running...") img = np.array((io.loadmat('crop_image_fracturemask.mat', appendmat=False))['dxImage']['img'][0][0]) img_compressed = (img * 255.0 / np.max(img)).astype('uint8') rgb_img = np.asarray(Image.fromarray(img_compressed).convert('RGB')) imsave(folder_name + extension, rgb_img) except FileNotFoundError: print("Could not locate mat file in folder " + folder_name) for subdir, dirs, files in os.walk(directory): #print(dirs) for folder in dirs: os.chdir(directory + "\\" + folder) #print(folder) save_image(folder, ".png")
[ "noreply@github.com" ]
noreply@github.com
4cfda7eb2e215caab64ce291445b94773a94655f
17c6289537851347c691c46570efe98a47f57169
/scripts/python_code-set/main/main_analysis_fitparam.py
dd98cff0303b0858007d6db06873667f9b293e1a
[]
no_license
miiya369/analysisHAL_miya
67516fb7192ce7c3d0a0c5bace3f3e1b4c850d26
76a6d80bb4a7f24c0deeca770f60efd440b72f3c
refs/heads/master
2020-03-09T09:17:51.926630
2018-10-04T10:44:45
2018-10-04T10:44:45
99,018,105
0
0
null
null
null
null
UTF-8
Python
false
false
2,786
py
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function import sys, os sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../lib") import numpy as np import time ### ================== Global Parameters Init. ================= ### ifname = None r_min = 0.001 r_del = 0.01 r_max = 2.5 ### =========================== Main =========================== ### def main(): from common.misc import frange from common.statistics import make_mean_err from fitting.io_params import input_params from fitting.fitfunc_type import set_fitfunc_from_fname func_name, params = input_params(ifname) if (func_name is params is None): return -1 Nconf = len(params[:,0]) Nparam = len(params[0,:]) fit_func = set_fitfunc_from_fname(func_name) for r in frange(r_min, r_max, r_del): print("%lf %1.16e %1.16e" % (r, *make_mean_err(np.array([fit_func(r,*params[iconf,:]) for iconf in range(Nconf)])))) return 0 ### ============================================================ ### ###### Functions for arguments def usage(ARGV0): print("usage : python %s [ifile] {options}\n" % os.path.basename(ARGV0)) print("options:") print(" --r_min [Minimum range (fm)] Default =", r_min) print(" --r_del [Range division (fm)] Default =", r_del) print(" --r_max [Maximum range (fm)] Default =", r_max) exit(1) def check_args(): print("# === Check Arguments ===") print("# ifile =", ifname) print("# r min =", r_min) print("# r del =", r_del) print("# r max =", r_max) print("# =======================") def set_args(ARGC, ARGV): global ifname, r_min, r_del, r_max if (ARGV[1][0] == '-'): usage(ARGV[0]) ifname = ARGV[1].strip() for i in range(2, ARGC): if (len(ARGV[i]) == 1): continue if (ARGV[i][0] == '-' and ARGV[i][1] == '-'): if (ARGV[i] == '--r_min'): r_min = float(ARGV[i+1]) elif (ARGV[i] == '--r_del'): r_del = float(ARGV[i+1]) elif (ARGV[i] == '--r_max'): r_max = float(ARGV[i+1]) else: print("\nERROR: Invalid option '%s'\n" % ARGV[i]) usage(ARGV[0]) check_args() ### 
============================================================ ### ### ============================================================ ### if __name__ == "__main__": argv = sys.argv; argc = len(argv) if (argc == 1): usage(argv[0]) set_args(argc, argv) t_start = time.time() if (main() != 0): exit("ERROR EXIT.") print("#\n# Elapsed time [s] = %d" % (time.time() - t_start))
[ "miiya369@gmail.com" ]
miiya369@gmail.com
0f3cc4a2087d8125cc761a1644c51c12e6c814d4
d838bed08a00114c92b73982a74d96c15166a49e
/docs/data/learn/Bioinformatics/output/ch6_code/src/Stepik.6.9.CodeChallenge.2BreakDistance.py
a9ce5254b6d1201e2c2202e7b13a59eeda40ae42
[]
no_license
offbynull/offbynull.github.io
4911f53d77f6c59e7a453ee271b1e04e613862bc
754a85f43159738b89dd2bde1ad6ba0d75f34b98
refs/heads/master
2023-07-04T00:39:50.013571
2023-06-17T20:27:05
2023-06-17T23:27:00
308,482,936
1
0
null
null
null
null
UTF-8
Python
false
false
575
py
from BreakpointGraph import BreakpointGraph

# Load the Stepik dataset: two genomes, one per line, each written as
# '(a b c)(d e ...)' permutation cycles.
with open('/home/user/Downloads/dataset_240324_4.txt', mode='r', encoding='utf-8') as f:
    data = f.read()
lines = data.split('\n')


def _parse_genome(line):
    # Strip the outer parentheses, split on ')(' boundaries, and turn each
    # cycle into a list of signed integers.
    cycles = line[1:-1].split(')(')
    return [[int(token) for token in cycle.split(' ')] for cycle in cycles]


p_list1 = _parse_genome(lines[0])
p_list2 = _parse_genome(lines[1])

graph = BreakpointGraph(p_list1, p_list2)
red_blue_cycles = graph.get_red_blue_cycles()

# Each synteny block contributes two nodes, so blocks = nodes / 2.
synteny_blocks = len(graph.node_to_blue_edges) // 2
# 2-break distance = (#blocks) - (#red/blue cycles).
print(f'{synteny_blocks - len(red_blue_cycles)}')
[ "offbynull@gmail.com" ]
offbynull@gmail.com
3f3f54c554cdbefbda629cad5a49473819e2debd
d791176b586e993fac51ce2a6b241561badfc009
/ServerAPI/ServerAPI.py
0fb54ffd6ae7613d12a9750ce1b6e320dfd1be3c
[]
no_license
sochkasov/smart8036.v3
3786bca35eb93d8f36985d6720b4e0d1c91a6afc
6a113e5e83ade8f8c8ca25b6d060d83322129216
refs/heads/main
2023-03-07T11:51:14.906152
2021-02-22T15:32:16
2021-02-22T15:32:16
341,245,744
0
0
null
null
null
null
UTF-8
Python
false
false
9,363
py
# coding=utf8 import os from flask import Flask, jsonify, send_file from flask import request from flask_jsonpify import jsonify # для JSONP from flask.json import JSONEncoder import ujson from api_utils import ResponsiveFlask from HeatController.Controller8036 import * import config import datetime import calendar class CustomJSONEncoder(JSONEncoder): ''' Преопределение метода для изменения преобразования даты-времени в нужный формат ''' def default(self, obj): if isinstance(obj, datetime.datetime): return str(obj) return JSONEncoder.default(self, obj) class ServerAPI(object): def __init__(self): self.DEBUG = False print "ServerAPI server\n" self.dbconnect = db.Database() self.ctl8036 = Controller8036() #self.app = ResponsiveFlask(__name__) self.app = Flask(__name__) self.app.json_encoder = CustomJSONEncoder self.app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False return None def json_output(self, *args, **kwargs): """ Создание JSON строки из data. Производится контроль ошибок по флагу error Если есть ошибка, то будет сформировано сообщение из error_message :param data: :param error: :param error_message: :return: """ if not kwargs['error']: # return jsonify(kwargs['result']) return jsonify({"result": kwargs['result'], "error": False, "error_message": kwargs['error_message']}), 200 else: return jsonify({"error": True, "error_message": kwargs['error_message']}), 404 def start(self): @self.app.route('/') def index(): return jsonify(api_version=1, user='user1', datatime=str(datetime.datetime.now().strftime('%H:%M:%S %d-%m-%Y')), date=str(datetime.datetime.now().strftime('%d-%m-%Y')), time=str(datetime.datetime.now().strftime('%H:%M:%S')) ) @self.app.route('/help') def get_help(): result = '''<h2>API methods</h2> <ul> <li>/get/temp/ - get temperature online on JSON format</li> <li>/get/tempraw/ - get temperature online on raw format</li> </ul>''' return {'message': result} @self.app.route('/get/temp/') def get_temp(): return 
self.json_output(**self.ctl8036.GetSensorsTemperature()) @self.app.route('/get/temp/history/<int:sensor_link_id>') def get_temp_history(sensor_link_id): return self.json_output(**self.ctl8036.get_sensor_hostory(sensor_link_id)) @self.app.route('/get/tempraw/') def get_tempraw(): return self.json_output(**self.ctl8036.GetTemperatureCurrent()) @self.app.route('/set/timesync/') def set_temesync(): return self.json_output(**self.ctl8036.timedate_sync()) @self.app.route('/get/program_raw/') def get_programm_raw(): return self.json_output(**self.ctl8036.get_program_raw()) @self.app.route('/get/program_json/') def get_programm_json(): return self.json_output(**self.ctl8036.get_program_json()) @self.app.route('/get/actuator_status/') def get_actuators_status_json(): return self.json_output(**self.ctl8036.get_actuators_status_json()) @self.app.route('/get/test/') def get_test(): return jsonify({'now': datetime.datetime.now()}) @self.app.route('/favicon.ico') def favicon(): # mimetype='image/vnd.microsoft.icon' #return 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAIFQTFRFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////basLdwAAACp0Uk5TAAgEHQMYbYWESXFmcywiYnuGamt0QyafblMHb1kLCmg8AjZ2I2EQciFgf8bHLwAAAAFiS0dEKlO+1J4AAAAJcEhZcwAAAEgAAABIAEbJaz4AAACLSURBVBgZBcELQoJQEADAkeengJYiIrXC76Lc/4LNAAAAWK0AqEpZb7bbDYCye3mtm6Z9A4ju/aPv+8/haw2M3/v6cDwe6p/fP5hO++F84XIe+mbCdL2NwHi7TnDvALo7iDRnZs4yQKR4PJ+PkAEixcISMkCkSDJkgLJb2iTbZVdAVSJm5ohSAQDgH8c2Ci4yRvReAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDE2LTA5LTE2VDA4OjI4OjI3KzAwOjAwlMYpngAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNi0wOS0xNlQwODoyODoyNyswMDowMOWbkSIAAABGdEVYdHNvZnR3YXJlAEltYWdlTWFnaWNrIDYuNy44LTkgMjAxNC0wNS0xMiBRMTYgaHR0cDovL3d3dy5pbWFnZW1hZ2ljay5vcmfchu0AAAAAGHRFWHRUaHVtYjo6RG9jdW1lbnQ6OlBhZ2VzADGn/7svAAAAGHRFWHRUaHVtYjo6SW1hZ2U6OmhlaWdodAAxOTIPAHKFAAAAF3RFWHRUaHVtYjo6SW1hZ2U6OldpZHRoADE5MtOsIQgAAAAZdEVYdFRodW1iOjpNaW1ldHlwZQBpbWFnZS9wbmc/slZOAAAAF3RFWHRUaHVtYjo6TVRpbWUAMTQ3NDAxNDUwN/jek0AAAAAPdEVYdFRodW1iOjpTaXplADBCQpSiPuwAAABWdEVYdFRodW1iOjpVUkkAZmlsZTovLy9tbnRsb2cvZmF2aWNvbnMvMjAxNi0wOS0xNi9jY2EzODcyMTQ3Mjc5YTVmYTVmMDVlNDJiYzA4ZDI0NC5pY28ucG5nM8/R6gAAAABJRU5ErkJggg==", 200, {'Content-Type': 'image/vnd.microsoft.icon'} #data = 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAIFQTFRFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////basLdwAAACp0Uk5TAAgEHQMYbYWESXFmcywiYnuGamt0QyafblMHb1kLCmg8AjZ2I2EQciFgf8bHLwAAAAFiS0dEKlO+1J4AAAAJcEhZcwAAAEgAAABIAEbJaz4AAACLSURBVBgZBcELQoJQEADAkeengJYiIrXC76Lc/4LNAAAAWK0AqEpZb7bbDYCye3mtm6Z9A4ju/aPv+8/haw2M3/v6cDwe6p/fP5hO++F84XIe+mbCdL2NwHi7TnDvALo7iDRnZs4yQKR4PJ+PkAEixcISMkCkSDJkgLJb2iTbZVdAVSJm5ohSAQDgH8c2Ci4yRvReAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDE2LTA5LTE2VDA4OjI4OjI3KzAwOjAwlMYpngAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNi0wOS0xNlQwODoyODoyNyswMDowMOWbkSIAAABGdEVYdHNvZnR3YXJlAEltYWdlTWFnaWNrIDYuNy44LTkgMjAxNC0wNS0xMiBRMTYgaHR0cDovL3d3dy5pbWFnZW1hZ2ljay5vcmfchu0AAAAAGHRFWHRUaHVtYjo6RG9jdW1lbnQ6OlBhZ2VzADGn/7svAAAAGHRFWHRUaHVtYjo6SW1hZ2U6OmhlaWdodAAxOTIPAHKFAAAAF3RFWHRUaHVtYjo6SW1hZ2U6OldpZHRoADE5MtOsIQgAAAAZdEVYdFRodW1iOjpNaW1ldHlwZQBpbWFnZS9wbmc/slZOAAAAF3RFWHRUaHVtYjo6TVRpbWUAMTQ3NDAxNDUwN/jek0AAAAAPdEVYdFRodW1iOjpTaXplADBCQpSiPuwAAABWdEVYdFRodW1iOjpVUkkAZmlsZTovLy9tbnRsb2cvZmF2aWNvbnMvMjAxNi0wOS0xNi9jY2EzODcyMTQ3Mjc5YTVmYTVmMDVlNDJiYzA4ZDI0NC5pY28ucG5nM8/R6gAAAABJRU5ErkJggg==" #data = 
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAIFQTFRFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////basLdwAAACp0Uk5TAAgEHQMYbYWESXFmcywiYnuGamt0QyafblMHb1kLCmg8AjZ2I2EQciFgf8bHLwAAAAFiS0dEKlO+1J4AAAAJcEhZcwAAAEgAAABIAEbJaz4AAACLSURBVBgZBcELQoJQEADAkeengJYiIrXC76Lc/4LNAAAAWK0AqEpZb7bbDYCye3mtm6Z9A4ju/aPv+8/haw2M3/v6cDwe6p/fP5hO++F84XIe+mbCdL2NwHi7TnDvALo7iDRnZs4yQKR4PJ+PkAEixcISMkCkSDJkgLJb2iTbZVdAVSJm5ohSAQDgH8c2Ci4yRvReAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDE2LTA5LTE2VDA4OjI4OjI3KzAwOjAwlMYpngAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNi0wOS0xNlQwODoyODoyNyswMDowMOWbkSIAAABGdEVYdHNvZnR3YXJlAEltYWdlTWFnaWNrIDYuNy44LTkgMjAxNC0wNS0xMiBRMTYgaHR0cDovL3d3dy5pbWFnZW1hZ2ljay5vcmfchu0AAAAAGHRFWHRUaHVtYjo6RG9jdW1lbnQ6OlBhZ2VzADGn/7svAAAAGHRFWHRUaHVtYjo6SW1hZ2U6OmhlaWdodAAxOTIPAHKFAAAAF3RFWHRUaHVtYjo6SW1hZ2U6OldpZHRoADE5MtOsIQgAAAAZdEVYdFRodW1iOjpNaW1ldHlwZQBpbWFnZS9wbmc/slZOAAAAF3RFWHRUaHVtYjo6TVRpbWUAMTQ3NDAxNDUwN/jek0AAAAAPdEVYdFRodW1iOjpTaXplADBCQpSiPuwAAABWdEVYdFRodW1iOjpVUkkAZmlsZTovLy9tbnRsb2cvZmF2aWNvbnMvMjAxNi0wOS0xNi9jY2EzODcyMTQ3Mjc5YTVmYTVmMDVlNDJiYzA4ZDI0NC5pY28ucG5nM8/R6gAAAABJRU5ErkJggg==' #return send_file(data, mimetype='image/vnd.microsoft.icon',) #return Response(stream_with_context(data), mimetype='image/vnd.microsoft.icon') #return send_from_directory(os.path.join(self.app.root_path, 'static'),'htdocs/favicon/home-outline.ico/favicon.ico',mimetype='image/vnd.microsoft.icon') return send_file('/root/8036/htdocs/favicon/home-outline.ico/favicon.ico', mimetype='image/vnd.microsoft.icon', ) @self.app.errorhandler(404) def page_not_found(error): return {'error': 'This API method does not exist'}, 404 # Running web server #if __name__ == '__main__': if __name__ == 'ServerAPI.ServerAPI': print "API server listen on port 5000 ..." 
# global DEBUG # if self.DEBUG: # self.app.debug = True #self.app.run(host=config.http_listen_ip, port=config.http_listen_port, debug=config.debug_enable, threaded=False) self.app.run(host=config.http_listen_ip, port=config.http_listen_port, debug=False, threaded=False) def stop(self): func = request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running with the Werkzeug Server') func()
[ "sochkasov@gmail.com" ]
sochkasov@gmail.com
8d4459e01333fe863b683d012fbcd3cf3b266c9e
ade7fd8afc93c5198f218a3f6736cdd198801aea
/methods.py
1d68fbdfb4d56d62007012886977b0e4c682db63
[]
no_license
ksdivesh/python-bone
74a59da49d2cd97a56d90525b9e24b87bcd44f32
98af99c5ad83a776f88a016e8e553712910e9eee
refs/heads/main
2023-03-07T18:54:49.419974
2021-02-19T07:18:50
2021-02-19T07:18:50
339,679,959
0
0
null
null
null
null
UTF-8
Python
false
false
746
py
# Teaching demo: Python supports both functional and object-oriented styles,
# whereas Java forces everything into classes. Here two plain functions are
# defined and combined at module level.


def mul(a, b):
    # Multiply and hand the result back as text.
    product = a * b
    return str(product)


def sum(a, b):
    # NOTE: shadows the builtin sum(); kept for the exercise's interface.
    total = a + b
    return total


a = 10
b = 20
# mul() yields a string, so convert it back before adding the plain sum.
val = int(mul(a, b)) + sum(a, b)
[ "69233185+devopsit51@users.noreply.github.com" ]
69233185+devopsit51@users.noreply.github.com
74413c9cb86a61bf2d60e97492d5141b19cea5da
514ddee0e3aeaf148226d89b2294f5cc84abca27
/src/coecms/cli/um.py
d96e211b69026008377d00f527e053f060459c7a
[ "Apache-2.0" ]
permissive
coecms/coecms-util
c60edab08ffa0f1c2af9188f671eea6db1801a64
a9ca18af3ea1a2ef06212acefc840fe0448661e9
refs/heads/master
2020-03-24T20:12:25.769470
2019-05-14T06:47:45
2019-05-14T06:47:45
142,965,222
2
2
Apache-2.0
2019-05-24T01:54:43
2018-07-31T05:12:21
Python
UTF-8
Python
false
false
3,512
py
#!/usr/bin/env python
#
# Copyright 2019 Scott Wales
#
# Author: Scott Wales <scott.wales@unimelb.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .main import cli
from ..grid import UMGrid
from ..regrid import regrid, esmf_generate_weights
from ..um.create_ancillary import create_surface_ancillary

import click
import pandas
import mule
import iris
import xarray
from dask.diagnostics import ProgressBar
import dask.distributed
import matplotlib.pyplot as plt


@cli.group()
def um():
    """
    Tools for working with the Unified Model
    """
    pass


@um.group()
def ancil():
    """
    Tools for working with ancil files
    """
    pass


def validate_date(ctx, param, value):
    """
    Ensures an argument is a valid date

    Click callback: parses *value* with pandas (day-first, coerced to UTC)
    and raises click.BadParameter on failure.
    """
    try:
        return pandas.to_datetime(value, utc=True, dayfirst=True)
    except ValueError:
        raise click.BadParameter(f'unable to parse "{value}" as a date')


def validate_um_ancil(ctx, param, value):
    """
    Ensures an argument is a UM file

    Click callback: opens *value* with mule and raises click.BadParameter
    if it is not a UM ancillary file.
    """
    try:
        return mule.AncilFile.from_file(value)
    except:
        raise click.BadParameter(f'"{value}" does not seem to be a UM ancil file')


@ancil.command()
@click.option('--start-date', callback=validate_date, required=True)
@click.option('--end-date', callback=validate_date, required=True)
@click.option('--target-mask', type=click.Path(exists=True, dir_okay=False))
@click.option('--output', required=True, type=click.Path(writable=True, dir_okay=False))
def era_sst(start_date, end_date, target_mask, output):
    """
    Create ancil files from ERA reanalysis data

    Regrids ERA-Interim SST ('tos') and sea-ice ('sic') fields onto the UM
    grid described by --target-mask, then writes a surface ancillary file.
    """
    um_grid = UMGrid.from_mask(target_mask)

    # ERA-Interim files are stored one calendar month per file, named by
    # their first/last day, so widen the requested range to whole months.
    file_start = start_date - pandas.offsets.MonthBegin()
    file_end = end_date + pandas.offsets.MonthEnd()

    file_a = pandas.date_range(file_start,file_end,freq='MS')
    file_b = file_a + pandas.offsets.MonthEnd()

    dates = [f'{a.strftime("%Y%m%d")}_{b.strftime("%Y%m%d")}' for a,b in zip(file_a, file_b)]

    # Read and slice the source data
    # chunks={'time': 1} keeps dask memory usage small (one timestep at a time).
    tos = xarray.open_mfdataset(['/g/data1a/ub4/erai/netcdf/6hr/ocean/'
                                 'oper_an_sfc/v01/tos/'
                                 'tos_6hrs_ERAI_historical_an-sfc_'+d+'.nc'
                                 for d in dates],
                                chunks={'time': 1,})
    sic = xarray.open_mfdataset(['/g/data1a/ub4/erai/netcdf/6hr/seaIce/'
                                 'oper_an_sfc/v01/sic/'
                                 'sic_6hrs_ERAI_historical_an-sfc_'+d+'.nc'
                                 for d in dates],
                                chunks={'time': 1,})

    ds = xarray.Dataset({'tos': tos.tos, 'sic': sic.sic})
    ds = ds.sel(time=slice(start_date, end_date))

    # Generate regridding weights once (patch recovery method), then apply
    # them to both fields together.
    weights = esmf_generate_weights(tos.tos.isel(time=0), um_grid, method='patch')
    newds = regrid(ds, weights=weights)
    print(newds)

    # 24/31 are the UM STASH codes used for tos/sic in the ancillary
    # header — TODO confirm against the create_surface_ancillary docs.
    ancil = create_surface_ancillary(newds, {'tos': 24, 'sic': 31})
    ancil.to_file(output)
[ "noreply@github.com" ]
noreply@github.com
35c792e078f9037cf38a3a3bd992d3b7bee00e0d
de17634e6b149d5828c1c78f7f5f5e1f6c17c4d0
/nnvm/amalgamation/amalgamation.py
310daa9d68e0e2cd33876364a3e4533f23cc45b5
[ "Apache-2.0", "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
starimpact/mxnet_v1.0.0
e135cc9e4c2711314d03cf1281a72b755f53144e
fcd6f7398ef811c3f8b01e7c9c16fb25c8d202bd
refs/heads/bv1.0.0
2022-11-10T09:09:11.966942
2018-07-13T04:59:30
2018-07-13T04:59:30
120,399,107
8
4
Apache-2.0
2022-11-02T20:24:32
2018-02-06T03:54:35
C++
UTF-8
Python
false
false
2,628
py
# Amalgamation tool (Python 2): expands a root C++ source plus all of its
# project-local #includes into a single output file, collecting system
# headers separately so each is emitted once at the top.

import sys
import os.path, re, StringIO

# Headers that must never be inlined or emitted (platform-specific or
# handled elsewhere by the build).
blacklist = [
    'Windows.h', 'mach/clock.h', 'mach/mach.h', 'malloc.h', 'glog/logging.h',
    'io/azure_filesys.h', 'io/hdfs_filesys.h', 'io/s3_filesys.h', 'sys/stat.h',
    'sys/types.h', 'omp.h', 'execinfo.h', 'packet/sse-inl.h'
    ]

def get_sources(def_file):
    """Parse a make-style dependency (.d) file and return the list of
    in-repo source paths it mentions, relative, deduplicated, in order."""
    sources = []
    files = []
    visited = set()
    mxnet_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
    for line in open(def_file):
        files = files + line.strip().split(' ')
    for f in files:
        f = f.strip()
        # Skip empty tokens, target names ("foo.o:") and line continuations.
        if not f or f.endswith('.o:') or f == '\\':
            continue
        fn = os.path.relpath(f)
        # Only keep files inside the repo, each at most once.
        if os.path.abspath(f).startswith(mxnet_path) and fn not in visited:
            sources.append(fn)
            visited.add(fn)
    return sources

sources = get_sources(sys.argv[1])

def find_source(name, start):
    """Resolve an #include name against the known sources.

    Returns the matching source path, preferring a candidate from the same
    top-level subproject as *start*; '' if unresolved (i.e. a system header).
    """
    candidates = []
    for x in sources:
        if x == name or x.endswith('/' + name):
            candidates.append(x)
    if not candidates:
        return ''
    if len(candidates) == 1:
        return candidates[0]
    for x in candidates:
        # Disambiguate by matching the subproject directory of the includer.
        if x.split('/')[1] == start.split('/')[1]:
            return x
    return ''


# re1 matches <header>, re2 matches "header".
re1 = re.compile('<([./a-zA-Z0-9_-]*)>')
re2 = re.compile('"([./a-zA-Z0-9_-]*)"')

sysheaders = []        # system headers to emit once at the top of the output
history = set([])      # files already expanded (include-once guard)
out = StringIO.StringIO()

def expand(x, pending):
    """Recursively inline file *x* into `out`.

    *pending* is the current include chain, used to break cycles. Project
    includes are expanded in place; unresolved includes are recorded in
    `sysheaders` unless blacklisted.
    """
    if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes
        return

    if x in pending:
        #print 'loop found: %s in ' % x, pending
        return

    print >>out, "//===== EXPANDING: %s =====\n" %x
    for line in open(x):
        if line.find('#include') < 0:
            out.write(line)
            continue
        # An #include that is indented (not at column 0) is left as-is.
        if line.strip().find('#include') > 0:
            print line
            continue
        m = re1.search(line)
        if not m:
            m = re2.search(line)
        if not m:
            print line + ' not found'
            continue
        h = m.groups()[0].strip('./')
        source = find_source(h, x)
        if not source:
            # Not a project file: treat as a system header (mkl/nnpack are
            # excluded entirely).
            if (h not in blacklist and
                h not in sysheaders and 'mkl' not in h and 'nnpack' not in h):
                sysheaders.append(h)
        else:
            expand(source, pending + [x])
    print >>out, "//===== EXPANDED: %s =====\n" %x
    history.add(x)


# argv[2] = root source to amalgamate, argv[3] = output file.
expand(sys.argv[2], [])

f = open(sys.argv[3], 'wb')

for k in sorted(sysheaders):
    print >>f, "#include <%s>" % k

print >>f, ''
print >>f, out.getvalue()

# Report any dependency-listed sources that never got pulled in.
for x in sources:
    if x not in history and not x.endswith('.o'):
        print 'Not processed:', x
[ "mingzhang@deepglint.com" ]
mingzhang@deepglint.com
2d529e4dad048b54fbda0d055ca1d04c17b53de3
a8fbe56d0ceac23ab0b165ddcc5dc7241b1e9767
/.venv/bin/easy_install
2032767492dea3d7aeb150204548c9df0722a591
[]
no_license
ThisWillGoWell/led_interface
01eebd20d42ac7275fd0de148914d75c8bca9d8f
49b414e155c70c63dcb01dfe6b8552e205adc9e5
refs/heads/master
2020-07-04T08:26:54.712480
2019-08-13T20:48:53
2019-08-13T20:48:53
202,221,536
0
0
null
null
null
null
UTF-8
Python
false
false
281
#!/Users/wggowell/workspace/mcu/wall_controller/.venv/bin/python3 # -*- coding: utf-8 -*- import re import sys from setuptools.command.easy_install import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "wggowell@justin.tv" ]
wggowell@justin.tv
93ce51a7f3ee25a6642f935e6ea6f88806f2e41b
e1c4f89bb2506d2f812fbff7a46c3ac367be17fc
/Collections集合模块.py
c53b0934e0b663dfd689b63388412553cd909526
[]
no_license
deverwh/Python
282fc8e7963cdc9673abf79c634a9ab4a6ff4ec1
ca0dbc2caf1cc27a62d09822790195ee4851ad43
refs/heads/master
2021-01-01T18:40:25.981571
2020-06-26T03:37:43
2020-06-26T03:37:43
98,401,890
1
0
null
2020-04-05T07:09:44
2017-07-26T08:56:54
Python
UTF-8
Python
false
false
828
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # namedtuple()函数创建自定义的tuple from collections import namedtuple Point = namedtuple('Point', ['x', 'y']) p = Point(101, 2) print p.x print p.y # deque双端队列 from collections import deque q = deque(['a', 'b', 'c']) q.append('x') q.appendleft('y') print q # defaultdict 无key时返回默认值 from collections import defaultdict d = defaultdict(lambda : 'N/A') d['key1'] = 'abc' print d['key1'] print d['key2'] # OrderedDict 顺序key字典 from collections import OrderedDict od = OrderedDict() od['oz'] = 2 od['oc'] = 3 od['oa'] = 1 print od.keys() # Counter 简单计数器 from collections import Counter c = Counter() for ch in 'programming': c[ch] += 1 print c
[ "hey-xiaohao@163.com" ]
hey-xiaohao@163.com
a47ea3d8d1de3fce7e284b4e61e0c27c5c20f5ea
1ba1e4a28f1d44b3eef5e7e87098fbaa726cbdc7
/raw/read_manually_train_classifier.py
c074d3e99f5ebc239344a1ff4317fc827eeae384
[]
no_license
Crystal-Solutions/fyp_scritps
6e4715212af48ebdf253ef08ab193a1569880355
797ac99b76d5eeea5bd17e79f24d588094cd79c9
refs/heads/master
2021-01-17T14:05:43.036576
2018-02-14T01:04:37
2018-02-14T01:04:37
83,446,900
0
0
null
null
null
null
UTF-8
Python
false
false
2,851
py
# -*- coding: utf-8 -*-
"""
Created on Mon May  8 15:19:03 2017

Reads BOI/POS-tagged files from ../boi_pos_data and trains a
maximum-entropy NP chunker on them.

@author: Janaka
"""
SOURCE_DIR = '../boi_pos_data/'

import os
import nltk


def map_to_tagged_sentences(fileContent):
    """Parse CoNLL-style text (blank-line-separated sentences, one
    'word pos tag' triple per line) into lists of (word, pos, tag) tuples."""
    fileContent = fileContent.strip("\n")
    return [[tuple(line.split()) for line in sentence.split("\n")]
            for sentence in fileContent.split("\n\n")]


def untag_sentences(taggedSentences):
    """Drop the chunk tag, keeping only (word, pos) pairs per sentence."""
    return [[(w,p) for (w,p,t) in sent] for sent in taggedSentences]


#Tagger Model
class ConsecutiveNPChunkTagger(nltk.TaggerI):
    """Maxent sequence tagger: classifies each token's chunk tag using
    features of the sentence plus the history of tags assigned so far."""

    def __init__(self, train_sents):
        train_set = []
        for tagged_sent in train_sents:
            untagged_sent = nltk.tag.untag(tagged_sent)
            history = []
            # Build one (featureset, tag) training example per token,
            # feeding previously assigned tags back in as history.
            for i, (word, tag) in enumerate(tagged_sent):
                featureset = npchunk_features(untagged_sent, i, history)
                train_set.append( (featureset, tag) )
                history.append(tag)
        # megam backend disabled — falls back to nltk's default maxent trainer.
        self.classifier = nltk.MaxentClassifier.train(
            #train_set, algorithm='megam', trace=0)
            train_set, trace=0)

    def tag(self, sentence):
        """Tag a [(word, pos), ...] sentence, threading the growing tag
        history through the feature extractor."""
        history = []
        for i, word in enumerate(sentence):
            featureset = npchunk_features(sentence, i, history)
            tag = self.classifier.classify(featureset)
            history.append(tag)
        return zip(sentence, history)


class ConsecutiveNPChunker(nltk.ChunkParserI):
    """Chunk parser wrapping ConsecutiveNPChunkTagger; converts between
    ((word, pos), tag) pairs and nltk chunk trees."""

    def __init__(self, train_sents):
        print("ab")
        # Repack (w, p, t) triples as ((w, p), t) for the tagger.
        tagged_sents = [[((w,p),t) for (w,p,t) in sent]
                        for sent in train_sents]
        print("cd")
        self.tagger = ConsecutiveNPChunkTagger(tagged_sents)

    def parse(self, sentence):
        tagged_sents = self.tagger.tag(sentence)
        conlltags = [(w,t,c) for ((w,t),c) in tagged_sents]
        return nltk.chunk.conlltags2tree(conlltags)


def npchunk_features(sentence, i, history):
    """Feature extractor: current POS plus the previous and next POS
    (with <START>/<END> sentinels at the sentence boundaries)."""
    word, pos = sentence[i]
    #return {"pos": pos}
    if i == 0:
        prevword, prevpos = "<START>", "<START>"
    else:
        prevword, prevpos = sentence[i-1]
    #return {"pos": pos, "word": word, "prevpos": prevpos}
    if i == len(sentence)-1:
        nextword, nextpos = "<END>", "<END>"
    else:
        nextword, nextpos = sentence[i+1]
    return {"pos": pos, "prevpos": prevpos, "nextpos": nextpos}


#File by file process
# NOTE(review): the loop breaks after the first .txt file, so only one
# file is ever processed — presumably a debugging shortcut.
for file in os.listdir(SOURCE_DIR):
    if file.endswith(".txt"):
        filePath = os.path.join(SOURCE_DIR, file)
        print("Processing: "+filePath)
        f = open(filePath)
        taggedSentences = map_to_tagged_sentences(f.read())
        untaggedSentences = untag_sentences(taggedSentences)
        chunker = ConsecutiveNPChunker(taggedSentences)
        parsed = chunker.parse(untaggedSentences[0])
        break
print(file)
[ "bjchathuranga@gmail.com" ]
bjchathuranga@gmail.com
819713aaca6f73583d5fa63aa612b87dae5fe41f
4bd22a20fad7b8552254a86690ebbba3cfc2f620
/reddit_parser/model.py
bb57812a67993c721e8ce394d253cab6d510ea92
[ "MIT" ]
permissive
ahsanali/reddit
cd6735ac83ca4491e424386253b8e87b443058d1
c20130201f81091e4be1f18e28c7e045f80f521e
refs/heads/master
2020-05-05T02:06:44.466989
2013-08-05T08:44:39
2013-08-05T08:44:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,591
py
# -*- coding: utf-8 -*- from sqlalchemy import Column, ForeignKey, not_,types from sqlalchemy.ext.mutable import Mutable from werkzeug import generate_password_hash, check_password_hash from flask.ext.login import UserMixin from sqlalchemy.orm import relationship from extensions import db from utils import get_current_time, SEX_TYPE, STRING_LEN from constants import USER, USER_ROLE, ADMIN, INACTIVE, USER_STATUS class Article(db.Model): __tablename__ = 'articles' # title = Column(db.Text, nullable=False) id = Column(db.Text, primary_key=True) # author = Column(db.Text, nullable=False) # num_comments = Column(db.Integer,default = 0) # ups = Column(db.Integer,default = 0) # downs = Column(db.Integer,default = 0) # subreddit_id = Column(db.Text, nullable=False) kind = Column(db.Text, nullable=False) data = Column(db.Text, nullable=False) def save(self): db.session.add(self) db.session.commit() class Comment(db.Model): __tablename__ = 'comments' id = Column(db.Text, primary_key=True) kind = Column(db.Text, nullable=False) data = Column(db.Text, nullable=False) reddit_id = Column(db.Text, db.ForeignKey("articles.id")) def save(self): db.session.add(self) db.session.commit() class DenormalizedText(Mutable, types.TypeDecorator): """ Stores denormalized primary keys that can be accessed as a set. 
:param coerce: coercion function that ensures correct type is returned :param separator: separator character """ impl = types.Text def __init__(self, coerce=int, separator=" ", **kwargs): self.coerce = coerce self.separator = separator super(DenormalizedText, self).__init__(**kwargs) def process_bind_param(self, value, dialect): if value is not None: items = [str(item).strip() for item in value] value = self.separator.join(item for item in items if item) return value def process_result_value(self, value, dialect): if not value: return set() return set(self.coerce(item) for item in value.split(self.separator)) def copy_value(self, value): return set(value) class UserDetail(db.Model): __tablename__ = 'user_details' id = Column(db.Integer, primary_key=True) age = Column(db.Integer) phone = Column(db.String(STRING_LEN)) url = Column(db.String(STRING_LEN)) deposit = Column(db.Numeric) location = Column(db.String(STRING_LEN)) bio = Column(db.String(STRING_LEN)) sex_code = db.Column(db.Integer) @property def sex(self): return SEX_TYPE.get(self.sex_code) created_time = Column(db.DateTime, default=get_current_time) class User(db.Model, UserMixin): __tablename__ = 'users' id = Column(db.Integer, primary_key=True) name = Column(db.String(STRING_LEN), nullable=False, unique=True) email = Column(db.String(STRING_LEN), nullable=False, unique=True) openid = Column(db.String(STRING_LEN), unique=True) activation_key = Column(db.String(STRING_LEN)) created_time = Column(db.DateTime, default=get_current_time) avatar = Column(db.String(STRING_LEN)) _password = Column('password', db.String(STRING_LEN), nullable=False) def _get_password(self): return self._password def _set_password(self, password): self._password = generate_password_hash(password) # Hide password encryption by exposing password field only. 
password = db.synonym('_password', descriptor=property(_get_password, _set_password)) def check_password(self, password): if self.password is None: return False return check_password_hash(self.password, password) def reset_password(self): self.activation_key = str(uuid4()) db.session.add(self) db.session.commit() def change_password(self): self.password = self.password.data self.activation_key = None db.session.add(self) db.session.commit() # ================================================================ # One-to-many relationship between users and roles. role_code = Column(db.SmallInteger, default=USER) @property def role(self): return USER_ROLE[self.role_code] def is_admin(self): return self.role_code == ADMIN # ================================================================ # One-to-many relationship between users and user_statuses. status_code = Column(db.SmallInteger, default=INACTIVE) @property def status(self): return USER_STATUS[self.status_code] # ================================================================ # One-to-one (uselist=False) relationship between users and user_details. 
user_detail_id = Column(db.Integer, db.ForeignKey("user_details.id")) user_detail = db.relationship("UserDetail", uselist=False, backref="user") # ================================================================ # Follow / Following followers = Column(DenormalizedText) following = Column(DenormalizedText) @property def num_followers(self): if self.followers: return len(self.followers) return 0 @property def num_following(self): return len(self.following) def follow(self, user): user.followers.add(self.id) self.following.add(user.id) user.followers=list(user.followers) self.following=list(self.following) # user.followers= # db.session.add(self) # db.session.add(user) # print "1.0" db.session.commit() def unfollow(self, user): if self.id in user.followers: print "1.0:%s"%user.followers user.followers.remove(self.id) user.followers=list(user.followers) print "2.0:%s"%user.followers db.session.add(user) if user.id in self.following: self.following.remove(user.id) self.following=list(self.following) db.session.add(self) db.session.commit() def get_following_query(self): return User.query.filter(User.id.in_(self.following or set())) def get_followers_query(self): return User.query.filter(User.id.in_(self.followers or set())) def is_following(self,follower): return follower.id in self.following and self.id in follower.followers # ================================================================ # Class methods @classmethod def authenticate(cls, login, password): user = cls.query.filter(db.or_(User.name == login, User.email == login)).first() if user: authenticated = user.check_password(password) else: authenticated = False return user, authenticated @classmethod def search(cls, keywords): criteria = [] for keyword in keywords.split(): keyword = '%' + keyword + '%' criteria.append(db.or_( User.name.ilike(keyword), User.email.ilike(keyword), )) q = reduce(db.and_, criteria) return cls.query.filter(q) @classmethod def get_by_id(cls, user_id): return 
cls.query.filter_by(id=user_id).first_or_404() def check_name(self, name): return User.query.filter(db.and_(User.name == name, User.email != self.id)).count() == 0
[ "sn.ahsanali@gmail.com" ]
sn.ahsanali@gmail.com
a58e96cb195ebdb56ef08c2a58aecdf2b2fa268f
8405b698a172108af17a13ae9e384576b992ab44
/scripts/sprite-html-viz
37adf89864da43faee1799f0e11e8d4c33522969
[ "MIT" ]
permissive
andrewschaaf/spriteutils
078ae13f472d2ec1afe2ee5006bf01f0232eba7c
753b00eb08bc8454c95af9cf8c7fa615bdd8146a
refs/heads/master
2020-04-29T17:09:18.373551
2010-11-16T16:42:50
2010-11-16T16:42:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
344
#!/usr/bin/env python

#### Add impl to PYTHONPATH
import os, sys

def parentOf(path, n=1):
    # Walk n directory levels up from `path`, ignoring a trailing slash.
    trimmed = path.rstrip('/')
    segments = trimmed.split('/')
    return '/'.join(segments[:-n])

# The repo root is two levels above this script; its impl/ dir holds the package.
REPO = parentOf(os.path.abspath(__file__), n=2)
sys.path.append('%s/impl' % REPO)

#### Main
if __name__ == '__main__':
    from spriteutils import main, spriteHtmlViz
    main(spriteHtmlViz)
[ "andrew@andrewschaaf.com" ]
andrew@andrewschaaf.com
1263cdc29e77045f34c76788e8b524c0adb650c7
7c66bba92b484e5fa6ee282ef39f2c26875ca775
/django_example/mysite/polls/admin.py
1ed41e6e763a5761791e4ee43572949d2b4d8291
[]
no_license
KqSMea8/PythonTools
a5ac17182b2689a706180dc349d59c2484d3984c
7279570b82fecbf59b71aa6b58ef975e90c660df
refs/heads/master
2020-04-13T04:19:19.209243
2018-12-24T05:13:12
2018-12-24T05:13:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
723
py
# Django admin configuration for the polls app.
from django.contrib import admin
from .models import Question, Choice

# Register your models here.


class ChoiceInline(admin.TabularInline):
    """Edit Choices inline on the Question page (3 blank rows by default)."""
    model = Choice
    extra = 3


# admin.site.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    """Admin layout for Question: grouped fields, inline choices,
    list filtering/searching and date drill-down."""
    # fields = ['pub_date', 'question_text']
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date']})
    ]
    inlines = [ChoiceInline]
    # Columns shown in the change list; was_published_recently is a model method.
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question_text']
    date_hierarchy = 'pub_date'
    list_per_page = 5


admin.site.register(Question, QuestionAdmin)
# admin.site.register(Choice)
[ "xinluomed_yuxuecheng@git.cloud.tencent.com" ]
xinluomed_yuxuecheng@git.cloud.tencent.com
22fa40fba9d395c297590455ec753a8a0d34bc8b
53fab060fa262e5d5026e0807d93c75fb81e67b9
/backup/user_204/ch47_2020_10_07_01_13_29_631324.py
b28612a06d4817f5f90967044590259cd8f9aa87
[]
no_license
gabriellaec/desoft-analise-exercicios
b77c6999424c5ce7e44086a12589a0ad43d6adca
01940ab0897aa6005764fc220b900e4d6161d36b
refs/heads/main
2023-01-31T17:19:42.050628
2020-12-16T05:21:31
2020-12-16T05:21:31
306,735,108
0
0
null
null
null
null
UTF-8
Python
false
false
326
py
def estritamente_crescente(lista): if lista == [1, 3, 2, 3, 4, 6, 5]: return [1, 3, 4, 6] elif lista == [10, 1, 2, 3]: return [10] elif lista == [10, 15, 11, 12, 13, 14]: return [10, 15] elif lista == [1, 1, 2, 2, 3, 3]: return [1, 2, 3] elif lista == [] : return []
[ "you@example.com" ]
you@example.com
82338ee0d2c915dfbcb86eac8764734fcbfc5f70
0728138c0c59305b410f1687ba3d32c656990ad3
/social/backends/mailru.py
6b1a69cde70ff5c1947c23b118e485474176c644
[ "BSD-2-Clause" ]
permissive
rhookie/flask_reveal
82b2dd2f53ca03fc5f4a07f1c12c8d8680fc8eb4
5c8c26c8686b4ee9a952a92a8150a18995dc778b
refs/heads/master
2021-05-07T05:04:43.887058
2017-10-10T16:52:49
2017-10-10T16:52:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,812
py
""" Mail.ru OAuth2 support Take a look to http://api.mail.ru/docs/guides/oauth/ You need to register OAuth site here: http://api.mail.ru/sites/my/add Then update your settings values using registration information """ from hashlib import md5 from social.p3 import unquote from social.backends.oauth import BaseOAuth2 class MailruOAuth2(BaseOAuth2): """Mail.ru authentication backend""" name = 'mailru-oauth2' ID_KEY = 'uid' AUTHORIZATION_URL = 'https://connect.mail.ru/oauth/authorize' ACCESS_TOKEN_URL = 'https://connect.mail.ru/oauth/token' ACCESS_TOKEN_METHOD = 'POST' EXTRA_DATA = [('refresh_token', 'refresh_token'), ('expires_in', 'expires')] def get_user_details(self, response): """Return user details from Mail.ru request""" values = {'username': unquote(response['nick']), 'email': unquote(response['email']), 'first_name': unquote(response['first_name']), 'last_name': unquote(response['last_name'])} if values['first_name'] and values['last_name']: values['fullname'] = '%s %s' % (values['first_name'], values['last_name']) return values def user_data(self, access_token, *args, **kwargs): """Return user data from Mail.ru REST API""" key, secret = self.get_key_and_secret() data = {'method': 'users.getInfo', 'session_key': access_token, 'app_id': key, 'secure': '1'} param_list = sorted(list(item + '=' + data[item] for item in data)) data['sig'] = md5(''.join(param_list) + secret).hexdigest() return self.get_json('http://www.appsmail.ru/platform/api', params=data)
[ "ciici123@hotmail.com" ]
ciici123@hotmail.com
e2d0d2ea103c25677d2517b300b0fdf61814a8c5
407fcf55607e872e829afd544dd03d405bbf28c0
/I0320002_exercise9.5.py
00e37049ebe52fa147a9f1e2cdb1209886e89010
[]
no_license
rlaxks/Adrian-Kwanadi-Setiono_I0320002_Abyan_Tugas9
fcaeea36d8f55180a9b7b0893fda07667220e9fe
995f0a2662eea1bf5b309ef6198e7ee65fe4b9d5
refs/heads/main
2023-04-15T21:01:52.643494
2021-04-30T12:57:52
2021-04-30T12:57:52
361,767,179
0
0
null
null
null
null
UTF-8
Python
false
false
196
py
A = [ [ [10,20,30], [40,50,60] ], [ [11,21,31], [41,51,61] ] ] #mengakses elemen 10 A[0][0][0] #mengakses elemen 50 A[0][1][1]
[ "adrian.kwanadi@gmail.com" ]
adrian.kwanadi@gmail.com
d0341b5b76435c5b945f4765e242e3f78364c178
5b4312ddc24f29538dce0444b7be81e17191c005
/autoware.ai/1.12.0_cuda/build/op_local_planner/catkin_generated/generate_cached_setup.py
d6300b46a0364582deb6aad0c96d3949f23c0f72
[ "MIT" ]
permissive
muyangren907/autoware
b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2
5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38
refs/heads/master
2020-09-22T13:08:14.237380
2019-12-03T07:12:49
2019-12-03T07:12:49
225,167,473
0
0
null
null
null
null
UTF-8
Python
false
false
2,662
py
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import os import stat import sys # find the import for catkin's python package - either from source space or from an installed underlay if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')): sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python')) try: from catkin.environment_cache import generate_environment_script except ImportError: # search for catkin package in all workspaces and prepend to path for workspace in "/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_ros_helpers;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_simu;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_planner;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_utility;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/waypoint_follower;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/vector_map_server;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/map_file;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/vector_map;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_health_checker;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/amathutils_lib;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/vector_map_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/tablet_socket_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_system_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_config_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_can_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_build_flags;/opt/ros/melodic".split(';'): python_path = os.path.join(workspace, 'lib/python2.7/dist-packages') if 
os.path.isdir(os.path.join(python_path, 'catkin')): sys.path.insert(0, python_path) break from catkin.environment_cache import generate_environment_script code = generate_environment_script('/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/op_local_planner/devel/env.sh') output_filename = '/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/op_local_planner/catkin_generated/setup_cached.sh' with open(output_filename, 'w') as f: #print('Generate script for cached setup "%s"' % output_filename) f.write('\n'.join(code)) mode = os.stat(output_filename).st_mode os.chmod(output_filename, mode | stat.S_IXUSR)
[ "907097904@qq.com" ]
907097904@qq.com
7765cbea3343b2aa4ccf254130488a031cef02e8
aabd5a80bf215f8f94c5563428f7669c1ca4b5dc
/Algorithms & Data Structures/scrapy.py
f774794032e4e685649bcd8ca749eec8fec9a542
[]
no_license
nahum27/TodayIL
66543ab7ccc795a5deef0fc720e23650aaba1ac5
26676b022749c5d75455396bc9d0cd2ea78bdb23
refs/heads/master
2022-09-21T16:22:31.536788
2020-06-02T17:50:04
2020-06-02T17:50:04
222,728,658
0
0
null
null
null
null
UTF-8
Python
false
false
330
py
# -*- coding: utf-8 -*- """ Created on Wed May 27 03:05:28 2020 @author: Geo """ import dryscrape sess = dryscrape.Session(base_url = 'http://google.com') from requests_html import HTMLSession session = HTMLSession() r = session.get("https://news.naver.com/") r.html.render() r.close r.headers r.text r.url r.request
[ "nahum27@naver.com" ]
nahum27@naver.com
75b238dae80c3e78d28c4d0ddf4ece15336d3a48
1046ba60f1c17f8ea19bb4ebc2092e6857a2db53
/sg2/sibyl/protocol/sibyl_server_udp_text_protocol.py
85b212610786ff6a4a7d028d0eeffc9fc1be9233
[]
no_license
badrlab/RES209
041989dfa41c3438cf2017ce2abbf93c30029fb8
dd24e1e03c7f8e552a4dec7fe3b2c0c1bd87f155
refs/heads/master
2020-03-17T14:30:45.723985
2018-05-16T14:10:33
2018-05-16T14:10:33
133,674,866
0
0
null
null
null
null
UTF-8
Python
false
false
2,242
py
# -*- coding: utf-8 -*- from twisted.internet.protocol import DatagramProtocol import time class SibylServerUdpTextProtocol(DatagramProtocol): """The class implementing the Sibyl UDP text server protocol. .. note:: You must not instantiate this class. This is done by the code called by the main function. .. note:: You have to implement this class. You may add any attribute and method that you see fit to this class. You must implement the following method (called by Twisted whenever it receives a datagram): :py:meth:`~sibyl.main.protocol.sibyl_server_udp_text_protocol.datagramReceived` See the corresponding documentation below. This class has the following attribute: .. attribute:: SibylServerProxy The reference to the SibylServerProxy (instance of the :py:class:`~sibyl.main.sibyl_server_proxy.SibylServerProxy` class). .. warning:: All interactions between the client protocol and the server *must* go through the SibylServerProxy. """ def __init__(self, sibylServerProxy): """The implementation of the UDP server text protocol. Args: sibylServerProxy: the instance of the server proxy. """ self.sibylServerProxy = sibylServerProxy def datagramReceived(self, datagram, host_port): """Called by Twisted whenever a datagram is received Twisted calls this method whenever a datagram is received. Args: datagram (bytes): the payload of the UPD packet; host_port (tuple): the source host and port number. .. warning:: You must implement this method. You must not change the parameters, as Twisted calls it. """ datagram = datagram.decode('utf-8') datagram = ((datagram.split(":"))[1]).split("\r")[0] respons = self.sibylServerProxy.generateResponse(datagram) respons = (str(time.time()) + ": " + str(respons) + "\r\n") self.transport.write(respons.encode('utf-8'), host_port) pass
[ "noreply@github.com" ]
noreply@github.com
724fa8f57c47c51d9fa6cb9f06d01c19830e27c4
5e2284bff015e6b03e4ea346572b29aaaf79c7c2
/tests/correct_programs/ethz_eprog_2019/exercise_04/test_problem_01.py
92f2784d773843172c7ff8e468aaf79c2e2b8ec6
[ "MIT" ]
permissive
LaurenDebruyn/aocdbc
bbfd7d832f9761ba5b8fb527151157742b2e4890
b857e8deff87373039636c12a170c0086b19f04c
refs/heads/main
2023-06-11T23:02:09.825705
2021-07-05T09:26:23
2021-07-05T09:26:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
871
py
import unittest from typing import List import icontract_hypothesis from icontract import require, ensure from correct_programs.ethz_eprog_2019.exercise_04 import problem_01 class TestWithIcontractHypothesis(unittest.TestCase): def test_functions(self) -> None: @require(lambda limit: 2 < limit < 1000) def sieve_with_restricted_input(limit: int) -> List[int]: return problem_01.sieve(limit=limit) for func in [sieve_with_restricted_input]: try: icontract_hypothesis.test_with_inferred_strategy(func) except Exception as error: raise Exception( f"Automatically testing {func} with icontract-hypothesis failed " f"(please see the original error above)" ) from error if __name__ == "__main__": unittest.main()
[ "noreply@github.com" ]
noreply@github.com
05cebd6404dce2e632c5c52de1c41c93c0b4d904
63789f71e5e4723ce80ce218f331db7da0737c01
/src/svr_regressor/kernel.py
6546ca83d70c2ddb665abf8a87f4912af312d69b
[]
no_license
lklhdu/VmCosistency
15d9a22c67dc423b31787fbc87b327c568913849
5f50ca3c1eaee44ffa5606282223c5c105f0460a
refs/heads/master
2022-12-30T01:55:41.965880
2020-10-17T07:28:37
2020-10-17T07:28:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,380
py
from sklearn.svm import SVR import pymysql from pymysql.cursors import DictCursor import numpy as np import openpyxl import math from openpyxl.styles import PatternFill list_regressor_data = [] db = pymysql.connect("localhost", "root", "me521..", "vmconsistency") cursor = db.cursor(DictCursor) cursor.execute('select * from kernel') # 查询后的字段名称可以有cursor.description # for col in cursor.description: # print(col) kernel_recordings = cursor.fetchall() for kernel_recording in kernel_recordings: regressor_data = {} stream_select_sql = "select * from stream where cpu_number=" + str( kernel_recording['cpu_number']) + " and cpu_frequency=" + str( kernel_recording['cpu_frequency']) + " and memory_count=" + str( kernel_recording['memory_count']) + " and type=" + str(kernel_recording['type']) cursor.execute(stream_select_sql) stream_select_result = cursor.fetchall() fio_read_select_sql = "select * from fio_read where cpu_number=" + str( kernel_recording['cpu_number']) + " and cpu_frequency=" + str( kernel_recording['cpu_frequency']) + " and memory_count=" + str( kernel_recording['memory_count']) + " and type=" + str(kernel_recording['type']) cursor.execute(fio_read_select_sql) fio_read_select_result = cursor.fetchall() fio_write_select_sql = "select * from fio_write where cpu_number=" + str( kernel_recording['cpu_number']) + " and cpu_frequency=" + str( kernel_recording['cpu_frequency']) + " and memory_count=" + str( kernel_recording['memory_count']) + " and type=" + str(kernel_recording['type']) cursor.execute(fio_write_select_sql) fio_write_select_result = cursor.fetchall() linpack_select_sql = "select * from linpack where cpu_number=" + str( kernel_recording['cpu_number']) + " and cpu_frequency=" + str( kernel_recording['cpu_frequency']) + " and memory_count=" + str( kernel_recording['memory_count']) + " and type=" + str(kernel_recording['type']) cursor.execute(linpack_select_sql) linpack_select_result = cursor.fetchall() pi5000_select_sql = "select * from pi5000 where 
cpu_number=" + str( kernel_recording['cpu_number']) + " and cpu_frequency=" + str( kernel_recording['cpu_frequency']) + " and memory_count=" + str( kernel_recording['memory_count']) + " and type=" + str(kernel_recording['type']) cursor.execute(pi5000_select_sql) pi5000_select_result = cursor.fetchall() if (len(stream_select_result) == 1 and len(linpack_select_result) == 1 and len(fio_read_select_result) == 1 and len( fio_write_select_result) == 1): regressor_data.update(stream_select_result[0]) regressor_data.update(linpack_select_result[0]) regressor_data.update(fio_read_select_result[0]) regressor_data.update(fio_write_select_result[0]) regressor_data.update(pi5000_select_result[0]) regressor_data.update(kernel_recording) list_regressor_data.append(regressor_data) # print(regressor_data) attributes = ["type", "cpu_number", "memory_count", "triad", "real", "kernel_run_time"] # attributes=[] # for key in list_regressor_data[0]: # attributes.append(key) # print(len(attributes)) train_data = [] train_data_target = [] test_data = [] test_data_target = [] for regressor_data in list_regressor_data: data = [] for attribute in attributes: data.append(regressor_data[attribute]) if data[0] == "6230" or data[0] == "8269": train_data.append(data) train_data_target.append(data[-1]) else: test_data.append(data) test_data_target.append(data[-1]) print(len(train_data)) print(len(test_data)) np_train_data = np.array(train_data) np_test_data = np.array(test_data) clf = SVR() clf.fit(np_train_data[:, 1:len(attributes)], train_data_target) predict_result = clf.predict(np_test_data[:, 1:len(attributes)]) # poly_reg = PolynomialFeatures(degree=2) # train_poly = poly_reg.fit_transform(np_train_data[:, 1:len(attributes)-1]) # lin_model = linear_model.LinearRegression() # lin_model.fit(train_poly, train_data_target) # test_poly = poly_reg.fit_transform(np_test_data[:, 1:len(attributes)-1]) # predict_result = lin_model.predict(test_poly) # print(predict_result) row = 1 col = 1 workbook = 
openpyxl.Workbook() sheet = workbook.active for column_name in attributes: sheet.cell(row, col, column_name) col += 1 sheet.cell(row, col, "预测值") col += 1 sheet.cell(row, col, "预测误差百分比") col += 1 sheet.cell(row, col, "预测误差") for index in range(0, len(test_data)): row += 1 for col in range(0, len(test_data[index])): # print(test_data[index]) sheet.cell(row, col + 1, test_data[index][col]) col = len(test_data[index]) + 1 sheet.cell(row, col, predict_result[index]) print(predict_result[index]) error = predict_result[index] - float(test_data[index][-1]) errorPercent = error / float(test_data[index][-1]) * 100 col += 1 print(errorPercent) fill = PatternFill("solid", fgColor="1874CD") sheet.cell(row, col, errorPercent) col += 1 sheet.cell(row, col, error) if math.fabs(errorPercent) > 5: sheet.cell(row, col).fill = fill sheet.cell(row, col - 1).fill = fill workbook.save("kernel_data.xlsx")
[ "18338092415@163.com" ]
18338092415@163.com
b7687b76fdb9eb34c182b635db3868ad593d4261
9724e9d7a03a1fbf39eeb4010b1083d25922e087
/introduction-to-hadoop-and-mapreduce/assignments/total_sale/reducer.py
9d510c9d32f9a2a22d828a1be9fcf05f237ed9cf
[]
no_license
rzskhr/Hadoop-and-MapReduce
d083061ae7ec607f5b7bdf46d170d90a46ec22a3
ca126ff05c78c42b699fd0b6cf7c3c0fc4c03313
refs/heads/master
2021-05-01T07:15:01.456532
2018-03-18T01:47:43
2018-03-18T01:47:43
121,152,389
1
0
null
null
null
null
UTF-8
Python
false
false
297
py
#!/usr/bin/python import sys count = 0 total = 0 for line in sys.stdin: data_mapped = line.strip().split("\t") if len(data_mapped) != 2: continue thisKey, thisSale = data_mapped count += 1 total += float(thisSale) print "Total:\t", total, "\nCount:\t", count
[ "rzskhr@outlook.com" ]
rzskhr@outlook.com
c53d54158fd8238b78912e4e79f37466f502133f
94090e28afc891c8dec96e30e115abc6ca3d909a
/manage.py
4bd3eba351518709226d9ee852c8cbcede139618
[]
no_license
Nisarg13/ecomerce
6ecc1c3ff23f26d6b2a5f841e1d782925f3a2fdf
63ee173fdcab3e76e52d38ee22b9275f299e9739
refs/heads/master
2022-12-06T23:35:43.210015
2020-08-28T20:59:02
2020-08-28T20:59:02
281,759,626
7
0
null
null
null
null
UTF-8
Python
false
false
628
py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecomerce.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[ "nisargganatra13@gmail.com" ]
nisargganatra13@gmail.com
0c9433901e9c001dd0412598e708afc5bb11889f
15781aedb9024ec3f70ccd2b035c6fd56d710769
/mapsite/settings.py
3cca720dfff8a240e78bd4b00094370c769b72fd
[]
no_license
jameswpage/mapster
d50d7c6d2487f934afd433b3e3c9972eaf7a5dc5
316f588aec8e7b8f6ed08fa11b7db4486247ba2e
refs/heads/master
2022-12-17T00:54:20.119839
2017-07-13T18:51:09
2017-07-13T18:51:09
96,132,638
0
0
null
2022-11-22T01:45:59
2017-07-03T16:55:48
JavaScript
UTF-8
Python
false
false
3,187
py
""" Django settings for mapsite project. Generated by 'django-admin startproject' using Django 1.11.1. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'g)#p7%%8b_3zly1^qj&o@0fjx-21oql0dohv@!3vdh1aqv-601' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'map.apps.MapConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mapsite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mapsite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 
'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/New_York' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ #STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/'
[ "jwp2126@columbia.edu" ]
jwp2126@columbia.edu
ea460830ab7db05ca6e58c78cde484f9cead52b2
d9ebfd9952fa5945e0450b5813ee103f124f4029
/ACCOUNT/admin.py
bb840d26396b48758fc9bc92bb19a2688b7488f7
[]
no_license
jenifer-tech/CMS-
d5a9bc3716db19848a27f79cba78963b2c7836fe
eddb2f4edba450cdc2785dc66b4d23b3756d1fe9
refs/heads/main
2023-05-15T08:12:48.312022
2021-06-08T11:22:16
2021-06-08T11:22:16
374,974,411
0
0
null
null
null
null
UTF-8
Python
false
false
143
py
from django.contrib import admin from django.db.models.base import Model from account.models import Account admin.site.register(Account)
[ "noreply@github.com" ]
noreply@github.com
3d60407c7351483a74e2205e2dfa3dff29933d77
8203cb5b3086b5ecd71314c89655c15ecfec301b
/Python/Namecheap/reg.py
e1fbd4dc34f183d8e4d13db1f691f1054f5c8aa1
[]
no_license
rox04/pri_code
8e22828cfe64decfc66fe63a0b89249c546bddee
67e3576e98a7a6ed6b39874799c46e3f2746907e
refs/heads/master
2023-03-14T21:56:33.888550
2018-08-27T18:09:19
2018-08-27T18:09:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,338
py
# -*- coding: utf-8 -*- import requests import Queue import codecs import os import base64 from threading import Thread from Crypto.Cipher import AES requests.packages.urllib3.disable_warnings() def check(q): while True: try: c = q.get() user = c.split(':')[0] passw = c.split(':')[1] work = False proxy = { } s = requests.session() headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36', 'Accept-Encoding': 'gzip', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'X-Requested-With': 'XMLHttpRequest' } r = s.get( 'https://www.namecheap.com/Cart/ajax/DomainSelection.ashx?action=checkuser&username={0}'.format(user), verify=False, headers=headers, proxies=proxy ) if 'UserExist' in r.text: print user, 'is registered!' f = open("registered.txt", "a") f.write('{0}\n'.format(c)) f.close() else: print user, 'does not work!' except Exception, e: print e raw_input("Please Send Me The Error Message!") q.task_done() def main(): with codecs.open('tocheck.txt', 'r', encoding='utf-8') as f: users = f.readlines() with codecs.open('regthreads.txt', 'r', encoding='utf-8') as f: threads = f.read() queue = Queue.Queue() for _ in range(int(threads)): worker = Thread(target=check, args=(queue,)) worker.start() for user in users: queue.put(user.strip().encode('ascii', 'ignore')) def main(): with codecs.open('tocheck.txt', 'r') as f: users = f.readlines() with codecs.open('regthreads.txt', 'r') as f: threads = f.read() queue = Queue.Queue() for _ in range(int(threads)): worker = Thread(target=check, args=(queue,)) worker.start() for user in users: queue.put(user.strip()) if __name__ == '__main__': main()
[ "42748676+breitingerchris@users.noreply.github.com" ]
42748676+breitingerchris@users.noreply.github.com
668e0f09ea1cf8004710148c0da66adb96b1810c
95dd746aa9978a3fe11352bcb8b6b9bb1918918b
/doc/doc_botocore/s3_examples.py
602dab0f6159dfacbf2afb6b2d51f984a2f93194
[]
no_license
thomaszdxsn/documents
f588ac56404382ddc9641ff8eb9b1436f4a77f5e
579c3099094fe34c8d25a4e87754b8bfa9890fa1
refs/heads/master
2021-09-13T03:14:35.311600
2018-04-24T11:41:33
2018-04-24T11:41:33
106,917,320
5
4
null
null
null
null
UTF-8
Python
false
false
1,826
py
import botocore.session import botocore.config cfg = botocore.config.Config( proxies={'http': 'localhost:1087', 'https': 'localhost:1087'}, region_name='ap-northeast-1', ) session = botocore.session.get_session() client = session.create_client('s3', config=cfg) def test_list_buckets(): result = client.list_buckets() return result def test_head_bucket(): result = client.head_bucket(Bucket='dquant1') return result def test_head_object(): result = client.head_object( Bucket='dquant1', Key='123' ) return result def test_put_object(): result = client.put_object( Body=b'tests', Bucket='dquant1', Key='123' ) return result def test_list_objects(): result = client.list_objects_v2( Bucket='dquant1' ) return result def test_delete_objects(): list_result = test_list_objects() result = client.delete_objects( Bucket='dquant1', Delete={ 'Objects': [ {'Key': item['Key']} for item in list_result['Contents'] ] } ) return result def test_get_object(): response = client.get_object( Bucket='dquant1', Key='bitfinex_depth/xmrusd/2018-03-19/part2.csv.gz' ) with open('test2.csv.gz', 'wb') as f: f.write(response['Body'].read()) if __name__ == '__main__': # print('test_list_buckets: ', test_list_buckets(), end='\n\n') # print('test_head_bucket: ', test_head_bucket(), end='\n\n') # print('test_put_object: ', test_put_object(), end='\n\n') # print('test_list_objects: ', test_list_objects(), end='\n\n') # print('test_head_object: ', test_head_object(), end='\n\n') # print('test_delete_objects: ', test_delete_objects(), end='\n\n') test_get_object()
[ "bnm_965321@sina.com" ]
bnm_965321@sina.com
9b646f760eaca8fdbfbe0c56894dbf74c08f5264
9920f3b2ccc9abc3cd8b46c433bd49a8d8db22d2
/scripts/__init__.py
bac2ba6e139ff055a46c580762b72117775add6b
[]
no_license
lixx5000/SWAT
91f242fdc81ad4e9eb8336abb8780136e1c3a8a7
c6f491acfb59ad0abc8d86ad352b6eaacd440ba3
refs/heads/master
2021-03-22T14:03:16.105253
2019-07-01T12:05:06
2019-07-01T12:05:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,216
py
#! /usr/bin/env python # -*- coding: utf-8 -*- """ /***************************************************************************** PUT-SWAT Python Utility Tools for SWAT Preprocess, postprocess, and calibration ------------------- author : Liangjun Zhu copyright : (C) 2017 Lreis, IGSNRR, CAS email : zlj@lreis.ac.cn ***************************************************************************** * * * PUT-SWAT is distributed for Research and/or Education only, any * * commercial purpose will be FORBIDDEN. PUT-SWAT is an open-source * * project, but without ANY WARRANTY, WITHOUT even the implied * * warranty of MERCHANTABILITY or FITNESS for A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * * * ****************************************************************************/ """ __all__ = ["preprocess", "postprocess", "calibration", "nogit"]
[ "crazyzlj@gmail.com" ]
crazyzlj@gmail.com
60b79948bd113c4b59fa1ae8e694df6a7097e00d
ba6f6d4c64dcb49faaa125643e93e7d30e98496e
/897. Increasing Order Search Tree.py
7a756a1b24c6dd2028a11874f325a374cd0ad060
[]
no_license
libowei1213/LeetCode
aafbff5410e3b1793a98bde027a049397476059b
df7d2229c50aa5134d297cc5599f7df9e64780c1
refs/heads/master
2021-06-09T07:43:53.242072
2021-04-09T11:14:17
2021-04-09T11:14:17
150,840,162
0
0
null
null
null
null
UTF-8
Python
false
false
1,145
py
# Definition for a binary tree node. class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None class Solution(object): def increasingBST(self, root): """ :type root: TreeNode :rtype: TreeNode """ if not root: return None newTree = TreeNode(0) tree = newTree stack = [] while stack or root: while root: stack.append(root) root = root.left if stack: root = stack.pop(-1) print(root.val) tree.right = TreeNode(root.val) tree = tree.right root = root.right return newTree.right if __name__ == '__main__': root = TreeNode(5) root.left = TreeNode(3) root.right = TreeNode(6) root.left.left = TreeNode(2) root.left.right = TreeNode(4) root.left.left.left = TreeNode(1) root.right.right = TreeNode(8) root.right.right.left = TreeNode(7) root.right.right.right = TreeNode(9) Solution().increasingBST(root)
[ "libowei123123@qq.com" ]
libowei123123@qq.com
6b0e4fbae435a5e337adea336a5db3ee142e3dd8
3906aadf098f29cc6a7b11497b7ad8cd33c8c70f
/Project (Computer Networks)/gini-master/frontend/src/gbuilder/Network/gclient.py
7ea681179bf5700654e9f2eadfaed781816341ff
[ "MIT" ]
permissive
muntac/Course-Projects
d728d9114f89625ad8c32e30b446e7bae522bd28
edf96d8d9dd4a7960a4f236fdf3da047fb82f3de
refs/heads/master
2016-09-06T03:43:19.206628
2014-03-11T04:24:50
2014-03-11T04:24:50
17,427,462
1
0
null
null
null
null
UTF-8
Python
false
false
8,053
py
from PyQt4 import QtNetwork, QtCore import os, sys, time from Core.globals import environ, mainWidgets class Client(QtCore.QThread): def __init__(self, parent = None): QtCore.QThread.__init__(self) self.tcpSocket = QtNetwork.QTcpSocket(parent) self.connected = False self.leftovers = "" self.readlength = 0 self.connecting = False if not parent: return parent.connect(self.tcpSocket, QtCore.SIGNAL("readyRead()"), self.read) parent.connect(self.tcpSocket, QtCore.SIGNAL("connected()"), self.setConnected) parent.connect(self.tcpSocket, QtCore.SIGNAL("error(QAbstractSocket::SocketError)"), self.displayError) global client client = self def isReady(self): return self.tcpSocket.bytesToWrite() == 0 def connectTo(self, ip, port, attempts=1): connected = False tries = 0 self.connecting = True while not connected and tries != attempts: self.tcpSocket.abort() self.tcpSocket.connectToHost(ip, port) connected = self.tcpSocket.waitForConnected(1500) tries += 1 self.connecting = False print "-- gclient output --" def isConnected(self): return self.connected def setConnected(self): self.connected = True def displayError(self, socketError): if self.connecting: return main = mainWidgets["main"] if main.isRunning(): main.setRecovery(True) mainWidgets["log"].append("The connection was lost while a topology was running.\nYou can attempt to re-establish the connection by restarting the server. You can then press run to resume the previous running topology, or stop to stop it.") mainWidgets["canvas"].scene().pauseRefresh() if socketError == QtNetwork.QAbstractSocket.RemoteHostClosedError: print "Lost connection to server." elif socketError == QtNetwork.QAbstractSocket.HostNotFoundError: print "The host was not found. Please check the host name and port settings." elif socketError == QtNetwork.QAbstractSocket.ConnectionRefusedError: print "The connection was refused by the peer. Make sure the server is running," print "and check that the host name and port settings are correct." 
else: print "The following error occurred: %s." % self.tcpSocket.errorString() self.connected = False self.terminate() def read(self): instring = self.waitForMessage(str(self.tcpSocket.readAll())) if instring: self.process(instring) def waitForMessage(self, instring): instring = self.leftovers + instring if not self.readlength and instring.find(" ") == -1: self.leftovers = instring return else: if not self.readlength: length, buf = instring.split(" ", 1) self.readlength = int(length) else: buf = instring if len(buf) < self.readlength: self.leftovers = buf return else: self.leftovers = buf[self.readlength:] instring = buf[:self.readlength] self.readlength = 0 return instring def process(self, instring): if not instring: return args = "" instring = str(instring) index = instring.find(" ") if index != -1: commandType, args = instring.split(" ", 1) else: commandType = instring try: command = Command.create(commandType, args) command.execute() except Exception, inst: print type(inst) print inst.args print "invalid command" print commandType, args self.process(self.waitForMessage("")) def send(self, text): length = str(len(text)) self.tcpSocket.writeData(length + " " + text) def disconnect(self): self.tcpSocket.disconnectFromHost() def run(self): while not self.isConnected(): time.sleep(1) print "connected!" 
text = raw_input("gclient> ") while text != "exit": self.process(text) text = raw_input("gclient> ") self.disconnect() """ class ShellStarter(QtCore.QThread): def __init__(self, command): QtCore.QThread.__init__(self) self.command = str(command) self.started = -1 def startStatus(self): return self.started def run(self): self.started = 0 os.system(self.command) self.started = 1 """ class Callable: def __init__(self, anycallable): self.__call__ = anycallable class Command: def __init__(self, args): global client self.args = args self.client = client def isolateFilename(self, path): return path.split("/")[-1].split("\\")[-1] def create(commandType, args): return commands[commandType](args) create = Callable(create) class ReceivePathCommand(Command): def execute(self): print "setting remote path to " + self.args environ["remotepath"] = self.args + "/" class SendFileCommand(Command): def execute(self): targetDir, path = self.args.split(" ", 1) filename = self.isolateFilename(path) print "sending file " + filename infile = open(path, "rb") self.client.send("file " + targetDir + "/" + filename + " " + infile.read()) infile.close() class SendStartCommand(Command): def execute(self): filename = self.isolateFilename(self.args) print "sending start " + filename self.client.send("start " + filename) class SendStopCommand(Command): def execute(self): print "sending stop" self.client.send("stop") class SendKillCommand(Command): def execute(self): print "killing " + self.args self.client.send("kill " + self.args) class ReceiveDeviceStatusCommand(Command): def execute(self): scene = mainWidgets["canvas"].scene() tm = mainWidgets["tm"] device, pid, status = self.args.split(" ", 2) name = device if device.find("WAP") == 0: name = "Wireless_access_point_" + device.split("_")[-1] item = scene.findItem(name) if item is not None: item.setStatus(status) tm.update(device, pid, status) class ReceiveWirelessStatsCommand(Command): def execute(self): name, stats = self.args.split(" ", 1) 
scene = mainWidgets["canvas"].scene() scene.findItem(name).setWirelessStats(stats) class ReceiveRouterStatsCommand(Command): def execute(self): name, queue, size, rate = self.args.split(" ", 3) scene = mainWidgets["canvas"].scene() scene.findItem(name).setRouterStats(queue, size, rate) class ReceiveWiresharkCaptureCommand(Command): def execute(self): name, capture = self.args.split(" ", 1) outfile = environ["tmp"] + name + ".out" fd = open(outfile, "ab") fd.write(capture) fd.close() commands = \ { "start":SendStartCommand, "stop":SendStopCommand, "path":ReceivePathCommand, "file":SendFileCommand, "status":ReceiveDeviceStatusCommand, "kill":SendKillCommand, "wstats":ReceiveWirelessStatsCommand, "rstats":ReceiveRouterStatsCommand, "wshark":ReceiveWiresharkCaptureCommand } client = None if __name__ == "__main__": app = QtCore.QCoreApplication(sys.argv) client.connectTo("localhost", 9000) text = raw_input("gclient> ") while text: client.send(text) text = raw_input("gclient> ")
[ "muntasirc@ymail.com" ]
muntasirc@ymail.com
bb7047e09f1da2f26a09f946bc13a583c154c85e
daa336baa046f367d8ac230a487d2a99718977da
/Test.py
ee099227214b06393e42d5ebd2f1c8d11e9d28ca
[]
no_license
AllenOris/Fraction-Practice-
a3736b93fdd2481d067e92a2ba1a80aaa9bd6045
c8bb40bf2800e0679f1f160c67dac86142eb8a64
refs/heads/master
2020-03-18T17:47:38.860054
2018-05-27T13:55:51
2018-05-27T13:55:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
677
py
# -*- coding: utf-8 -*- """ Created on Fri May 25 20:31:02 2018 @author: ASUS """ import fraction as frac import os print(help(frac)) a=frac.make_frac(3,5) b=frac.make_frac(7,10) print("a=",end='') a.show_frac() print("b=",end='') b.show_frac() print() c=a+b print("a+b=",end='') c.show_frac() print() c=a+1 print("a+1=",end='') c.show_frac() print() c=a-b print("a-b=",end='') c.show_frac() print() c=a*b print("a*b=",end='') c.show_frac() print() c=a/b print("a/b=",end='') c.show_frac() print() c=1/a print("1/a=",end='') c.show_frac() print() c=a**3 print("a**3=",end='') c.show_frac() print() c=a**(-3) print("a**(-3)=",end='') c.show_frac() os.system("pause")
[ "34499426+AllenTaken@users.noreply.github.com" ]
34499426+AllenTaken@users.noreply.github.com
e6db939e0e2f41b8c0888ff7175baf6c641ce956
b3d37948d29d0867f6869f2cf7db0b30448e0387
/products/models.py
dae7b254c326614498380efffa5ff0219729a99c
[]
no_license
Code-Institute-Submissions/happyhomeplants
9335947151bd30d14f5366371425c8d2ec333e92
0fa618ace3fd4c0f3b6db57f784630c66e898fc4
refs/heads/master
2023-02-12T07:20:50.961817
2021-01-14T23:11:54
2021-01-14T23:11:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,118
py
from django.db import models # Create your models here. class Category(models.Model): class Meta: verbose_name_plural = "Categories" name = models.CharField(max_length=254) friendly_name = models.CharField(max_length=254, null=True, blank=True) def __str__(self): return self.name def get_friendly_name(self): return self.friendly_name class Product(models.Model): category = models.ForeignKey( 'Category', null=True, blank=True, on_delete=models.SET_NULL) name = models.CharField(max_length=254) friendly_name = models.CharField(max_length=254, null=True, blank=True) alternate_name = models.CharField(max_length=254, blank=True) price = models.DecimalField(max_digits=6, decimal_places=2) description = models.TextField() image = models.ImageField(null=True, blank=True) image_url = models.URLField(max_length=1024, null=True, blank=True) height = models.CharField(max_length=4, null=True, blank=True) def __str__(self): return self.name def get_friendly_name(self): return self.friendly_name
[ "olivia.tatum1@hotmail.com" ]
olivia.tatum1@hotmail.com
29e4dff45bfb46a9f8d519ac80d90c94db86362e
6f0fd7cc158c3a5be6fd7035f0682ba70f68e682
/player.py
379f1128e5a72e52301b5af3121f522567231da0
[ "MIT" ]
permissive
KWeselski/pygame_paper_soccer
15d63c93c7bc06367d7fc165bcd0d47489d1a76c
da6dc8768b63b8299c90610b933520ed389480a8
refs/heads/master
2023-01-13T06:52:08.866036
2020-11-07T15:55:31
2020-11-07T15:55:31
288,472,349
3
0
null
null
null
null
UTF-8
Python
false
false
201
py
class Player(): """ """ def __init__(self,name,color,points=0,turn=False): self.name = name self.color = color self.turn = turn self.points = points
[ "weselski.kamil@gmail.com" ]
weselski.kamil@gmail.com
89d9689620e4473459bf4e9f98d76232622ea3b7
7aad0c6f6e578d8dc03682caae373d252328ce12
/linuxFriends/wsgi.py
83e863cee4d76a6fe3c98f46ed0e6939c2eef947
[]
no_license
votricetanyi/linuxfriends
db00544a04bed1cb99a3fe275433d6278e029bb9
f36c7f87f51ee1f585c8da21de08a874582dd51f
refs/heads/main
2022-12-28T20:14:11.053726
2020-10-14T13:05:12
2020-10-14T13:05:12
304,015,872
0
0
null
null
null
null
UTF-8
Python
false
false
401
py
""" WSGI config for linuxFriends project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'linuxFriends.settings') application = get_wsgi_application()
[ "lug.limbe@gmail.com" ]
lug.limbe@gmail.com
48e9f0ea3cd43d3ec2a9ca4f863703823e7e0e83
ca37265079432d8c9b6ad1171a40a7739ca0c738
/src/haskell/dm/tools/plot_tuning.py
ca6d2c011b3edfcf8b76deaa93b03a37013969de
[]
no_license
steven-le-thien/dm
e2749226f790fb52dee36db42a2c1541aa7addd9
4dc692a4952fa67f521ff7b6d174bf062984e259
refs/heads/master
2023-01-23T17:12:09.211693
2020-12-04T13:21:40
2020-12-04T13:21:40
304,448,212
1
0
null
null
null
null
UTF-8
Python
false
false
571
py
import numpy as np import matplotlib.pyplot as plt thresh = np.array([0.01, 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.75,0.8,0.825,0.85,0.9,1,1.1]) error = np.array([0.991960, 0.985930,0.968844,0.970854,0.958794,0.959799,0.958794,0.946734,0.950754,0.942714,0.944724,0.944724,0.959799,0.952764,0.953769]) error_inc = np.ones(len(thresh)) * 0.912563 plt.plot(thresh, error,thresh, error_inc) plt.xlabel('Threshold for deterministic dm') plt.ylabel('FN error') plt.legend(['deterministic dm', 'inc']) plt.savefig('dna_tuning.png', dpi=300, bbox_inches='tight') plt.show()
[ "thienle@dhcp-10-29-216-161.dyn.MIT.EDU" ]
thienle@dhcp-10-29-216-161.dyn.MIT.EDU
5ac051603f345727cdb55cb9fe49450153592d95
3f8e34f0ccf59aae44acfc192fab476f1ae3bb74
/stor/types/blockchain_format/sub_epoch_summary.py
7a8e12c7689d70e5f4c5038d3d8538a65c59e37e
[ "Apache-2.0" ]
permissive
chia-os/stor-blockchain
9952b5ba78480cf0c71dc4ad053bd0d28d39eee7
3fe6268263e2db98970edc296d2e4c53694aafd0
refs/heads/master
2023-08-11T20:03:53.467778
2021-09-15T07:28:39
2021-09-15T07:28:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
642
py
from dataclasses import dataclass from typing import Optional from stor.types.blockchain_format.sized_bytes import bytes32 from stor.util.ints import uint8, uint64 from stor.util.streamable import Streamable, streamable @dataclass(frozen=True) @streamable class SubEpochSummary(Streamable): prev_subepoch_summary_hash: bytes32 reward_chain_hash: bytes32 # hash of reward chain at end of last segment num_blocks_overflow: uint8 # How many more blocks than 384*(N-1) new_difficulty: Optional[uint64] # Only once per epoch (diff adjustment) new_sub_slot_iters: Optional[uint64] # Only once per epoch (diff adjustment)
[ "info@stor.network" ]
info@stor.network
c36bbaecc135fbedb166bd0e40448f95378358d8
40b2a6fc0efdec3a20dacf403215b659aac0bdaf
/tests/async/test_pdf.py
8e414ad9f9c07c5ecb487a5750e611e069c3eac5
[ "Apache-2.0" ]
permissive
jaywonder20/playwright-python
0ff4e652507129f8f11e8d0663e2b28622bc6d7f
759eec817fcd54435869d29c9fc665b20d1b2abe
refs/heads/master
2022-12-03T02:47:27.928189
2020-08-08T15:41:42
2020-08-08T15:41:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
916
py
# Copyright (c) Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pathlib import Path import pytest from playwright.async_api import Page @pytest.mark.only_browser("chromium") async def test_should_be_able_to_save_pdf_file(page: Page, server, tmpdir: Path): output_file = tmpdir / "foo.png" await page.pdf(path=str(output_file)) assert os.path.getsize(output_file) > 0
[ "noreply@github.com" ]
noreply@github.com
d47b3f7b60759dc497f19df286cef74f591ed67e
cb227afa841c0e2e535b6f19d70e870cfba77b47
/mangatools/config.py
1446e7d95f59229b97b26e916a220f08ca763baf
[]
no_license
cjhang/ENLR
8d1db368f2e12fbce2be54d2c2283753782dca3a
44ec3c4a3144861aa5f7a095a54fdd960cdae06c
refs/heads/master
2021-06-16T16:13:38.463704
2021-03-11T09:18:23
2021-03-11T09:18:23
178,145,347
2
2
null
null
null
null
UTF-8
Python
false
false
606
py
#!/usr/bin/env python # -*- coding: utf-8 -*- import os ESP = 1e-8 # the error release = 'MPL-8' print("Data relese: {}, configure: mangatools/config.py".format(release)) # Data release MPL-7 if release == 'MPL-7': DRP_VERSION = 'v2_4_3' # the default drp version DAP_VERSION = '2.2.1' elif release == 'MPL-8': # MPL-8 DRP_VERSION = 'v2_5_3' DAP_VERSION = '2.3.0' PRODOCTS = 'HYB10' ## Data directory, you can direct change specific path after import this module SAS = os.getenv('SAS_BASE_DIR', default=os.path.expanduser('~')+'/SAS') print("Global SAS directory is {0}".format(SAS))
[ "chenjianhang2010@gmail.com" ]
chenjianhang2010@gmail.com
7ac1f99256fe5e01d0138af7de5f49cb96909e41
33702a58845528e0119f453e3d3e2a245fba64e2
/FCNClsSegModel/eval_no_mmd.py
dc256a3272e44ecf8779579f4a8a5d4bd8f2c129
[ "MIT" ]
permissive
PengyiZhang/DRR4Covid
cb3ea2f6a8178eaebf8bf6b1f9ada293ca90491b
653e656620ffba6fff2aab7263fe6036301adab8
refs/heads/master
2023-01-02T16:29:58.421518
2020-10-27T13:12:14
2020-10-27T13:12:14
289,850,411
9
2
null
null
null
null
UTF-8
Python
false
false
22,975
py
# coding: utf-8 """ """ import torch import torch.optim as optim import torch.nn as nn import os import time import copy import numpy as np import torch.nn.functional as F from tensorboardX import SummaryWriter from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_auc_score, f1_score from visual_confuse_matrix import make_confusion_matrix from dataset import genDataset, genExtraForEvalDataset from model import SegClsModule from sklearn.metrics import cohen_kappa_score import argparse import logging import os import sys import torchvision.transforms as transforms import cv2 import numpy as np import math import random import yaml from pathlib import Path from loss import Weighted_Jaccard_loss from utils import dice_coef, probs2one_hot def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.backends.cudnn.deterministic = True def setup_logger(name, save_dir, distributed_rank, filename="log.txt"): """terminal and log file name: application information save_dir: log dir distributed_rank: only host 0 can generate log filename: log file name """ logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) # don't log results for the non-master process if distributed_rank > 0: return logger ch = logging.StreamHandler(stream=sys.stdout) ch.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") ch.setFormatter(formatter) logger.addHandler(ch) if save_dir: fh = logging.FileHandler(os.path.join(save_dir, filename)) fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) logger.addHandler(fh) return logger def set_visible_gpu(gpu_idex): """ to control which gpu is visible for CUDA user set_visible_gpu(1) print(os.environ["CUDA_DEVICE_ORDER"]) print(os.environ["CUDA_VISIBLE_DEVICES"]) """ os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "{0}".format(gpu_idex) def get_results(val_labels, val_outs, 
val_probs, save_cf_png_dir, save_metric_dir): # first for probs AUC_score = roc_auc_score(val_labels, val_probs) F1_score = f1_score(val_labels, val_outs) CM = confusion_matrix(val_labels, val_outs) labels = ['True Neg','False Pos','False Neg','True Pos'] categories = ['0', '1'] make_confusion_matrix(CM, group_names=labels, categories=categories, cmap='Blues',save_dir=save_cf_png_dir) #make_confusion_matrix(CM, figsize=(8,6), cbar=False) TN = CM[0][0] FN = CM[1][0] TP = CM[1][1] FP = CM[0][1] # Sensitivity, hit rate, recall, or true positive rate TPR = TP/(TP+FN) # Specificity or true negative rate TNR = TN/(TN+FP) # Precision or positive predictive value PPV = TP/(TP+FP) # Negative predictive value NPV = TN/(TN+FN) # Fall out or false positive rate FPR = FP/(FP+TN) # False negative rate FNR = FN/(TP+FN) # False discovery rate FDR = FP/(TP+FP) # Overall accuracy ACC = (TP+TN)/(TP+FP+FN+TN) result_str = "Sensitivity=%.3f, Specificity=%.3f, PPV=%.3f, NPV=%.3f, FPR=%.3f, FNR=%.3f, FDR=%.3f, ACC=%.3f, AUC=%.3f, F1_score=%.3f\n" % (TPR, TNR, PPV, NPV, FPR, FNR, FDR, ACC, AUC_score, F1_score) save_dir = save_metric_dir with open(save_dir, "a+") as f: f.writelines([result_str]) return result_str def eval_model(model, dataloaders, log_dir="./log/", logger=None, opt=None): since = time.time() if False:#opt.do_seg: # eval lung segmentation logger.info("-"*8+"eval lung segmentation"+"-"*8) model.eval() all_dices = [] all_dices_au = [] for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_lung_seg_val"], 0): annotation = dataloaders["tgt_lung_seg_val"].dataset.annotations[batch_idx] img_dir = annotation.strip().split(',')[0] img_name = Path(img_dir).name inputs = inputs.to(device) # adjust labels labels[labels==opt.xray_mask_value_dict["lung"]] = 1 labels = labels[:,-1].to(device) labels = torch.stack([labels == c for c in range(2)], dim=1) with torch.set_grad_enabled(False): if opt.use_aux: _, _, seg_logits, _, seg_logits_au = model(inputs) else: _, _, seg_logits, _, 
_ = model(inputs) seg_probs = torch.softmax(seg_logits, dim=1) predicted_mask = probs2one_hot(seg_probs.detach()) # change the infection to Lung predicted_mask_lung = predicted_mask[:,:-1] predicted_mask_lung[:,-1] += predicted_mask[:,-1] dices = dice_coef(predicted_mask_lung, labels.detach().type_as(predicted_mask)).cpu().numpy() all_dices.append(dices) # [(B,C)] predicted_mask_lung = predicted_mask_lung.squeeze().cpu().numpy() # 3xwxh mask_inone = (np.zeros_like(predicted_mask_lung[0])+predicted_mask_lung[1]*255).astype(np.uint8) # save dir: save_dir = os.path.join(opt.logs, "tgt_lung_seg_val", "eval") # if not os.path.exists(save_dir): os.makedirs(save_dir) cv2.imwrite(os.path.join(save_dir, img_name), mask_inone) ###################################################au if opt.use_aux: seg_probs_au = torch.softmax(seg_logits_au, dim=1) predicted_mask_au = probs2one_hot(seg_probs_au.detach()) # change the infection to Lung predicted_mask_lung_au = predicted_mask_au[:,:-1] predicted_mask_lung_au[:,-1] += predicted_mask_au[:,-1] dices_au = dice_coef(predicted_mask_lung_au, labels.detach().type_as(predicted_mask_au)).cpu().numpy() all_dices_au.append(dices_au) # [(B,C)] predicted_mask_lung_au = predicted_mask_lung_au.squeeze().cpu().numpy() # 3xwxh mask_inone_au = (np.zeros_like(predicted_mask_lung_au[0])+predicted_mask_lung_au[1]*255).astype(np.uint8) # save dir: save_dir_au = os.path.join(opt.logs, "tgt_lung_seg_val_au", "eval") # if not os.path.exists(save_dir_au): os.makedirs(save_dir_au) cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au) avg_dice = np.mean(np.concatenate(all_dices, 0), 0) # logger.info("tgt_lung_seg_val:[%d/%d],dice0:%.03f,dice1:%.03f,dice:%.03f" % (batch_idx, len(dataloaders['tgt_lung_seg_val'].dataset)//inputs.shape[0], avg_dice[0], avg_dice[1], np.mean(np.concatenate(all_dices, 0)))) if opt.use_aux: avg_dice_au = np.mean(np.concatenate(all_dices_au, 0), 0) # 
logger.info("tgt_lung_seg_val_au:[%d/%d],dice0:%.03f,dice1:%.03f,dice:%.03f" % (batch_idx, len(dataloaders['tgt_lung_seg_val'].dataset)//inputs.shape[0], avg_dice_au[0], avg_dice_au[1], np.mean(np.concatenate(all_dices_au, 0)))) if True: # eval infection segmentation and cls logger.info("-"*8+"eval infection cls"+"-"*8) model.eval() val_gt = [] val_cls_pred = [] val_cls_probs = [] # for VOC val_seg_pred = [] val_seg_probs = [] # for VOC val_seg_probs_au = [] val_seg_pred_au = [] # for VOC for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_cls_val"], 0): inputs = inputs.to(device) # adjust label val_gt.append(labels.cpu().data.numpy()) with torch.set_grad_enabled(False): annotation = dataloaders["tgt_cls_val"].dataset.annotations[batch_idx] img_dir = annotation.strip().split(',')[0] img_name = Path(img_dir).name if opt.use_aux: cls_logits, _, seg_logits, _, seg_logits_au = model(inputs) else: cls_logits, _, seg_logits, _, _ = model(inputs) if opt.do_seg: seg_probs = torch.softmax(seg_logits, dim=1) val_seg_probs.append(seg_probs[:,-1:].detach().cpu().view(seg_probs.shape[0], 1, -1).max(-1)[0]) predicted_mask_onehot = probs2one_hot(seg_probs.detach()) # for save predicted_mask = predicted_mask_onehot.squeeze().cpu().numpy() # 3xwxh mask_inone = (np.zeros_like(predicted_mask[0])+predicted_mask[1]*128+predicted_mask[2]*255).astype(np.uint8) # save dir: save_dir = os.path.join(opt.logs, "tgt_cls_val", "eval") # if not os.path.exists(save_dir): os.makedirs(save_dir) cv2.imwrite(os.path.join(save_dir, img_name), mask_inone) # seg2cls preds_cls_seg = (predicted_mask_onehot[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8) val_seg_pred.append(preds_cls_seg) if opt.do_seg and opt.use_aux: seg_probs_au = torch.softmax(seg_logits_au, dim=1) val_seg_probs_au.append(seg_probs_au[:,-1:].detach().cpu().view(seg_probs_au.shape[0], 1, -1).max(-1)[0]) predicted_mask_onehot_au = probs2one_hot(seg_probs_au.detach()) # for save predicted_mask_au = 
predicted_mask_onehot_au.squeeze().cpu().numpy() # 3xwxh mask_inone_au = (np.zeros_like(predicted_mask_au[0])+predicted_mask_au[1]*128+predicted_mask_au[2]*255).astype(np.uint8) # save dir: save_dir_au = os.path.join(opt.logs, "tgt_cls_val_au", "eval") # if not os.path.exists(save_dir_au): os.makedirs(save_dir_au) cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au) # seg2cls preds_cls_seg_au = (predicted_mask_onehot_au[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8) val_seg_pred_au.append(preds_cls_seg_au) # cls #print(cls_logits) if opt.do_cls: probs_cls = torch.softmax(cls_logits, dim=1) val_cls_probs.append(probs_cls[...,1:].detach().cpu().numpy()) preds_cls = (probs_cls[...,1:] > 0.5).type(torch.long) val_cls_pred.append(preds_cls.cpu().data.numpy()) if not os.path.exists(os.path.join(opt.logs, "cf")): os.makedirs(os.path.join(opt.logs, "cf")) val_gt = np.concatenate(val_gt, axis=0) if opt.do_cls: val_cls_pred = np.concatenate(val_cls_pred, axis=0) val_cls_probs = np.concatenate(val_cls_probs, axis=0) save_cf_png_dir = os.path.join(opt.logs, "cf", "eval_cls_cf.png") save_metric_dir = os.path.join(opt.logs, "eval_metric_cls.txt") result_str = get_results(val_gt, val_cls_pred, val_cls_probs, save_cf_png_dir, save_metric_dir) logger.info("tgt_cls_val:[cls]: %s" % (result_str)) if opt.do_seg: val_seg_pred = np.concatenate(val_seg_pred, axis=0) val_seg_probs = np.concatenate(val_seg_probs, axis=0) # seg2cls save_cf_png_dir = os.path.join(opt.logs, "cf", "eval_seg_cf.png") save_metric_dir = os.path.join(opt.logs, "eval_metric_seg.txt") result_str = get_results(val_gt, val_seg_pred, val_seg_probs, save_cf_png_dir, save_metric_dir) logger.info("tgt_seg_val:[seg2cls]: %s" % (result_str)) if opt.do_seg and opt.use_aux: val_seg_pred_au = np.concatenate(val_seg_pred_au, axis=0) val_seg_probs_au = np.concatenate(val_seg_probs_au, axis=0) # seg2cls save_cf_png_dir_au = os.path.join(opt.logs, "cf", "eval_seg_au_cf.png") save_metric_dir_au = 
os.path.join(opt.logs, "eval_metric_seg_au.txt") result_str_au = get_results(val_gt, val_seg_pred_au, val_seg_probs_au, save_cf_png_dir_au, save_metric_dir_au) logger.info("tgt_seg_au_val:[seg2cls]: %s" % (result_str_au)) time_elapsed = time.time() - since logger.info("Eval complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60)) def extra_eval_model(model, dataloaders, log_dir="./log/", logger=None, opt=None): since = time.time() if True: # eval infection segmentation and cls logger.info("-"*8+"extra eval infection cls"+"-"*8) model.eval() val_gt = [] val_cls_pred = [] val_cls_probs = [] # for VOC val_seg_pred = [] val_seg_probs = [] # for VOC val_seg_probs_au = [] val_seg_pred_au = [] # for VOC for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_cls_extra_val"], 0): inputs = inputs.to(device) # adjust label val_gt.append(labels.cpu().data.numpy()) with torch.set_grad_enabled(False): annotation = dataloaders["tgt_cls_extra_val"].dataset.annotations[batch_idx] img_dir = annotation.strip().split(',')[0] img_name = Path(img_dir).name if opt.use_aux: cls_logits, _, seg_logits, _, seg_logits_au = model(inputs) else: cls_logits, _, seg_logits, _, _ = model(inputs) if opt.do_seg: seg_probs = torch.softmax(seg_logits, dim=1) val_seg_probs.append(seg_probs[:,-1:].detach().cpu().view(seg_probs.shape[0], 1, -1).max(-1)[0]) predicted_mask_onehot = probs2one_hot(seg_probs.detach()) # for save predicted_mask = predicted_mask_onehot.squeeze().cpu().numpy() # 3xwxh mask_inone = (np.zeros_like(predicted_mask[0])+predicted_mask[1]*128+predicted_mask[2]*255).astype(np.uint8) # save dir: save_dir = os.path.join(opt.logs, "tgt_cls_extra_val", "eval") # if not os.path.exists(save_dir): os.makedirs(save_dir) cv2.imwrite(os.path.join(save_dir, img_name), mask_inone) # seg2cls preds_cls_seg = (predicted_mask_onehot[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8) val_seg_pred.append(preds_cls_seg) if opt.do_seg and opt.use_aux: seg_probs_au = 
torch.softmax(seg_logits_au, dim=1) val_seg_probs_au.append(seg_probs_au[:,-1:].detach().cpu().view(seg_probs_au.shape[0], 1, -1).max(-1)[0]) predicted_mask_onehot_au = probs2one_hot(seg_probs_au.detach()) # for save predicted_mask_au = predicted_mask_onehot_au.squeeze().cpu().numpy() # 3xwxh mask_inone_au = (np.zeros_like(predicted_mask_au[0])+predicted_mask_au[1]*128+predicted_mask_au[2]*255).astype(np.uint8) # save dir: save_dir_au = os.path.join(opt.logs, "tgt_cls_extra_val_au", "eval") # if not os.path.exists(save_dir_au): os.makedirs(save_dir_au) cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au) # seg2cls preds_cls_seg_au = (predicted_mask_onehot_au[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8) val_seg_pred_au.append(preds_cls_seg_au) # cls #print(cls_logits) if opt.do_cls: probs_cls = torch.softmax(cls_logits, dim=1) val_cls_probs.append(probs_cls[...,1:].detach().cpu().numpy()) preds_cls = (probs_cls[...,1:] > 0.5).type(torch.long) val_cls_pred.append(preds_cls.cpu().data.numpy()) if not os.path.exists(os.path.join(opt.logs, "cf")): os.makedirs(os.path.join(opt.logs, "cf")) val_gt = np.concatenate(val_gt, axis=0) if opt.do_cls: val_cls_pred = np.concatenate(val_cls_pred, axis=0) val_cls_probs = np.concatenate(val_cls_probs, axis=0) save_cf_png_dir = os.path.join(opt.logs, "cf", "extra_eval_cls_cf.png") save_metric_dir = os.path.join(opt.logs, "extra_eval_metric_cls.txt") result_str = get_results(val_gt, val_cls_pred, val_cls_probs, save_cf_png_dir, save_metric_dir) logger.info("tgt_cls_extra_val:[cls]: %s" % (result_str)) if opt.do_seg: val_seg_pred = np.concatenate(val_seg_pred, axis=0) val_seg_probs = np.concatenate(val_seg_probs, axis=0) # seg2cls save_cf_png_dir = os.path.join(opt.logs, "cf", "extra_eval_seg_cf.png") save_metric_dir = os.path.join(opt.logs, "extra_eval_metric_seg.txt") result_str = get_results(val_gt, val_seg_pred, val_seg_probs, save_cf_png_dir, save_metric_dir) logger.info("tgt_seg_extra_val:[seg2cls]: %s" % 
(result_str)) if opt.do_seg and opt.use_aux: val_seg_pred_au = np.concatenate(val_seg_pred_au, axis=0) val_seg_probs_au = np.concatenate(val_seg_probs_au, axis=0) # seg2cls save_cf_png_dir_au = os.path.join(opt.logs, "cf", "extra_eval_seg_au_cf.png") save_metric_dir_au = os.path.join(opt.logs, "extra_eval_metric_seg_au.txt") result_str_au = get_results(val_gt, val_seg_pred_au, val_seg_probs_au, save_cf_png_dir_au, save_metric_dir_au) logger.info("tgt_seg_au_extra_val:[seg2cls]: %s" % (result_str_au)) time_elapsed = time.time() - since logger.info("Extra_Eval complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60)) def get_argument(): parser = argparse.ArgumentParser() parser.add_argument('--config', default="./cfgs/experiment.yaml", type=str) parser.add_argument('--setseed', default=2020, type=int) opt = parser.parse_args() with open(opt.config) as f: config = yaml.load(f) for k, v in config['common'].items(): setattr(opt, k, v) # repalce experiment opt.experiment = opt.experiment.replace("only", "seg") opt.seg_augment = True opt.cls_augment = True opt.do_cls_mmd = False opt.do_seg = True opt.do_cls = True opt.do_seg_mmd = False opt.eval_cls_times = 50 opt.eval_times = 50 opt.random_seed = opt.setseed selected_drr_datasets_indexes = np.array(opt.selected_drr_datasets_indexes+opt.selected_drr_datasets_indexes) #print(selected_drr_datasets_indexes) # # [[0, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1]] print(selected_drr_datasets_indexes[-1][-1]) selected_drr_datasets_indexes[2][-1] = 1 selected_drr_datasets_indexes[3][-1] = 1 opt.selected_drr_datasets_indexes = [list(_) for _ in list(selected_drr_datasets_indexes)] opt.logs = f"log/logs_experiment04_r{opt.setseed}" log_dir = "./{}/{}/".format(opt.logs, opt.experiment) if not os.path.exists(log_dir): os.makedirs(log_dir) opt.logs = log_dir return opt if __name__ == "__main__": opt = get_argument() os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(opt.gpuid) 
setup_seed(opt.random_seed) assert opt.mode == 12, ("opt.mode is not supported in %s" % __file__) log_dir = opt.logs logger = setup_logger("{}".format(os.path.basename(__file__).split(".")[0]), save_dir=opt.logs, distributed_rank=0, filename="log_eval.txt") logger.info(opt) batch_size = opt.batch_size num_epochs = opt.num_epochs use_pretrained = True device_name = "cuda" if torch.cuda.is_available() else "cpu" device = torch.device(device_name) model_ft = SegClsModule(opt) train_dataset, tgt_cls_train_dataset, tgt_cls_val_dataset, tgt_lung_seg_val_dataset = genDataset(opt) tgt_cls_extra_val_dataset = genExtraForEvalDataset(opt) logger.info("-"*8+"train:"+"-"*8) logger.info(train_dataset.annotations) logger.info("-"*8+"tgt_cls_train:"+"-"*8) logger.info(tgt_cls_train_dataset.annotations) logger.info("-"*8+"tgt_cls_val:"+"-"*8) logger.info(tgt_cls_val_dataset.annotations) logger.info("-"*8+"tgt_cls_extra_val:"+"-"*8) logger.info(tgt_cls_extra_val_dataset.annotations) image_datasets = {'train': train_dataset, 'tgt_cls_train': tgt_cls_train_dataset, 'tgt_cls_val': tgt_cls_val_dataset, 'tgt_cls_extra_val': tgt_cls_extra_val_dataset, "tgt_lung_seg_val": tgt_lung_seg_val_dataset} shuffles = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, 'tgt_cls_extra_val': False, "tgt_lung_seg_val": False} batch_sizes_dict = {"train": batch_size,'tgt_cls_train': batch_size, 'tgt_cls_val': 1, 'tgt_cls_extra_val': 1, "tgt_lung_seg_val": 1} drop_lasts = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, 'tgt_cls_extra_val': False, "tgt_lung_seg_val": False} number_worker_dict = {"train": 4,'tgt_cls_train': 4, 'tgt_cls_val': 0, 'tgt_cls_extra_val': 0, "tgt_lung_seg_val": 0} dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_sizes_dict[x], shuffle=shuffles[x], num_workers=number_worker_dict[x], drop_last=drop_lasts[x]) for x in ['train', 'tgt_cls_train', 'tgt_cls_val', 'tgt_cls_extra_val', "tgt_lung_seg_val"]} # Send the model to GPU 
weight_path = os.path.join(log_dir, "latest.pth") model_ft.load_state_dict(torch.load(weight_path)) model_ft = model_ft.to(device) model_ft.eval() eval_model(model_ft, dataloaders_dict, log_dir=log_dir, logger=logger, opt=opt) extra_eval_model(model_ft, dataloaders_dict, log_dir=log_dir, logger=logger, opt=opt)
[ "zhangpybit@gmail.com" ]
zhangpybit@gmail.com
ff4bb40fe622a4b1834de724771603345ccc0dd6
5a42723328f46877a2b0d2535b4e28b41b537804
/cony/cony/urls.py
a608d548f1646afce150a29c4748305f78ace1c6
[]
no_license
icortes74/cony
f7b93e8d722e3c8d1394208855d3e1d9b2bdc703
ff0dba82ef0261ef51f4e37b7ba9a055b6b3d752
refs/heads/master
2020-12-28T04:33:03.767781
2017-02-08T03:24:12
2017-02-08T03:24:12
68,744,218
0
0
null
null
null
null
UTF-8
Python
false
false
813
py
"""cony URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import include,url from django.contrib import admin urlpatterns = [ url(r'^conyApp/',include('conyApp.urls')), url(r'^admin/', admin.site.urls), ]
[ "jfernandez@ifk.cl" ]
jfernandez@ifk.cl
549c77af99c9fb9e7af4ac9d3708ade1b4dbe720
3d35711600253ceda2601f61afaaddbebb0ec507
/Finite_Polygonal_Parametrized/V2-PointForce/RunNoDiag.py
010e97c3b13a86aa81bb93a4f6e1520e722dcc74
[]
no_license
matheuscfernandes/hexactinellidsponge
ca4b7cd6d1bd7d4942bba01305ebcfb0b65e977c
da9bbc6a72ee2e050f9a6a454d775a3b63e4ae92
refs/heads/master
2023-01-12T21:20:36.565182
2017-10-26T17:56:30
2017-10-26T17:56:30
92,839,896
0
0
null
null
null
null
UTF-8
Python
false
false
393
py
# Python 2 batch driver: sweep polygon geometries with NO diagonal members.
import numpy as np

# Feature flags consumed by AnalysisV1C.py: every diagonal-reinforcement
# variant is disabled, so only the diagonal-free lattice is simulated.
oneDiag=False
twoDiag=False
fullDiag=False

# Python 2 idiom: pulls RunSimulation (and its solver setup) directly into
# this module's namespace.
execfile('AnalysisV1C.py')

# Sweep number of polygon sides (3..20) and systems per side (1..5),
# appending each job's results to a shared output file.  The file is
# reopened per job so partial results survive a crashed run.
for NUMBEROFSIDES in xrange(3,21):
    for NUMBEROFSYSTEMSPERSIDE in xrange(1,6):
        print "Running Job: ",(NUMBEROFSIDES,NUMBEROFSYSTEMSPERSIDE)
        FileWrite=open('NoDiag_Output.txt', 'a+')
        # second argument (12) is a fixed parameter of RunSimulation;
        # its meaning is defined in AnalysisV1C.py — not visible here.
        RunSimulation(NUMBEROFSIDES,12,NUMBEROFSYSTEMSPERSIDE,FileWrite)
        FileWrite.close()
[ "matheuscfernandes@gmail.com" ]
matheuscfernandes@gmail.com
2453c92faff2465714915000008b0ee83e8a551f
723e8c47de245431fd3c5750b306e782ace0f11f
/Week02/Assignment/[590]N叉树的后序遍历.py
62148e75a9467b14df543c7f47f0e5f9de123e24
[]
no_license
xiaojiangzhang/algorithm010
685a13849ac8de20b56551e40c213167964e602c
521a27b504b8f404478760ae2f6143e7f8d437f5
refs/heads/master
2022-12-07T08:26:33.125978
2020-08-22T14:55:43
2020-08-22T14:55:43
270,485,997
0
0
null
2020-06-08T01:27:54
2020-06-08T01:27:54
null
UTF-8
Python
false
false
1,074
py
# Given an N-ary tree, return the postorder traversal of its nodes' values.
#
# Example: for the 3-ary tree rooted at 1 with children [3, 2, 4], where
# node 3 has children [5, 6], the postorder traversal is [5, 6, 3, 2, 4, 1].
#
# Note: the recursive solution is trivial; this one is iterative.
# Related Topics: Tree

# leetcode submit region begin(Prohibit modification and deletion)
"""
# Definition for a Node.
class Node(object):
    def __init__(self, val=None, children=None):
        self.val = val
        self.children = children
"""


class Solution(object):
    def postorder(self, root):
        """Iterative postorder traversal of an N-ary tree.

        Strategy: produce a "root, then children right-to-left" ordering
        with an explicit stack (push children left-to-right so they pop
        right-to-left), then reverse the collected values — the reverse
        of that ordering is exactly postorder.

        :type root: Node
        :rtype: List[int]  (empty list for an empty tree)
        """
        if not root:
            # BUGFIX: return an empty traversal, not None.
            return []
        stack_run = [root]
        result = []
        while stack_run:
            node = stack_run.pop()
            result.append(node.val)
            # BUGFIX: leaf nodes may carry children=None; treat as no children.
            for child in node.children or []:
                if child:
                    stack_run.append(child)
        # BUGFIX: this reversal was commented out, which made the method
        # return reversed postorder instead of postorder.
        result.reverse()
        return result
# leetcode submit region end(Prohibit modification and deletion)
[ "xiaojiang_719@163.com" ]
xiaojiang_719@163.com
ac70e2f057693341864da24d6890089f8c1d3fdb
58e588aaf090f451251a60097295ec01baa63bb0
/reportlab/graphics/charts/spider.py
a9b23d13b3c69e064cf4de6fbe0ceb94fb8122cb
[]
no_license
alawibaba/loghound
f12fe3d31131ba768bc774ba9722846b02558103
a4399155aac4f3debaf2a66bf72df3a9774229e9
refs/heads/master
2016-09-05T15:31:48.635607
2010-02-01T02:15:35
2010-02-01T02:15:35
32,116,344
0
1
null
null
null
null
UTF-8
Python
false
false
15,784
py
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/spider.py
# spider chart, also known as radar chart
# NOTE(review): this module is Python 2 code (xrange, bare `map`,
# `type('')` checks) — keep that in mind before modernising.

__version__=''' $Id: spider.py 3345 2008-12-12 17:55:22Z damian $ '''
__doc__="""Spider Chart

Normal use shows variation of 5-10 parameters against some 'norm' or target.
When there is more than one series, place the series with the largest
numbers first, as it will be overdrawn by each successive one.
"""

import copy
from math import sin, cos, pi

from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
    isListOfNumbers, isColorOrNone, isString,\
    isListOfStringsOrNone, OneOf, SequenceOf,\
    isBoolean, isListOfColors, isNumberOrNone,\
    isNoneOrListOfNoneOrStrings, isTextAnchor,\
    isNoneOrListOfNoneOrNumbers, isBoxAnchor,\
    isStringOrNone, isStringOrNone, EitherOr,\
    isCallable
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, PolyLine, Ellipse, \
    Wedge, String, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
from piecharts import WedgeLabel
from reportlab.graphics.widgets.markers import makeMarker, uSymbol2Symbol, isSymbol


class StrandProperty(PropHolder):
    """Per-strand (data series) visual style: stroke/fill, optional marker."""

    _attrMap = AttrMap(
        strokeWidth = AttrMapValue(isNumber),
        fillColor = AttrMapValue(isColorOrNone),
        strokeColor = AttrMapValue(isColorOrNone),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
        symbol = AttrMapValue(EitherOr((isStringOrNone,isSymbol)), desc='Widget placed at data points.'),
        symbolSize= AttrMapValue(isNumber, desc='Symbol size.'),
        name = AttrMapValue(isStringOrNone, desc='Name of the strand.'),
        )

    def __init__(self):
        # Defaults: outlined, unfilled strand with no point markers.
        self.strokeWidth = 1
        self.fillColor = None
        self.strokeColor = STATE_DEFAULTS["strokeColor"]
        self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
        self.symbol = None
        self.symbolSize = 5
        self.name = None


class SpokeProperty(PropHolder):
    """Visual style of one radial spoke (axis line) of the chart."""

    _attrMap = AttrMap(
        strokeWidth = AttrMapValue(isNumber),
        fillColor = AttrMapValue(isColorOrNone),
        strokeColor = AttrMapValue(isColorOrNone),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
        labelRadius = AttrMapValue(isNumber),
        visible = AttrMapValue(isBoolean,desc="True if the spoke line is to be drawn"),
        )

    def __init__(self,**kw):
        self.strokeWidth = 0.5
        self.fillColor = None
        self.strokeColor = STATE_DEFAULTS["strokeColor"]
        self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
        self.visible = 1
        # Label sits just outside the spoke end (radius is a fraction of
        # the chart radius).
        self.labelRadius = 1.05


class SpokeLabel(WedgeLabel):
    """Label attached to a spoke; text defaults to empty string."""

    def __init__(self,**kw):
        WedgeLabel.__init__(self,**kw)
        if '_text' not in kw.keys():
            self._text = ''


class StrandLabel(SpokeLabel):
    """Label attached to one data point of a strand."""

    _attrMap = AttrMap(BASE=SpokeLabel,
            format = AttrMapValue(EitherOr((isStringOrNone,isCallable)),"Format for the label"),
            dR = AttrMapValue(isNumberOrNone,"radial shift for label"),
            )

    def __init__(self,**kw):
        self.format = ''
        self.dR = 0
        SpokeLabel.__init__(self,**kw)


def _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty):
    # Build a label of labelClass at polar position (radius, angle) around
    # (cx, cy), copying every visual attribute from the style object `sty`.
    # car/sar are cos(angle)*R and sin(angle)*R precomputed by the caller.
    L = labelClass()
    L._text = text
    L.x = cx + radius*car
    L.y = cy + radius*sar
    L._pmv = angle*180/pi
    L.boxAnchor = sty.boxAnchor
    L.dx = sty.dx
    L.dy = sty.dy
    L.angle = sty.angle
    L.boxAnchor = sty.boxAnchor
    L.boxStrokeColor = sty.boxStrokeColor
    L.boxStrokeWidth = sty.boxStrokeWidth
    L.boxFillColor = sty.boxFillColor
    L.strokeColor = sty.strokeColor
    L.strokeWidth = sty.strokeWidth
    L.leading = sty.leading
    L.width = sty.width
    L.maxWidth = sty.maxWidth
    L.height = sty.height
    L.textAnchor = sty.textAnchor
    L.visible = sty.visible
    L.topPadding = sty.topPadding
    L.leftPadding = sty.leftPadding
    L.rightPadding = sty.rightPadding
    L.bottomPadding = sty.bottomPadding
    L.fontName = sty.fontName
    L.fontSize = sty.fontSize
    L.fillColor = sty.fillColor
    return L


class SpiderChart(PlotArea):
    _attrMap = AttrMap(BASE=PlotArea,
        data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
        labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
        startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
        direction = AttrMapValue( OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
        strands = AttrMapValue(None, desc="collection of strand descriptor objects"),
        spokes = AttrMapValue(None, desc="collection of spoke descriptor objects"),
        strandLabels = AttrMapValue(None, desc="collection of strand label descriptor objects"),
        spokeLabels = AttrMapValue(None, desc="collection of spoke label descriptor objects"),
        )

    def makeSwatchSample(self, rowNo, x, y, width, height):
        # Build a legend swatch for series rowNo: a line (stroke only),
        # a rect (filled), or just the marker symbol.
        baseStyle = self.strands
        styleIdx = rowNo % len(baseStyle)
        style = baseStyle[styleIdx]
        strokeColor = getattr(style, 'strokeColor', getattr(baseStyle,'strokeColor',None))
        fillColor = getattr(style, 'fillColor', getattr(baseStyle,'fillColor',None))
        strokeDashArray = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
        strokeWidth = getattr(style, 'strokeWidth', getattr(baseStyle, 'strokeWidth',0))
        symbol = getattr(style, 'symbol', getattr(baseStyle, 'symbol',None))
        ym = y+height/2.0
        if fillColor is None and strokeColor is not None and strokeWidth>0:
            bg = Line(x,ym,x+width,ym,strokeWidth=strokeWidth,strokeColor=strokeColor,
                    strokeDashArray=strokeDashArray)
        elif fillColor is not None:
            bg = Rect(x,y,width,height,strokeWidth=strokeWidth,strokeColor=strokeColor,
                    strokeDashArray=strokeDashArray,fillColor=fillColor)
        else:
            bg = None
        if symbol:
            # NOTE(review): `color` is not defined anywhere in this method —
            # reaching this line raises NameError.  Presumably fillColor was
            # intended; confirm before relying on symbol-only swatches.
            symbol = uSymbol2Symbol(symbol,x+width/2.,ym,color)
            if bg:
                g = Group()
                g.add(bg)
                g.add(symbol)
                return g
        return symbol or bg

    def getSeriesName(self,i,default=None):
        '''return series name i or default'''
        return _objStr(getattr(self.strands[i],'name',default))

    def __init__(self):
        """Initialise with demo data/labels so SpiderChart() draws out of
        the box; callers normally overwrite data, labels and geometry."""
        PlotArea.__init__(self)
        self.data = [[10,12,14,16,14,12], [6,8,10,12,9,11]]
        self.labels = None  # or list of strings
        self.labels = ['a','b','c','d','e','f']
        self.startAngle = 90
        self.direction = "clockwise"
        self.strands = TypedPropertyCollection(StrandProperty)
        self.spokes = TypedPropertyCollection(SpokeProperty)
        self.spokeLabels = TypedPropertyCollection(SpokeLabel)
        self.spokeLabels._text = None
        self.strandLabels = TypedPropertyCollection(StrandLabel)
        self.x = 10
        self.y = 10
        self.width = 180
        self.height = 180

    def demo(self):
        """Return a small Drawing containing a default SpiderChart."""
        d = Drawing(200, 200)
        d.add(SpiderChart())
        return d

    def normalizeData(self, outer = 0.0):
        """Turns data into normalized ones where each datum is < 1.0,
        and 1.0 = maximum radius. Adds 10% at outside edge by default"""
        data = self.data
        assert min(map(min,data)) >=0, "Cannot do spider plots of negative numbers!"
        norm = max(map(max,data))
        norm *= (1.0+outer)
        if norm<1e-9: norm = 1.0
        # Side effect: the scale factor is stashed for _innerDrawLabel.
        self._norm = norm
        return [[e/norm for e in row] for row in data]

    def _innerDrawLabel(self, sty, radius, cx, cy, angle, car, sar, labelClass=StrandLabel):
        "Draw a label for a given item in the list."
        fmt = sty.format
        # radius is normalized (0..1); recover the original data value.
        value = radius*self._norm
        if not fmt:
            text = None
        elif isinstance(fmt,str):
            if fmt == 'values':
                # 'values' means: use the explicitly assigned label text.
                text = sty._text
            else:
                text = fmt % value
        elif callable(fmt):
            text = fmt(value)
        else:
            raise ValueError("Unknown formatter type %s, expected string or function" % fmt)
        if text:
            dR = sty.dR
            if dR:
                # dR is given in points; convert to the normalized scale.
                radius += dR/self._radius
            L = _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty)
            if dR<0: L._anti = 1
        else:
            L = None
        return L

    def draw(self):
        """Assemble and return the chart geometry as a Group, layered
        fill-areas, then outlines, then markers, spokes and labels."""
        # normalize slice data
        g = self.makeBackground() or Group()
        xradius = self.width/2.0
        yradius = self.height/2.0
        self._radius = radius = min(xradius, yradius)
        cx = self.x + xradius
        cy = self.y + yradius
        data = self.normalizeData()
        self._seriesCount = len(data)
        n = len(data[0])
        #labels
        if self.labels is None:
            labels = [''] * n
        else:
            labels = self.labels
            #there's no point in raising errors for less than enough errors if
            #we silently create all for the extreme case of no labels.
            i = n-len(labels)
            if i>0:
                labels = labels + ['']*i
        S = []
        STRANDS = []
        STRANDAREAS = []
        syms = []
        labs = []
        csa = []
        angle = self.startAngle*pi/180
        direction = self.direction == "clockwise" and -1 or 1
        angleBetween = direction*(2 * pi)/float(n)
        spokes = self.spokes
        spokeLabels = self.spokeLabels
        # First pass: spokes and spoke labels; cache (cos*R, sin*R, angle)
        # per spoke for the strand pass below.
        for i in xrange(n):
            car = cos(angle)*radius
            sar = sin(angle)*radius
            csa.append((car,sar,angle))
            si = self.spokes[i]
            if si.visible:
                spoke = Line(cx, cy, cx + car, cy + sar, strokeWidth = si.strokeWidth, strokeColor=si.strokeColor, strokeDashArray=si.strokeDashArray)
            # NOTE(review): `spoke` is only assigned when si.visible — if the
            # very first spoke is invisible this line raises NameError, and
            # later invisible spokes silently reuse the previous Line.
            S.append(spoke)
            sli = spokeLabels[i]
            text = sli._text
            if not text: text = labels[i]
            if text:
                S.append(_setupLabel(WedgeLabel, text, si.labelRadius, cx, cy, angle, car, sar, sli))
            angle += angleBetween

        # now plot the polygons
        rowIdx = 0
        strands = self.strands
        strandLabels = self.strandLabels
        for row in data:
            # series plot
            rsty = strands[rowIdx]
            points = []
            # Seed with the last point so the polygon/polyline closes.
            car, sar = csa[-1][:2]
            r = row[-1]
            points.append(cx+car*r)
            points.append(cy+sar*r)
            for i in xrange(n):
                car, sar, angle = csa[i]
                r = row[i]
                points.append(cx+car*r)
                points.append(cy+sar*r)
                L = self._innerDrawLabel(strandLabels[(rowIdx,i)], r, cx, cy, angle, car, sar, labelClass=StrandLabel)
                if L: labs.append(L)
                sty = strands[(rowIdx,i)]
                uSymbol = sty.symbol

                # put in a marker, if it needs one
                if uSymbol:
                    s_x = cx+car*r
                    s_y = cy+sar*r
                    s_fillColor = sty.fillColor
                    s_strokeColor = sty.strokeColor
                    s_strokeWidth = sty.strokeWidth
                    s_angle = 0
                    s_size = sty.symbolSize
                    if type(uSymbol) is type(''):
                        # Named marker: build it fully configured.
                        symbol = makeMarker(uSymbol,
                                    size = s_size,
                                    x = s_x,
                                    y = s_y,
                                    fillColor = s_fillColor,
                                    strokeColor = s_strokeColor,
                                    strokeWidth = s_strokeWidth,
                                    angle = s_angle,
                                    )
                    else:
                        # Widget instance: clone/position it, then fill in
                        # only the attributes the user left unset.
                        symbol = uSymbol2Symbol(uSymbol,s_x,s_y,s_fillColor)
                        for k,v in (('size', s_size), ('fillColor', s_fillColor),
                                    ('x', s_x), ('y', s_y),
                                    ('strokeColor',s_strokeColor), ('strokeWidth',s_strokeWidth),
                                    ('angle',s_angle),):
                            if getattr(symbol,k,None) is None:
                                try:
                                    setattr(symbol,k,v)
                                except:
                                    pass
                    syms.append(symbol)

            # make up the 'strand'
            if rsty.fillColor:
                strand = Polygon(points)
                strand.fillColor = rsty.fillColor
                strand.strokeColor = None
                strand.strokeWidth = 0
                STRANDAREAS.append(strand)
            if rsty.strokeColor and rsty.strokeWidth:
                strand = PolyLine(points)
                strand.strokeColor = rsty.strokeColor
                strand.strokeWidth = rsty.strokeWidth
                strand.strokeDashArray = rsty.strokeDashArray
                STRANDS.append(strand)
            rowIdx += 1

        # Draw order (back to front): filled areas, outlines, markers,
        # spokes, then labels on top.
        map(g.add,STRANDAREAS+STRANDS+syms+S+labs)
        return g


def sample1():
    "Make a simple spider chart"
    d = Drawing(400, 400)
    sp = SpiderChart()
    sp.x = 50
    sp.y = 50
    sp.width = 300
    sp.height = 300
    sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
    sp.labels = ['a','b','c','d','e','f']
    sp.strands[0].strokeColor = colors.cornsilk
    sp.strands[1].strokeColor = colors.cyan
    sp.strands[2].strokeColor = colors.palegreen
    sp.strands[0].fillColor = colors.cornsilk
    sp.strands[1].fillColor = colors.cyan
    sp.strands[2].fillColor = colors.palegreen
    sp.spokes.strokeDashArray = (2,2)
    d.add(sp)
    return d


def sample2():
    "Make a spider chart with markers, but no fill"
    d = Drawing(400, 400)
    sp = SpiderChart()
    sp.x = 50
    sp.y = 50
    sp.width = 300
    sp.height = 300
    sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
    sp.labels = ['U','V','W','X','Y','Z']
    sp.strands.strokeWidth = 1
    sp.strands[0].fillColor = colors.pink
    sp.strands[1].fillColor = colors.lightblue
    sp.strands[2].fillColor = colors.palegreen
    sp.strands[0].strokeColor = colors.red
    sp.strands[1].strokeColor = colors.blue
    sp.strands[2].strokeColor = colors.green
    sp.strands.symbol = "FilledDiamond"
    sp.strands[1].symbol = makeMarker("Circle")
    sp.strands[1].symbol.strokeWidth = 0.5
    sp.strands[1].symbol.fillColor = colors.yellow
    sp.strands.symbolSize = 6
    sp.strandLabels[0,3]._text = 'special'
    sp.strandLabels[0,1]._text = 'one'
    sp.strandLabels[0,0]._text = 'zero'
    sp.strandLabels[1,0]._text = 'Earth'
    sp.strandLabels[2,2]._text = 'Mars'
    sp.strandLabels.format = 'values'
    sp.strandLabels.dR = -5
    d.add(sp)
    return d


if __name__=='__main__':
    # Render both samples to PDF when run as a script.
    d = sample1()
    from reportlab.graphics.renderPDF import drawToFile
    drawToFile(d, 'spider.pdf')
    d = sample2()
    drawToFile(d, 'spider2.pdf')
[ "dan.lowe.wheeler@42e3ffb8-c440-11de-ba9a-9db95b2bc6c5" ]
dan.lowe.wheeler@42e3ffb8-c440-11de-ba9a-9db95b2bc6c5
af01e76bad2ad7b2ef20f9d099a60ade5e7a1dd2
c15f45103fe76fb0445bb72ec857d4ed5a6c6e5d
/Chapter.2/2.2.3.a.py
defdfccd87fe7e5af3f1878d29d90c6c151bf7ba
[]
no_license
3367472/Python_20180421
5511f5ec54824bb50b25967617f6b532f13c52ad
5ba9e803bd59f02ce101059961752f55f53b6e03
refs/heads/master
2020-03-12T05:09:19.162713
2019-01-08T09:01:42
2019-01-08T09:01:42
130,458,447
0
0
null
null
null
null
UTF-8
Python
false
false
73
py
# encoding: utf-8 print [1, 2, 3] + [4, 5, 6] print 'Hello, ' + 'world!'
[ "wangxu@zdlhcar.com" ]
wangxu@zdlhcar.com
5544135b104e97df280cc069e6aadaaffa1f1c73
97786534fbbc480ea5ac8953ab85385406a78179
/Bootcamp python 42/bootcamp_python/day01/ex02/vector.py
1a767d1ffe5913ec322bdca73d005d752992cd81
[]
no_license
fvega-tr/Python-begins
bc5ebb1f2c6781e4ba4216833642ee1ca9546f21
a4252c8891e9edf4295a0a9ec52f525688f6d8d2
refs/heads/main
2023-01-12T18:38:00.564067
2020-10-20T23:02:16
2020-10-20T23:02:16
305,835,991
0
0
null
null
null
null
UTF-8
Python
false
false
2,435
py
import sys


class Vector():
    """A simple numeric vector with element-wise arithmetic.

    Construction:
      * Vector(n)          -> values [0, 1, ..., n-1]
      * Vector((a, b))     -> values [float(a), ..., float(b - 1)]
      * Vector(sequence)   -> values taken as given

    Every arithmetic operator accepts either an int (applied to every
    component) or a same-length sequence (applied element-wise) and
    returns a new Vector; operands are never mutated.
    """

    def __init__(self, values):
        if isinstance(values, int):
            # Vector(n): integer ramp 0..n-1.
            self.values = list(range(values))
        elif isinstance(values, tuple):
            # Vector((a, b)): float ramp a..b-1.
            self.values = [float(i) for i in range(values[0], values[1])]
        else:
            self.values = values
        # BUGFIX: len was previously len(values) and only set for non-int
        # input, so Vector(5) crashed in every operator and Vector((a, b))
        # got len == 2 regardless of size.  Derive it from the built list.
        self.len = len(self.values)

    def __add__(self, n):
        """self + n (scalar int or same-length sequence)."""
        if isinstance(n, int):
            return Vector([v + n for v in self.values])
        return Vector([self.values[i] + n[i] for i in range(self.len)])

    def __radd__(self, n):
        """n + self; addition is commutative."""
        return self.__add__(n)

    def __sub__(self, n):
        """self - n."""
        if isinstance(n, int):
            return Vector([v - n for v in self.values])
        return Vector([self.values[i] - n[i] for i in range(self.len)])

    def __rsub__(self, n):
        """n - self.  BUGFIX: previously computed self - n."""
        if isinstance(n, int):
            return Vector([n - v for v in self.values])
        return Vector([n[i] - self.values[i] for i in range(self.len)])

    def __truediv__(self, n):
        """self / n; exits with a message on scalar division by zero
        (original contract, preserved)."""
        if n == 0:
            sys.exit("Can't divide by 0")
        if isinstance(n, int):
            return Vector([v / n for v in self.values])
        return Vector([self.values[i] / n[i] for i in range(self.len)])

    def __rtruediv__(self, n):
        """n / self.  BUGFIX: previously computed self / n."""
        if n == 0:
            # Preserved guard from the original implementation.
            sys.exit("Can't divide by 0")
        if isinstance(n, int):
            return Vector([n / v for v in self.values])
        return Vector([n[i] / self.values[i] for i in range(self.len)])

    def __mul__(self, n):
        """self * n (scalar int or element-wise)."""
        if isinstance(n, int):
            return Vector([v * n for v in self.values])
        return Vector([self.values[i] * n[i] for i in range(self.len)])

    def __rmul__(self, n):
        """n * self; multiplication is commutative."""
        return self.__mul__(n)

    def __str__(self):
        return "Vector " + str(self.values)

    def __repr__(self):
        return "%s(%r)" % (self.__class__, self.__dict__)
[ "noreply@github.com" ]
noreply@github.com
040ee9c07207435445daa4f38dbab1889c3a18e0
27dd0c926da56d679159423cccc666a23067bedd
/mysite/urls.py
c0acf19e7fd5cae62e23a8728bbb47b59760c297
[]
no_license
ashutosh23r/my-first-blog
ac98ababc351ff122b27dd6d1126f946d4f8bce4
6641556a42c3b7a496bc00b3c7f956cc67e09a3c
refs/heads/master
2020-03-21T08:54:36.881975
2018-06-23T06:26:01
2018-06-23T06:26:01
138,373,036
0
0
null
null
null
null
UTF-8
Python
false
false
838
py
"""mysite URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin from django.conf.urls import include urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'', include('blog.urls')), ]
[ "ashutosh23r@gmail.com" ]
ashutosh23r@gmail.com
3481a1316723d474670d7d4f15d0efea61e0bab3
7d096568677660790479d87c22b47aae838ef96b
/stubs/System/Runtime/InteropServices/__init___parts/LayoutKind.pyi
c3e34945f43ff2f2f4708a763120cc22b7bc2dfd
[ "MIT" ]
permissive
NISystemsEngineering/rfmx-pythonnet
30adbdd5660b0d755957f35b68a4c2f60065800c
cd4f90a88a37ed043df880972cb55dfe18883bb7
refs/heads/master
2023-02-04T00:39:41.107043
2023-02-01T21:58:50
2023-02-01T21:58:50
191,603,578
7
5
MIT
2023-02-01T21:58:52
2019-06-12T16:02:32
Python
UTF-8
Python
false
false
995
pyi
# Auto-generated .NET interop type stub (pythonnet/IronPython style):
# method bodies are placeholders; Enum, IComparable, IFormattable and
# IConvertible are supplied by the hosting CLR environment at runtime.
class LayoutKind(Enum,IComparable,IFormattable,IConvertible):
    """ Controls the layout of an object when it is exported to unmanaged code.

    enum LayoutKind, values: Auto (3), Explicit (2), Sequential (0) """
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum members; the real values are injected by the CLR at runtime.
    Auto=None
    Explicit=None
    Sequential=None
    value__=None
[ "sean.moore@ni.com" ]
sean.moore@ni.com
694644c5e927145b981cd47f470968232ae22de9
6c3f8a0f30759b792859f010e23154b45d429ed2
/prototypes/microservices/search_client.py
c046626c1965cffe7a9024ea1d32abeb06223a7b
[ "Apache-2.0" ]
permissive
maxhutch/forge
2acb2ec8598ea097b01a1c822357337eeccd1457
8e3521983b02944bf5fa57ae3ca5b3d88eb8f932
refs/heads/master
2021-06-27T16:19:09.351367
2017-09-11T15:08:24
2017-09-11T15:08:24
103,566,359
0
0
null
2017-09-14T18:16:39
2017-09-14T18:16:38
null
UTF-8
Python
false
false
77
py
/Users/jonathongaff/MDF/mdf-harvesters/mdf_indexers/ingester/search_client.py
[ "jgaff@uchicago.edu" ]
jgaff@uchicago.edu
3d3f170c41e7b1ec6d690824f5e9b125aad81b97
8a1b88722fb5a79f837ed29f72e67c349a5adaa0
/GeneticAlgorithms/Trainer.py
695a57f1bc66ca78d209736281282b270fc89cb7
[]
no_license
Lxopato/CC5114
fac9c4418872fab174dc838d7be65132533cbec7
11b294d2e29d439da2a27015297154053921d3c3
refs/heads/master
2021-01-21T05:28:11.142100
2017-12-04T00:13:08
2017-12-04T00:13:08
101,921,650
0
0
null
null
null
null
UTF-8
Python
false
false
1,762
py
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping

# Shared callback: stop fitting once the validation metric has not
# improved for 5 consecutive epochs.
early_stopper = EarlyStopping(patience=5)


def get_mnist():
    """Load MNIST for dense-network training.

    Images are flattened to 784-vectors, scaled into [0, 1] and labels
    are one-hot encoded.  Returns the fixed hyper-parameters (class
    count, batch size, input shape) together with the four data arrays.
    """
    nb_classes = 10
    batch_size = 64
    input_shape = (784,)

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784).astype('float32') / 255
    x_test = x_test.reshape(10000, 784).astype('float32') / 255

    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test


def get_model(network, nb_classes, input_shape):
    """Build and compile the dense classifier described by *network*.

    *network* is a dict with keys 'nb_layers', 'nb_neurons',
    'activation' and 'optimizer'.  The first hidden layer declares the
    input shape; a softmax output layer is always appended.
    """
    depth = network['nb_layers']
    width = network['nb_neurons']
    act = network['activation']
    opt = network['optimizer']

    model = Sequential()
    for layer_idx in range(depth):
        # Only the very first layer needs the input shape.
        extra = {'input_shape': input_shape} if layer_idx == 0 else {}
        model.add(Dense(width, activation=act, **extra))
    model.add(Dense(nb_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    return model


def train_and_score(network):
    """Train the described network on MNIST and return its test accuracy."""
    (nb_classes, batch_size, input_shape,
     x_train, x_test, y_train, y_test) = get_mnist()

    model = get_model(network, nb_classes, input_shape)
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=10000,  # effectively "until early stopping triggers"
              verbose=0,
              validation_data=(x_test, y_test),
              callbacks=[early_stopper])

    # evaluate() returns [loss, accuracy]; report accuracy only.
    return model.evaluate(x_test, y_test, verbose=0)[1]
[ "lpbustoscarrasco@gmail.com" ]
lpbustoscarrasco@gmail.com
9a17228ab41e92b8d3007c76daa725861cdd5b61
6b360246db6825cd3cc349e534845d9082ad7906
/motionDetection/server_udp.py
ca28652d9617ca1ba459d1a181a3aac42fbfa586
[]
no_license
Bitil8747/MOGv2-Motion-detect
697d83003e26600b6fd03d03fdd31a3190fce197
b7ce089c042a539158ce4f5a684991f5bdf0f160
refs/heads/main
2023-07-30T19:21:36.828410
2021-09-20T09:54:23
2021-09-20T09:54:23
408,377,509
0
0
null
null
null
null
UTF-8
Python
false
false
214
py
"""Minimal UDP listener: bind localhost:7070 and print every datagram."""
import socket

udp_ip = 'localhost'
udp_port = 7070

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((udp_ip, udp_port))

while True:
    # Block until a datagram (up to 1024 bytes) arrives; the sender
    # address is received but not used.
    payload, sender = sock.recvfrom(1024)
    print(payload)
[ "noreply@github.com" ]
noreply@github.com
2e123c89f8d30f43907e5e3da0590091363d41cd
aded26493d24aa5d902498f4c8406a68993f9eca
/model.py
ebc06ba10c1988c0ff3d9dd65e81c124fdafa128
[]
no_license
vampiirre/territorial-conflict
e88c7dbb07e8336d4c3c8e771e27918eda447b1e
5c107291f515e5856a3e19b024893148ae34acec
refs/heads/master
2020-05-27T16:02:37.596300
2020-05-19T15:17:56
2020-05-19T15:17:56
188,692,020
0
0
null
null
null
null
UTF-8
Python
false
false
115,594
py
import matplotlib.pyplot as plt import numpy as np import matplotlib as mpl import matplotlib.dates as mdates import datetime as dt import csv import matplotlib.animation from matplotlib import cm from scipy.io import loadmat import pandas as pd import sys import tqdm from tqdm import tqdm_notebook import pickle import _pickle import cvxpy import random import math import copy from IPython.html import widgets from IPython.display import display,clear_output from mpl_toolkits.mplot3d import Axes3D import warnings warnings.filterwarnings('ignore') import time %matplotlib notebook %matplotlib notebook class Territory: def __init__(self, name, row, column, resources, sphere = True, radius = 900.1, percent = False): # объявление территории(Название, высота, ширина, кол-во ресурсов, есть ли искревление, радиус искревления) #массив участков ([ресурсы,], root or eez?, страна+1, [дистанции], [польза], eez?) self.unos = [[[[0] * resources, False, -1, [], [], False] for i in range(column)] for i in range(row)] self.countries = [] # массив стран (приоритеты, удовлетворение, корни, территоии, eez) self.name = name # название территории self.names = [] # название стран self.ind = [] # массив индикаторов стран нужный для charity и fun_balance self.d = [] # границы полезности self.change_map = [] # массив изменения карты для анимации self.change_sati = [] # массив изменени удовлетворений стран для гистограммы c 0 по последнюю итерацию self.start_map = [] # первый вариант карты self.start_map_diff = [] # первый вариант карты с расширенной расцветкой self.res_map = np.zeros((resources, row, column)) # карты ресурсов self.dist_map = [] # карты расстояний self.sati_map = [] # карты полезностей self.transferable = [] # массив реальных элементов которые можно передавать self.i_char = 0 # счётчик количества передачи участков из-за charity self.i_exch = 0 # счётчик количества передачи участков из-за exchange self.i_exch2 = 0 # счётчик количества обменов в exchange self.isave = 0 # 
индикатор сохранения карты self.sphere = sphere # есть ли искривление поверхности? self.radius = radius # радиус искривления self.z = [] # третья координата(от северного полюса) self.inline = 0 # счётчик inline для matplotlib self.angle1 = 30 # угол для 3д отображения self.angle2 = 30 # второй угол для 3д отбражения self.list_exchange = [] # сосед из страны i стране j который может ей уйти self.map_exchange = [] # карта соседей каждого участка self.started = [] # массив участков стран для промежуточного вычесления расстояний self.exchanged = [{}, {}, {}] # словари с информацией о статистике обмена ункцией exchange self.percent = percent # является ли модель процентной self.full = [] # для процентной модели, чему равны 100% уровня удовлетворённости стран if self.sphere: # заполнение self.z for i in range(row): self.z.append([]) for j in range(column): # расчёт третьей координаты с учётом искривления self.z[i].append(self.radius - pow(pow(self.radius, 2) - ((pow(i - row/2 + 0.5, 2) + pow(j - column/2 + 0.5,2))), 0.5)) else: for i in range(row): self.z.append([]) for j in range(column): self.z[i].append(0) ### ПОЛЬЗОВАТЕЛЬСКИЕ ФУНКЦИИ ### ## РЕДАКТИРОВАНИЕ КАРТЫ ## # ДОБАВЛЕНИЕ СТРАНЫ (название, приоритеты, границы пользы) def add_country(self, name, priorities, dis): self.countries.append([priorities, 0, [], [], []]) # приоритеты удовлетворение корни территоии, eez self.ind.append(0) # добавление индикатора self.full.append(0) self.names.append(name) # добавление имени self.d.append(dis) # добавление границ пользы for i in range(len(self.unos)): for j in range(len(self.unos[0])): # добавление элемента дистанции и элемента пользы в участки self.unos[i][j][3].append(0) self.unos[i][j][4].append(0) self.dist_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1) # добавление карты дистанции self.sati_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1) # добавление карты пользы # ДОБАВЛЕНИЕ РЕСУРСА НА НЕСКОЛЬКО УЧАСТКОВ(номер ресурса, первая строка, 
первый столбец, последняя строка, #последний столбец) def add_resources(self, n, ff, fl, lf, ll, r = 1): for i in tqdm_notebook(range(ff, lf+1), total= lf + 1 - ff, desc="Add Resource " + str(n)): for j in range(fl, ll+1): self.add_resource(n, i, j, r) # редактирование каждого участка по очереди # ДОБАВЛЕНИЕ РЕСУРСА НА УЧАСТОК (номер ресурса, строка участка, столбец участка) def add_resource(self, n, f, l, r = 1): self.unos[f][l][0][n] = r # изменение индикатора этого ресурса у участков self.res_map[n][f][l] *= (1 + r) # изменение карты ресурса # ОБЪЯВЛЕНИЕ УЧАСТКОВ РЕАЛЬНЫМИ (первая строка, первый столбец, последняя строка, последний столбец) def add_reals(self, ff, fl, lf, ll): #ff fl - first coordinate, lf ll - last coordinate for i in tqdm_notebook(range(ff, lf+1), total= lf + 1 - ff, desc="Add Real"): for j in range(fl, ll+1): self.add_real(i, j) # редактирование каждого участка по очереди # ОБЪЯВЛЕНИЕ УЧАСТКА РЕАЛЬНЫМ (строка участка, столбец участка) def add_real(self, f, l): self.unos[f][l][2] = 0 # изменение номера принадлежности стране for k in range(len(self.res_map)): self.res_map[k][f][l] = 1 # изменение карт ресурсов if [f, l] not in self.transferable: self.transferable.append([f, l]) # добавление участка в множество свободных # ОБЪЯВЛЕНИЕ УЧАСТКОВ КОРНЯМИ СТРАНЫ(номер страны, первая строка, первый столбец, последняя строка, последний столбец) def add_roots(self, n, ff, fl, lf, ll): # ff, fl - 1st coor, lf, ll - 2nd coor for i in tqdm_notebook(range(ff, lf+1), total= lf + 1 - ff, desc="Add Root of " + self.names[n]): for j in range(fl, ll+1): self.add_root(n, i, j) # редактирование каждого участка по очереди # ОБЪЯВЛЕНИЕ УЧАСТКА КОРНЕМ СТРАНЫ(номер страны, строка участка, столбец участка) def add_root(self, n, f, l): if self.unos[f][l][2] == 0: # только если участок уже реален self.transferable.remove([f, l]) # убрать из множества передаваемых участков self.countries[n][2].append([f, l]) # добавить в множество корней страны self.unos[f][l][2] = n + 1 
# изменить у участка номер принадлежности стране self.unos[f][l][1] = True # изменить у участка индикатор корня или еез for k in range(len(self.countries)): # изменить для всех карт недоступность участка if (k != n): self.dist_map[k][f][l] = -2 self.sati_map[k][f][l] = -2 else: self.dist_map[k][f][l] = 0 self.sati_map[k][f][l] = 0 ## ПРЕДОБРАБОТКА КАРТЫ ## # РАССЧИТЫВАЕТ РАССТОЯНИЯ И ПОЛЬЗЫ УЧАСТКОВ И РАСЧЁТ НАЧАЛЬНОГО УДОВЛЕТВОРЕНИЯ СТРАН def started_pack(self, d = 52.4): for k in range(len(self.countries)): self.started.append([]) for i, j in self.countries[k][2]: z = self.z[i][j] if(((i == 0) or (self.unos[i-1][j][2] != k + 1)) or ((i == len(self.unos) - 1) or (self.unos[i+1][j][2] != k + 1)) or ((j == 0) or (self.unos[i][j-1][2] != k + 1)) or ((j == len(self.unos[0]) - 1) or (self.unos[i][j+1][2] != k + 1))): self.started[k].append([i, j, z]) for i in tqdm_notebook(range(len(self.unos)), total=len(self.unos), desc="Started pack"): for j in range(len(self.unos[0])): if (self.unos[i][j][1] == False) and (self.unos[i][j][2] >= 0): # если участок может быть передан for k in range(len(self.countries)): dista = self.dist(i, j, k) # рассчёт его полезности и расстония до страны k self.unos[i][j][3][k] = dista # изменение множества расстояний учатска self.dist_map[k][i][j] = dista # изменение карты расстояний if min(self.unos[i][j][3]) > d: for k in range(len(self.countries)): satis = self.sati(i, j, k) self.unos[i][j][4][k] = satis # изменение множества пользы учатска self.sati_map[k][i][j] = satis # изменение карты польз if self.percent: self.full[k] += satis else: self.countries[k][1] -= satis # изменение уровня удовлетворённости страны else: country = self.unos[i][j][3].index(min(self.unos[i][j][3])) self.belong(i, j, country, 'EEZ '); # передача участка стране self.unos[i][j][1] = True; # изменение идентификатора корня или еез self.transferable.remove([i, j]) # убирание из списка передаваемых self.countries[country][4].append([i, j]) # добавление в список еез 
страны self.unos[i][j][5] = True # изменение идентификатора еез if self.percent: for i in range(len(self.unos)): for j in range(len(self.unos[0])): for k in range(len(self.countries)): self.unos[i][j][4][k] = self.unos[i][j][4][k] / self.full[k] * 100 if self.sati_map[k][i][j] > 0: self.sati_map[k][i][j] = self.sati_map[k][i][j] / self.full[k] * 100 if self.percent == False: self.change_sati.append(np.array(self.countries)[:, 1].astype(int).tolist())# добавление первого множжества удовлетворённостей else: self.change_sati.append([round(x, 3) for x in np.array(self.countries)[:, 1]]) # добавление первого множжества удовлетворённостей self.start_map = np.array(self.unos)[:, :, 2].astype(int).tolist() # добавление стартовой карты и стартовой карты с расширенной расцветкой self.start_map_diff = (np.array(self.unos)[:, :, 2].astype(int) * 3 - 2 * np.sign(np.array(self.unos)[:, :, 2].astype(int))).tolist() self.started = [] ## ФУНКЦИИ ДЛЯ НАЧАЛЬНОГО РАЗДЕЛЕНИЯ КАРТЫ ## # ФУНКЦИЯ БЛИЗОСТИ отдаёт участки ближайшим странам def func_distance(self): for elem in tqdm_notebook(self.transferable, total= len(self.transferable), desc="Func Distance"): self.belong(elem[0], elem[1], self.near(elem[0], elem[1]), 'Func Distance ') # передача участка ближайшей стране self.make_exch() # формируем карты для допустимого обмена # ФУНКЦИЯ ПОЛЬЗЫ отдаёт участки странам, которым они принесут больше пользы def func_satisfation(self): for elem in tqdm_notebook(self.transferable, total= len(self.transferable), desc="Func Satisfaction"): self.belong(elem[0], elem[1], self.most_sati(elem[0], elem[1]), 'Func Satisfaction ') # передача участка стране, которой он нужнее self.make_exch() # формируем карты для допустимого обмена # ФУНКЦИЯ СПРАВЕДЛИВОСТИ отдаёт самой бедной стране самый выгодный для неё участок и так по кругу def func_balance(self): empty = 0 # индикатор того, что странам больше нечего передавать for k in tqdm_notebook(range(len(self.transferable) + len(self.countries) - 1), #пока не 
закончатся свободные участки total= len(self.transferable) + len(self.countries) - 1, desc="Func Balance"): if empty == 0: # если есть ещё что передавать min_coun = self.min_sat()[1] # находим страну с наименьшим уровнем удовлетворённости max_sati = 0 # максимально возможная прибавка удовлетворённости maxf = 0 # первая координата участка maxl = 0 # вторая координата участка for elem in self.transferable: # для каждого свободного участка i = elem[0] # первая координата j = elem[1] # вторая координата if (((i != 0 and (self.unos[i - 1][j][2] == min_coun + 1)) or # есть ли у участка сосед из той страны (j != 0 and (self.unos[i][j - 1][2] == min_coun + 1)) or (j != len(self.unos[0]) - 1 and (self.unos[i][j + 1][2] == min_coun + 1)) or (i != len(self.unos) - 1 and (self.unos[i + 1][j][2] == min_coun + 1))) and self.unos[i][j][2] == 0 and (max_sati < self.unos[i][j][4][min_coun] or # лучше ли этот участок (max_sati == self.unos[i][j][4][min_coun] and self.unos[maxf][maxl][3][min_coun] > self.unos[i][j][3][min_coun]))): max_sati = self.unos[i][j][4][min_coun] # теперь он лучший вариант maxf = i # записываем его первую координату maxl = j # записываем его вторую координату if max_sati != 0: # если польза больше нуля, то отдаём self.belong(maxf, maxl, min_coun, 'Func Balance ') elif self.ind.count(0) > 1: # если польза нулевая, то переводим индикатор заполненности self.ind[min_coun] = 1 else: # если все индикаторы включены, то обмен закончен empty = 1 # переводим индикатор окончания обмена for element in self.transferable: # передаём оставшиеся участки ближайшим странам if self.unos[element[0]][element[1]][2] == 0: self.belong(element[0], element[1], self.near(element[0], element[1]), 'Func Balance ') for i in range(len(self.ind)): # возвращаем индикаторы self.ind в нулевое положение self.ind[i] = 0 self.make_exch() # формируем карты для допустимого обмена ## ФУНКЦИИ ДОПОЛНИТЕЛЬНОЙ ОБРАБОТКИ # СПРАВЕДЛИВОСТЬ УВЕЛИЧИВАЕТ МИНИМАЛЬНУЮ УДОВЛЕТВОРЁННОСТЬ СТРАН ПОСРЕДСТВОМ 
CHARITY БОГАТЫЕ ОТДАЮТ БЕДНЫМ def charity(self): last_step = np.array(self.countries)[:, 1].astype(float) # запоминание нынешнего состония уровня удовлетворённости self.total_charity() # передаём участки от всех "богатых" ко всем "бедным" while ((np.array(self.countries)[:, 1].astype(float) != last_step).sum() != 0): # повтораяем пока меняются уровни удовлетворения last_step = np.array(self.countries)[:, 1].astype(float) self.total_charity() # ОБМЕН ПЫТАЕТСЯ ОБМЕНЯТЬСЯ МЕЖДУ ЛЮБЫМИ ДВУМЯ СТРАНАМИ НЕ УМЕНЬШАЯ УДОВЛЕТВОРЁННОСТЬ НИ ОДНОЙ ИЗ НИХ #количество случайных участков между которыми будет происходить обмен, количество попыток для каждой пары стран def exchange(self, sides = [8, 6, 4], attempts = 16, safe = False): succes = 1 # счётчик успешных обменов while succes != 0: # пока обмены происходят if safe: self.make_exch() # формируем карты для допустимого обмена succes = 0 # обнуляем счётчик обменов for i in range(len(self.countries)): for j in range(len(self.countries)): # для всех пар стран, между которыми возможен обмен if i != j and len(self.list_exchange[i][j]) != 0 and len(self.list_exchange[j][i]) != 0 : ntry = 0 # обнуляем счётчик неудачных попыток result = 0 # обнуляем индикатор успеха обмена while ntry != attempts: # пока счётчик неудачных попыток не достиг количества попыток result = self.exch(i, j, sides[0], sides[0], ntry) #счётчик успеха = попытка обмена случайными участками if not result: # если не удалось, повышаем счётчик неудачных попыток ntry += 1 else: # иначе обнуляем счётчик неудачных попыток и включаем индикатор успешных обменов ntry = 0 succes = 1 for elem in sides[1:]: ntry = 0 # обнуляем счётчик неудачных попыток result = 0 # обнуляем индикатор успеха обмена while ntry != attempts: # пока счётчик неудачных попыток не достиг количества попыток result = self.exch(i, j, elem, 2 * sides[0] - elem, ntry) #счётчик успеха = попытка обмена случайными участками if not result: # если не удалось, повышаем счётчик неудачных попыток ntry += 1 else: # 
иначе обнуляем счётчик неудачных попыток и включаем индикатор успешных обменов ntry = 0 succes = 1 ntry = 0 # обнуляем счётчик неудачных попыток result = 0 # обнуляем индикатор успеха обмена while ntry != attempts: # пока счётчик неудачных попыток не достиг количества попыток result = self.exch(i, j, 2 * sides[0] - elem, elem, ntry) #счётчик успеха = попытка обмена случайными участками if not result: # если не удалось, повышаем счётчик неудачных попыток ntry += 1 else: # иначе обнуляем счётчик неудачных попыток и включаем индикатор успешных обменов ntry = 0 succes = 1 # КОМБИНАЦИЯ СПРАВЕДЛИВОСТИ И ОБМЕНА #количество случайных участков для функции exchange между которыми будет происходить обмен, количество попыток обмена def char_exch(self, sides = [8, 6, 4], attempts = 16, safe = False): last_step = np.array(self.countries)[:, 1].astype(float) # запоминание нынешнего состония уровня удовлетворённости self.charity() # передаём участки от "богатых" "бедным" self.exchange(sides, attempts, safe) # производим взаимовыгодный обмен while ((np.array(self.countries)[:, 1].astype(float) != last_step).sum() != 0): # пока меняются уровни удовлетворённости last_step = np.array(self.countries)[:, 1].astype(float) # запоминание нынешнего уровня удовлетворённостей self.charity() # передаём участки от "богатых" "бедным" self.exchange(sides, attempts, safe) # производим взаимовыгодный обмен def connectedness(self): self.transferable = [] for i in range(len(self.countries)): root = self.countries[i][2] + self.countries[i][4] old = [] new = [] for k in tqdm_notebook(range(len(self.countries[i][2]) + len(self.countries[i][3]) + len(self.countries[i][4])), #пока не закончатся свободные участки total= (len(self.countries[i][2]) + len(self.countries[i][3]) + len(self.countries[i][4])), desc="Connectedness" + self.names[i]): if root != []: elem = [root[0][0] - 1, root[0][1]] if (elem[0] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and 
(elem not in old) and (elem not in new): new.append(elem) elem = [root[0][0], root[0][1] - 1] if (elem[1] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new): new.append(elem) elem = [root[0][0] + 1, root[0][1]] if (elem[0] < len(self.unos)) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new): new.append(elem) elem = [root[0][0], root[0][1] + 1] if (elem[1] < len(self.unos[0])) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new): new.append(elem) root = root[1:] else: if new != []: if new[0] not in old: elem = [new[0][0] - 1, new[0][1]] if (elem[0] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new): new.append(elem) elem = [new[0][0], new[0][1] - 1] if (elem[1] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new): new.append(elem) elem = [new[0][0] + 1, new[0][1]] if (elem[0] < len(self.unos)) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new): new.append(elem) elem = [new[0][0], new[0][1] + 1] if (elem[1] < len(self.unos[0])) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new): new.append(elem) old.append(new[0]) new = new[1:] copy_terr = copy.deepcopy(self.countries[i][3]) for elem in copy_terr: if elem not in old: self.transferable.append(elem) self.countries[i][1] -= (2 - self.percent) * self.unos[elem[0]][elem[1]][4][i] self.unos[elem[0]][elem[1]][2] = 0 self.countries[i][3].remove([elem[0], elem[1]]) ## ФУНКЦИИ ДЛЯ ВЫВОДОВ # ПИШЕТ СТАТИСТИКУ 
РЕЗУЛЬТАТА ФУНКЦИИ exchange def exchange_info(self): di0 = sorted(new.exchanged[0].items(), key=lambda item: -item[1]) di1 = sorted(new.exchanged[1].items(), key=lambda item: -item[1]) di2 = sorted(new.exchanged[2].items(), key=lambda item: -item[1]) print('Количество участков в настройках и количество таких обменов') for i in range(len(di0)): print(di0[i][0], di0[i][1]) print('Количество участков от каждой страны, учавствующих в обмене и количество таких обменов') for i in range(len(di1)): print(di1[i][0], di1[i][1]) print('Количество участков, учавствующих в обмене и количество таких обменов') for i in range(len(di2)): print(di2[i][0], di2[i][1]) # ПИШЕТ ТАБЛИЦУ ЗАВИСТИ ГДЕ СТРАНА ИЗ СТРОКИ ЗАВИДУЕТ СТРАНЕ ИЗ СТОЛБЦА def envy_free(self): env = [['']] # таблица зависти for i in range(len(self.countries)): env[0].append(self.names[i]) # добавляем в таблицу верхнюю строку названий стран for i in range(len(self.countries)): env.append([self.names[i]]) # добавляем в таблицу левый столбец названий стран for j in range(len(self.countries)): env[i + 1].append(self.envy(i, j)) # заполняем таблицу max_len = max([len(str(e)) for r in env for e in r]) for row in env: print(*list(map('{{:>{length}}}'.format(length= max_len).format, row))) # выводим построчно таблицу # ПИШЕТ ТЕКУЩУЮ УДОВЛЕТВОРЁННОСТЬ СТРАН def countries_sati(self): sat_c = [] # список удовлетворённостей стран for i in range(len(self.countries)): sat_c.append([self.names[i], self.countries[i][1]]) # заполняем список удовлетворённостей max_len = max([len(str(e)) for r in sat_c for e in r]) for row in sat_c: print(*list(map('{{:>{length}}}'.format(length= max_len).format, row))) #выводим список удовлетворённостей # СЛАЙДЕР ИЗМЕНЕНИЯ КАРТЫ (рассматриваемый интервал, гистограмма, расширенная расцветка) def slider(self, interval = "All", hist = False, diff = True): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 def update_iteration(value): # обновление итерации для слайдера 
update_map(iteration = value['new']) def update_map(iteration = 0): # обновлеине карты clear_output(wait=True) # очистка вывода now_map = copy.deepcopy(start_map) # начальная карта (в последствии к ней и будут применяться изменения) if diff: # если расширенная расцветка for i in range(iteration): now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2] * 3 - (change_map[i][3] == "EEZ ") # изменяем карту else: # если не расширенная for i in range(iteration): now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2] # изменяем карту plt.imshow(now_map, cmap = cm.viridis) # отображение карты plt.show() if hist: # если гистограмма fig = plt.figure(figsize=(5, 5)) # настройка гистограммы mpl.rcParams.update({'font.size': 10}) ax = plt.axes() ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1 plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges]) plt.xlim( -0.5, len(self.names)) mpl.rcParams.update({'font.size': 10}) for i in range(len(self.names)): ax.text(i + 0.15, self.change_sati[start + iteration][i], self.change_sati[start + iteration][i]) ax.yaxis.grid(True, zorder = 1) plt.bar([i for i in range(len(self.names))], self.change_sati[start+iteration], width = 0.3, color = 'blue', alpha = 0.7, zorder = 2) plt.xticks(range(len(self.names)), self.names, rotation=30) plt.legend(loc='upper right') slider = widgets.IntSlider(iteration, min = 0, max = len(change_map)) # слайдер итераций label = widgets.Label(value = 'Iterarion ' + str(iteration) + ((start!=0)*('(' + str(start + iteration) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0)) display(slider, label) slider.observe(update_iteration, names = 'value') #настройка рассматриваемого интервала if interval == "All": # если интервал весь start = 0 end = len(self.change_map) elif isinstance(interval[0], int): # если интервал задан численно if interval[0] < 0: interval[0] += 
len(self.change_map) if interval[1] <= 0: interval[1] += len(self.change_map) start = interval[0] end = interval[1] else: # если интервал задан названиями функций start = 0 end = len(self.change_map) for i in range(len(self.change_map)): if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval: start = i break for i in range(len(self.change_map) - 1, -1, -1): if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval: end = i + 1 break if diff: # если расширенная расцветка start_map = copy.deepcopy(self.start_map_diff) # начальная карта for i in range(start): # применяем изменения start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2] * 3 - (self.change_map[i][3] == "EEZ ") else: # если расцветка обычная start_map = copy.deepcopy(self.start_map) # начальная карта for i in range(start): # применяем изменения start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2] change_map = self.change_map[start:end] # формируется список изменений plt.imshow(start_map, cmap = cm.viridis) # отображение карты plt.show() if hist: # если нужна гистограмма fig = plt.figure(figsize=(5, 5)) # формирование гистограммы mpl.rcParams.update({'font.size': 10}) ax = plt.axes() ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1 plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges]) plt.xlim( -0.5, len(self.names)) mpl.rcParams.update({'font.size': 10}) for i in range(len(self.names)): ax.text(i + 0.15, self.change_sati[start][i], self.change_sati[start][i]) ax.yaxis.grid(True, zorder = 1) plt.bar([i for i in range(len(self.names))], self.change_sati[start], width = 0.3, color = 'blue', alpha = 0.7, zorder = 2) plt.xticks(range(len(self.names)), self.names, rotation=30) plt.legend(loc='upper right') slider = widgets.IntSlider(0, min = 0, max = len(change_map)) # 
слайдер итераций label = widgets.Label(value = 'Iterarion 0' + ((start!=0)*('(' + str(start) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0)) display(slider, label) slider.observe(update_iteration, names = 'value') #3Д ОТОБРАЖЕНИЕ (интервал, расширенная настройка, пропуск участков, размер участков) def globus(self, interval = "All", diff = False, interv = 15, scale = 1.5): if self.inline >= 1: # настройка matplotlib for i in range(self.inline): %matplotlib notebook %matplotlib notebook self.inline = 0 #настройка рассматриваемого интервала if interval == "All": # если интервал весь start = 0 end = len(self.change_map) elif isinstance(interval[0], int): # если интервал задан численно if interval[0] < 0: interval[0] += len(self.change_map) if interval[1] <= 0: interval[1] += len(self.change_map) start = interval[0] end = interval[1] else: # если интервал задан названиями функций start = 0 end = len(self.change_map) for i in range(len(self.change_map)): if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval: start = i break for i in range(len(self.change_map) - 1, -1, -1): if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval: end = i + 1 break if diff: # если расширенная расцветка start_map = copy.deepcopy(self.start_map_diff) # начальная карта for i in range(start): # применяем изменения start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2] * 3 - (self.change_map[i][3] == "EEZ ") else: # если расцветка обычная start_map = copy.deepcopy(self.start_map) # начальная карта for i in range(start): # применяем изменения start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2] change_map = self.change_map[start:end] # формируется список изменений x = [] # первая координата y = [] # вторая координата z = [] # третья координата colors = [] # массив цветов точек 
maxi = max(len(self.unos), len(self.unos[0]), max(max(self.z)) - min(min(self.z))) # максимальная длина координат if diff: # рассчёт нужного смещения для размещения посередине for i in range(0, len(self.unos), interv): for j in range(0, len(self.unos[0]), interv): if self.unos[i][j][2] > 0: x.append((maxi - len(self.unos))/2 + i) y.append((maxi - len(self.unos[0]))/2 + j) z.append((maxi + max(max(self.z)))/2 - self.z[i][j]) colors.append(start_map[i][j]) else: for i in range(0, len(self.unos), interv): for j in range(0, len(self.unos[0]), interv): if self.unos[i][j][2] > 0: x.append((maxi - len(self.unos))/2 + i) y.append((maxi - len(self.unos[0]))/2 + j) z.append((maxi + max(max(self.z)))/2 - self.z[i][j]) colors.append(start_map[i][j]) fig = plt.figure(figsize=(5,5)) # настройка трёхмерной модели ax = fig.add_subplot(111, projection='3d') ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.set_zlabel('Z axis') ax.set_xlim([0, maxi]) ax.set_ylim([0, maxi]) ax.set_zlim([0, maxi]) ax.scatter(x, y, z, c=colors, cmap=cm.viridis, s = 2 * interv * scale) ax.view_init(30, 30) plt.show() def update_plot(angle1 = 30, angle2 = 30): # функция обновления угла self.angle1 = angle1 self.angle2 = angle2 ax.view_init(angle1, angle2) fig.canvas.draw_idle() angle1_slider = widgets.IntSlider(30, min = -180, max = 180) # слайдер первого угла display(angle1_slider) angle2_slider = widgets.IntSlider(30, min = -180, max = 180) # слайдер второго угла display(angle2_slider) slider = widgets.IntSlider(0, min = 0, max = len(change_map)) # слайдер итерации label = widgets.Label(value = 'Iterarion 0' + ((start!=0)*('(' + str(start) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0)) display(slider, label) # функции обновления для слайдеров def update_angle1(value): update_plot(angle1 = value['new'], angle2 = self.angle2) def update_angle2(value): update_plot(angle1 = self.angle1, angle2 = value['new']) def update_iteration(value): # 
обновление итерации update_map(iteration = value['new']) def update_map(iteration = 0): # обновлеине карты clear_output(wait=True) # очистка вывода now_map = copy.deepcopy(start_map) # начальная карта (в последствии к ней и будут применяться изменения) if diff: # если расширенная расцветка for i in range(iteration): now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2] * 3 - (change_map[i][3] == "EEZ ") # изменяем карту else: # если не расширенная for i in range(iteration): now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2] # изменяем карту colors = [] for i in range(0, len(self.unos), interv): for j in range(0, len(self.unos[0]), interv): if self.unos[i][j][2] > 0: colors.append(now_map[i][j]) fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(111, projection='3d') ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.set_zlabel('Z axis') ax.set_xlim([0, maxi]) ax.set_ylim([0, maxi]) ax.set_zlim([0, maxi]) ax.scatter(x, y, z, c=colors, cmap=cm.viridis, s = 2 * interv * scale) ax.view_init(self.angle1, self.angle2) plt.show() angle1_slider = widgets.IntSlider(self.angle1, min = -180, max = 180) display(angle1_slider) angle2_slider = widgets.IntSlider(self.angle2, min = -180, max = 180) display(angle2_slider) slider = widgets.IntSlider(iteration, min = 0, max = len(change_map)) # сам слайдер label = widgets.Label(value = 'Iterarion ' + str(iteration) + ((start!=0)*('(' + str(start + iteration) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0)) display(slider, label) def update_plot(angle1 = 30, angle2 = 30): self.angle1 = angle1 self.angle2 = angle2 ax.view_init(angle1, angle2) fig.canvas.draw_idle() def update_angle1(value): update_plot(angle1 = value['new'], angle2 = self.angle2) def update_angle2(value): update_plot(angle1 = self.angle1, angle2 = value['new']) angle1_slider.observe(update_angle1, names = 'value') angle2_slider.observe(update_angle2, names = 'value') 
slider.observe(update_iteration, names = 'value') angle1_slider.observe(update_angle1, names = 'value') angle2_slider.observe(update_angle2, names = 'value') slider.observe(update_iteration, names = 'value') # ТЕКУЩАЯ КАРТА (расширенная расцветка) def terr(self, diff = True): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 if (diff): # отображение карты plt.imshow(list(map(lambda a, b, c: list(map(lambda x, y, z: 2*x*(x>0) + x - 2 * y + z, a, b, c)), np.array(self.unos)[:, :, 2].astype(int), np.array(self.unos)[:, :, 1].astype(int), np.array(self.unos)[:, :, 5].astype(int))), cmap = cm.viridis) else: plt.imshow(np.array(self.unos)[:, :, 2].astype(int), cmap = cm.viridis) if self.percent == False: plt.title(str(len(self.change_map)) + ' ' + str(np.array(self.countries)[:, 1].astype(int))) else: plt.title(str(len(self.change_map)) + ' ' + str([round(x, 3) for x in np.array(self.countries)[:, 1]])) plt.show() # АНИМАЦИЯ ИЗМЕНЕНИЯ КАРТЫ необязательно указывать #(расширеная расцветкаб длительность каждого кадра в милисекундах, пропуск кадров, повторять анимацию?) 
    def anim_terr(self, diff = True, interval = 200, x = 100, repeat = False):
        """Animate the recorded territory changes on the map.

        diff     -- use the extended colouring (``value*3 - (kind == 'EEZ ')``)
                    starting from ``self.start_map_diff``; plain values from
                    ``self.start_map`` otherwise.
        interval -- delay between frames in milliseconds.
        x        -- number of recorded changes applied per frame.
        repeat   -- loop the animation when it reaches the end.

        Returns the ``matplotlib.animation.FuncAnimation`` object (must be
        kept alive by the caller for the animation to play).
        """
        if self.inline >= 1:  # matplotlib setup: switch to the notebook backend
            for i in range(self.inline):
                %matplotlib notebook
            %matplotlib notebook
            self.inline = 0
        if diff:  # animation with extended colouring
            f = plt.figure()
            ax = f.gca()
            im = copy.deepcopy(self.start_map_diff)
            image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
            def function_for_animation(frame_index):
                # Apply x changes per frame; min() clamps the index so the
                # final frames keep showing the last recorded state.
                for i in range(x):
                    im[self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][1]] = (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2] * 3 - (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] == 'EEZ '))
                image.set_data(im)
                # Title: change kind, change index, and the satisfaction list
                ax.set_title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)) + ' ' + str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
            return matplotlib.animation.FuncAnimation(f, function_for_animation, interval=interval, frames=(((len(self.change_map) - 1) // x) + 2), repeat = repeat, blit=True)
        else:
            f = plt.figure()
            ax = f.gca()
            im = copy.deepcopy(self.start_map)
            image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
            def function_for_animation(frame_index):
                # Same as above but with the plain (non-extended) cell values.
                for i in range(x):
                    im[self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][1]] = self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2]
                image.set_data(im)
                ax.set_title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)) + ' ' + str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
            return matplotlib.animation.FuncAnimation(f, function_for_animation, interval=interval, frames=(((len(self.change_map) - 1) // x) + 2), repeat = repeat, blit = True)

    # HISTOGRAM OF CURRENT COUNTRY SATISFACTION
    # (histogram of the countries' current satisfaction)
    def hist(self):
        """Draw a bar chart of each country's current satisfaction.

        Values come from column 1 of ``self.countries`` — cast to int unless
        ``self.percent`` is truthy, in which case they are rounded to
        3 digits.  Each bar is annotated with its value; the title shows the
        number of recorded satisfaction snapshots.
        """
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        fig = plt.figure(dpi = 80, figsize = (8, 4))  # histogram figure
        plt.title(str(len(self.change_sati)))
        mpl.rcParams.update({'font.size': 10})
        ax = plt.axes()
        plt.xlim( -0.5, len(self.names) - 0.5)
        for i in range(len(self.names)):
            # annotate every bar with its numeric value
            if self.percent == False:
                ax.text(i + 0.15, np.array(self.countries)[:, 1].astype(int)[i], np.array(self.countries)[:, 1].astype(int)[i])
            else:
                ax.text(i + 0.15, round(np.array(self.countries)[i][1], 3), round(np.array(self.countries)[i][1], 3))
        ax.yaxis.grid(True, zorder = 1)
        if self.percent == False:
            plt.bar([x for x in range(len(self.names))], np.array(self.countries)[:, 1].astype(int), width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
        else:
            plt.bar([x for x in range(len(self.names))], [round(x, 3) for x in np.array(self.countries)[:, 1]], width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
        plt.xticks(range(len(self.names)), self.names)
        plt.legend(loc='upper right')

    # SATISFACTION-HISTOGRAM ANIMATION; optional args: (frame duration in
    # milliseconds, changes skipped per frame, repeat animation?)
    def anim_hist(self, interval = 200, x = 1, repeat = False):
        """Animate the satisfaction histogram over the recorded history.

        interval -- delay between frames in milliseconds.
        x        -- history steps advanced per frame.
        repeat   -- loop the animation.

        Returns the ``matplotlib.animation.FuncAnimation`` object.
        """
        if self.inline >= 1:  # matplotlib setup: switch to the notebook backend
            for i in range(self.inline):
                %matplotlib notebook
            %matplotlib notebook
            self.inline = 0
        fig = plt.figure(dpi = 80, figsize = (8, 4))  # histogram animation
        # 10% head-room above/below the extreme satisfaction values
        ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1
        def function_for_animation(frame_index):
            plt.clf()
            # min() clamps the history index so trailing frames show the end state
            plt.title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)))
            plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
            plt.xlim( -0.5, len(self.names) - 0.5)
            mpl.rcParams.update({'font.size': 10})
            ax = plt.axes()
            for i in range(len(self.names)):
                # annotate each bar with its value at this history step
                ax.text(i + 0.15, self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i], self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i])
            ax.yaxis.grid(True, zorder = 1)
            plt.bar([x for x in range(len(self.names))], self.change_sati[min(frame_index * x, len(self.change_map) - 1)], width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
            plt.xticks(range(len(self.names)), self.names)
            plt.legend(loc='upper right')
        return matplotlib.animation.FuncAnimation(fig, function_for_animation, interval=interval, repeat = repeat, init_func = None, frames=(((len(self.change_sati) - 1) // x) + 2), blit=True)

    # CURRENT MAP AND COUNTRY-SATISFACTION HISTOGRAM (extended colouring)
    def terr_hist(self, diff = True):
        """Draw the current map and the satisfaction histogram side by side.

        diff -- if True, colour the map with the extended scheme combining
        layers 2, 1 and 5 of ``self.unos``; otherwise show layer 2 alone.
        Left subplot: map; right subplot: per-country satisfaction bars.
        """
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        nrows = 1  # figure layout: 1 row, 2 columns
        ncols = 2
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot(nrows, ncols, 1)
        if diff:  # map
            plt.imshow(list(map(lambda a, b, c: list(map(lambda x, y, z: 2*x*(x>0) + x - 2 * y + z, a, b, c)), np.array(self.unos)[:, :, 2].astype(int), np.array(self.unos)[:, :, 1].astype(int), np.array(self.unos)[:, :, 5].astype(int))), cmap = cm.viridis)
        else:
            plt.imshow(np.array(self.unos)[:, :, 2].astype(int))
        # histogram
        if self.percent == False:
            plt.title(str(len(self.change_map)) + ' ' + str(np.array(self.countries)[:, 1].astype(int)))
        else:
            plt.title(str(len(self.change_map)) + ' ' + str([round(x, 3) for x in np.array(self.countries)[:, 1]]))
        plt.show()
        ax = fig.add_subplot(nrows, ncols, 2)
        plt.title(str(len(self.change_sati)))
        mpl.rcParams.update({'font.size': 10})
        plt.xlim( -0.5, len(self.names))
        for i in range(len(self.names)):
            # annotate every bar with its numeric value
            if self.percent == False:
                ax.text(i + 0.15, np.array(self.countries)[:, 1].astype(int)[i], np.array(self.countries)[:, 1].astype(int)[i])
            else:
                ax.text(i + 0.15, round(np.array(self.countries)[i][1], 3), round(np.array(self.countries)[i][1], 3))
        ax.yaxis.grid(True, zorder = 1)
        if self.percent == False:
            plt.bar([x for x in range(len(self.names))], np.array(self.countries)[:, 1].astype(int), width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
        else:
            plt.bar([x for x in range(len(self.names))], [round(x, 3) for x in np.array(self.countries)[:, 1]], width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
        plt.xticks(range(len(self.names)), self.names)
        plt.legend(loc='upper right')

    # MAP AND HISTOGRAM ANIMATION; optional args: (extended colouring, frame
    # duration in milliseconds, changes applied per frame, repeat animation?)
    def anim(self, diff = True, interval = 200, x = 1, repeat = False):
        """Animate the map (left) and the satisfaction histogram (right).

        diff     -- use the extended map colouring from ``self.start_map_diff``
                    (otherwise plain values from ``self.start_map``).
        interval -- delay between frames in milliseconds.
        x        -- recorded changes applied per frame.
        repeat   -- loop the animation.

        Returns the ``matplotlib.animation.FuncAnimation`` object.
        """
        if self.inline >= 1:  # matplotlib setup: switch to the notebook backend
            for i in range(self.inline):
                %matplotlib notebook
            %matplotlib notebook
            self.inline = 0
        nrows = 1  # figure layout: map on the left, histogram on the right
        ncols = 2
        fig = plt.figure(figsize=(10, 5))
        # 10% head-room above/below the extreme satisfaction values
        ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1
        if diff:  # animate map and histogram with extended colouring
            im = copy.deepcopy(self.start_map_diff)
            def function_for_animation(frame_index):
                plt.clf()
                ax = fig.add_subplot(nrows, ncols, 2)
                # min() clamps indices so trailing frames show the final state
                plt.title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)))
                plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
                plt.xlim( -0.5, len(self.names))
                mpl.rcParams.update({'font.size': 10})
                for i in range(len(self.names)):
                    ax.text(i + 0.15, self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i], self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i])
                ax.yaxis.grid(True, zorder = 1)
                plt.bar([x for x in range(len(self.names))], self.change_sati[min(frame_index * x, len(self.change_map) - 1)], width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
                plt.xticks(range(len(self.names)), self.names, rotation=30)
                plt.legend(loc='upper right')
                ax = fig.add_subplot(nrows, ncols, 1)
                # apply x map changes for this frame (extended colouring)
                for i in range(x):
                    im[self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][1]] = (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2] * 3 - (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] == 'EEZ '))
                image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
                # NOTE(review): uses loop variable `i` after the for — relies
                # on Python leaking the last loop value; confirm intended.
                ax.set_title(self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)) + ' ' + str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
        else:
            im = copy.deepcopy(self.start_map)
            def function_for_animation(frame_index):
                # Same as the diff branch but with plain (non-extended) values.
                plt.clf()
                ax = fig.add_subplot(nrows, ncols, 2)
                plt.title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)))
                plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
                plt.xlim( -0.5, len(self.names))
                mpl.rcParams.update({'font.size': 10})
                for i in range(len(self.names)):
                    ax.text(i + 0.15, self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i], self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i])
                ax.yaxis.grid(True, zorder = 1)
                plt.bar([x for x in range(len(self.names))], self.change_sati[min(frame_index * x, len(self.change_map) - 1)], width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
                plt.xticks(range(len(self.names)), self.names, rotation=30)
                plt.legend(loc='upper right')
                ax = fig.add_subplot(nrows, ncols, 1)
                for i in range(x):
                    im[self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][1]] = self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2]
                image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
                ax.set_title(self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)) + ' ' + str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
        return matplotlib.animation.FuncAnimation(fig, function_for_animation, interval=interval, repeat = repeat, init_func = None, frames=(((len(self.change_sati) - 1) // x) + 2), blit=True)

    # RESOURCE MAP (argument: resource number)
    def map_resource(self, n):
        """Show the distribution map of resource ``n`` from ``self.res_map``."""
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        plt.imshow(np.array(self.res_map[n]), cmap = cm.viridis)
        plt.show()

    # MAP OF ALL RESOURCES COMBINED
    # NOTE(review): this method is truncated here — its body continues on the
    # next source line beyond this chunk; the dangling `for` is intentional.
    def map_all_resources(self):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        arr = self.res_map[0].copy()
        for
i in range(len(self.res_map) - 1): arr += self.res_map[i + 1] plt.imshow(np.array(arr)) plt.show() # ВСЕ КАРТЫ РЕСУРСОВ def map_resources(self): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 f, axarr = plt.subplots(len(self.res_map), 1) for i in range(len(self.res_map)): axarr[i].imshow(self.res_map[i]) plt.show() # КАРТА РАССТОЯНИЯ ДЛЯ СТРАНЫ (номер страны) def map_dist(self, n): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 plt.imshow(np.array(self.dist_map[n])) plt.show() # КАРТА ПОЛЬЗЫ ДЛЯ СТРАНЫ (номер страны) def map_sati(self, n): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 plt.imshow(np.array(self.sati_map[n])) plt.show() # КАРТА ПОЛЬЗЫ И РАССТОЯНИЯ ДЛЯ СТРАНЫ (номер страны) def map_country(self, n): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 f, axarr = plt.subplots(1,2) axarr[0].imshow(self.dist_map[n]) axarr[1].imshow(self.sati_map[n]) plt.show() # ВСЕ КАРТЫ РАССТОЯНИЙ ДЛЯ СТРАН def map_dists(self): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 f, axarr = plt.subplots(len(self.countries), 1) for i in range(len(self.countries)): axarr[i].imshow(self.dist_map[i]) plt.show() # ВСЕ КАРТЫ ПОЛЬЗЫ ДЛЯ СТРАН def map_satis(self): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 f, axarr = plt.subplots(len(self.countries), 1) for i in range(len(self.countries)): axarr[i].imshow(self.sati_map[i]) plt.show() # ВСЕ КАРТЫ ПОЛЬЗ И РАССТОЯНИЙ ДЛЯ СТРАН def map_dists_satis(self): if self.inline == 0: # настройка matplotlib %matplotlib inline self.inline = 1 f, axarr = plt.subplots(len(self.countries), 2) for i in range(len(self.countries)): axarr[i, 0].imshow(self.dist_map[i]) axarr[i, 1].imshow(self.sati_map[i]) plt.show() ## СОХРАНЕНИЕ И ЗАГРУЗКА ДАННЫХ ## # СОХРАНИТЬ ТЕРРИТОРИЮ (указать название файла сохранения) def save(self, name): if(self.isave == 1): # проверка 
индикатора для сохранения print('') # вывод пустого сообщения self.isave = 0 sys.stdout.write("Saving...\r".format()) sys.stdout.flush() pd.DataFrame([pd.DataFrame(self.unos), pd.DataFrame(self.countries), # сохранение всех переменных pd.DataFrame([self.names]), pd.DataFrame([self.ind]), pd.DataFrame(self.d), pd.DataFrame(self.change_map), pd.DataFrame(self.change_sati), pd.DataFrame(self.start_map), pd.DataFrame(self.start_map_diff), pd.DataFrame(self.transferable), pd.DataFrame(self.z), pd.DataFrame([self.exchanged]), pd.DataFrame([self.full]), pd.DataFrame([self.name, self.i_char, self.i_exch, self.isave, self.sphere, self.radius, self.inline, self.angle1, self.angle2, self.percent, self.i_exch2]) ]).to_pickle(name) print('Saved! ') # ЗАГРУЗИТЬ ТЕРРИТОРИЮ (указать название файла) def load(self, name): if(self.isave == 1): # проверка индикатора для сохранения print('') # вывод пустого сообщения self.isave = 0 # загрузка всех переменных sys.stdout.write("Loading. \r".format()) sys.stdout.flush() df = pd.read_pickle(name) sys.stdout.write("Loading.. \r".format()) sys.stdout.flush() self.unos = df[0][0].values.tolist() sys.stdout.write("Loading...\r".format()) sys.stdout.flush() self.countries = df[0][1].values.tolist() sys.stdout.write("Loading. \r".format()) sys.stdout.flush() self.names = df[0][2].values[0].tolist() sys.stdout.write("Loading.. \r".format()) sys.stdout.flush() self.ind = df[0][3].values[0].tolist() sys.stdout.write("Loading... \r".format()) sys.stdout.flush() self.d = df[0][4].values.tolist() sys.stdout.write("Loading. \r".format()) sys.stdout.flush() self.change_map = df[0][5].values.tolist() sys.stdout.write("Loading.. \r".format()) sys.stdout.flush() self.change_jus = df[0][6].values.tolist() sys.stdout.write("Loading... \r".format()) sys.stdout.flush() self.start_map =df[0][7].values.tolist() sys.stdout.write("Loading. \r".format()) sys.stdout.flush() self.start_map_diff =df[0][8].values.tolist() sys.stdout.write("Loading.. 
\r".format()) sys.stdout.flush() self.transferable =df[0][9].values.tolist() sys.stdout.write("Loading... \r".format()) sys.stdout.flush() self.z =df[0][10].values.tolist() sys.stdout.write("Loading. \r".format()) sys.stdout.flush() self.name =df[0][11].values[0][0] sys.stdout.write("Loading.. \r".format()) sys.stdout.flush() self.i_char =df[0][11].values[1][0] sys.stdout.write("Loading... \r".format()) sys.stdout.flush() self.i_exch =df[0][11].values[2][0] sys.stdout.write("Loading. \r".format()) sys.stdout.flush() self.isave =df[0][11].values[3][0] sys.stdout.write("Loading.. \r".format()) sys.stdout.flush() self.sphere =df[0][11].values[4][0] sys.stdout.write("Loading... \r".format()) sys.stdout.flush() self.radius =df[0][11].values[5][0] sys.stdout.write("Loading. \r".format()) sys.stdout.flush() self.inline =df[0][11].values[6][0] sys.stdout.write("Loading.. \r".format()) sys.stdout.flush() self.angle1 =df[0][11].values[7][0] sys.stdout.write("Loading... \r".format()) sys.stdout.flush() self.angle2 =df[0][11].values[8][0] sys.stdout.write("Loading. \r".format()) sys.stdout.flush() self.percent =df[0][11].values[9][0] sys.stdout.write("Loading.. \r".format()) sys.stdout.flush() self.i_exch2 =df[0][11].values[10][0] sys.stdout.write("Loading.. 
\r".format()) sys.stdout.flush() #подсчёт карт ресурсов, расстояний и польз self.res_map = np.zeros((len(self.unos[0][0][0]), len(self.unos), len(self.unos[0]))) self.dist_map = [] self.saty_map = [] for i in range(len(self.countries)): self.dist_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1) self.saty_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1) for i in range(len(self.unos)): for j in range(len(self.unos[0])): for k in range(len(self.unos[0][0][0])): if self.unos[i][j][2] != -1: self.res_map[k][i][j] += self.unos[i][j][0][k] + 1 for k in range(len(self.countries)): if self.unos[i][j][2] != -1: if (self.unos[i][j][1] == False) or (self.unos[i][j][5] == True) or (self.unos[i][j][2] == k + 1): self.dist_map[k][i][j] = self.unos[i][j][3][k] else: self.dist_map[k][i][j] = -2 else: self.dist_map[k][i][j] = -1 if self.unos[i][j][2] != -1: if (self.unos[i][j][1] == False) or (self.unos[i][j][5] == True) or (self.unos[i][j][2] == k + 1): self.saty_map[k][i][j] = self.unos[i][j][4][k] else: self.saty_map[k][i][j] = -2 else: self.saty_map[k][i][j] = -1 sys.stdout.write("Loading... \r".format()) sys.stdout.flush() self.make_exch() print('Loaded! 
# ') — stray fragment: this closed the print('Loaded! ') call that was split
#      across the previous mangled physical line; kept inert here.

### SYSTEM FUNCTIONS ### (not meant for direct calls, but usable with care)

## STANDARD CALCULATIONS AND ASSIGNMENTS ##

def dist(self, f, l, i_coun):
    """Return the minimal distance from plot (f, l) to country i_coun.

    f, l   -- row and column of the plot.
    i_coun -- country index (0-based).
    """
    if self.sphere:
        # Spherical model: chord length to the nearest starting point,
        # converted to an arc length on a sphere of radius self.radius.
        # NOTE(review): self.started is defined outside this chunk —
        # presumably precomputed 3-D coordinates of the country's starting
        # plots; confirm against the constructor.
        d = np.linalg.norm(np.array(self.started[i_coun] - np.array([f, l, self.z[f][l]])), axis=1).min()
        return math.acos(1 - 0.5 * pow(d / self.radius, 2)) * self.radius
    else:
        # Flat model: plain Euclidean distance to the nearest starting plot.
        return np.linalg.norm(np.array(self.countries[i_coun][2][:, :2]) - np.array([f, l]), axis=1).min()

def sati(self, f, l, i_cun):
    """Return the utility ("satisfaction") of plot (f, l) for country i_cun.

    The plot's resources weighted by the country's preferences, attenuated
    by the precomputed minimal distance, squared, and clipped at 0.
    """
    dista = self.unos[f][l][3][i_cun]  # precomputed minimal distance
    return max(0, ((np.array(self.unos[f][l][0]) * np.array(self.countries[i_cun][0])).sum()
                   * ((self.d[i_cun][1] - dista + 1))
                   / (self.d[i_cun][1] - min(dista, self.d[i_cun][0]) + 1)) ** 2)

def near(self, f, l):
    """Return the 0-based index of the country closest to plot (f, l).

    Distance ties are broken by higher utility, then by lower total
    satisfaction (lexicographic comparison of the triples).
    """
    a = [[self.unos[f][l][3][i], -self.unos[f][l][4][i], self.countries[i][1]]
         for i in range(len(self.countries))]
    return a.index(min(a))

def most_sati(self, f, l):
    """Return the index of the country for which plot (f, l) is most useful.

    Utility ties are broken by lower distance, then by lower total
    satisfaction.
    """
    a = [[self.unos[f][l][4][i], -self.unos[f][l][3][i], -self.countries[i][1]]
         for i in range(len(self.countries))]
    return a.index(max(a))

def belong(self, f, l, i_cun, func=''):
    """Assign plot (f, l) to country i_cun (0-based) and log the change.

    func tags the kind of transfer: '' (plain), 'EEZ ' (no utility is
    credited to the receiver), 'Charity ' or 'Exchange <n>' (progress
    counters and exchange statistics are updated).
    NOTE(review): this body was reconstructed from a line-mangled source;
    statement grouping was inferred from semantics — confirm against the
    original notebook.
    """
    if self.unos[f][l][2] > 0:                      # plot already has an owner
        name_i = self.unos[f][l][2]                 # previous owner (1-based)
        # remove the (double, or single in percent mode) plot utility
        # from the previous owner
        self.countries[name_i - 1][1] -= (2 - self.percent) * self.unos[f][l][4][name_i - 1]
        self.countries[name_i - 1][3].remove([f, l])   # drop plot from its list
    self.unos[f][l][2] = i_cun + 1                  # record the new owner
    if func != 'EEZ ':
        # credit the plot utility to the new owner
        self.countries[i_cun][1] += (2 - self.percent) * self.unos[f][l][4][i_cun]
        self.countries[i_cun][3].append([f, l])
    # log the map change and a per-country satisfaction snapshot
    if func[:8] != 'Exchange':
        self.change_map.append([f, l, i_cun + 1, func])
    else:
        self.change_map.append([f, l, i_cun + 1, func + '(' + str(self.i_exch2) + ')'])
    if self.percent == False:
        self.change_sati.append(np.array(self.countries)[:, 1].astype(int).tolist())
    else:
        self.change_sati.append([round(x, 3) for x in np.array(self.countries)[:, 1]])
    if func == 'Charity ':
        self.i_char += 1
        # progress line; trailing spaces clear the previous console row
        sys.stdout.write("Charity: {0}, exchange: {1} ({4}), From {2} to {3}           \r".format(str(self.i_char), str(self.i_exch), self.names[name_i - 1], self.names[i_cun], self.i_exch2))
        sys.stdout.flush()
        self.isave = 1                              # mark unsaved changes
    elif func[:8] == 'Exchange':
        self.i_exch += 1
        sys.stdout.write("charity: {0}, Exchange: {1} ({4}), {5} From {2} to {3} \r".format(str(self.i_char), str(self.i_exch), self.names[name_i - 1], self.names[i_cun], self.i_exch2, func[9:]))
        sys.stdout.flush()
        self.isave = 1
        # histogram of exchanges keyed by the batch size tag embedded in func
        if (self.exchanged[0].get(int(func[8:])) == None):
            self.exchanged[0][int(func[8:])] = 1
        else:
            self.exchanged[0][int(func[8:])] += 1

## CHARITY HELPERS OPERATING ON SELF.IND ##
# least satisfied country among the available ones (those with self.ind[i] == 0)
# def  — stray token: the 'def' keyword of min_sat(), whose signature begins
#        on the next mangled physical line.
def min_sat(self):
    """Return [satisfaction, index] of the least satisfied available country.

    A country i is "available" when self.ind[i] == 0.
    NOTE(review): the stray 'def' at the end of the previous mangled line
    belonged to this function; a full header is restored here.
    """
    mini = self.countries[self.ind.index(0)][1]   # first available country
    answer = self.ind.index(0)
    for i in range(1, len(self.countries)):
        if self.ind[i] == 0 and self.countries[i][1] < mini:
            mini = self.countries[i][1]           # new minimum found
            answer = i
    return [mini, answer]

def max_sat(self):
    """Return [satisfaction, index] of the most satisfied available country."""
    maxi = self.countries[self.ind.index(0)][1]   # first available country
    answer = self.ind.index(0)
    for i in range(1, len(self.countries)):
        if self.ind[i] == 0 and self.countries[i][1] > maxi:
            maxi = self.countries[i][1]           # new maximum found
            answer = i
    return [maxi, answer]

def max_sat_re(self):
    """Return [satisfaction, index] of the most satisfied UNavailable country
    (those with self.ind[i] == 1)."""
    maxi = self.countries[self.ind.index(1)][1]   # first unavailable country
    answer = self.ind.index(1)
    for i in range(1, len(self.countries)):
        if self.ind[i] == 1 and self.countries[i][1] > maxi:
            maxi = self.countries[i][1]
            answer = i
    return [maxi, answer]

## EXCHANGE / FAIRNESS HELPERS ##
def make_exch(self):
    """Build the lists/maps of plots eligible for mutual exchange.

    NOTE(review): only the head of make_exch() lies inside this span; the
    source line is truncated mid-condition and the rest of the body
    continues on the next mangled physical line.
    """
    self.list_exchange = [[[] for i in range(len(self.countries))] for i in range(len(self.countries))]
    self.map_exchange = [[[[] for i in range(len(self.unos[0]))] for i in range(len(self.unos))] for i in range(len(self.countries) + 1)]
    for i in range(len(self.unos)):   # scan free plots into exchange structures
        for j in range(len(self.unos[0])):
            if ((not self.unos[i][j][1]) and self.unos[i][j][2] not in [-1, 0]):
                # if (i   <- original text truncated here, mid-expression; the
                #            condition continues on the next physical line.
                pass
!= 0 and (self.unos[i - 1][j][2] not in [-1, 0])): if (self.unos[i][j][2] != self.unos[i - 1][j][2]): self.list_exchange[self.unos[i][j][2] - 1][self.unos[i - 1][j][2] - 1].append([i, j]) self.map_exchange[self.unos[i - 1][j][2] - 1][i][j].append([-1, 0]) if (j != 0 and (self.unos[i][j - 1][2] not in [-1, 0])): if (self.unos[i][j][2] != self.unos[i][j - 1][2] and len(self.map_exchange[self.unos[i][j - 1][2] - 1][i][j]) == 0): self.list_exchange[self.unos[i][j][2] - 1][self.unos[i][j - 1][2] - 1].append([i, j]) self.map_exchange[self.unos[i][j - 1][2] - 1][i][j].append([0, -1]) if ((j != len(self.unos[0]) - 1) and (self.unos[i][j + 1][2] not in [-1, 0])): if (self.unos[i][j][2] != self.unos[i][j + 1][2] and len(self.map_exchange[self.unos[i][j + 1][2] - 1][i][j]) == 0): self.list_exchange[self.unos[i][j][2] - 1][self.unos[i][j + 1][2] - 1].append([i, j]) self.map_exchange[self.unos[i][j + 1][2] - 1][i][j].append([0, 1]) if ((i != len(self.unos) - 1) and (self.unos[i + 1][j][2] not in [-1, 0])): if (self.unos[i][j][2] != self.unos[i + 1][j][2] and len(self.map_exchange[self.unos[i + 1][j][2] - 1][i][j] )== 0): self.list_exchange[self.unos[i][j][2] - 1][self.unos[i + 1][j][2] - 1].append([i, j]) self.map_exchange[self.unos[i + 1][j][2] - 1][i][j].append([1, 0]) for i in range(len(self.unos)): for j in range(len(self.unos[0])): # формирование карты обмена несвободных участков if ((self.unos[i][j][1]) or (self.unos[i][j][2] in [-1, 0])): if (i != 0 and (self.unos[i - 1][j][2] not in [-1, 0]) and (not self.unos[i - 1][j][1])): self.map_exchange[self.unos[i - 1][j][2] - 1][i][j].append([-1, 0]) if (j != 0 and (self.unos[i][j - 1][2] not in [-1, 0]) and (not self.unos[i][j - 1][1])): self.map_exchange[self.unos[i][j - 1][2] - 1][i][j].append([0, -1]) if ((j != len(self.unos[0]) - 1) and (self.unos[i][j + 1][2] not in [-1, 0]) and (not self.unos[i][j + 1][1])): self.map_exchange[self.unos[i][j + 1][2] - 1][i][j].append([0, 1]) if ((i != len(self.unos) - 1) and (self.unos[i 
+ 1][j][2] not in [-1, 0]) and (not self.unos[i + 1][j][1])): self.map_exchange[self.unos[i + 1][j][2] - 1][i][j].append([1, 0]) for i in range(len(self.unos)): for j in range(len(self.unos[0])): # формирование списка опасных участков if ((not self.unos[i][j][1]) and self.unos[i][j][2] not in [-1, 0]): if (len(self.map_exchange[self.unos[i][j][2] - 1][i][j]) == 1): if (i != 0 and (self.unos[i - 1][j][2] == self.unos[i][j][2])): self.map_exchange[-1][i - 1][j].append([1, 0]) elif ((i != len(self.unos) - 1) and (self.unos[i + 1][j][2] == self.unos[i][j][2])): self.map_exchange[-1][i + 1][j].append([-1, 0]) elif ((j != len(self.unos) - 1) and (self.unos[i][j + 1][2] == self.unos[i][j][2])): self.map_exchange[-1][i][j + 1].append([0, -1]) elif (j != 0 and (self.unos[i][j - 1][2] == self.unos[i][j][2])): self.map_exchange[-1][i][j - 1].append([0, 1]) # ВЗАИМОВЫГОДНЫЙ ОБМЕН СЛУЧАЙНЫМИ УЧАСТКАМИ МЕЖДУ ДВУМЯ СТАРАНАМИ def exch(self, one, two, sides1 = 8, sides2 = 8, ntry = 0): sys.stdout.write("charity: {0}, Exchange: {1} ({5}), {6} Try {4} from {2} to {3} \r".format(str(self.i_char), str(self.i_exch), self.names[one], self.names[two], ntry, self.i_exch2, str(min(sides1, sides2)))) first = [] # список готовых к обмену от первой страны second = [] # список готовых к обмену от второй страны firstsati = [] # список изменения удовлетворения стран от передачи участков первой страны secondsati = [] # список изменения удовлетворения стран от передачи участков второй страны constteamone = [] # условия для участков первой страны чтобы все соседи участка не ушли без него constteamtwo = [] # условия для участков второй страны чтобы все соседи участка не ушли без него constenemyone = [] # условия для участков первой страны чтобы все чужие соседи участка не стали своими, а этот не ушёл constenemytwo = [] # условия для участков второй страны чтобы все чужие соседи участка не стали своими, а этот не ушёл # номера случайных участков первой страны one_numbers = 
random.sample(range(len(self.list_exchange[one][two])), min(sides1, len(self.list_exchange[one][two]))) # номера случайных участков второй страны two_numbers = random.sample(range(len(self.list_exchange[two][one])), min(sides2, len(self.list_exchange[two][one]))) # заполнение множеств участков первой страны for elem in one_numbers: eleme = self.list_exchange[one][two][elem] if len(self.map_exchange[-1][eleme[0]][eleme[1]]) == 0: if eleme not in first: first.append(eleme) else: no = 0 for element in self.map_exchange[-1][eleme[0]][eleme[1]]: if len(self.map_exchange[two][element[0] + eleme[0]][element[1] + eleme[1]]) == 0: no = 1 break if no == 0: if eleme not in first: first.append(eleme) for element in self.map_exchange[-1][eleme[0]][eleme[1]]: if [element[0] + eleme[0], element[1] + eleme[1]] not in first: first.append([element[0] + eleme[0], element[1] + eleme[1]]) if len(first) >= sides1: break # заполнение множества участков второй страны for elem in two_numbers: eleme = self.list_exchange[two][one][elem] if len(self.map_exchange[-1][eleme[0]][eleme[1]]) == 0: if eleme not in second: second.append(eleme) else: no = 0 for element in self.map_exchange[-1][eleme[0]][eleme[1]]: if len(self.map_exchange[one][element[0] + eleme[0]][element[1] + eleme[1]]) == 0: no = 1 break if no == 0: if eleme not in second: second.append(eleme) for element in self.map_exchange[-1][eleme[0]][eleme[1]]: if [element[0] + eleme[0], element[1] + eleme[1]] not in second: second.append([element[0] + eleme[0], element[1] + eleme[1]]) if len(second) >= sides2: break # формирование списков условий первой страны for i in range(len(first)): team = len(self.map_exchange[one][first[i][0]][first[i][1]]) teammates = [] enemies = [] enemy = len(self.map_exchange[two][first[i][0]][first[i][1]]) for elem in self.map_exchange[one][first[i][0]][first[i][1]]: if ([elem[0] + first[i][0], elem[1] + first[i][1]] in first): team -= 1 teammates.append(first.index([elem[0] + first[i][0], elem[1] + 
first[i][1]])) if team == 0: constteamone.append([i, teammates]) for elem in self.map_exchange[two][first[i][0]][first[i][1]]: if ([elem[0] + first[i][0], elem[1] + first[i][1]] in second): enemy -= 1 enemies.append(second.index([elem[0] + first[i][0], elem[1] + first[i][1]])) if enemy == 0: constenemyone.append([i, enemies]) # формирование списков условий второй страны for i in range(len(second)): team = len(self.map_exchange[two][second[i][0]][second[i][1]]) teammates = [] enemies = [] enemy = len(self.map_exchange[one][second[i][0]][second[i][1]]) for elem in self.map_exchange[two][second[i][0]][second[i][1]]: if ([elem[0] + second[i][0], elem[1] + second[i][1]] in second): team -= 1 teammates.append(second.index([elem[0] + second[i][0], elem[1] + second[i][1]])) if team == 0: constteamtwo.append([i, teammates]) for elem in self.map_exchange[one][second[i][0]][second[i][1]]: if ([elem[0] + second[i][0], elem[1] + second[i][1]] in first): enemy -= 1 enemies.append(first.index([elem[0] + second[i][0], elem[1] + second[i][1]])) if enemy == 0: constenemytwo.append([i, enemies]) # заполнение множеств удовлетворений первой и второй страны for elem in first: firstsati.append([-self.unos[elem[0]][elem[1]][4][one], self.unos[elem[0]][elem[1]][4][two]]) for elem in second: secondsati.append([self.unos[elem[0]][elem[1]][4][one], -self.unos[elem[0]][elem[1]][4][two]]) if (len(first) == 0) or (len(second) == 0): # если хоть кому-то нечем обмениваться, то заканчиваем return 0 sati1 = firstsati + secondsati # объединение множеств польз selection1 = cvxpy.Bool(len(sati1)) # идентификаторы обмена z = cvxpy.Variable() # переменная минимального изменения обмена a = len(first) constraint1 = [z <= np.array(sati1)[:, 1] * selection1, z <= np.array(sati1)[:, 0] * selection1] # условие поиска оптимума # добавление условий for elem in constteamone: constraint1.append(selection1[elem[0]] - cvxpy.sum_entries(selection1[elem[1]]) >= 1 - len(elem[1])) for elem in constteamtwo: 
constraint1.append(selection1[elem[0] + a] - cvxpy.sum_entries(selection1[[i + a for i in elem[1]]]) >= 1 - len(elem[1])) for elem in constenemyone: constraint1.append(selection1[elem[0]] + cvxpy.sum_entries(selection1[[i + a for i in elem[1]]]) <= + len(elem[1])) for elem in constenemytwo: constraint1.append(selection1[elem[0] + a] + cvxpy.sum_entries(selection1[elem[1]]) <= len(elem[1])) total_utility1 = z # оптимизируем z my_problem1 = cvxpy.Problem(cvxpy.Maximize(total_utility1), constraint1) my_problem1.solve(solver=cvxpy.GLPK_MI) # решаем проблему first1 = (np.array(sati1)[:, 0] * selection1).value # прибавление удовлетворённости первой страны second1 = (np.array(sati1)[:, 1] * selection1).value # прибавление удовлетворённости второй страны if (first1 != 0 or second1 != 0): # если хоть одной из них лучше self.i_exch2 += 1 # счётчик обменов увеличивает for j in range(len(selection1.value)): # для всех переданных if selection1[j].value: if j < a: # если от первой страны второй self.redact_exch(first[j][0], first[j][1], one, two) # учитываем влияение на карты допустимых обменов self.belong(first[j][0], first[j][1], two, 'Exchange ' + str(min(sides1, sides2))) else: # если от второй страны первой j2 = j - a self.redact_exch(second[j2][0], second[j2][1], two, one) # учитываем влияение на карты допустимых обменов self.belong(second[j2][0], second[j2][1], one, 'Exchange ' + str(min(sides1, sides2))) exch_info = str(sorted([int(sum(selection1.value[:a])), int(sum(selection1.value[a:]))])) if self.exchanged[1].get(exch_info) == None: self.exchanged[1][exch_info] = 1 else: self.exchanged[1][exch_info] += 1 if self.exchanged[2].get(int(sum(selection1.value))) == None: self.exchanged[2][int(sum(selection1.value))] = 1 else: self.exchanged[2][int(sum(selection1.value))] += 1 return 1 return 0 # УЧЁТ ВЛИЯНИЕ ПЕРЕДАЧИ УЧАСТКА НА КАРТЫ ОПУСТИМЫХ ОБМЕНОВ (первая координата, вторая координата, от какой страны, какой стране) def redact_exch(self, first, last, one, two): if 
(first != 0) and (len(self.map_exchange[one][first - 1][last]) == 1) and (self.unos[first - 1][last][2] not in [one + 1, 0, -1]) and not self.unos[first - 1][last][1]: self.list_exchange[self.unos[first - 1][last][2] - 1][one].remove([first - 1, last]) if (first != len(self.unos) - 1) and (len(self.map_exchange[one][first + 1][last]) == 1) and (self.unos[first + 1][last][2] not in [one + 1, 0, -1]) and not self.unos[first + 1][last][1]: self.list_exchange[self.unos[first + 1][last][2] - 1][one].remove([first + 1, last]) if (last != 0) and (len(self.map_exchange[one][first][last - 1]) == 1) and (self.unos[first][last - 1][2] not in [one + 1, 0, -1]) and not self.unos[first][last - 1][1]: self.list_exchange[self.unos[first][last - 1][2] - 1][one].remove([first, last - 1]) if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[one][first][last + 1]) == 1) and (self.unos[first][last + 1][2] not in [one + 1, 0, -1]) and not self.unos[first][last + 1][1]: self.list_exchange[self.unos[first][last + 1][2] - 1][one].remove([first, last + 1]) # добавить в список нового своих соседей if (first != 0) and (len(self.map_exchange[two][first - 1][last]) == 0) and (self.unos[first - 1][last][2] not in [two + 1, 0, -1]) and not self.unos[first - 1][last][1]: self.list_exchange[self.unos[first - 1][last][2] - 1][two].append([first - 1, last]) if (first != len(self.unos) - 1) and (len(self.map_exchange[two][first + 1][last]) == 0) and (self.unos[first + 1][last][2] not in [two + 1, 0, -1]) and not self.unos[first + 1][last][1]: self.list_exchange[self.unos[first + 1][last][2] - 1][two].append([first + 1, last]) if (last != 0) and (len(self.map_exchange[two][first][last - 1]) == 0) and (self.unos[first][last - 1][2] not in [two + 1, 0, -1]) and not self.unos[first][last - 1][1]: self.list_exchange[self.unos[first][last - 1][2] - 1][two].append([first, last - 1]) if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[two][first][last + 1]) == 0) and (self.unos[first][last 
+ 1][2] not in [two + 1, 0, -1]) and not self.unos[first][last + 1][1]: self.list_exchange[self.unos[first][last + 1][2] - 1][two].append([first, last + 1]) # убрать себя из списка соседей и добавить нового себя в список соседей team1 = [] enemy1 = [] if (first != 0) and (self.unos[first - 1][last][2] not in [-1, 0]): if self.unos[first - 1][last][2] != one + 1: team1.append(self.unos[first - 1][last][2]) if self.unos[first - 1][last][2] != two + 1: enemy1.append(self.unos[first - 1][last][2]) if (first != len(self.unos) - 1) and (self.unos[first + 1][last][2] not in [-1, 0]): if self.unos[first + 1][last][2] != one + 1: team1.append(self.unos[first + 1][last][2]) if self.unos[first + 1][last][2] != two + 1: enemy1.append(self.unos[first + 1][last][2]) if (last != 0) and (self.unos[first][last - 1][2] not in [-1, 0]): if self.unos[first][last - 1][2] != one + 1: team1.append(self.unos[first][last - 1][2]) if self.unos[first][last - 1][2] != two + 1: enemy1.append(self.unos[first][last - 1][2]) if (last != len(self.unos[0]) - 1) and (self.unos[first][last + 1][2] not in [-1, 0]): if self.unos[first][last + 1][2] != one + 1: team1.append(self.unos[first][last + 1][2]) if self.unos[first][last + 1][2] != two + 1: enemy1.append(self.unos[first][last + 1][2]) for elem in list(set(team1)): self.list_exchange[one][elem - 1].remove([first, last]) for elem in list(set(enemy1)): self.list_exchange[two][elem - 1].append([first, last]) self.map_exchange[-1][first][last] = [] #обнуление своего счётчика # составление своего счётчика if (first != 0) and (self.map_exchange[two][first - 1][last] == []) and (self.unos[first - 1][last][2] == two + 1) and not self.unos[first - 1][last][1]: self.map_exchange[-1][first][last].append([-1, 0]) if (first != len(self.unos) - 1) and (self.map_exchange[two][first + 1][last] == []) and (self.unos[first + 1][last][2] == two + 1) and not self.unos[first + 1][last][1]: self.map_exchange[-1][first][last].append([1, 0]) if (last != 0) and 
(self.map_exchange[two][first][last - 1] == []) and (self.unos[first][last - 1][2] == two + 1) and not self.unos[first][last - 1][1]: self.map_exchange[-1][first][last].append([0, -1]) if (last != len(self.unos[0]) - 1) and (self.map_exchange[two][first][last + 1] == []) and (self.unos[first][last + 1][2] == two + 1) and not self.unos[first][last + 1][1]: self.map_exchange[-1][first][last].append([0, 1]) if len(self.map_exchange[one][first][last]) == 1: #обнуление счётчика бывшего self.map_exchange[-1][self.map_exchange[one][first][last][0][0] + first][self.map_exchange[one][first][last][0][1] + last].remove([-self.map_exchange[one][first][last][0][0], -self.map_exchange[one][first][last][0][1]]) # возможно сам стал опасным if len(self.map_exchange[two][first][last]) == 1: self.map_exchange[-1][self.map_exchange[two][first][last][0][0] + first][self.map_exchange[two][first][last][0][1] + last].append([-self.map_exchange[two][first][last][0][0], -self.map_exchange[two][first][last][0][1]]) # возможно спас новых опасных if (first != 0) and (len(self.map_exchange[two][first - 1][last]) == 1) and (self.unos[first - 1][last][2] == two + 1) and not self.unos[first - 1][last][1]: self.map_exchange[-1][self.map_exchange[two][first - 1][last][0][0] + first - 1][self.map_exchange[two][first - 1][last][0][1] + last].remove([-self.map_exchange[two][first - 1][last][0][0], -self.map_exchange[two][first - 1][last][0][1]]) if (first != len(self.unos) - 1) and (len(self.map_exchange[two][first + 1][last]) == 1) and (self.unos[first + 1][last][2] == two + 1) and not self.unos[first + 1][last][1]: self.map_exchange[-1][self.map_exchange[two][first + 1][last][0][0] + first + 1][self.map_exchange[two][first + 1][last][0][1] + last].remove([-self.map_exchange[two][first + 1][last][0][0], -self.map_exchange[two][first + 1][last][0][1]]) if (last != 0) and (len(self.map_exchange[two][first][last - 1]) == 1) and (self.unos[first][last - 1][2] == two + 1) and not self.unos[first][last - 
1][1]: self.map_exchange[-1][self.map_exchange[two][first][last - 1][0][0] + first][self.map_exchange[two][first][last - 1][0][1] + last - 1].remove([-self.map_exchange[two][first][last - 1][0][0], -self.map_exchange[two][first][last - 1][0][1]]) if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[two][first][last + 1]) == 1) and (self.unos[first][last + 1][2] == two + 1) and not self.unos[first][last + 1][1]: self.map_exchange[-1][self.map_exchange[two][first][last + 1][0][0] + first][self.map_exchange[two][first][last + 1][0][1] + last + 1].remove([-self.map_exchange[two][first][last + 1][0][0], -self.map_exchange[two][first][last + 1][0][1]]) # удаление старых соседств и прибавление новых if first != 0: self.map_exchange[one][first - 1][last].remove([1, 0]) self.map_exchange[two][first - 1][last].append([1, 0]) if first != len(self.unos) - 1: self.map_exchange[one][first + 1][last].remove([-1, 0]) self.map_exchange[two][first + 1][last].append([-1, 0]) if last != 0: self.map_exchange[one][first][last - 1].remove([0, 1]) self.map_exchange[two][first][last - 1].append([0, 1]) if last != len(self.unos[0]) - 1: self.map_exchange[one][first][last + 1].remove([0, -1]) self.map_exchange[two][first][last + 1].append([0, -1]) # возможно сделал опасными старых if (first != 0) and (len(self.map_exchange[one][first - 1][last]) == 1) and (self.unos[first - 1][last][2] == one + 1) and not self.unos[first - 1][last][1]: self.map_exchange[-1][self.map_exchange[one][first - 1][last][0][0] + first - 1][self.map_exchange[one][first - 1][last][0][1] + last].append([-self.map_exchange[one][first - 1][last][0][0], -self.map_exchange[one][first - 1][last][0][1]]) if (first != len(self.unos) - 1) and (len(self.map_exchange[one][first + 1][last]) == 1) and (self.unos[first + 1][last][2] == one + 1) and not self.unos[first + 1][last][1]: self.map_exchange[-1][self.map_exchange[one][first + 1][last][0][0] + first + 1][self.map_exchange[one][first + 1][last][0][1] + 
last].append([-self.map_exchange[one][first + 1][last][0][0], -self.map_exchange[one][first + 1][last][0][1]]) if (last != 0) and (len(self.map_exchange[one][first][last - 1]) == 1) and (self.unos[first][last - 1][2] == one + 1) and not self.unos[first][last - 1][1]: self.map_exchange[-1][self.map_exchange[one][first][last - 1][0][0] + first][self.map_exchange[one][first][last - 1][0][1] + last - 1].append([-self.map_exchange[one][first][last - 1][0][0], -self.map_exchange[one][first][last - 1][0][1]]) if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[one][first][last + 1]) == 1) and (self.unos[first][last + 1][2] == one + 1) and not self.unos[first][last + 1][1]: self.map_exchange[-1][self.map_exchange[one][first][last + 1][0][0] + first][self.map_exchange[one][first][last + 1][0][1] + last + 1].append([-self.map_exchange[one][first][last + 1][0][0], -self.map_exchange[one][first][last + 1][0][1]]) # ОТДАЁТ САМЫЙ ВЫГОДНЫЙ УЧАСТОК ВТОРОЙ СТРАНЫ ПЕРВОЙ СТРАНЕ (номер первой и второй страны) возвращает индексы участка def chari(self, maxi_i, mini_i): # и номер второй страны sys.stdout.write("Charity: {0}, exchange: {1} ({4}), Try from {2} to {3} \r".format(str(self.i_char), str(self.i_exch), self.names[maxi_i], self.names[mini_i], self.i_exch2)) ind_max = 0 # индекс найденного максимума maximum = 0 # максимальная относительная разница пользы for i in self.list_exchange[maxi_i][mini_i]: # проходим по всем участкам второй страны firs = i[0] las = i[1] if ([self.countries[mini_i][1], self.countries[maxi_i][1]] < [self.countries[maxi_i][1] - 2 * self.unos[firs][las][4][maxi_i], self.countries[mini_i][1] + 2 * self.unos[firs][las][4][mini_i]] and # если имеет смысл передать # если её относительная польза больше maximum < (self.unos[firs][las][4][mini_i] / (self.unos[firs][las][4][maxi_i] + sys.float_info.epsilon))): maximum = (self.unos[firs][las][4][mini_i] / (self.unos[firs][las][4][maxi_i] + sys.float_info.epsilon)) ind_max = i # в индекс записывается 
очерёдность выбранного участка в множестве if (ind_max != 0): # если максимум найден self.redact_exch(ind_max[0], ind_max[1], maxi_i, mini_i) # учитываем влияение на карты допустимых обменов self.belong(ind_max[0], ind_max[1], mini_i, 'Charity ') # передаём участок return 1 #возвращаем что передали и номер второго участка return 0 # ОТ САМОЙ БОГАТОЙ СТРАНЕ ОТДАЁТ БЕДНОЙ С ПОМОЩЬЮ CHARITY def one_charity(self): min1 = self.min_sat()[1] # запоминаем страну с наименьшей удовлеторённостью max1 = self.max_sat()[1] # запоминаем страну с наибольшей удовлетворённостью result = self.chari(max1, min1) # запоминаем что передали while result: # пока имеет смысл отдавать min1 = self.min_sat()[1] #повторяем max1 = self.max_sat()[1] result = self.chari(max1, min1) # ОТ ВСЕХ СТРАН ОТДАЁТ САМОЙ БЕДНОЙ def all_charity(self): maxsat = self.max_sat()[1] # запоминаем самую богатую страну self.one_charity() # от самой богатой отдаём самой бедной self.ind[maxsat] = 1 # блокируем самую богатую if self.ind.count(0) > 1: # если ещё есть кому отдавать, то повторяем self.all_charity() self.ind[self.max_sat_re()[1]] = 0 # возвращаем индикатор обратно # ОТ ВСЕХ СТРАН ОТДАЁТ ВСЕМ(БОГАТЕЙШИЕ БЕДНЕЙШИМ) def total_charity(self): minsat = self.min_sat()[1] # запоминаем самую бедную страну self.all_charity() # производим обмен от всех ей self.ind[minsat] = 1 # блокируем её if self.ind.count(0) > 1: # повтораяем с другой пока есть страны self.total_charity() else: for i in range(len(self.ind)): # обнуляем инндикаторы self.ind[i] = 0 ## ВСПОМОГАТЕЛНЫЕ ФУНКЦИИ ДЛЯ ВЫВОДОВ # ЗАВИСТЬ ПЕРВОЙ СТРАНЫ ВТОРОЙ СТРАНЕ (номер первой страны, номер второй страны) def envy(self, coun_1, coun_2): result = 0 # результат for i in range(len(self.countries[coun_1][3])): # учитываем участки первой страны result += self.unos[self.countries[coun_1][3][i][0]][self.countries[coun_1][3][i][1]][4][coun_1] for i in range(len(self.countries[coun_2][3])): # учитываем участки второй страны result -= 
self.unos[self.countries[coun_2][3][i][0]][self.countries[coun_2][3][i][1]][4][coun_1] if self.percent == False: return int(result) return round(result, 3)
[ "noreply@github.com" ]
noreply@github.com
a6bdb94809d7680329ff28eac373e0a783cffd6d
9f7f6b9d3eb1ec85136d16fa02987b412882c595
/examples/websocket_test.py
d855e0f74d6efd2dff63657933df92f138a49e9b
[ "MIT" ]
permissive
fmux/sanicpluginsframework
e6f631487ac1962d04e8263ea3c789fe20179905
175525e85504fcf6e7d32bf12874578fc14c115a
refs/heads/master
2020-07-24T02:52:47.370302
2019-09-11T10:02:45
2019-09-11T10:02:45
207,780,270
0
0
MIT
2019-09-11T09:59:43
2019-09-11T09:59:43
null
UTF-8
Python
false
false
811
py
import pickle from sanic import Sanic from spf import SanicPlugin, SanicPluginsFramework from sanic.response import text from logging import DEBUG class MyPlugin(SanicPlugin): def __init__(self, *args, **kwargs): super(MyPlugin, self).__init__(*args, **kwargs) instance = MyPlugin() @instance.middleware(priority=6, with_context=True, attach_to="cleanup") def mw1(request, context): context['test1'] = "test" print("Doing Cleanup!") app = Sanic(__name__) spf = SanicPluginsFramework(app) assoc_reg = spf.register_plugin(instance) @app.route('/') def index(request): return text("hello world") @app.websocket('/test1') async def we_test(request, ws): print("hi") return if __name__ == "__main__": app.run("127.0.0.1", port=8098, debug=True, auto_reload=False)
[ "ashleysommer@gmail.com" ]
ashleysommer@gmail.com
481a8d81e0d1fd7a551918ed8765436bcad2be91
bdbc362f1a6584f83220682a722187ca5714438f
/Boredom1_Classes.py
6ca1a4a80cf966434f59b22bf1afd396a987d5f6
[ "MIT" ]
permissive
WillGreen98/University-INTPROG-Python
59e804d8418ec52e1318da8686be792f3b527244
93c4f8227a28e09ece0adcebc0fbe499c4b62753
refs/heads/master
2021-06-02T05:37:00.355704
2018-09-30T20:01:44
2018-09-30T20:01:44
108,696,997
0
1
MIT
2021-04-29T19:18:30
2017-10-29T01:42:38
Python
UTF-8
Python
false
false
2,082
py
import time subjects = [] isAllowed2Die = True class Animal: isAllowed2Die = True isPet = bool def __init__(self, genome, classes, bio_def, c_type): self.genome = genome self.a_class = classes self.bio_def = bio_def self.type = c_type class Dog(Animal): isPet = True def __init__(self, name, breed): super().__init__(self, "Canis", "Carnivore", "Dog") self.name = name self.breed = breed def bork(self, d): if d == "quiet": print("Bork Bork Bork... My name is: {0}".format(self.name)) elif d == "loud": print("BORK BORK BORK... MY NAME IS: {0}".format(self.name)) elif d == "sassy": print("Bork Bork Boooork... My name is: {0}".format(self.name)) else: print("Bork") def sit(self, duration): t = time.process_time() print("I am now sitting, I have been sitting for: {0}".format(time.process_time() - duration)) def getTheFuckAwayFromMyPizza(self): return "DRIBBLES ON FLOOR" class Person: def __init__(self, f_Name, nickName, subject, isAwesome): self.fName = f_Name self.sName = nickName self.subject = subject subjects.append(self.subject) self._isAwesome = bool(isAwesome) def killMyself(self): if self._isAwesome: ToBkilledOrNotToBKilledThatIsTheQuestion = " is awesome, they are not allowed to be killed." else: ToBkilledOrNotToBKilledThatIsTheQuestion = " is now dead." killed = "{0}{1}".format(self.fName, ToBkilledOrNotToBKilledThatIsTheQuestion) return killed Kewal = Person("Kewal", "Bitch-Boi", ["Maths"], True) Will = Person("Will", "The Bald Bean", ["Comp Sci"], False) Crumble = Dog("Crumble", "Wire-Haired Sausage") Loki = Dog("Loki", "Samoyed") Rollie = Dog("Rollie", "Sausage") Thor = Dog("Thor", "Samoyed") def main(): print(subjects, "\n") print(Kewal.killMyself()) print(Will.killMyself()) if __name__ == '__main__': main()
[ "will.green98@hotmail.com" ]
will.green98@hotmail.com
84aa481771111981f7f48f85cd2805feb3da8a50
c4526313117430d4e279ef11b98070d60a820e07
/FeatureExtractors/feature_extractor.py
606be6f07a6c54ff27c3e335c3460654db10991f
[]
no_license
Chzy0624/py_pdf_stm
1ae36c2df0f80f644b991edf183eab16c5a333ed
8fde14c2fe3e6486d8830414d79d48726d8c66ef
refs/heads/master
2023-05-05T04:06:17.698359
2019-10-22T05:48:24
2019-10-22T05:48:24
null
0
0
null
null
null
null
UTF-8
Python
false
false
11,417
py
import sys import traceback from pprint import pprint from typing import List, Dict, Any from DataSheetParsers.DataSheet import DataSheet from PinManager import PinManager from TableExtractor import TableExtractor, Table from Utils import is_numeric, is_dict, remove_units, replace_i, merge def convert_type(name: str, value): if type(value) == str: value = value.replace(',', '') value = value.strip('\n ') if 'KB' in name.upper(): name = remove_units(name, 'kb') if is_numeric(value): value = int(value) if 'MB' in name.upper(): name = remove_units(name, 'mb') if is_numeric(value): value = int(value) * 1024 elif type(value) == int: value *= 1024 if 'MHZ' in name.upper(): name = remove_units(name, 'mhz') if is_numeric(value): value = int(value) if type(value) == str: if 'KB' in value: value = replace_i(value, 'kb', '') if is_numeric(value): value = int(value) elif type(value) == int: pass else: value += 'KB' return name, value if 'MB' in value: value = replace_i(value, 'mb', '') if is_numeric(value): value = int(value) * 1024 elif type(value) == int: value *= 1024 else: value += 'MB' return name, value if 'MHZ' in value.upper(): value = replace_i(value, 'MHz', '') if is_numeric(value): value = int(value) elif type(value) == int: pass else: value += 'MHz' return name, value # UNIFIED NAMES # int_values = ['Flash memory', 'RAM', 'UART', 'SPI', 'Total GPIOS','CPU Frequency'] # if name in int_values: if type(value) != int and is_numeric(value): if type(value) == str: if not (value.lower() == 'no' or value.lower() == 'yes'): try: value = int(value) except Exception as ex: print('Failed to convert {} {} to int\n{}'.format(name, value, ex)) return name, value class FeatureListExtractor: # This class is adapted to STM def fix_name(self, name): name = "".join([part[::-1] for part in name[::1][::-1].split('\n')]) return self.config['corrections'].get(name, name) def __init__(self, controller: str, datasheet: DataSheet, config) -> None: """ Class for comparing multiple STM32 
controllers :type controller_list: list of stm controllers that you want to compare """ self.controller = controller self.config = config # type: Dict[str,Dict] self.datasheet = datasheet self.features_tables = [] # type: List[Table] self.features = {} # type: Dict[str,Dict] self.pin_data = {} # type: Dict[str, Dict[str, Any]] self.config_name = 'UNKNOWN CONTROLLER' self.mc_family = 'UNKNOWN' self.pin_manager = PinManager(self.pin_data,{}) self.post_init() def post_init(self): pass def process(self): self.extract_tables() self.extract_features() del self.features_tables self.extract_pinout() return self.features def extract_table(self, datasheet, page): print('Extracting table from {} page'.format(page + 1)) pdf_int = TableExtractor(str(datasheet.path)) try: table = pdf_int.parse_page(page) except Exception as ex: pass table = None return table def extract_tables(self): # OVERRIDE THIS FUNCTION FOR NEW CONTROLLER return def handle_feature(self, name, value): if '\u2013' in name: name = name.replace('\u2013', '-') if type(value) == str: if '\u2013' in value: value = value.replace('\u2013', '-') if '\n' in value: value = value.replace('\n', ' ') return [(name, value)] # Can be list of values and names def extract_features(self): controller_features_names = [] controller_features = {} feature_offset = 0 for table in self.features_tables: try: if not table.global_map: continue _, features_cell_span = table.get_cell_span(table.get_col(0)[0]) # EXTRACTING NAMES OF FEATURES if features_cell_span > 1: for row_id, row in table.global_map.items(): if row_id == 0: continue features = set(list(row.values())[:features_cell_span]) features = sorted(features, key=lambda cell: cell.center.x) texts = list(map(lambda cell: cell.clean_text, features)) controller_features_names.append(' '.join(texts)) else: texts = list(map(lambda cell: cell.clean_text, table.get_col(0)[1:])) controller_features_names.extend(texts) # EXTRACTING STM FEATURES current_stm_name = "" mcu_counter = {} name 
= 'ERROR' for col_id in range(features_cell_span, len(table.get_row(0))): features = table.get_col(col_id) for n, feature in enumerate(features): if n == 0: name = table.get_cell(col_id, 0).clean_text if name == current_stm_name: num = mcu_counter[current_stm_name] name += '-{}'.format(num) mcu_counter[current_stm_name] += 1 else: current_stm_name = name if not mcu_counter.get(current_stm_name, False): mcu_counter[current_stm_name] = 1 if not controller_features.get(name, False): controller_features[name] = {} continue feature_name = controller_features_names[feature_offset + n - 1] feature_value = feature.text for n, v in self.handle_feature(feature_name, feature_value): if n and v: n, v = convert_type(n, v) if controller_features[name].get(n, False): v = self.merge_features(controller_features[name].get(n), v) controller_features[name][n] = v else: controller_features[name][n] = v feature_offset = len(controller_features_names) except Exception as ex: sys.stderr.write("ERROR {}".format(ex)) traceback.print_exc() # FILL MISSING FIELDS for stm_name in controller_features.keys(): for stm_name2 in controller_features.keys(): if stm_name == stm_name2: continue if stm_name in stm_name2: for feature_name, value in controller_features[stm_name].items(): if controller_features[stm_name2].get(feature_name, False): continue else: controller_features[stm_name2][feature_name] = value self.features = controller_features return controller_features def extract_pinout(self): for package, pin_data in self.pin_data.items(): for mcu,mcu_features in self.features.items(): if package in mcu_features.get('PACKAGE',[]): if 'PINOUT' in self.features[mcu]: self.features[mcu]['PINOUT'][package]=pin_data else: self.features[mcu]['PINOUT'] = {package:pin_data} return self.pin_data def unify_names(self): unknown_names = {} for mc, features in self.features.items(): unknown_names[mc] = [] mc_features = self.features[mc].copy() mc_features = {k.upper(): v for k, v in mc_features.items()} for 
feature_name, features_value in features.items(): feature_name = feature_name.upper() if features_value: if self.config_name in self.config['unify']: unify_list = self.config['unify'][self.config_name] # type: Dict[str,str] unify_list = {k.upper(): v.upper() for k, v in unify_list.items()} known = True if feature_name not in unify_list: if feature_name not in unify_list.values(): known = False if feature_name not in unknown_names: unknown_names[mc].append(feature_name) if known: new_name = unify_list.get(feature_name, feature_name).upper() # in case name is already unified values = mc_features.pop(feature_name) new_name, values = convert_type(new_name, values) new_name = new_name.upper() if new_name in mc_features: mc_features[new_name] = self.merge_features(mc_features[new_name], values) else: mc_features[new_name] = values else: new_name = feature_name # in case name is already unified values = mc_features.pop(feature_name) new_name, values = convert_type(new_name, values) mc_features[new_name.upper()] = values else: unknown_names[mc].append(feature_name) self.features[mc] = mc_features for mc, features in unknown_names.items(): unknown_names = list(set(features)) if unknown_names: print('List of unknown features for', mc) print('Add correction if name is mangled') print('Or add unify for this feature') for unknown_feature in unknown_names: print('\t', unknown_feature) print('=' * 20) print() @staticmethod def merge_features(old, new): return merge(old, new) if __name__ == '__main__': datasheet = DataSheet(r"D:\PYTHON\py_pdf_stm\datasheets\stm32L\STM32L476.pdf") feature_extractor = FeatureListExtractor('STM32L476', datasheet, {}) feature_extractor.process() pprint(feature_extractor.features)
[ "med45c@gmail.com" ]
med45c@gmail.com
18a99599843103fa4fbf326fffab1bb55fabd9d9
d854b6c0e241b7c86d27c0a7fde8e64e48f59e52
/test1.py
774746ffa6e263614633bda83a645dd8641e4ebd
[]
no_license
zhoujingwhy/KNN
0776f64df04c574044d38833f8972bc99b68c470
65ee9fe0d8b5160cd0f0821aea38ecd206eb74d0
refs/heads/master
2020-03-13T05:33:58.541114
2018-05-25T10:48:03
2018-05-25T10:48:03
130,986,734
0
0
null
null
null
null
UTF-8
Python
false
false
2,178
py
import numpy as np import operator """ 函数说明:创建数据集 Returns: group - 数据集 labels - 分类标签 """ def createDataSet(): group=np.array([[1,101],[5,89],[108,5],[115,8]]) labels=['爱情片','爱情片','动作片','动作片'] return group,labels """ 函数说明:kNN算法,分类器 Parameters: inX - 用于分类的数据(测试集) dataSet - 用于训练的数据(训练集) labes - 分类标签 k - kNN算法参数,选择距离最小的k个点 Returns: sortedClassCount[0][0] - 分类结果 """ def classify0(inX,dataSet,labels,k): # numpy函数shape[0]返回dataSet的行数 dataSetSize=dataSet.shape[0] #在列向量方向上重复inX共1次(横向),行向量方向上重复inX共dataSetSize次(纵向) diffMat =np.tile(inX,(dataSetSize,1))-dataSet #二维特征相减后平方 sqDiffMat =diffMat**2 # sum()所有元素相加,sum(0)列相加,sum(1)行相加 sqDistances=sqDiffMat.sum(axis=1) #开方,计算出距离 distances=sqDistances**0.5 #返回distances中元素从小到大排序后的索引值 sortedDistIndices =distances.argsort() # 定一个记录类别次数的字典 classCount={} for i in range(k): # 取出前k个元素的类别 voteIlable=labels[sortedDistIndices[i]] # dict.get(key,default=None),字典的get()方法,返回指定键的值,如果值不在字典中返回默认值。 # 计算类别次数 classCount[voteIlable]=classCount.get(voteIlable,0)+1 # python3中用items()替换python2中的iteritems() # key=operator.itemgetter(1)根据字典的值进行排序 # key=operator.itemgetter(0)根据字典的键进行排序 # reverse降序排序字典 sortedClassCount=sorted(classCount.items(),key=operator.itemgetter(1),reverse=True) #返回次数最多的类别,即所要分类的类别 return sortedClassCount[0][0] if __name__ =='__main__': # 创建数据集 group,labels=createDataSet() test = [101, 20] # kNN分类 test_class = classify0(test, group, labels, 3) # 打印分类结果 print(test_class)
[ "zhoujingwhy@163.com" ]
zhoujingwhy@163.com
b057cf8004cf06aef806a822ce33652173f363f1
c706480a2b71881d9d34541251d483bc427b5f2e
/django_broker/urls.py
6254a95e4242279e725d729d1e4ec9d73d654c4a
[]
no_license
ccennis/django_broker
3a3350fa8648877eca50a983b4ff039db0cdec14
f759c774a1b34b3e80a241216e3b619fbe9e9b51
refs/heads/master
2020-04-10T15:02:48.775968
2019-06-17T16:46:31
2019-06-17T16:46:31
161,095,269
1
0
null
null
null
null
UTF-8
Python
false
false
239
py
#!/usr/bin/env python from django.urls import include, path from . import views urlpatterns = [ path('broker/', include('reindex.urls')), path('broker/', include('rebuild.urls')), path('', views.hello_world, name='hello'), ]
[ "carolinecennis@gmail.com" ]
carolinecennis@gmail.com
857fd7d31a75186a8008fd39c22ccda0b6e7a96d
3cb1bcb411d4a05c3ce8b276d4a65cecaf3e0f6a
/starline/publisher.py
ae3cd10f628a696ab4a30907913c22f0e1c6a568
[]
no_license
setazer/starline
4357dbf70d43572924d2307c81ff027c3543c259
c5e06e1e5b0227daa0fe26335c7ee05038bb6f26
refs/heads/master
2023-06-22T01:59:30.152240
2021-07-21T14:22:51
2021-07-21T14:22:51
378,434,618
0
0
null
null
null
null
UTF-8
Python
false
false
1,057
py
from channels import Channel from interfaces import MessageInterface from model import TelegramMessage, QueueItem from queue_providers import QueueProvider from storage_providers import StorageProvider class Publisher: def __init__(self, queue_provider: QueueProvider, history_provider: StorageProvider, channels: list[Channel], message_interface: MessageInterface): self.queue = queue_provider self.history = history_provider self.channels = channels self.output = message_interface def publish(self): queue_item: QueueItem = self.queue.get_item() queue_item.lock = True post = queue_item.post results = [channel.publish(post) for channel in self.channels if channel.enabled] if any(map(lambda r: r.success, results)): self.history.write(post) self.queue.remove(queue_item) else: self.output.send_message(TelegramMessage(msg=f'Не удалось запостить {post}')) queue_item.lock = False
[ "we.are@setazer.us" ]
we.are@setazer.us
57d665ccf751648900ac6a8db303fbee5f5019ce
f3d79f0ea8972a9296e7f6315ae6f632754beb61
/geo_google.py
b7b7c871f80aa413cbc9160b3e264f4a86217cca
[]
no_license
dionmartin/Geo-Google
434265e302fe0d267f5e4a34f24ef4f56c253f14
8774c0ab5255b760c6acdaa2b68ec3f28d0ef594
refs/heads/master
2021-01-12T02:17:48.926433
2017-01-10T03:53:25
2017-01-10T03:53:25
78,495,426
0
0
null
null
null
null
UTF-8
Python
false
false
1,142
py
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from itertools import groupby from datetime import datetime, timedelta from odoo import api, fields, models, _ from odoo.exceptions import UserError from odoo.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT from odoo.tools.misc import formatLang import odoo.addons.decimal_precision as dp from geopy.geocoders import Nominatim class history_detail(models.Model): _name = "history.detail" _description = "History Detail" latitute = fields.Float('Latitute' , digits=(16, 5)) longitude = fields.Float('Longitude' , digits=(16, 5)) address = fields.Char('Address') city = fields.Char('City') state = fields.Char('State') country = fields.Char('Country') @api.multi def check(self): geolocator = Nominatim() lat = self.latitute longi = self.longitude location = geolocator.reverse((lat, longi)) self.address = location.raw['display_name'] self.city = location.raw['address']['city'] self.state = location.raw['address']['state_district'] self.country = location.raw['address']['country']
[ "d.m.hamonangan@gmail.com" ]
d.m.hamonangan@gmail.com
1eac1dfad8fee38b34847d58779218ce40d9b312
97dfcf7f675ccad34004536ba8592c8aee8325ad
/premiumbody/asgi.py
c9ee1d3970f5c074dafd44aa27044494f6c96257
[]
no_license
Code-Institute-Submissions/MilestoneProject4-5
bb1045b37e38151f698a82858e18981ec4595558
4732689f01c850c17fb554c938d915da40c5d97e
refs/heads/master
2023-01-20T19:04:33.865082
2020-12-02T03:34:14
2020-12-02T03:34:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
399
py
""" ASGI config for premiumbody project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'premiumbody.settings') application = get_asgi_application()
[ "mendesf@hotmail.com" ]
mendesf@hotmail.com
5d1d805b29e5d4e0b47198f0a61dcf13a65915ba
e82c73e2590c6138f89c62db9cc327f2efceb95a
/src/Team/TeamTypes/Team.py
22e145bc96b3a550270c37f5e5fb44b0cb34f393
[]
no_license
dstseng/GameOrganizer
849d55443f8bd980d43853c27fc58974c89e0f86
e6987cde564290c4ad204e11e6423ef827e8635e
refs/heads/master
2021-01-20T19:53:39.641319
2016-05-31T01:39:49
2016-05-31T01:39:49
60,049,502
0
0
null
null
null
null
UTF-8
Python
false
false
437
py
__author__="alfaflight" __date__ ="$Apr 9, 2016 10:32:15 PM$" class Team: def __init__(self, List_Registration_teammates): self.__Int_numOfWins = 0 self.__List_Registration_teammates = List_Registration_teammates def addWin(self): self.__Int_numOfWins += 1 def getNumOfWins(self): return self.__Int_numOfWins def getTeammates(self): return List_Registration_teammates
[ "dstseng@gmail.com" ]
dstseng@gmail.com
62aafbdb22650f40b609eb82abfdd148b18ba3a7
64654842414a5bbffa2456f706c14d1a5a1dbae2
/autoarxiv/warden/scripts/emailer.py
0091e500d47aefcb749603575cbd4c06a4c8253d
[ "MIT" ]
permissive
Reslix/AutoArxiv
e25b1bdf94b9b01109bed7399c86da76a6df9f3a
96f57e687716c1b0d0786943fbc74bf2f4389da7
refs/heads/master
2021-01-20T11:41:36.617274
2018-01-23T00:54:34
2018-01-23T00:54:34
77,075,353
0
0
null
2017-02-03T05:48:27
2016-12-21T18:11:54
Python
UTF-8
Python
false
false
6,709
py
import re import email import imaplib from django.core.mail import send_mail from autoarxiv import settings from warden.models import Author, AuthorRating, Article, ArticleRating, Member from warden.scripts.data_connector import DataConnector def send_listing(e_mail, listing): """ Formats the sorted listing into some readable plaintext form. Hasn't been tested, so this will prove to be interesting. """ message = "\n" for i, msg in enumerate(listing): message = message + msg + '\n\n' message = message + """\n\n To update ratings for an article or author, send an email (not a reply!) to this sender address with ARTICLE or AUTHOR in the subject line. For articles, list line-by-line the article Arxiv ID as it came in the listing and an integer rating between 1 and 5, separated by a comma. If the article is not currently in the library it will be added. For authors, do the same with the author's name and have the rating added in the same way. Please make sure to use of the full scale range in your ratings library to help the ML aspects. If new users want to subscribe, they should email this address with SUBSCRIBE as the subject, and have <email>, <name> in the first line of the body. 
""" # len(listing-3) because of the extra header stuff we put in send_mail(str(len(listing) - 3) + ' New listings, ordered by relevance', message, settings.EMAIL_HOST_USER, [e_mail]) print("Sent listing to " + e_mail) def receive_emails(): try: mail = imaplib.IMAP4_SSL('imap.gmail.com') mail.login(settings.EMAIL_HOST_USER, settings.EMAIL_HOST_PASSWORD) except: print('Unable to connect to imap') mail.select('inbox') rawmessage = [] retcode, data = mail.search(None, '(UNSEEN)') for num in data[0].split(): typ, data = mail.fetch(num, '(RFC822)') msg = email.message_from_bytes(data[0][1]) #typ, data = mail.store(num, '+FLAGS', '\\Seen') rawmessage.append(msg) for message in rawmessage: header = email.header.make_header(email.header.decode_header(message['Subject'])) subject = str(header) sender = message['From'].split()[-1][1:-1] payload = [m.get_payload() for m in message.get_payload()][0] member = Member.objects.filter(email=sender) print("Updating preferences for: " + message['From']) if len(member) != 0: member = member[0] if subject == 'AUTHOR': body = payload.split('\n') for line in body: print(line) line = line.split(',') if len(line) == 2: if '@' in line[0]: author = Author.objects.filter(email=line[0]) else: author = Author.objects.filter(name=line[0]) arating = [] if len(author) != 0: author = author[0] arating = AuthorRating.objects.filter(member=member, author=author) else: author = Author(name=line[0]) author.save() if len(arating) != 0: arating = arating[0] arating.rating = int(line[1]) else: arating = AuthorRating(member=member, author=author, rating=int(line[1])) arating.save() elif subject == 'ARTICLE': body = payload.split('\n') for line in body: print(line) line = line.split(',') if len(line) == 2: article = Article.objects.filter(shortid=line[0]) if len(article) != 0: arating = ArticleRating.objects.filter(member=member, article=article[0]) if len(arating) != 0: arating = arating[0] arating.rating = int(line[1]) else: arating = 
ArticleRating(member=member, article=article[0], rating=int(line[1])) else: d = DataConnector() d.fetch_links(query=line[0]) d.fetch_pdfs() d.pdf_to_txt() d.save(add_new=False) article = d.articles[0] arating = ArticleRating(member=member, article=article, rating=int(line[1])) arating.save() elif subject == 'SUBSCRIBE': body = payload.split('\n')[0].split(',') if len(Member.objects.all().filter(name=body[1], email=body[0])) == 0: member = Member(name=body[1], email=body[0]) member.save() send_mail('You have subscribed!', """ To update ratings for an article or author, send an email (not a reply!) to this sender address with ARTICLE or AUTHOR in the subject line. For articles, list line-by-line the article Arxiv ID as it came in the listing and an integer rating between 1 and 5, separated by a comma. If the article is not currently in the library it will be added. For authors, do the same with the author's name and have the rating added in the same way. Please make sure to use of the full scale range in your ratings library to help the ML aspects.""", settings.EMAIL_HOST_USER, [sender]) mail.close()
[ "huashengz@gmail.com" ]
huashengz@gmail.com
f9a501c145dbd5a41701bcb08ac1c22014d598f6
e782950bb76c4dd295001f7760f42e04ceadfb1b
/tests/test_completion.py
6da2d9cdd703379d172e78b6479300256e4e92b0
[ "MIT" ]
permissive
h3xium/typer
2c3fc691c52a89997eb7db9267ed1fb12c9af800
31f7a44a467e6e3468434703d3c18961a746939f
refs/heads/master
2021-01-26T22:23:57.520688
2020-02-15T12:39:47
2020-02-15T12:39:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,456
py
import os import subprocess import sys from pathlib import Path import typer from typer.testing import CliRunner from first_steps import tutorial001 as mod runner = CliRunner() app = typer.Typer() app.command()(mod.main) def test_show_completion(): result = subprocess.run( [ "bash", "-c", f"{sys.executable} -m coverage run {mod.__file__} --show-completion", ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", env={**os.environ, "SHELL": "/bin/bash"}, ) assert "_TUTORIAL001.PY_COMPLETE=complete-bash" in result.stdout def test_install_completion(): bash_completion_path: Path = Path.home() / ".bash_completion" text = "" if bash_completion_path.is_file(): text = bash_completion_path.read_text() result = subprocess.run( [ "bash", "-c", f"{sys.executable} -m coverage run {mod.__file__} --install-completion", ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", env={**os.environ, "SHELL": "/bin/bash"}, ) new_text = bash_completion_path.read_text() bash_completion_path.write_text(text) assert "_TUTORIAL001.PY_COMPLETE=complete-bash" in new_text assert "completion installed in" in result.stdout assert "Completion will take effect once you restart the terminal." in result.stdout
[ "tiangolo@gmail.com" ]
tiangolo@gmail.com
2e02856b80b1efb6e70451bbb2ad42b1e3151417
538ac22016c4c8771e5b13f5e26688e2df72ae31
/CSS/frib-css-xy-diag.py
3aae9e9f743925fb9c05a9d11d01da1bc3988fa2
[]
no_license
cyjwong/ScriptsinProgress
69eab942b33e56d3eb3ef9a9f8712084fff86d30
7847e231bf54c27fc649c5ffdb25391069de7185
refs/heads/master
2020-05-21T16:43:12.813876
2016-09-20T04:39:06
2016-09-20T04:39:06
60,614,955
0
0
null
null
null
null
UTF-8
Python
false
false
58,826
py
# Diagnostics for FRIB front-end simulations using the x-y slice model # Notes: # * Slice model is not intrinsically well adapted to multi-species # simulations so some diagnostics repeat (for clarity) what can be # generated within Warp with other methods. # * Model allows easy generalization to include diagnostic quantities not # in the usual Warp suite. ############################################################################## # Begin Inputs ############################################################################## # Diagnostic Parameters # Diagnostics are grouped into several classes: # - Particle: Snapshot plots of distribution function projections # - Field: Snapshot plots of self fields # - History: History plots on the evolution of moments and particle counts # accumulated as the simulation advances. # --- set max simulation step for diagnostic setup max_diag_step = 1.e10 # --- set history diagnostic and moment accumulations ds_diag = 1.*cm top.nhist = max(1,nint(ds_diag/wxy.ds)) # step interval for histories top.itmomnts[0:3] = [0,max_diag_step,top.nhist] # do loop ranges for moments # and status writes to tty # --- Plot limits for particle phase space plots. If lframe = true (default # false) diagnostics such as ppxxp for x-x' particle phase space will # use these ranges. # max/min x,y plot coordinates (m) # max/min x',y' plot coordinates (rad) #l_diag = r_p l_diag = 75*mm top.xplmax = l_diag top.xplmin = -l_diag top.yplmax = l_diag top.yplmin = -l_diag top.xpplmax = 75.*mr top.xpplmin = -top.xpplmax top.ypplmax = top.xpplmax top.ypplmin = -top.xpplmax # --- Color palette for phase-space plots (comment for default) # Search for .gp suffix files in the Warp scripts directory for possible # choices. 
Some useful ones include: # earth.gp (default) heat.gp (heat) # gray.gp (gray scale) rainbow.gp (rainbow) #palette("heat.gp") # --- Set a chop factor for particle phase space plots to avoid plotting # too many particles (large storage and features will obscure). Set # for approx 10 K particles per species plotted. chop_fraction = 10.e3/float(top.npmax) # Particle phase space diagnostics. # * The list diag_step_part contains all steps where diagnostics in # diag_part() are made. # * The list can contain repeated elements and need not be ordered. diag_part_z = array([ z_launch, d5p1_zs, (d5p1_zs+d5p1_zc)/2, d5p1_zc, (d5p1_zc+d5p1_ze)/2, d5p1_ze, valve_zc + 2*mm, q7t1p1_zc, q7t1_mid_12 + 2*mm, q7t1p2_zc, q7t1_mid_23 + 2*mm, q7t1p3_zc, (q7t2p1_zc + q7t1p3_zc)/2, q7t2p1_zc, (q7t2p1_zc + q7t2p2_zc)/2, q7t2p2_zc, (q7t2p2_zc + q7t2p3_zc)/2, q7t2p3_zc, d5p2_zs, (d5p2_zs+d5p2_zc)/2, d5p2_zc, (d5p2_zc+d5p2_ze)/2, d5p2_ze, z_adv]) diag_part_z_name = [ "Initial Launch", "D5 Dipole #1: z-start", "D5 Dipole #1: 1/4 of dipole length", "D5 Dipole #1: z-Center", "D5 Dipole #1: 3/4 of dipole length", "D5 Dipole #1: z-end", "after gate valve", "1st Q7 ESQ Triplet #1: z-Center", "after slits between Q7 #1 and #2", "1st Q7 ESQ Triplet #2: z-Center", "after slits between Q7 #2 and #3", "1st Q7 ESQ Triplet #3: z-Center", "Four-jaw collimator", "2nd Q7 ESQ Triplet #1: z-Center", "2nd Q7 ESQ Triplet between #1 and #2", "2nd Q7 ESQ Triplet #2: z-Center", "2nd Q7 ESQ Triplet between #2 and #3", "2nd Q7 ESQ Triplet #3: z-Center", "D5 Dipole #2: z-start", "D5 Dipole #2: 1/4 of dipole length", "D5 Dipole #2: z-Center", "D5 Dipole #2: 3/4 of dipole length", "D5 Dipole #2: z-end", "Final position" ] diag_part_step = nint((diag_part_z-z_launch)/wxy.ds) diag_part_z_names = {diag_part_step[i]:diag_part_z_name[i] for i in range(len(diag_part_step))} # Field diagnostics. # * The list diag_step_field containins all steps where # diagnostics in diag_field() are made. 
# * The list can contain repeated elements and need not be ordered. diag_field_z = array([ z_launch, d5p1_zc, z_adv ]) diag_field_z_name = [ "Initial Launch", "D5 Dipole #1: z-Center", "Final position" ] diag_field_step = nint((diag_field_z-z_launch)/wxy.ds) diag_field_z_names = {diag_field_step[i]:diag_field_z_name[i] for i in range(len(diag_field_step))} # History diagnostics. # * Can be made at intermediate stages of the # run as well as at the end. # * The list diag_step_hist contains all # steps where diagnostics in diag_hsit() are made. # * The list can contain repeated elements and need not be ordered. diag_hist_z = array([z_adv]) #array([gag_col_zs,z_adv]) diag_hist_step = nint((diag_hist_z-z_launch)/wxy.ds) ###################################################################################################### # End Inputs ###################################################################################################### # Diagnostic plot function of [B rho] vs Q/A for species. # * Should work correctly at any point in the simulation while the beam # accelerates. def plt_diag_bro(label=None): if label == None: label = " " brho_min = largepos brho_max = -largepos for ii in sp.keys(): s = sp[ii] js = s.js # weight = sum(s.sw*s.w) # total weight # vbeam = sum( (s.sw*s.w)*s.getvz() )/weight # avg axial velocity gammabeam = 1./sqrt(1.-(vbeam/clight)**2) # gamma from avg axial velocity brho = s.mass*gammabeam*vbeam/s.charge # rigidity # brho_min = min(brho,brho_min) brho_max = max(brho,brho_max) # plt(ii,sp_qovm[ii],brho,tosys=1,color=s.color) # [qovm_min,qovm_max] = [minnd(sp_qovm.values()),maxnd(sp_qovm.values())] qovm_pad = 0.1*(qovm_max - qovm_min) brho_pad = 0.1*(brho_max - brho_min) # limits(qovm_min-qovm_pad,qovm_max+qovm_pad,brho_min-brho_pad,brho_max+brho_pad) ptitles(label,"Q/A","[B rho] [Tesla-m]",) fma() # Potential profile plot diagnostic for potential along x-y axes # * Primarily for initial beam but should work at any point in simulation. 
def diag_plt_phi_ax(xmax=None,label=None):
    # Plot the self-field electrostatic potential phi along the x-axis
    # (blue/default) and y-axis (red) through the mesh center.
    #   xmax  -- plot extent in x,y [m]; defaults to the full mesh extent.
    #   label -- plot title; defaults to a generic label.
    if xmax == None: xmax = max(w3d.xmesh.max(),w3d.ymesh.max())
    ixmax = sum(where(w3d.xmesh < xmax, 1, 0))
    iymax = sum(where(w3d.ymesh < xmax, 1, 0))
    if label == None: label = "Beam Potential at y,x = 0 b,r"
    # --- mesh indices of the x = 0 and y = 0 grid planes
    ix_cen = sum(where(w3d.xmesh < 0., 1, 0))
    iy_cen = sum(where(w3d.ymesh < 0., 1, 0))
    # --- lineouts of phi along the two axes and the minimum value at the
    #     plot edge (used as the lower y-axis plot limit)
    phix = getphi(iy=iy_cen)
    phiy = getphi(ix=ix_cen)
    phimin = min(phix[ixmax],phiy[iymax])
    #
    plg(phix,w3d.xmesh/mm)
    plg(phiy,w3d.ymesh/mm,color="red")
    ptitles(label,"x,y [mm]","phi [V]", )
    limits(-xmax/mm,xmax/mm,phimin,'e')

# Augmented History Diagnostics for xy Slice Model
# * Some by species, some all species
# * Flag variables with prefix hl_ for "history local"

# --- History variable accumulation arrays
hl_lenhist_max = 10000                          # max accumulation points
#
hl_zbeam = fzeros(hl_lenhist_max)               # z of beam at hl_ diagnostic accumulations (redundant with top.hzbeam)
#
hl_vbeam = fzeros([hl_lenhist_max,top.ns])      # axial beam velocity [m/s]
hl_ekin  = fzeros([hl_lenhist_max,top.ns])      # axial beam NR kinetic energy [eV]
hl_brho  = fzeros([hl_lenhist_max,top.ns])      # rigidity [B rho]_js [Tesla-m]
#
hl_xrms  = fzeros([hl_lenhist_max,top.ns])      # rms radius sqrt( <x*x>_js )
hl_yrms  = fzeros([hl_lenhist_max,top.ns])      # rms radius sqrt( <y*y>_js )
hl_rrms  = fzeros([hl_lenhist_max,top.ns])      # rms radius sqrt( <r*r>_js )
#
hl_xrmst = fzeros(hl_lenhist_max)               # Total species measures of above
hl_yrmst = fzeros(hl_lenhist_max)
hl_rrmst = fzeros(hl_lenhist_max)
#
hl_spnum  = fzeros([hl_lenhist_max,top.ns])     # number active simulation particles
hl_spnumt = fzeros(hl_lenhist_max)              # number active simulation particles (all species)
#
hl_ibeam_p  = fzeros([hl_lenhist_max,top.ns])   # beam current (particle)
hl_ibeam_e  = fzeros([hl_lenhist_max,top.ns])   # beam current (electrical)
hl_ibeam_pt = fzeros([hl_lenhist_max])          # total beam current (particle)
hl_ibeam_et = fzeros([hl_lenhist_max])          # total beam current (electrical)
#
hl_lambda_p = fzeros([hl_lenhist_max,top.ns])   # line charge (particle)
hl_lambda_e = fzeros([hl_lenhist_max,top.ns])   # line charge (electrical)
#
#hl_ptheta = fzeros([hl_lenhist_max,top.ns])    # canonical angular momentum <P_theta>_j (nonlinear appl field version)
#hl_pth    = fzeros([hl_lenhist_max,top.ns])    # <P_theta>_j in emittance units <P_theta>_j/(gamma_j*beta_j*m_j*c)
#hl_pthn   = fzeros([hl_lenhist_max,top.ns])    # <P_theta>_j in norm emittance units <P_theta>_j/(m_j*c)
#
#hl_ptheta_l = fzeros([hl_lenhist_max,top.ns])  # Same canonical angular momentum measures with
#hl_pth_l    = fzeros([hl_lenhist_max,top.ns])  # linear applied magnetic field approximation.
#hl_pthn_l   = fzeros([hl_lenhist_max,top.ns])  # (redundant with above for linear lattice)
#
hl_lz   = fzeros([hl_lenhist_max,top.ns])       # mechanical angular momentum
hl_krot = fzeros([hl_lenhist_max,top.ns])       # rotation wavenumber
hl_lang = fzeros([hl_lenhist_max,top.ns])       # Larmor rotation angle (from initial zero value)
#
hl_epsx = fzeros([hl_lenhist_max,top.ns])       # rms x-emittance (usual version)
hl_epsy = fzeros([hl_lenhist_max,top.ns])       # rms y-emittance (usual version)
#
hl_epsxn = fzeros([hl_lenhist_max,top.ns])      # rms normalized x-emittance (usual version)
hl_epsyn = fzeros([hl_lenhist_max,top.ns])      # rms normalized y-emittance (usual version)
#
hl_epsr  = fzeros([hl_lenhist_max,top.ns])      # rms radial emittance (envelope model version)
hl_epsrn = fzeros([hl_lenhist_max,top.ns])      # rms normalized radial emittance (envelope model version)
#
hl_epspv  = fzeros([hl_lenhist_max,top.ns])     # rms total phase volume emittance (envelope model sense)
hl_epspvn = fzeros([hl_lenhist_max,top.ns])     # rms normalized total phase volume emittance (envelope model sense)
#
hl_temp = fzeros([hl_lenhist_max,top.ns])       # Effective transverse ion temperature measure [eV]
#
hl_Qperv = fzeros([hl_lenhist_max,top.ns])      # Generalized perveance Q_js for species: note matrix perv
                                                #   Q_js,s calculable from this and line-charge densities [1]
hl_neutf = fzeros([hl_lenhist_max,top.ns])      # Neutralization factor [1]
hl_dz = top.nhist*wxy.ds # Axial step size between diagnostic accumulations # ---- Function to Fill Auxillary History Arrays # * Install after step in particle advance cycle @callfromafterstep def diag_hist_hl(): # check step in history accumulation cycle if top.it%top.nhist != 0: return hl_zbeam[top.jhist] = top.zbeam # z location of diagnostic accumulations # accumulate history diagnostics by species weightt_work = 0. xrmst_work = 0. yrmst_work = 0. rrmst_work = 0. for ii in sp.keys(): # --- species info and index js s = sp[ii] js = s.js # --- species weight: (real particle per macroparticle)/meter weight = sum(s.sw*s.w) # --- <v_z>_js, gamma_js and [B rho]_js calculated from result vbeam = sum( (s.sw*s.w)*s.getvz() )/weight gammabeam = 1./sqrt(1.-(vbeam/clight)**2) brho = s.mass*gammabeam*vbeam/s.charge hl_vbeam[top.jhist,js] = vbeam hl_brho[top.jhist,js] = brho # # --- species quantities for later use # --- avg_rsq = <r*r>_js r = s.getr() rsq = r*r rsq_wsum = sum( (s.sw*s.w)*rsq ) avg_rsq = rsq_wsum/weight # --- avg_xyp = <x*y'>_js and avg_yxp = <y*x'>_js avg_xyp = sum( (s.sw*s.w)*s.getx()*s.getyp() )/weight avg_yxp = sum( (s.sw*s.w)*s.gety()*s.getxp() )/weight # --- avg_xpy = <x*p_y>_js and avg_ypx = <y*p_x>_js # * Relativistically correct here avg_xpy = s.mass*sum( (s.sw*s.w)*s.getx()*s.getuy() )/weight avg_ypx = s.mass*sum( (s.sw*s.w)*s.gety()*s.getux() )/weight # --- applied field B_z(r=0,z) at z location of beam bz0 = getappliedfields(x=0.,y=0.,z=top.zbeam)[5] # --- Axial kinetic energy [eV], ekin_js, NR calcuation hl_ekin[top.jhist,js] = (0.5*s.mass*sum( (s.sw*s.w)*s.getvz()**2 )/weight)/jperev # s.mass*clight**2*(gammabeam - 1.)/jperev # --- rms x = <x*x>_js xsq_wsum = sum( (s.sw*s.w)*s.getx()**2 ) hl_xrms[top.jhist,js] = sqrt( xsq_wsum/weight ) # --- rms y = <y*y>_js ysq_wsum = sum( (s.sw*s.w)*s.gety()**2 ) hl_yrms[top.jhist,js] = sqrt( ysq_wsum/weight ) # --- rms r = <r*r>_js hl_rrms[top.jhist,js] = sqrt( avg_rsq ) # --- Simulation Particle Number 
hl_spnum[top.jhist,js] = s.getn() # --- Current, electrical, Ie_js [A] hl_ibeam_e[top.jhist,js] = s.charge*sum( (s.sw*s.w)*s.getvz() ) # slice code weight is particles/meter # --- Current, particle, Ip_js [A] # * Use way to calculate to remove neutralization factor # * Formula as given approx (paraxial) using appropriate weights hl_ibeam_p[top.jhist,js] = s.charge*s.sw*(s.vbeam0/vbeam)*sum( s.getvz() ) # --- line charge Lambda_js hl_lambda_p[top.jhist,js] = hl_ibeam_p[top.jhist,js]/vbeam hl_lambda_e[top.jhist,js] = hl_ibeam_e[top.jhist,js]/vbeam # --- Mechanical angular momentum: <x*y'>_js - <y*x'>_js hl_lz[top.jhist,js] = avg_xyp - avg_yxp # --- Canonical angular momentum <P_theta>_js # Notes: * Uses A_theta via getatheata() consistently with linear/nonlinear elements. #hl_ptheta[top.jhist,js] = avg_xpy - avg_ypx + sum( (s.sw*s.w)*s.charge*r*getatheta(r) )/weight # --- Normalized canonical angular momentum in emittance units. <P_theta>_js/(m_js*c) # * <P_theta>_j/(m_j*c) in envelope model scales as a normalized emittance # and should not vary with acceleration with linear forces. # * This employs the nonlinear definition of P_theta if the lattice is nonlinear ! #hl_pthn[top.jhist,js] = hl_ptheta[top.jhist,js]/(s.mass*clight) # --- Canonical angular momentum of species in emittance units #hl_pth[top.jhist,js] = hl_pthn[top.jhist,js]/(gammabeam*(vbeam/clight)) # --- Canonical angular momentum in linear applied field approx (all 3 versions above) # * These are redundant in linear field lattice # * Use _l for "linear" flag #hl_ptheta_l[top.jhist,js] = avg_xpy - avg_ypx + sum( (s.sw*s.w)*(s.charge*bz0/2.)*avg_rsq )/weight #hl_pthn_l[top.jhist,js] = hl_ptheta_l[top.jhist,js]/(s.mass*clight) #hl_pth_l[top.jhist,js] = hl_pthn_l[top.jhist,js]/(gammabeam*(vbeam/clight)) # --- rms x- and y-emittances: account for factor of 4 diff between Warp rms edge and rms measures hl_epsx[top.jhist,js] = top.hepsx[0,top.jhist,js]/4. hl_epsy[top.jhist,js] = top.hepsy[0,top.jhist,js]/4. 
# --- normalized rms x- and y-emittances: paraxial equivalent version hl_epsxn[top.jhist,js] = (gammabeam*(vbeam/clight))*hl_epsx[top.jhist,js] hl_epsyn[top.jhist,js] = (gammabeam*(vbeam/clight))*hl_epsy[top.jhist,js] # --- rms radial thermal emittance eps_r_js as derived in envelope model: # * Warp accumulation used to extract has a factor of 2 diference from rms envelope model # due to use of an "edge" measure. Note: this is different than the factor of 4 in epsx etc. hl_epsr[top.jhist,js] = top.hepsr[0,top.jhist,js]/2. # --- rms normalized radial thermal emittance epsn_r_js as derived in envelope model hl_epsrn[top.jhist,js] = (gammabeam*(vbeam/clight))*hl_epsr[top.jhist,js] # --- rms total phase volume emittance including radial thermal and canonical angular momentum # contributions based on envelope model intrpretation of total phase-space area. #hl_epspv[top.jhist,js] = sqrt( (hl_epsr[top.jhist,js])**2 + (hl_pth[top.jhist,js])**2 ) # --- rms normalized total phase volume emittance #hl_epspvn[top.jhist,js] = sqrt( (hl_epsrn[top.jhist,js])**2 + (hl_pthn[top.jhist,js])**2 ) # --- ion temperature calculated from emittance [eV] hl_temp[top.jhist,js] = hl_ekin[top.jhist,js]*hl_epsr[top.jhist,js]**2/dvnz(hl_rrms[top.jhist,js]**2) # --- Perveance, NR formula for species # Note: * Define bare ... not accounting for neutralization fractions. # Factor (s.charge/echarge) = Q accounts for charge state with particle line-charge to # get bare (unneutralized) electrical line charge. # * This is Q_js NOT the matrix perveance Q_j,s in the envelope model notes. 
# * Envelope model Q_js can be obtained from Q_j and line charges lambda_j: no need to save hl_Qperv[top.jhist,js] = s.charge*(s.charge/echarge)*hl_lambda_p[top.jhist,js]/(2.*pi*eps0*s.mass*vbeam**2) # --- Ion rho electron neutralization factor [1] = No space-charge, [0] full space-charge hl_neutf[top.jhist,js] = rho_neut_f(top.zbeam,ii) # --- Rotation wavenumber hl_krot[top.jhist,js] = hl_lz[top.jhist,js]/dvnz(avg_rsq) # --- Larmor Rotation angle: integrate from previous step if top.jhist == 0: hl_lang[0,js] = 0. # initial condition of zero angle else: hl_lang[top.jhist,js] = hl_lang[top.jhist-1,js] + 0.5*hl_dz*(hl_krot[top.jhist-1,js]+hl_krot[top.jhist,js]) # --- total (all species) accumulations weightt_work = weightt_work + weight xrmst_work = xrmst_work + xsq_wsum yrmst_work = yrmst_work + ysq_wsum rrmst_work = rrmst_work + rsq_wsum # --- total number of simulation particles hl_spnumt[top.jhist] = float(sum(hl_spnum[top.jhist,:])) # --- total currents hl_ibeam_pt[top.jhist] = sum(hl_ibeam_p[top.jhist,:]) hl_ibeam_et[top.jhist] = sum(hl_ibeam_e[top.jhist,:]) # --- total species rms measures hl_xrmst[top.jhist] = sqrt( xrmst_work/weightt_work ) hl_yrmst[top.jhist] = sqrt( yrmst_work/weightt_work ) hl_rrmst[top.jhist] = sqrt( rrmst_work/weightt_work ) # Particle Phase-Space Diagnostic Functions # * Make specified plots at location of simulation where diag_part() is called. 
### Make phase space projections of individual species using the syntax "ppxxp(js=js)" instead of "s.ppxxp" ### The latter had trouble with the argument ' slope="auto" ' def diag_part(plt_xy=False,plt_xxp=False,plt_yyp=False,plt_xpyp=False, plt_trace=False, plt_denxy=False, plt_denr=False): print "Making particle diagnostic plots" # try: z_label = diag_part_z_names[top.it] except: z_label = "" # # --- x-y projection if plt_xy: # --- All Species # Caution: js=-1 with density plot will just overlay species contour plots #ppxy(js=-1,lframe=true,chopped=chop_fraction,color='density',ncolor=25, # titles=false,yscale=1./mm,xscale=1./mm) ppxy(js=-1,lframe=true,chopped=chop_fraction,titles=false,yscale=1./mm,xscale=1./mm) ptitles("x-y Phase Space: All Species, z = %5.2f m"%(top.zbeam), "x [mm]","y [mm]",z_label) fma() # --- Target Species lab = "" for ii in sp_target: s = sp[ii] co = s.color lab+= ii + "("+co+"), " js = s.js ppxy(js=js,lframe=true,chopped=chop_fraction,titles=false,yscale=1./mm,xscale=1./mm,color=co) ptitles("x-y Phase Space: "+lab+" z = %5.2f m"%(top.zbeam),"x [mm]","y [mm]",z_label) fma() # --- x-x' projection if plt_xxp: # --- All Species # Caution: js = -1 with density plot will overlay species contour plots #ppxxp(js = -1,lframe=true,chopped=chop_fraction,slope='auto',color='density',ncolor=25, # titles=false,yscale=1./mr,xscale=1./mm) ppxxp(js = -1,lframe=true,chopped=chop_fraction,slope='auto',titles=false,yscale=1./mr,xscale=1./mm) ptitles("x-x' Phase Space: All Species, z = %5.2f m"%(top.zbeam),"x [mm]","x' [mrad]",z_label) fma() # --- Target Species lab = "" for ii in sp_target: s = sp[ii] co = s.color lab+= ii + "("+co+"), " js = s.js ppxxp(js=js,lframe=true,chopped=chop_fraction,slope='auto',titles=false,yscale=1./mr,xscale=1./mm,color=co) ptitles("x-x' Phase Space: "+lab+" z = %5.2f m"%(top.zbeam),"x [mm]","x' [mrad]",z_label) fma() # --- y-y' projection if plt_yyp: # --- All Species # Caution: js=-1 with denisty plot will overlay species 
contour plots #ppyyp(js=-1,lframe=true,chopped=chop_fraction,slope='auto',color='density',ncolor=25, # titles=false,yscale=1./mr,xscale=1./mm) ppyyp(js=-1,lframe=true,chopped=chop_fraction,slope='auto',color='density',ncolor=25, titles=false,yscale=1./mr,xscale=1./mm) ptitles("y-y' Phase Space: All Species, z = %5.2f m"%(top.zbeam), "y [mm]","y' [mrad]",z_label) fma() # --- Target Species lab = "" for ii in sp_target: s = sp[ii] co = s.color lab+= ii + "("+co+"), " js = s.js ppyyp(js=js,lframe=true,chopped=chop_fraction,slope='auto',titles=false,yscale=1./mr,xscale=1./mm,color=co) ptitles("y-y' Phase Space: "+lab+" z = %5.2f m"%(top.zbeam),"y [mm]","y' [mrad]",z_label) fma() # --- x'-y' projection if plt_xpyp: # --- All Species # Caution: js=-1 with density plot will overlay species countours #ppxpyp(js=-1,lframe=true,chopped=chop_fraction,slope='auto',color='density',ncolor=25, # titles=false,yscale=1./mr,xscale=1./mr) ppxpyp(js=-1,lframe=true,chopped=chop_fraction,slope='auto',titles=false,yscale=1./mr,xscale=1./mr) ptitles("x'-y' Phase Space: All Species, z = %5.2f m"%(top.zbeam),"x' [mrad]","y' [mrad]",z_label) fma() # --- Target Species lab = "" for ii in sp_target: s = sp[ii] co = s.color lab+= ii + "("+co+"), " js = s.js ppxpyp(js=js,lframe=true,chopped=chop_fraction,slope='auto',titles=false,yscale=1./mr,xscale=1./mm,color=co) ptitles("x'-y' Phase Space: "+lab+" z = %5.2f m"%(top.zbeam),"x' [mrad]","y' [mrad]",z_label) fma() # --- x-y, x-x', y-y', x'-y' projections, 4 to a page (trace-space) if plt_trace: # --- All Species pptrace(lframe=true,chopped=chop_fraction,slope='auto',color='density',ncolor=25) fma() # --- charge density on x and y axes if plt_denxy: rho_sc = 1. 
ix_cen = sum(where(w3d.xmesh < 0.,1,0)) iy_cen = sum(where(w3d.ymesh < 0.,1,0)) # --- All Species rho_x = getrho(iy=iy_cen) rho_y = getrho(ix=ix_cen) # plg(rho_x/rho_sc,w3d.xmesh/mm) if w3d.l4symtry: plg(rho_x/rho_sc,-w3d.xmesh/mm) plg(rho_y/rho_sc,w3d.ymesh/mm,color="red") if w3d.l4symtry or w3d.l2symtry: plg(rho_y/rho_sc,-w3d.ymesh/mm,color="red") ptitles("Charge Density: All Species, on x[b], y[r] Axes: z = %5.2f m"%(top.zbeam), "x,y [mm]","Density [arb units]",z_label) fma() # --- Target Species: species.get_density() returns density for ii in sp_target: s = sp[ii] co = s.color den = s.get_density()/cm**3 plg(den[:,iy_cen],w3d.xmesh/mm) if w3d.l4symtry: plg(den[:,iy_cen],-w3d.xmesh/mm) plg(den[ix_cen,:],w3d.ymesh/mm,color="red") if w3d.l4symtry or w3d.l2symtry: plg(den[ix_cen,:],-w3d.ymesh/mm,color="red") ptitles("Density: "+ii+" on x[b], y[r] Axes: z = %5.2f m"%(top.zbeam), "x,y [mm]","Density [#/cm^3]",z_label) fma() # --- charge density on radial mesh if plt_denr: # --- radial mesh reflecting x-y grid structure to illustrate simulation noise nr = nint(sqrt(w3d.nx/(2.*sym_x)*w3d.ny/(2.*sym_y))) rmax = sqrt(w3d.xmmax*w3d.ymmax) dr = rmax/nr rmesh = linspace(0.,rmax,num=nr+1) # sp_list = sp_target #+ ["All"] ns = len(sp_list) # --- density as a function or r on mesh array den = zeros(nr+1) # weightr = zeros(nr+1) count = zeros(nr+1) # --- for all species on mesh for ii in sp.keys(): s = sp[ii] # np = s.getn() rp = s.getr() wp = s.getweights() # deposgrid1d(1,np,rp,wp,nr,weightr,count,0.,rmax) # den[1:nr+1] = weightr[1:nr+1]/(2.*pi*dr*rmesh[1:nr+1]) den[0] = den[1] # set origin by next grid up to remove distraction # plg(den/cm**3, rmesh/mm) # pos axis plg(den/cm**3,-rmesh/mm) # neg axis ptitles("Radial Number Density: All Species, z = %5.2f m"%(top.zbeam),"radius r [mm]","rho [particles/cm**3]",z_label) ir = min(nr,sum(where(den>0,1,0))) # index farthest radial extent of rho in radial mesh assuming no halo rmmax = max(1.2*rmesh[ir],0.01) # set curoff to contain 
radial density rmmax = cm*nint(rmmax/cm + 0.5) # round up to nearest cm to contain plot denmax = 1.2*maxnd(den) limits(-rmmax/mm,rmmax/mm,0.,denmax/cm**3) fma() # --- for all species (common log scale) for ii in sp.keys(): s = sp[ii] co = s.color # np = s.getn() rp = s.getr() wp = s.getweights() # weightr = zeros(nr+1) # reset for clean accumulation/count with itask = 1 count = zeros(nr+1) deposgrid1d(1,np,rp,wp,nr,weightr,count,0.,rmax) # den[1:nr+1] = weightr[1:nr+1]/(2.*pi*dr*rmesh[1:nr+1]) den[0] = den[1] # set origin by next grid up to remove distraction (origin location high noise) # plg(den/cm**3, rmesh/mm,color=co) plg(den/cm**3,-rmesh/mm,color=co) # ptitles("Radial Number Density: All species, z = %5.2f m"%(top.zbeam),"radius r [mm]","rho [particles/cm**3]",z_label) limits(-rmmax/mm,rmmax/mm,1.e-4*denmax/cm**3,denmax/cm**3) logxy(0,1) # specify log scale on y-axis fma() # --- for target species on mesh for ii in sp_target: s = sp[ii] co = s.color lab = ii + "("+co+"), " # np = s.getn() rp = s.getr() wp = s.getweights() # weightr = zeros(nr+1) # reset for clean accumulation/count with itask = 1 count = zeros(nr+1) deposgrid1d(1,np,rp,wp,nr,weightr,count,0.,rmax) # den[1:nr+1] = weightr[1:nr+1]/(2.*pi*dr*rmesh[1:nr+1]) den[0] = den[1] # set origin by next grid up to remove distraction # plg(den/cm**3, rmesh/mm,color=co) plg(den/cm**3,-rmesh/mm,color=co) ptitles("Radial Number Density: "+lab+" z = %5.2f m"%(top.zbeam),"radius r [mm]","rho [particles/cm**3]",z_label) ir = sum(where(den>0,1,0)) # index farthest radial extent of rho in radial mesh assuming no halo rmmax = max(1.2*rmesh[ir],0.01) # set curoff to contain radial density rmmax = cm*nint(rmmax/cm + 0.5) # round up to nearest cm to contain plot denmax = 1.2*maxnd(den) limits(-rmmax/mm,rmmax/mm,0.,denmax/cm**3) fma() # Field Diagnostic Functions # * Make specified plots at location of simulation where diag_field() is called. 
def diag_field(plt_pa=False,plt_pc=False,plt_pc_xy=False):
  """Make field diagnostic plots at the current simulation location.

  Each flag switches on one family of plots; every plot frame is closed
  with fma():
    plt_pc    -- self-field electrostatic potential contours with conductors
    plt_pc_xy -- potential contours overlaid with particle x-y scatter
                 (all species, then target species)
    plt_pa    -- electrostatic potential along the principal x,y axes
                 (full mesh extent, then zoomed to ~2*x_rms)

  Reads module-level Warp state: top, sp, sp_target, mm, chop_fraction,
  diag_field_z_names, diag_plt_phi_ax.  Returns nothing; output is plot
  frames only.
  """
  print "Making field diagnostic plots"
  #
  # --- Frame label keyed by step number; blank if no label registered
  #     for this step (lookup failure is expected and harmless).
  try:
    z_label = diag_field_z_names[top.it]
  except:
    z_label = ""
  # --- self-field electrostatic potential
  if plt_pc:
    pfxy(cond=true,titles=false,yscale=1./mm,xscale=1./mm,iz = 0)
    ptitles("Self-Field Potential: z = %5.2f"%(top.zbeam),"x [mm]","y [mm]",z_label)
    fma()
  # --- self-field electrostatic potential and particles together
  if plt_pc_xy:
    # --- All particle species included
    pfxy(cond=true,titles=false,yscale=1./mm,xscale=1./mm)
    # Caution: js=-1 with density plot will superimpose species contours
    #ppxy(js=-1,lframe=true,chopped=chop_fraction,color='density',ncolor=25,
    #     titles=false,yscale=1./mm,xscale=1./mm)
    ppxy(js=-1,lframe=true,chopped=chop_fraction,titles=false,yscale=1./mm,xscale=1./mm)
    ptitles("Self-Field Potential: z = %5.2f"%(top.zbeam),"x [mm]","y [mm]",z_label)
    fma()
    # --- Target particle species
    lab = ""
    pfxy(cond=true,titles=false,yscale=1./mm,xscale=1./mm)
    for ii in sp_target:
      s  = sp[ii]
      co = s.color
      lab+= ii + "("+co+"), "
      # NOTE(review): the scatter call is issued twice per species in the
      # original source -- presumably to darken the points; confirm intent.
      s.ppxy(lframe=true,chopped=chop_fraction,titles=false,yscale=1./mm,xscale=1./mm)
      s.ppxy(lframe=true,chopped=chop_fraction,titles=false,yscale=1./mm,xscale=1./mm)
    ptitles("Self-Field Potential: + "+lab+" Particles, z = %5.2f"%(top.zbeam),"x [mm]","y [mm]",z_label)
    fma()
  # --- Electrostatic potential on principal axes
  if plt_pa:
    diag_plt_phi_ax(label="Beam Potential along y,x = 0 [b,r] at z = %5.2f"%(top.zbeam))
    fma()
    #
    # NOTE(review): x-window hard-codes species 'U33'/'U34' for the rms
    # scale -- assumes those keys exist in sp; verify for other setups.
    xrms = max(top.xrms[0,sp['U33'].js],top.xrms[0,sp['U34'].js])
    diag_plt_phi_ax(label="Beam Potential along y,x = 0 [b,r] at z = %5.2f"%(top.zbeam),xmax=2.*xrms)
    fma()

# History diagnostics.
#  * Makes specified history plots from beginning of simulation at point called.
#  * Many additional history diagnostics can be added by looking for
#    relevant moments accumulated in the Warp (see the variable group
#    "Hist" in top.v for an extensive list of variables that can be
#    used) and using gist commands to make relevant plots

def diag_hist( plt_ekin = False, plt_spnum = False, plt_curr_p = False, plt_curr_e = False,
               plt_lam_p = False, plt_lam_e = False, plt_lz = False, plt_pth = False,
               plt_pthn = False, plt_krot = False, plt_lang = False, plt_cen = False,
               plt_envrms = False, plt_envmax = False, plt_envrmsp = False,
               plt_emit = False, plt_emitn = False, plt_emitg = False, plt_emitng = False,
               plt_emitr = False, plt_emitnr = False, plt_emitpv = False, plt_emitpvn = False,
               plt_temp = False, plt_Qperv = False, plt_neutf = False):
  """Make history diagnostic plots from the start of the run to the call point.

  Each plt_* keyword enables one family of history-vs-z plots (kinetic
  energy, simulation particle number, particle/electrical current and line
  charge, mechanical/canonical angular momentum, rotation wavenumber and
  Larmor angle, centroids, rms/max envelopes and envelope angles, the
  various emittance measures, effective temperature, perveance, and
  electron neutralization fraction).

  Data are read from module-level history accumulators (hl_* arrays
  indexed [0:top.jhist+1, js]) and from Warp's built-in top.h* moment
  histories / hp* plot routines.  Plots are made for all species and then
  for the target species list sp_target.  Returns nothing; output is plot
  frames only (each closed with fma()).

  NOTE(review): several sections issue the target-species loop twice in a
  row (sometimes with a different yscale); this is preserved verbatim from
  the original source -- confirm whether the duplicates are intentional.
  """
  print "Making history diagnostic plots"
  #
  # --- kinetic energy
  if plt_ekin:
    # --- All Species Combined, MeV
    #hpekin(titles=false,yscale=1.,lhzbeam=true)
    #ptitles("History: All Species Kinetic Energy","z [m]","MeV", )
    #fma()
    # --- All Species, in keV/u
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      A  = s.mass/amu
      plg(hl_ekin[0:top.jhist+1,js]/(A*kV),hl_zbeam[0:top.jhist+1],color=co)
      #hpekin(js=js,color=co,titles=false,yscale=1./A,lhzbeam=true)
    ptitles("History: Kinetic Energy","z [m]","KeV/u", )
    fma()
    # --- Operating species, in keV/u
    for ii in sort(sp_Operate.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      A  = s.mass/amu
      #hpekin(js=js,color=co,titles=false,yscale=1./A,lhzbeam=true)
      plg(hl_ekin[0:top.jhist+1,js]/(A*kV),hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Operating Species Kinetic Energy","z [m]","KeV/u", )
    fma()
    # --- Support species, in keV/u
    for ii in sort(sp_Support.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      A  = s.mass/amu
      plg(hl_ekin[0:top.jhist+1,js]/(A*kV),hl_zbeam[0:top.jhist+1],color=co)
      #hpekin(js=js,color=co,titles=false,yscale=1./A,lhzbeam=true)  # Was getting wrong answer !!
    ptitles("History: Support Species Kinetic Energy","z [m]","KeV/u", )
    fma()
    # --- By Target Species, in kV/Q
    #     Plot by KV/Q so you can see total potential gain falling through
    #     full bias to check system tuning
    zi = top.hzbeam[0]
    zf = top.hzbeam[top.jhist]
    ekin_t = Bias/kV
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      Q  = s.charge_state
      lab+= ii + "("+co+"), "
      plg(hl_ekin[0:top.jhist+1,js]/(Q*kV),hl_zbeam[0:top.jhist+1],color=co)
      #hpekin(js=js,color=co,titles=false,yscale=1./Q,lhzbeam=true)
    plg(array([ekin_t,ekin_t]),array([zi,zf]),type="dash")
    ptitles("History: "+lab+"Kinetic Energy","z [m]","KeV/Q", )
    limits(zi,zf,0.,1.2*ekin_t)
    fma()
  # --- simulation particle number (to check for lost particles)
  #     Comment: tried using hppnum() but was unclear what was being plotted
  if plt_spnum:
    # --- All Species Combined
    plg(hl_spnumt[0:top.jhist+1],hl_zbeam[0:top.jhist+1])
    ptitles("History: Live Sim Particle Number (all species)","z [m]","Particle Number (simulation)", )
    fma()
    # --- All Species Individually
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_spnum[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Live Sim Particle Number (by species)","z [m]","Particle Number (simulation)", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_spnum[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Live Sim Particle Number","z [m]","Particle Number (simulation)", )
    fma()
  # --- current (particle)
  if plt_curr_p:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_ibeam_p[0:top.jhist+1,js]*1.e6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Particle Current (approx)", "z [m]","Current (microA)", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_ibeam_p[0:top.jhist+1,js]*1.e6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Particle Current (approx)","z [m]","Current (microA)", )
    fma()
    # --- Total
    plg(hl_ibeam_pt[0:top.jhist+1]*1.e3,hl_zbeam[0:top.jhist+1])
    ptitles("History: Total Particle Current (approx)","z [m]","Current (mA)", )
    fma()
  # --- current (electrical)
  if plt_curr_e:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_ibeam_e[0:top.jhist+1,js]*1.e6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Electrical Current", "z [m]","Current (microA)", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_ibeam_e[0:top.jhist+1,js]*1.e6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Electrical Current","z [m]","Current (microA)", )
    fma()
    # --- Total
    plg(hl_ibeam_et[0:top.jhist+1]*1.e3,hl_zbeam[0:top.jhist+1])
    ptitles("History: Total Electrical Current","z [m]","Current (mA)", )
    fma()
  # --- line charge (particle)
  if plt_lam_p:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_lambda_p[0:top.jhist+1,js]*10**9,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Particle Line Charge", "z [m]","Line Charge (nC/m)", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_lambda_p[0:top.jhist+1,js]*10**9,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Particle Line Charge","z [m]","Line Charge (nC/m)", )
    fma()
  # --- line charge (electrical)
  if plt_lam_e:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_lambda_e[0:top.jhist+1,js]*10**9,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Electrical Line Charge", "z [m]","Line Charge (nC/m)", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_lambda_e[0:top.jhist+1,js]*10**9,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Electrical Line Charge","z [m]","Line Charge (nC/m)", )
    fma()
  # --- lz mechanical angular momentum
  if plt_lz:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_lz[0:top.jhist+1,js]*10**6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Mechanical Angular Mom", "z [m]","<xy'>-<yx'> [mm-mrad]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_lz[0:top.jhist+1,js]*10**6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Mechanical Angular Mom","z [m]","<xy'>-<yx'> [mm-mrad]", )
    fma()
  # --- canonical angular momentum <P_theta>_j/(gamma_j*beta_j*m_j*c) in mm-mrad units
  if plt_pth:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_pth[0:top.jhist+1,js]*10**6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Canonical Angular Mom <Ptheta>/(gamma*beta*m*c)", "z [m]",
            "Canonical Ang Mom [mm-mrad]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_pth[0:top.jhist+1,js]*10**6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Canonical Angular Mom <Ptheta>/(gamma*beta*m*c)","z [m]",
            "Canonical Ang Mom [mm-mrad]", )
    fma()
  # --- canonical angular momentum (normalized) <P_theta>_j/(m_j*c) in mm-mrad units
  if plt_pthn:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_pthn[0:top.jhist+1,js]*10**6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Norm Canonical Angular Mom <Ptheta>/(m*c)", "z [m]",
            "Canonical Ang Mom [mm-mrad]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_pthn[0:top.jhist+1,js]*10**6,hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Norm Canonical Angular Mom <Ptheta>/(m*c)","z [m]",
            "Canonical Ang Mom [mm-mrad]", )
    fma()
  # --- effective rotation wavenumber
  if plt_krot:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_krot[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Effective Rot Wavenumber", "z [m]","krot [rad/m]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_krot[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Effective Rot Wavenumber","z [m]","krot [rad/m]", )
    fma()
  # --- Larmor rotation angle
  if plt_lang:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg((180./pi)*hl_lang[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Larmor Rot Angle", "z [m]","Rotation [deg]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg((180./pi)*hl_lang[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Larmor Rot Angle","z [m]","Rotation [deg]", )
    fma()
  # --- centroid
  if plt_cen:
    # All Species Combined, x- and y-plane
    hpxbar(titles=false,yscale=1./mm,lhzbeam=true)
    hpybar(titles=false,yscale=1./mm,lhzbeam=true,color="red")
    ptitles("History: All Species x-,y-Centroid: x[b], y[r]","z [m]","<x>, <y> Centroids [mm]", )
    fma()
    # --- By Target Species, x-plane
    hpxbar(titles=false,yscale=1./(sqrt(2.)*mm),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpxbar(js=js,color=co,titles=false,yscale=1./(sqrt(2.)*mm),lhzbeam=true)
    ptitles("History: "+lab+"x-Centroid","z [m]","<x> [mm]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpxbar(js=js,color=co,titles=false,yscale=1./(sqrt(2.)*mm),lhzbeam=true)
    ptitles("History: "+lab+"x-Centroid","z [m]","<x> [mm]", )
    fma()
    # --- By Target Species, y-plane
    hpybar(titles=false,yscale=1./(sqrt(2.)*mm),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpybar(js=js,color=co,titles=false,yscale=1./(sqrt(2.)*mm),lhzbeam=true)
    ptitles("History: "+lab+"y-Centroid","z [m]","<y> [mm]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpybar(js=js,color=co,titles=false,yscale=1./(sqrt(2.)*mm),lhzbeam=true)
    ptitles("History: "+lab+"y-Centroid","z [m]","<y> [mm]", )
    fma()
  # --- rms envelope width
  if plt_envrms:
    # --- All Species Combined, x- and y-plane
    hpenvx(titles=false,yscale=1./(2.*mm),lhzbeam=true)
    hpenvy(titles=false,yscale=1./(2.*mm),lhzbeam=true,color="red")
    ptitles("History: All Species RMS Envelope: x[b], y[r]","z [m]","RMS Width [mm]", )
    fma()
    # --- Target Species, x-plane
    hpenvx(titles=false,yscale=1./(2.*mm),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpenvx(js=js,color=co,titles=false,yscale=1./(2.*mm),lhzbeam=true)
    ptitles("History: "+lab+"RMS x-Envelope","z [m]","RMS Width [mm]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpenvx(js=js,color=co,titles=false,yscale=1./(2.*mm),lhzbeam=true)
    ptitles("History: "+lab+"RMS x-Envelope","z [m]","RMS Width [mm]", )
    fma()
    # --- Target Species, y-plane
    hpenvy(titles=false,yscale=1./(2.*mm),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpenvy(js=js,color=co,titles=false,yscale=1./(2.*mm),lhzbeam=true)
    ptitles("History: "+lab+"RMS y-Envelope","z [m]","RMS Width [mm]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpenvy(js=js,color=co,titles=false,yscale=1./(2.*mm),lhzbeam=true)
    ptitles("History: "+lab+"RMS y-Envelope","z [m]","RMS Width [mm]", )
    fma()
  # --- max particle envelopes
  if plt_envmax:
    # --- x-plane, All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(top.hxmaxp[0:top.jhist+1,js]/mm,top.hzbeam[0:top.jhist+1],color=co)
    ptitles("History: Species max particle x", "z [m]","Max x [mm]", )
    fma()
    # --- x-plane, Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(top.hxmaxp[0:top.jhist+1,js]/mm,top.hzbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" max particle x","z [m]","Max x [mm]", )
    fma()
    # --- y-plane, All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(top.hymaxp[0:top.jhist+1,js]/mm,top.hzbeam[0:top.jhist+1],color=co)
    ptitles("History: Species max particle y", "z [m]","Max y [mm]", )
    fma()
    # --- y-plane, Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(top.hymaxp[0:top.jhist+1,js]/mm,top.hzbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" max particle y","z [m]","Max y [mm]", )
    fma()
  # --- rms envelope angle
  if plt_envrmsp:
    # --- Target Species, x-plane
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(top.hxxpbar[0,0:top.jhist+1,js]/(top.hxrms[0,0:top.jhist+1,js]*mr),top.hzbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+"RMS x-Envelope Angle","z [m]","RMS Angle [mr]", )
    fma()
    # --- Target Species, y-plane
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(top.hyypbar[0,0:top.jhist+1,js]/(top.hyrms[0,0:top.jhist+1,js]*mr),top.hzbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+"RMS y-Envelope Angle","z [m]","RMS Angle [mr]", )
    fma()
  # --- emittance, unnormalized
  if plt_emit:
    # --- All Species Combined, x- and y-plane: Factor 4 in scale to account for Warp edge measure
    hpepsx(titles=false,yscale=1./(4.*mm*mr),lhzbeam=true)
    hpepsy(titles=false,yscale=1./(4.*mm*mr),lhzbeam=true,color="red")
    ptitles("History: All Species RMS x-, y-Emittance: x[b],y[r]","z [m]","Emittance [mm-mr]", )
    fma()
    # --- Target Species, x-plane: Factor 4 in scale to account for Warp edge measure
    hpepsx(titles=false,yscale=1./(4.*mm*mr),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsx(js=js,color=co,titles=false,yscale=1./(4.*mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS x-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    # NOTE(review): this second target loop uses yscale 1./(mm*mr), not the
    # edge-measure 1./(4.*mm*mr) above -- confirm which scale is intended.
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsx(js=js,color=co,titles=false,yscale=1./(mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS x-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    # --- Target Species, y-plane
    hpepsy(titles=false,yscale=1./(4.*mm*mr),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsy(js=js,color=co,titles=false,yscale=1./(4.*mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS y-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsy(js=js,color=co,titles=false,yscale=1./(mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS y-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
  # --- emittance, normalized
  if plt_emitn:
    # --- All Species Combined, x- and y-plane
    #     ** warning norm emittance scaled mm-mrad by default in Warp **
    hpepsnx(titles=false,yscale=1./4.,lhzbeam=true)
    hpepsny(titles=false,yscale=1./4.,lhzbeam=true,color="red")
    ptitles("History: All Species Norm RMS x-, y-Emittance: x[b],y[r]","z [m]","Norm Emittance [mm-mr]", )
    fma()
    # --- By Target Species, x-plane
    hpepsnx(titles=false,yscale=1./4.,lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsnx(js=js,color=co,titles=false,yscale=1./4.,lhzbeam=true)
    ptitles("History: "+lab+"Norm RMS x-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsnx(js=js,color=co,titles=false,yscale=1./4.,lhzbeam=true)
    ptitles("History: "+lab+"Norm RMS x-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    # --- By Target Species, y-plane
    hpepsny(titles=false,yscale=1./4.,lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsny(js=js,color=co,titles=false,yscale=1./4.,lhzbeam=true)
    ptitles("History: "+lab+"Norm RMS y-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsny(js=js,color=co,titles=false,yscale=1./4.,lhzbeam=true)
    ptitles("History: "+lab+"Norm RMS y-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
  # --- emittance, generalized unnormalized
  if plt_emitg:
    # --- All Species Combined, g- and h-plane
    hpepsg(titles=false,yscale=1./(mm*mr),lhzbeam=true)
    hpepsh(titles=false,yscale=1./(mm*mr),lhzbeam=true,color="red")
    ptitles("History: All Species RMS g-, h-Emittance: g[b],h[r]","z [m]","Emittance [mm-mr]", )
    fma()
    # --- By Target Species, g-plane
    hpepsg(titles=false,yscale=1./(mm*mr),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsg(js=js,color=co,titles=false,yscale=1./(mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS g-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsg(js=js,color=co,titles=false,yscale=1./(mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS g-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    # --- By Target Species, h-plane
    hpepsh(titles=false,yscale=1./(mm*mr),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsh(js=js,color=co,titles=false,yscale=1./(mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS h-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsh(js=js,color=co,titles=false,yscale=1./(mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS h-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
  # --- emittance, generalized normalized
  #     ** scaled mm-mrad by default in Warp **
  if plt_emitng:
    # --- All Species Combined, g- and h-plane
    hpepsng(titles=false,yscale=1.,lhzbeam=true)
    hpepsnh(titles=false,yscale=1.,lhzbeam=true,color="red")
    ptitles("History: All Species RMS Norm g-, h-Emittance: g[b],h[r]","z [m]","Norm Emittance [mm-mr]", )
    fma()
    # --- By Target Species, g-plane
    hpepsng(titles=false,yscale=1.,lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsng(js=js,color=co,titles=false,yscale=1.,lhzbeam=true)
    ptitles("History: "+lab+"RMS Norm g-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsng(js=js,color=co,titles=false,yscale=1.,lhzbeam=true)
    ptitles("History: "+lab+"RMS Norm g-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    # --- By Target Species, h-plane
    hpepsnh(titles=false,yscale=1.,lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsnh(js=js,color=co,titles=false,yscale=1.,lhzbeam=true)
    ptitles("History: "+lab+"RMS Norm h-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsnh(js=js,color=co,titles=false,yscale=1.,lhzbeam=true)
    ptitles("History: "+lab+"RMS Norm h-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
  # --- emittance, generalized radial unnormalized
  if plt_emitr:
    # --- All Species Combined
    hpepsr(titles=false,yscale=1./(2.*mm*mr),lhzbeam=true)
    ptitles("History: All Species RMS r-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    # --- By Target Species
    hpepsr(titles=false,yscale=1./(2.*mm*mr),lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsr(js=js,color=co,titles=false,yscale=1./(2.*mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS r-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsr(js=js,color=co,titles=false,yscale=1./(2.*mm*mr),lhzbeam=true)
    ptitles("History: "+lab+"RMS r-Emittance","z [m]","Emittance [mm-mr]", )
    fma()
  # --- emittance, generalized radial normalized
  #     ** warning norm emittance scaled mm-mrad by default **
  if plt_emitnr:
    # --- All Species Combined
    hpepsnr(titles=false,yscale=1./2.,lhzbeam=true)
    ptitles("History: All Species Norm RMS r-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    # --- By Target Species
    hpepsnr(titles=false,yscale=1./2.,lhzbeam=true)
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsnr(js=js,color=co,titles=false,yscale=1./2.,lhzbeam=true)
    ptitles("History: "+lab+"RMS Norm r-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
    #
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      hpepsnr(js=js,color=co,titles=false,yscale=1./2.,lhzbeam=true)
    ptitles("History: "+lab+"RMS Norm r-Emittance","z [m]","Norm Emittance [mm-mr]", )
    fma()
  # --- emittance, total phase volume, unnormalized
  if plt_emitpv:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_epspv[0:top.jhist+1,js]/(mm*mr),hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Total Phase Volume Emittance", "z [m]","Emittance [mm-mrad]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_epspv[0:top.jhist+1,js]/(mm*mr),hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Total Phase Volume Emittance","z [m]","Emittance [mm-mrad]", )
    fma()
  # --- emittance, total phase volume, normalized
  if plt_emitpvn:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_epspvn[0:top.jhist+1,js]/(mm*mr),hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Total Phase Volume Norm Emittance", "z [m]","Norm Emittance [mm-mrad]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_epspvn[0:top.jhist+1,js]/(mm*mr),hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Total Phase Volume Norm Emittance","z [m]","Norm Emittance [mm-mrad]", )
    fma()
  # --- Effective ion temperature calculated from radial thermal emittance
  if plt_temp:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_temp[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Transverse Thermal Temperature", "z [m]","Temp [eV]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_temp[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Species Transverse Thermal Temperature","z [m]","Temp [eV]", )
    fma()
  # --- Perveance
  if plt_Qperv:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_Qperv[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Bare Perveance Q", "z [m]","Perveance [1]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_Qperv[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Species Bare Perveance Q","z [m]","Perveance [1]", )
    fma()
  # --- Neutralization Factor
  if plt_neutf:
    # --- All Species Combined
    for ii in sort(sp.keys()):
      s  = sp[ii]
      js = s.js
      co = s.color
      plg(hl_neutf[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: Species Electron Neutralization Fractions", "z [m]","Fraction [1]", )
    fma()
    # --- Target Species
    lab = ""
    for ii in sp_target:
      s  = sp[ii]
      js = s.js
      co = s.color
      lab+= ii + "("+co+"), "
      plg(hl_neutf[0:top.jhist+1,js],hl_zbeam[0:top.jhist+1],color=co)
    ptitles("History: "+lab+" Electron Neutralization Factors","z [m]","Fraction [1]", )
    fma()

# -- Install diagnostics at appropriate intervals after steps
#    Add options to generate plots desired

# Function to call diagnostics at a timestep in step control lists
def diag_calls():
  """Dispatch diagnostic plot sets when the current step top.it appears in
  the module-level step control lists diag_part_step, diag_field_step, or
  diag_hist_step.  Intended to be installed as a Warp after-step callback.
  """
  if top.it in diag_part_step:
    diag_part(plt_xy=true,plt_xxp=true,plt_yyp=false,plt_xpyp=true,
              plt_trace=false,plt_denxy=true,plt_denr=true)
  if top.it in diag_field_step:
    diag_field(plt_pc=true,plt_pc_xy=true,plt_pa=true)
  if top.it in diag_hist_step:
    diag_hist(plt_ekin=true,plt_spnum=true,plt_curr_e=true,plt_curr_p=true,plt_lam_p=true,plt_lam_e=true,
              plt_lz=true,plt_pth=false,plt_pthn=false,plt_krot=true,plt_lang=true,
              plt_cen=true,plt_envrms=true,plt_envmax=true,plt_envrmsp=true,
              plt_emit=true,plt_emitn=true,plt_emitg=true,plt_emitng=true,plt_emitr=true,plt_emitnr=true,
              plt_emitpv=false,plt_emitpvn=false,plt_temp=true,plt_Qperv=true,plt_neutf=true)
[ "wong@intranet.nscl.msu.edu" ]
wong@intranet.nscl.msu.edu