blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
288
content_id
stringlengths
40
40
detected_licenses
listlengths
0
112
license_type
stringclasses
2 values
repo_name
stringlengths
5
115
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
684 values
visit_date
timestamp[us]date
2015-08-06 10:31:46
2023-09-06 10:44:38
revision_date
timestamp[us]date
1970-01-01 02:38:32
2037-05-03 13:00:00
committer_date
timestamp[us]date
1970-01-01 02:38:32
2023-09-06 01:08:06
github_id
int64
4.92k
681M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
22 values
gha_event_created_at
timestamp[us]date
2012-06-04 01:52:49
2023-09-14 21:59:50
gha_created_at
timestamp[us]date
2008-05-22 07:58:19
2023-08-21 12:35:19
gha_language
stringclasses
147 values
src_encoding
stringclasses
25 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
128
12.7k
extension
stringclasses
142 values
content
stringlengths
128
8.19k
authors
listlengths
1
1
author_id
stringlengths
1
132
47ca697e284e6664d1e966bda357bb112aa56356
d3b80b8de39d4bb4bab02b0e8b5092c2e32aff24
/login-getmileage.py
d3256d39233544095f5551a9f87de3fbfbd22e09
[]
no_license
brightparagon/learn-python-crawl-scrape
658a058fee4ecdb31c92be54b5cb684f5d4d4658
f28769fd469f69a485e560dc42d20aa08c51dc77
refs/heads/master
2021-05-02T05:23:13.512441
2018-07-02T14:58:54
2018-07-02T14:58:54
120,919,940
1
0
null
null
null
null
UTF-8
Python
false
false
731
py
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Hanbit member credentials (placeholders; fill in real values before running).
USER = "<TEST>"
PASS = "<TEST>"

# Start a session so the login cookie is reused by the later requests.
session = requests.session()

# Log in: POST the credential form to the login endpoint.
login_info = {
    "m_id": USER,
    "m_passwd": PASS
}

url_login = "http://www.hanbit.co.kr/member/login_proc.php"
res = session.post(url_login, data=login_info)
res.raise_for_status()

# Access "my page" with the authenticated session.
url_mypage = "http://www.hanbit.co.kr/myhanbit/myhanbit.html"
res = session.get(url_mypage)
res.raise_for_status()

# Scrape the mileage and ecoin balances from the page.
# NOTE(review): select_one returns None if the page layout changes — the
# get_text() call would then raise AttributeError; verify selectors still match.
soup = BeautifulSoup(res.text, "html.parser")
mileage = soup.select_one(".mileage_section1 span").get_text()
ecoin = soup.select_one(".mileage_section2 span").get_text()
# Fixed typos in the printed labels ("milage" -> "mileage", "ecoi" -> "ecoin").
print("mileage: ", mileage)
print("ecoin: ", ecoin)
[ "kyeongmo2@gmail.com" ]
kyeongmo2@gmail.com
5c94e0ecd48a3e1e6b1341ab6049f1b1c6cc7455
e0e55462707e8257559736f212ad086fbb5f9af5
/util/path_config.py
47d0bb3fd2df3e4777e54233ca5de1acb9a0a277
[]
no_license
jtpils/SSRNet
45e5c97f82e21c4f672d3c7e61de0c6036b7a95c
05d70706f4ecdecd502890a799b0d316db15ebd3
refs/heads/master
2022-11-26T11:46:24.597002
2020-07-28T02:56:33
2020-07-28T02:56:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
204
py
import sys

# Locations of the locally built Open3D bindings and of the project tree.
open3d_path = '/mnt/A/jokery/projects/Open3D_test3/src/build/lib/'
tc_path = '/mnt/A/jokery/projects/08_2/'

# The Open3D build directory must be importable before pulling in py3d.
sys.path.append(open3d_path)

from py3d import *


def get_tc_path():
    """Return the configured project base path."""
    return tc_path
[ "noreply@github.com" ]
jtpils.noreply@github.com
b306dcb0889d1ec282242b257ade90f599f43fea
227b02ca30168c31accd1b7d38c3436b737a2f8e
/lenstronomy/Data/image_noise.py
3ab46bd7ca164bfecd4003235a32c6a0ead736ba
[ "MIT" ]
permissive
Thomas-01/lenstronomy
b7d802c6213f0b7780acb7946b9bb150081b5d34
36db4c7f43ba28d6bdecdab1f15c537043f4a286
refs/heads/master
2020-06-03T04:22:06.123622
2020-03-31T18:33:47
2020-03-31T18:33:47
191,435,987
0
0
MIT
2019-07-11T03:22:29
2019-06-11T19:21:27
Python
UTF-8
Python
false
false
4,711
py
import numpy as np


class ImageNoise(object):
    """
    class that deals with noise properties of imaging data
    """

    def __init__(self, image_data, exposure_time=None, background_rms=None,
                 noise_map=None, verbose=True):
        """
        :param image_data: numpy array, pixel data values
        :param exposure_time: int or array of size the data; exposure time
            (common for all pixels or individually for each individual pixel)
        :param background_rms: root-mean-square value of Gaussian background noise
        :param noise_map: int or array of size the data; joint noise sqrt(variance)
            of each individual pixel. Overwrites meaning of background_rms and
            exposure_time.
        :param verbose: bool; if True, print a warning for potentially unstable
            noise estimates
        """
        if exposure_time is not None:
            # Clamp exposures to a tiny positive floor so later divisions by the
            # exposure map can never divide by zero (or by a negative value).
            if isinstance(exposure_time, (int, float)):
                exposure_time = max(exposure_time, 10 ** (-10))
            else:
                # np.clip returns a clipped copy; the original implementation
                # mutated the caller's array in place, which was a side effect
                # callers did not ask for.
                exposure_time = np.clip(exposure_time, 10 ** (-10), None)
        self._exp_map = exposure_time
        self._background_rms = background_rms
        self._noise_map = noise_map
        if noise_map is not None:
            # A per-pixel noise map must match the data pixel grid exactly.
            assert np.shape(noise_map) == np.shape(image_data)
        else:
            if background_rms is not None and exposure_time is not None:
                if background_rms * np.max(exposure_time) < 1 and verbose is True:
                    print("WARNING! sigma_b*f %s < 1 count may introduce unstable error estimates with a Gaussian"
                          " error function for a Poisson distribution with mean < 1."
                          % (background_rms * np.max(exposure_time)))
        self._data = image_data

    @property
    def background_rms(self):
        """
        :return: rms value of background noise
        """
        if self._background_rms is None:
            if self._noise_map is None:
                raise ValueError("rms background value as 'background_rms' not specified!")
            # Fall back to the median of the per-pixel noise map.
            self._background_rms = np.median(self._noise_map)
        return self._background_rms

    @property
    def exposure_map(self):
        """
        Units of data and exposure map should result in:
        number of flux counts = data * exposure_map

        :return: exposure map for each pixel
        """
        if self._exp_map is None:
            if self._noise_map is None:
                raise ValueError("Exposure map has not been specified in Noise() class!")
        return self._exp_map

    @property
    def C_D(self):
        """
        Covariance matrix of all pixel values in 2d numpy array (only diagonal component)
        The covariance matrix is estimated from the data.
        WARNING: For low count statistics, the noise in the data may lead to biased
        estimates of the covariance matrix.

        :return: covariance matrix of all pixel values in 2d numpy array (only diagonal component).
        """
        if not hasattr(self, '_C_D'):
            # Computed lazily and cached on first access.
            if self._noise_map is not None:
                self._C_D = self._noise_map ** 2
            else:
                self._C_D = covariance_matrix(self._data, self.background_rms,
                                              self.exposure_map)
        return self._C_D

    def C_D_model(self, model):
        """
        :param model: model (same as data but without noise)
        :return: estimate of the noise per pixel based on the model flux
        """
        if self._noise_map is not None:
            return self._noise_map ** 2
        else:
            return covariance_matrix(model, self._background_rms, self._exp_map)


def covariance_matrix(data, background_rms, exposure_map):
    """
    returns a diagonal matrix for the covariance estimation which describes the
    error

    Notes:
    - the exposure map must be positive definite. Values that deviate too much
      from the mean exposure time will be given a lower limit to not
      under-predict the Poisson component of the noise.
    - the data must be positive semi-definite for the Poisson noise estimate.
      Values < 0 (Possible after mean subtraction) will not have a Poisson
      component in their noise estimate.

    :param data: data array, eg in units of photons/second
    :param background_rms: background noise rms, eg. in units (photons/second)^2
    :param exposure_map: exposure time per pixel, e.g. in units of seconds
    :return: len(d) x len(d) matrix that give the error of background and
        Poisson components; (photons/second)^2
    """
    # Only non-negative data contributes a Poisson term.
    d_pos = np.zeros_like(data)
    d_pos[data >= 0] = data[data >= 0]
    sigma = d_pos / exposure_map + background_rms ** 2
    return sigma
[ "sibirrer@gmail.com" ]
sibirrer@gmail.com
ec3e99c49cd07250adacf2d417ff14a17a27c5f3
eea704186322a0441124bae2eaefc185c75a69f1
/setup.py
581f4773531a0ab6f4988a45d2bc94e64bcfe9dd
[ "BSD-3-Clause" ]
permissive
ScottTaing/taolib
ff5c78c8e6ba0522f5d932975fdc8805c0564b4e
fbd4138d1be9a3ef032284a52662213833921efc
refs/heads/master
2021-01-17T23:27:14.511401
2011-05-06T05:04:55
2011-05-06T05:04:55
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,423
py
#!/usr/bin/env python
"""Distutils setup script for taolib (Tao's libraries)."""

import os
import sys
from distutils.core import setup, Extension


def main():
    # Require Python >= 2.4.  The original check did
    # float(sys.version[:3]) >= 2.4, which mis-parses two-digit minor
    # versions ("2.10" -> 2.1, "3.10" -> 3.1); sys.version_info compares
    # version tuples correctly.
    if sys.version_info < (2, 4):
        sys.stderr.write("CRITICAL: Python version must be greater than or equal to 2.4! python 2.6.2 is recommended!\n")
        sys.exit(1)
    setup(name="taolib",
          version="1.0",
          description="Tao's libraries",
          author='Tao (Foo) Liu',
          author_email='vladimir.liu@gmail.com',
          url='http://vladimirliu.com/~taoliu/',
          package_dir={'taolib' : '.'},
          packages=['taolib', 'taolib.CoreLib',
                    'taolib.CoreLib.DB', 'taolib.CoreLib.FeatIO',
                    'taolib.CoreLib.BasicStat', 'taolib.CoreLib.WWW',
                    'taolib.CoreLib.Parser', 'taolib.CoreLib.SeqIO',
                    'taolib.CoreLib.BinKeeper', 'taolib.CoreLib.Algorithm',
                    'taolib.Assoc',
                    'taolib.ExtApp',
                    'taolib.Motif',
                    # 'taolib.IntegrativeBioinformatics',
                    # 'taolib.IntegrativeBioinformatics.elements',
                    # 'taolib.IntegrativeBioinformatics.networks',
                    # 'taolib.IntegrativeBioinformatics.algos',
                    # 'taolib.IntegrativeBioinformatics.features',
                    # 'taolib.IntegrativeBioinformatics.links',
                    # 'taolib.IntegrativeBioinformatics.apache',
                    ],
          scripts=['Scripts/motif_enrich.py',
                   'Scripts/qc_chIP_peak.py',
                   'Scripts/qc_chIP_whole.py',
                   'Scripts/count_probes_in_peaks.py',
                   'Scripts/count_probes_in_ranges.py',
                   'Scripts/xyz2image.py',
                   'Scripts/refine_peak.py',
                   'Scripts/fq2fa.py',
                   'Scripts/wiggle_reformat.py',
                   'Scripts/wig_correlation.py',
                   'Scripts/wig_correlation_in_bed_file.py',
                   'Scripts/conservation_plot.py',
                   'Scripts/wig_extract_chrom.py',
                   'Scripts/wig_split.py',
                   'Scripts/wig_call_peaks.py',
                   'Scripts/wig_call_peaks2.py',
                   'Scripts/naive_call_peaks.py',
                   'Scripts/wig2bedGraphBins.py',
                   'Scripts/bed_correlation.py',
                   'Scripts/ce_histone_matrix.py',
                   'Scripts/rand_pos.py',
                   'Scripts/draw_BED.py',
                   'Scripts/norm.py',
                   'Scripts/cutoff.py',
                   'Scripts/ChIP-seq_Pipeline1.py',
                   'Scripts/convert_gene_ids.py',
                   # 'Scripts/hmm_conception.py',
                   ],
          classifiers=[
              'Development Status :: 4 - Beta',
              'Environment :: Console',
              'Environment :: Web Environment',
              'Intended Audience :: Developers',
              'License :: OSI Approved :: Artistic License',
              'Operating System :: MacOS :: MacOS X',
              'Operating System :: Microsoft :: Windows',
              'Operating System :: POSIX',
              'Programming Language :: Python',
              'Topic :: Database',
              ],
          requires=['MySQL_python', 'PIL']
          )


if __name__ == '__main__':
    main()
[ "vladimir.liu@gmail.com" ]
vladimir.liu@gmail.com
7bcb803514e2cca016a206f9f03f15936cec735d
66a9c25cf0c53e2c3029b423018b856103d709d4
/sleekxmpp/features/feature_starttls/starttls.py
eb5eee1d5f5b8798c4f721522214b325b276dc94
[ "MIT", "BSD-3-Clause", "BSD-2-Clause" ]
permissive
fritzy/SleekXMPP
1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf
cc1d470397de768ffcc41d2ed5ac3118d19f09f5
refs/heads/develop
2020-05-22T04:14:58.568822
2020-02-18T22:54:57
2020-02-18T22:54:57
463,405
658
254
NOASSERTION
2023-06-27T20:05:54
2010-01-08T05:54:45
Python
UTF-8
Python
false
false
2,100
py
"""
    SleekXMPP: The Sleek XMPP Library
    Copyright (C) 2011 Nathanael C. Fritz
    This file is part of SleekXMPP.

    See the file LICENSE for copying permission.
"""

import logging

from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import RestartStream, register_stanza_plugin
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.xmlstream.matcher import MatchXPath
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.features.feature_starttls import stanza


log = logging.getLogger(__name__)


class FeatureSTARTTLS(BasePlugin):
    """Stream feature plugin implementing STARTTLS negotiation (RFC 6120)."""

    name = 'feature_starttls'
    description = 'RFC 6120: Stream Feature: STARTTLS'
    dependencies = set()
    stanza = stanza

    def plugin_init(self):
        """Register the STARTTLS feature, its stanzas, and the proceed handler."""
        self.xmpp.register_handler(
                Callback('STARTTLS Proceed',
                         MatchXPath(stanza.Proceed.tag_name()),
                         self._handle_starttls_proceed,
                         instream=True))
        self.xmpp.register_feature('starttls',
                                   self._handle_starttls,
                                   restart=True,
                                   order=self.config.get('order', 0))
        self.xmpp.register_stanza(stanza.Proceed)
        self.xmpp.register_stanza(stanza.Failure)
        register_stanza_plugin(StreamFeatures, stanza.STARTTLS)

    def _handle_starttls(self, features):
        """Decide whether to request TLS when the server advertises it.

        Arguments:
            features -- The stream:features element.
        """
        if 'starttls' in self.xmpp.features:
            # TLS has already been negotiated; a repeated offer is
            # against spec, so ignore it.
            return False
        if not self.xmpp.use_tls:
            # TLS was disabled locally.
            return False
        self.xmpp.send(features['starttls'], now=True)
        return True

    def _handle_starttls_proceed(self, proceed):
        """Restart the XML stream when TLS is accepted."""
        log.debug("Starting TLS")
        if self.xmpp.start_tls():
            self.xmpp.features.add('starttls')
            raise RestartStream()
[ "lancestout@gmail.com" ]
lancestout@gmail.com
73ccc88785e3b447db6019e216b1ee9fca44786e
1b87d5f7cba7e068f7b2ea902bba494599d20a78
/tools/upload/googlecode_upload.py
7d179f3a0d96db814dd7cef9a9724e7b4579bd08
[ "BSD-3-Clause" ]
permissive
jpaalasm/pyglet
906d03fe53160885665beaed20314b5909903cc9
bf1d1f209ca3e702fd4b6611377257f0e2767282
refs/heads/master
2021-01-25T03:27:08.941964
2014-01-25T17:50:57
2014-01-25T17:50:57
16,236,090
2
2
null
null
null
null
UTF-8
Python
false
false
6,896
py
#!/usr/bin/env python # # Copyright 2006 Google Inc. All Rights Reserved. # Author: danderson@google.com (David Anderson) # # Script for uploading files to a Google Code project. # # This is intended to be both a useful script for people who want to # streamline project uploads and a reference implementation for # uploading files to Google Code projects. # # To upload a file to Google Code, you need to provide a path to the # file on your local machine, a small summary of what the file is, a # project name, and a valid account that is a member or owner of that # project. You can optionally provide a list of labels that apply to # the file. The file will be uploaded under the same name that it has # in your local filesystem (that is, the "basename" or last path # component). Run the script with '--help' to get the exact syntax # and available options. # # Note that the upload script requests that you enter your # googlecode.com password. This is NOT your Gmail account password! # This is the password you use on googlecode.com for committing to # Subversion and uploading files. You can find your password by going # to http://code.google.com/hosting/settings when logged in with your # Gmail account. # # If you are looking at this script as a reference for implementing # your own Google Code file uploader, then you should take a look at # the upload() function, which is the meat of the uploader. You # basically need to build a multipart/form-data POST request with the # right fields and send it to https://PROJECT.googlecode.com/files . # Authenticate the request using HTTP Basic authentication, as is # shown below. # # Licensed under the terms of the Apache Software License 2.0: # http://www.apache.org/licenses/LICENSE-2.0 # # Questions, comments, feature requests and patches are most welcome. # Please direct all of these to the Google Code users group: # http://groups-beta.google.com/group/google-code-hosting """Google Code file uploader script. 
""" __author__ = 'danderson@google.com (David Anderson)' import httplib import os.path import optparse import getpass import base64 def upload(file, project_name, user_name, password, summary, labels=None): """Upload a file to a Google Code project's file server. Args: file: The local path to the file. project_name: The name of your project on Google Code. user_name: Your Google account name. password: The googlecode.com password for your account. Note that this is NOT your global Google Account password! summary: A small description for the file. labels: an optional list of label strings with which to tag the file. Returns: a tuple: http_status: 201 if the upload succeeded, something else if an error occured. http_reason: The human-readable string associated with http_status file_url: If the upload succeeded, the URL of the file on Google Code, None otherwise. """ # The login is the user part of user@gmail.com. If the login provided # is in the full user@domain form, strip it down. if '@' in user_name: user_name = user_name[:user_name.index('@')] form_fields = [('summary', summary)] if labels is not None: form_fields.extend([('label', l.strip()) for l in labels]) content_type, body = encode_upload_request(form_fields, file) upload_host = '%s.googlecode.com' % project_name upload_uri = '/files' auth_token = base64.b64encode('%s:%s'% (user_name, password)) headers = { 'Authorization': 'Basic %s' % auth_token, 'User-Agent': 'Googlecode.com uploader v0.9.4', 'Content-Type': content_type, } server = httplib.HTTPSConnection(upload_host) server.request('POST', upload_uri, body, headers) resp = server.getresponse() server.close() if resp.status == 201: location = resp.getheader('Location', None) else: location = None return resp.status, resp.reason, location def encode_upload_request(fields, file_path): """Encode the given fields and file into a multipart form body. fields is a sequence of (name, value) pairs. file is the path of the file to upload. 
The file will be uploaded to Google Code with the same file name. Returns: (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla' CRLF = '\r\n' body = [] # Add the metadata about the upload first for key, value in fields: body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="%s"' % key, '', value, ]) # Now add the file itself file_name = os.path.basename(file_path) f = open(file_path) file_content = f.read() f.close() body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="filename"; filename="%s"' % file_name, # The upload server determines the mime-type, no need to set it. 'Content-Type: application/octet-stream', '', file_content, ]) # Finalize the form body body.extend(['--' + BOUNDARY + '--', '']) return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body) def main(): parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY ' '-p PROJECT -u USERNAME FILE') parser.add_option('-s', '--summary', dest='summary', help='Short description of the file') parser.add_option('-p', '--project', dest='project', help='Google Code project name') parser.add_option('-u', '--user', dest='user', help='Your Google Code username') parser.add_option('-l', '--labels', dest='labels', help='An optional list of labels to attach to the file') options, args = parser.parse_args() if not options.summary: parser.error('File summary is missing.') elif not options.project: parser.error('Project name is missing.') elif not options.user: parser.error('User name is missing.') elif len(args) < 1: parser.error('File to upload not provided.') print 'Please enter your googlecode.com password.' print '** Note that this is NOT your Gmail account password! 
**' print 'It is the password you use to access Subversion repositories,' print 'and can be found here: http://code.google.com/hosting/settings' password = getpass.getpass() file_path = args[0] if options.labels: labels = options.labels.split(',') else: labels = None status, reason, url = upload(file_path, options.project, options.user, password, options.summary, labels) if url: print 'The file was uploaded successfully.' print 'URL: %s' % url else: print 'An error occurred. Your file was not uploaded.' print 'Google Code upload server said: %s (%s)' % (reason, status) if __name__ == '__main__': main()
[ "joonas.paalasmaa@gmail.com" ]
joonas.paalasmaa@gmail.com
038857cb63e1a53e8498e0e7db5a344f570b070f
b501a5eae1018c1c26caa96793c6ee17865ebb2d
/Networking/socket/socket_echo_client_dgram.py
9a94a43d153d9f68fb0d97cdb5884623b3572b54
[]
no_license
jincurry/standard_Library_Learn
12b02f9e86d31ca574bb6863aefc95d63cc558fc
6c7197f12747456e0f1f3efd09667682a2d1a567
refs/heads/master
2022-10-26T07:28:36.545847
2018-05-04T12:54:50
2018-05-04T12:54:50
125,447,397
0
1
null
2022-10-02T17:21:50
2018-03-16T01:32:50
Python
UTF-8
Python
false
false
439
py
import socket
import sys

# UDP echo client: send one datagram, wait for the echo, then clean up.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

server_address = ('localhost', 10000)
message = b'This is the message, it will be repeated'

try:
    # Fire the datagram at the echo server.
    print('Sending {!r}'.format(message))
    sent = sock.sendto(message, server_address)

    # Block until the echoed payload comes back.
    print('Waiting for receive')
    data, server = sock.recvfrom(4096)
    print('Received {!r}'.format(data))
finally:
    # Always release the socket, even when the receive fails.
    print('Closing socket')
    sock.close()
[ "jintao422516@gmail.com" ]
jintao422516@gmail.com
f72480accb059522869217f8cf37e191dde1a7b4
201f07e3ddfd4f1b24c24fc794aa980a255fb2ab
/barriers/views/assessments/overview.py
e1bd181b2078e37846d386e93ed49544758a1f6b
[ "MIT" ]
permissive
uktrade/market-access-python-frontend
7dab68eed0b35205f4c78758ab88c815f65631c2
9510c31f7264c4092f76fce336d05b6709802b1c
refs/heads/master
2023-09-05T09:38:42.473232
2023-08-31T08:42:49
2023-08-31T08:42:49
225,406,107
7
3
MIT
2023-09-13T09:41:55
2019-12-02T15:20:46
Python
UTF-8
Python
false
false
586
py
from django.conf import settings
from django.views.generic import TemplateView

from ..mixins import BarrierMixin


class AssessmentOverview(BarrierMixin, TemplateView):
    """Overview page for a barrier's assessments."""

    template_name = "barriers/assessments/overview.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Hide the strategic-assessment item entirely when the feature is
        # not enabled for this environment.
        hidden = not settings.PRIORITISATION_STRATEGIC_ASSESSMENTS
        context["strategic_ass"] = (
            "assessment-item visually-hidden" if hidden else "assessment-item"
        )
        return context
[ "noreply@github.com" ]
uktrade.noreply@github.com
71522388895aa9b96e91d33115a18a44030a8f11
dc182e5b4597bdd104d6695c03744a12ebfe2533
/Hackerrank Solutions/array.py
548b0132cc10c538e8612616e21c82103c603c13
[]
no_license
srinaveendesu/Programs
06fb4a4b452445e4260f9691fe632c732078d54d
f6dbd8db444678b7ae7658126b59b381b3ab0bab
refs/heads/master
2023-01-27T14:42:40.989127
2023-01-18T22:36:14
2023-01-18T22:36:14
129,948,488
1
0
null
2022-09-13T23:06:04
2018-04-17T18:30:13
Python
UTF-8
Python
false
false
4,002
py
#QQ#
#https://www.hackerrank.com/challenges/array-left-rotation/problem
#!/bin/python3

import math
import os
import random
import re
import sys


if __name__ == '__main__':
    nd = input().split()
    n = int(nd[0])
    d = int(nd[1])
    a = list(map(int, input().rstrip().split()))
    # Left-rotate by d positions (dropped the pointless a[::] copy).
    l = a[d:] + a[:d]
    print(' '.join(map(str, l)))


#QQ# https://www.hackerrank.com/challenges/sparse-arrays/problem
#!/bin/python3

import math
import os
import random
import re
import sys


# Complete the matchingStrings function below.
def matchingStrings(strings, queries):
    """Return, for each query, how many times it occurs in strings."""
    d = {}
    for val in strings:
        d[val] = d.get(val, 0) + 1
    # Removed leftover debug print(d) that polluted stdout.
    lst = []
    for val in queries:
        lst.append(d.get(val, 0))
    return lst


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    strings_count = int(input())
    strings = []
    for _ in range(strings_count):
        strings_item = input()
        strings.append(strings_item)
    queries_count = int(input())
    queries = []
    for _ in range(queries_count):
        queries_item = input()
        queries.append(queries_item)
    res = matchingStrings(strings, queries)
    fptr.write('\n'.join(map(str, res)))
    fptr.write('\n')
    fptr.close()


#QQ# https://www.hackerrank.com/challenges/missing-numbers/problem
# !/bin/python3

import math
import os
import random
import re
import sys


# Complete the missingNumbers function below.
def missingNumbers(arr, brr):
    """Return, sorted, the values occurring more often in brr than in arr."""
    arr.sort()
    brr.sort()
    # Count arr occurrences, then subtract brr occurrences; a negative
    # total marks a missing number.  (Removed the unused dict d2 and the
    # debug print(d1) from the original.)
    d1 = {}
    for val in arr:
        d1[val] = d1.get(val, 0) + 1
    for val in brr:
        d1[val] = d1.get(val, 0) - 1
    lst = [val for val in d1.keys() if d1[val] < 0]
    lst.sort()
    return lst


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    arr = list(map(int, input().rstrip().split()))
    m = int(input())
    brr = list(map(int, input().rstrip().split()))
    result = missingNumbers(arr, brr)
    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')
    fptr.close()


#QQ# https://www.hackerrank.com/challenges/sherlock-and-array/problem
# !/bin/python3

import math
import os
import random
import re
import sys


# Complete the balancedSums function below.
def balancedSums(arr):
    """Return 'YES' if some pivot splits arr into equal left/right sums."""
    s = sum(arr)
    half = 0
    for val in arr:
        # With val as the pivot, both sides must sum to (s - val) / 2.
        # The original set i = n here, which does NOT break a Python for
        # loop; returning immediately is equivalent and actually stops.
        if (s - val) / 2 == half:
            return 'YES'
        half = half + val
    return 'NO'


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    T = int(input().strip())
    for T_itr in range(T):
        n = int(input().strip())
        arr = list(map(int, input().rstrip().split()))
        result = balancedSums(arr)
        fptr.write(result + '\n')
    fptr.close()


#QQ# https://www.hackerrank.com/challenges/beautiful-triplets/problem
# !/bin/python3

import math
import os
import random
import re
import sys


# Complete the beautifulTriplets function below.
def beautifulTriplets(d, arr):
    """Count triplets (a, a + d, a + 2*d) whose members all occur in arr."""
    # Hoist membership into a set: O(1) lookups instead of O(n) list scans.
    # One count per qualifying element, exactly as the original's list length.
    members = set(arr)
    count = 0
    for val in arr:
        if val + d in members and val + 2 * d in members:
            count += 1
    return count


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nd = input().split()
    n = int(nd[0])
    d = int(nd[1])
    arr = list(map(int, input().rstrip().split()))
    result = beautifulTriplets(d, arr)
    fptr.write(str(result) + '\n')
    fptr.close()
[ "srinaveen.desu@gmail.com" ]
srinaveen.desu@gmail.com
26d0cb12f5092278fa8ddcee18b2141fd5dd5574
9382a3acd3637a1c242045bff8109dee844d869a
/src/webhook_server_for_evaluation.py
10a64c5634e81ba458af050880bc5ec18bf38dba
[ "MIT" ]
permissive
byeongkyu/dialogflow_dialog
4cf19133f73d8ea82b8fb98e33a661804217a5db
c7ae5ce65f8fb6fa830817ab186a9851a26473e0
refs/heads/master
2020-03-17T20:07:05.222328
2018-08-23T23:42:39
2018-08-23T23:42:39
133,893,838
0
2
null
null
null
null
UTF-8
Python
false
false
5,691
py
#!/usr/bin/env python #-*- encoding: utf8 -*- import os import time import rospy import threading import logging import json import requests import random from std_msgs.msg import String, Int16, Empty from flask import Flask, Response, request, make_response, jsonify WEATHER_TEXT = [ "The weather in {city} now is {current_weather_desc}, current temperature is {current_temp} degree and wind speed is {current_wind_speed} m/s.", ] class WebhookServer: def __init__(self): self.app = Flask(__name__) self.app.add_url_rule('/', 'fulfillment', self.handle_fulfillment, methods=['POST']) self.app.add_url_rule('/', 'index', self.handle_index, methods=['GET']) # 0: Neutral, 1: Forward Lean, 2: Self disclosure, 3: voice pitch self.current_scenario = 0 # Neutral rospy.Subscriber('/select_evaluation_scenario', Int16, self.handle_select_scenario) self.pub_complete = rospy.Publisher('/complete_execute_scenario', Empty, queue_size=1) self.port_num = rospy.get_param('~port_num', default=8888) try: with open(rospy.get_param('~weather_api')) as f: self.weather_api_key = json.loads(f.read()) except KeyError, e: logging.error('Need parameter ~weather_api') exit(-1) # print self.weather_api_key def run(self): self.app.run(host="0.0.0.0", port=self.port_num) def handle_select_scenario(self, msg): self.current_scenario = msg.data def handle_index(self): return "<h1>This page is index page of UoA webhook server...</h1>" def handle_fulfillment(self): req = request.get_json(silent=True, force=True) try: action = req.get('queryResult').get('action') except AttributeError: rospy.logwarn('JSON error from fulfillment request') return "json error" if action == 'weather': res = self.get_weather(req) elif action == 'welcome': if self.current_scenario == 2: # 2: Self disclosure res = "Hi there, my name is Nao, the receptionist robot. I'm a little nervous about this task, but how may I help you?" else: res = "Hi there, my name is Nao, the receptionist robot. How may I help you?" 
elif action == "prescription_not_ready": if self.current_scenario == 3: # 3: voice pitch res = ''' <prosody pitch="-15%"> I'm sorry Sam, your doctor has not yet written your prescription and so it is not ready for collection at the moment</prosody>. <prosody pitch="-15%"> However, I have sent a message to your doctor</prosody>. <prosody pitch="-15%"> Once the prescription has been written, someone will call you and let you know</prosody>. <prosody pitch="-15%"> Is there anything else I can help you with</prosody>? ''' else: res = req.get('queryResult').get('fulfillmentText') elif action == "dontknow_doctor_name": if self.current_scenario == 2: # 2: Self disclosure res = ''' No problem Sam, I forget things too sometimes. I can see that you have an appointment with Dr Jones today and have checked you in. Is there anything else I can help you with? ''' elif self.current_scenario == 3: # 3: voice pitch res = ''' <prosody pitch="10%"> No problem Sam, I can see that you have an appointment with Dr Jones today and have checked you in</prosody>. <prosody pitch="10%"> Is there anything else I can help you with</prosody>? ''' else: res = req.get('queryResult').get('fulfillmentText') elif action == "request_bathroom": if self.current_scenario == 3: # 3: voice pitch res = ''' %pointing=objects:door% <prosody pitch="10%"> Certainly, the bathroom is located down the hall, second door on the right</prosody>. ''' else: res = req.get('queryResult').get('fulfillmentText') elif action == "goodbye": if self.current_scenario == 3: # 3: voice pitch res = ''' <prosody pitch="10%"> I hope you have a nice day, Sam</prosody>. 
''' else: res = req.get('queryResult').get('fulfillmentText') self.pub_complete.publish() return make_response(jsonify({'fulfillmentText': res})) def get_weather(self, req): parameters = req.get('queryResult').get('parameters') result = requests.get('http://api.openweathermap.org/data/2.5/weather?q=%s&appid=%s'%(parameters['geo-city'], self.weather_api_key['api_key'])) weather_data = json.loads(result.text) # print weather_data current_city = weather_data['name'] current_weather = weather_data['weather'][0]['main'] current_weather_desc = weather_data['weather'][0]['description'] current_temp = weather_data['main']['temp'] - 273.15 # Kelvin to Celcius current_wind_speed = weather_data['wind']['speed'] output_string = random.choice(WEATHER_TEXT) return output_string.format(city=current_city, current_weather_desc=current_weather_desc, current_temp=current_temp, current_wind_speed=current_wind_speed) if __name__ == '__main__': threading.Thread(target=lambda: rospy.init_node('webhook_server_node', disable_signals=True)).start() time.sleep(0.5) m = WebhookServer() m.run()
[ "byeongkyu@gmail.com" ]
byeongkyu@gmail.com
3455029efbae033f2c0da3c1c4522c8c9b33cb33
e6a90d21e9a983476adda3cccc832a828875cea9
/Lib/site-packages/ffc/jitobject.py
19a9d2db3872418e1f60e0ccf70f4753e65356c4
[]
no_license
maciekswat/dolfin_python_deps
e28c27780d535c961e4b3129bb17628a4ff7167a
7af15cd0ab522436ca285f8422faa42675345f55
refs/heads/master
2021-01-25T12:14:55.779591
2014-04-23T22:51:19
2014-04-23T22:51:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,931
py
# Copyright (C) 2008-2013 Anders Logg
#
# This file is part of FFC.
#
# FFC is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FFC. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Martin Alnaes, 2013
#
# First added:  2008-09-04
# Last changed: 2013-01-25

# Python modules.
from hashlib import sha1

# Instant modules.
from instant import get_swig_version

# UFL modules.
import ufl

# FFC modules.
from constants import FFC_VERSION

# UFC modules.
import ufc_utils

# Combined signature of all ufc headers: concatenate every *_header
# attribute of ufc_utils and hash the result.
_header_names = [name for name in vars(ufc_utils).keys() if name.endswith("_header")]
ufc_signature = sha1(''.join(getattr(ufc_utils, name) for name in _header_names)).hexdigest()


class JITObject:
    """This class is a wrapper for a compiled object in the context of
    specific compiler parameters.

    A JITObject is identified either by its hash value (valid only within a
    single instance of an application, at runtime) or by its signature, which
    is persistent and may be used for caching modules on disk."""

    def __init__(self, form, parameters):
        "Create JITObject for given form and parameters"
        assert(isinstance(form, ufl.Form))

        # Store data; both caches below are filled lazily on first use.
        self.form = form
        self.parameters = parameters
        self._hash = None
        self._signature = None

    def __hash__(self):
        "Return unique integer for form + parameters"
        if self._hash is None:
            # Derive the integer hash from the hex signature.
            self._hash = int(self.signature(), 16)
        return self._hash

    def __eq__(self, other):
        "Check for equality"
        return hash(self) == hash(other)

    def signature(self):
        "Return unique string for form + parameters"
        if self._signature is not None:
            return self._signature

        # Get signature from assumed precomputed form_data
        form_signature = self.form.form_data().signature

        # Compute other relevant signatures
        parameters_signature = _parameters_signature(self.parameters)
        ffc_signature = str(FFC_VERSION)
        swig_signature = str(get_swig_version())
        cell_signature = str(self.form.form_data().cell)

        # Build common signature.
        # NOTE(review): swig_signature is computed but not included in the
        # joined list below — preserved as-is; confirm whether intentional.
        signatures = [form_signature,
                      parameters_signature,
                      ffc_signature,
                      cell_signature,
                      ufc_signature]
        string = ";".join(signatures)
        self._signature = sha1(string).hexdigest()
        return self._signature


def _parameters_signature(parameters):
    "Return parameters signature (some parameters must be ignored)."
    # Work on a copy so the caller's parameters are untouched.
    parameters = parameters.copy()
    # These entries vary between runs without affecting generated code.
    ignores = ["log_prefix"]
    for ignore in ignores:
        if ignore in parameters:
            del parameters[ignore]
    return str(parameters)
[ "maciekswat@gmail.com" ]
maciekswat@gmail.com
e47ec77483d8ea21ac2c7f17ceca3591cb18192a
df858cb8172f73aad1af25496ac86e637a203bf4
/Introdução a Programação com Python - exercícios baixados do site oficial/Listagens/07.45 - Jogo da forca.py
bf6002fa6bef2b078f9bd73b91e7c106e5860d59
[]
no_license
emersonleite/python
33cd48788e4f641da244ba9fd0460b9a5b1ef0bc
8157fcd5c7ee7f942a4503ad386e7d2054d5acfc
refs/heads/master
2020-03-08T04:09:57.857429
2019-03-27T14:56:46
2019-03-27T14:56:46
127,913,519
0
0
null
null
null
null
UTF-8
Python
false
false
1,790
py
############################################################################## # Parte do livro Introdução à Programação com Python # Autor: Nilo Ney Coutinho Menezes # Editora Novatec (c) 2010-2014 # Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8 # Primeira reimpressão - Outubro/2011 # Segunda reimpressão - Novembro/1012 # Terceira reimpressão - Agosto/2013 # Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3 # Site: http://python.nilo.pro.br/ # # Arquivo: capitulo 07\07.45 - Jogo da forca.py ############################################################################## palavra = input("Digite a palavra secreta:").lower().strip() for x in range(100): print() digitadas = [] acertos = [] erros = 0 while True: senha = "" for letra in palavra: senha += letra if letra in acertos else "." print(senha) if senha == palavra: print("Você acertou!") break tentativa = input("\nDigite uma letra:").lower().strip() if tentativa in digitadas: print("Você já tentou esta letra!") continue else: digitadas += tentativa if tentativa in palavra: acertos += tentativa else: erros += 1 print("Você errou!") print("X==:==\nX : ") print("X O " if erros >= 1 else "X") linha2 = "" if erros == 2: linha2 = " | " elif erros == 3: linha2 = " \| " elif erros >= 4: linha2 = " \|/ " print("X%s" % linha2) linha3 = "" if erros == 5: linha3 += " / " elif erros >= 6: linha3 += " / \ " print("X%s" % linha3) print("X\n===========") if erros == 6: print("Enforcado!") break
[ "emerson.bl@gmail.com" ]
emerson.bl@gmail.com
7287bd809c049bf9525538a634f1b9f8bb262e56
866a3a0b02ad10ba7e0f4db18efd38a213b0e3c5
/18_0.py
b4b60322a0693c11ff34a160f7ce3c5641122b56
[]
no_license
SleepwalkerCh/Leetcode-
6da7f5859184ae86c6f16d10e3570f837f2508d6
9472b9dd388a8774339ecf9d8ff6cb61b7ea821f
refs/heads/master
2021-03-07T10:58:25.086406
2020-03-19T07:50:32
2020-03-19T07:50:32
246,260,919
0
0
null
null
null
null
UTF-8
Python
false
false
749
py
#18. 4Sum18 # 很简陋的做法,大概率会超时,在O(n^4)基础上做了一些小优化,但是结果未进行查重 # WRONG ANSWER class Solution: def fourSum(self, nums: List[int], target: int) -> List[List[int]]: nums.sort() result=[] print(nums) for i in range(len(nums)-3): for j in range(i+1,len(nums)-2): for k in range(j+1,len(nums)-1): for l in range(k+1,len(nums)): if nums[i]+nums[j]+nums[k]+nums[l]==target: result.append([nums[i],nums[j],nums[k],nums[l]]) if nums[i]+nums[j]+nums[k]+nums[l]>target: break return result
[ "1024089291@qq.com" ]
1024089291@qq.com
c73ae77ac18bd3d3bd5970245667f5e8fd8b2471
6b6f68f507746e3e39b0e8789af5d044e27d6b0a
/Math/0204_CountPrimes_E.py
e266761a3e920a2ddefc2e292ae002021428d1b4
[]
no_license
PFZ86/LeetcodePractice
bb0012d8b3120451dda1745875836278d3362e45
6db9db1934bc0a8142124d8b56bf6c07bdf43d79
refs/heads/master
2021-08-28T08:43:27.343395
2021-08-17T20:38:32
2021-08-17T20:38:32
230,925,656
1
1
null
2021-08-17T20:38:32
2019-12-30T14:01:27
Python
UTF-8
Python
false
false
1,430
py
# https://leetcode.com/problems/count-primes/ # Solution 1: the naive method, time complexity O(n^{1.5}) class Solution(object): def isPrime(self, num): if num <= 1: return False i = 2 # Use i*i <= num as the ending condition; # do not use the expensive function sqrt(num) while i*i <= num: if num % i == 0: return False i += 1 return True def countPrimes(self, n): """ :type n: int :rtype: int """ count = 0 for i in range(1, n): if self.isPrime(i): count += 1 return count # Solution 2: the Sieve method; time complexity O(nloglogn), space complexity O(n) class Solution(object): def countPrimes(self, n): """ :type n: int :rtype: int """ isPrime = [True] * n i = 2 # the ending condition is i*i <= n instead of i <= sqrt(n) for i in range(2, n): if i*i > n: break if isPrime[i]: # we can start from i*i because multiples of i that # are less than i*i are already marked as non-prime j = i*i while j < n: isPrime[j] = False j += i return sum(isPrime[2:])
[ "pengfeizang@pengfeis-iMac.fios-router.home" ]
pengfeizang@pengfeis-iMac.fios-router.home
478995ae08d50e8625c61409890afdaa49990940
d41d18d3ea6edd2ec478b500386375a8693f1392
/plotly/validators/layout/polar/angularaxis/_tickcolor.py
c767e805316121395caad30ece9326f5d675e0b8
[ "MIT" ]
permissive
miladrux/plotly.py
38921dd6618650d03be9891d6078e771ffccc99a
dbb79e43e2cc6c5762251537d24bad1dab930fff
refs/heads/master
2020-03-27T01:46:57.497871
2018-08-20T22:37:38
2018-08-20T22:37:38
145,742,203
1
0
MIT
2018-08-22T17:37:07
2018-08-22T17:37:07
null
UTF-8
Python
false
false
458
py
import _plotly_utils.basevalidators class TickcolorValidator(_plotly_utils.basevalidators.ColorValidator): def __init__( self, plotly_name='tickcolor', parent_name='layout.polar.angularaxis', **kwargs ): super(TickcolorValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type='plot', role='style', **kwargs )
[ "adam.kulidjian@gmail.com" ]
adam.kulidjian@gmail.com
f5048e620a1d249b16bfa06ee8a33bc414722ba4
9d831207b43422b40e54cf6258a29b2f92b66290
/src/boot_navigation/reports.py
9348679320e16ccddb3aafca90894e6209798d01
[]
no_license
AndreaCensi/yc1304
714e70e972e9ee31ac011bdb94a57a8ab568f853
0bc2b759423db7da73fac47572719a37a80fee0e
refs/heads/master
2020-06-04T12:52:10.902082
2013-07-19T07:00:49
2013-07-19T07:00:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,822
py
from .navigation_map import NavigationMap from .plots import plot_arrow_SE2, plot_arrow_se2 from contracts import contract from geometry import se2_from_linear_angular from reprep import Report, rgb_zoom, scale import numpy as np import warnings __all__ = ['display_nmap'] @contract(report=Report, nmap=NavigationMap) def display_nmap(report, nmap): with report.subsection('sensing') as sub: display_nmap_sensing(sub, nmap) f = report.figure() with f.plot('map') as pylab: for bd, pose in nmap.data: commands = bd['commands'] warnings.warn('redo this properly') if len(commands) == 3: x, y, omega = commands else: x, y = commands omega = 0 vel = se2_from_linear_angular([x, y], omega) plot_arrow_SE2(pylab, pose) plot_arrow_se2(pylab, pose, vel, length=0.04, color='g') pylab.axis('equal') @contract(report=Report, nmap=NavigationMap) def display_nmap_sensing(report, nmap): obss = list(nmap.get_all_observations()) map1 = np.vstack(obss) nreps = 4 nspaces = 1 obss2 = [] for o in obss: for _ in range(nreps): obss2.append(o) for _ in range(nspaces): obss2.append(o * np.nan) map2 = np.vstack(obss2) f = report.figure(cols=1) f.data_rgb('observations', _nmapobs_to_rgb(map1)) f.data_rgb('observations2', _nmapobs_to_rgb(map2)) def _nmapobs_to_rgb(m): print m.shape m = m.T rgb = scale(m, min_value=0, max_value=1, nan_color=[.6, 1, .6]) return rgb_zoom(rgb, 4) @contract(obss='list(array)') def _nmapobslist_to_rgb(obss): map2 = np.vstack(obss) return _nmapobs_to_rgb(map2)
[ "andrea@cds.caltech.edu" ]
andrea@cds.caltech.edu
d364a514127247742d43f012ec5b553a968c4bf0
f4dd8aa4e5476ffde24e27273dd47913c7f9177a
/Dlv2_safe2/tests/parser/range.3.test.py
5291d84363c7b37476ea8dd1fa8f8da8958c4baa
[ "Apache-2.0" ]
permissive
dave90/Dlv_safe2
e56071ec1b07c45defda571cb721852e2391abfb
f127f413e3f35d599554e64aaa918bc1629985bc
refs/heads/master
2020-05-30T10:44:13.473537
2015-07-12T12:35:22
2015-07-12T12:35:22
38,256,201
0
0
null
null
null
null
UTF-8
Python
false
false
373
py
input = """ %#maxint=10. f(a). % intuitively, one could expect: g(4). g(3). g(2). here % but this does not produce any g(): %g(4..2). %h(1..3). f(b). intersect(X) :- g(X), h(X). """ output = """ %#maxint=10. f(a). % intuitively, one could expect: g(4). g(3). g(2). here % but this does not produce any g(): %g(4..2). %h(1..3). f(b). intersect(X) :- g(X), h(X). """
[ "davide@davide-All-Series" ]
davide@davide-All-Series
7220d2c71f026f768b003347430670f8bafceab5
9321d3460ffbbb6cd7917b2bac77ce8321e04737
/contributions/Legacy/MOO/optimization/master/master_to_slave.py
d57ff0e707a482d8afa58f4b3774b4524cec5be3
[ "MIT" ]
permissive
muehleisen/CEAforArcGIS
b820d837cd5373b95851b4e5dda609d69f054b97
b6aeca5a9d70835381625a9162d5695714e1a02b
refs/heads/master
2021-01-11T21:24:18.482264
2017-01-06T05:28:48
2017-01-06T05:28:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,673
py
"""Data required for Slave from Master""" """ This File sets all variables for the slave optimization, that have to be set by the Master """ #import os #Energy_Models_path ="/Users/Tim/Desktop/ETH/Masterarbeit/Github_Files/urben/Masterarbeit/EnergySystem_Models" #Network_Raw_Data_Path = "/Users/Tim/Desktop/ETH/Masterarbeit/Tools/Results/Network_loads" #os.chdir(Energy_Models_path) import contributions.Legacy.moo.globalVar as gV import numpy as np reload(gV) class MasterSlaveVariables(object): def __init__(self): # Name the file which should be loaded: self.configKey = "" self.NETWORK_DATA_FILE = "" self.nBuildingsConnected = 0 self.fNameTotalCSV = "" #self.Network_Supply_Temp = 70 + 273.0 # Electricity_Type: self.EL_TYPE = 'normal' # type normal or green (=green power) # Geothermal Heat Pump, #self.GHP_max_i = gV.GHP_Cmax_Size_th # [W] Heat power (thermal output) self.GHP_number = 0.0 # number of probes #self.GHP_max = self.GHP_number * self.GHP_max_i self.GHP_SEASON_ON = 0 # Hour in Year, when to switch on GHP self.GHP_SEASON_OFF = 8760 # Hour in Year, when to switch off GHP # Sewage Heat Pump self.HPSew_maxSize = 0 # Lake Heat Pump self.HPLake_maxSize = 0 # Furnace self.Furnace_Q_max = 0 self.Furn_Moist_type = "wet" #gV.Furn_Moist_type # set the moisture content of wood chips, either "dry" or "wet" # GAS TURBINE VARIABLES #self.gt_size = 1.0E6 # in Watt self.CC_GT_SIZE = 0 self.gt_fuel = "NG" # Boiler - Thermal output power! 
# add BG / NG Story for both peak and normal boilers self.Boiler_Q_max = 0 self.BoilerPeak_Q_max = 0 self.BoilerType = "NG" #Choose "NG" or "BG" self.BoilerPeakType = "NG" #Choose "NG" or "BG" self.BoilerBackupType = "NG" #Choose "NG" or "BG" # Cooling Tower : #self.CT_Qdesign = 0 # Storage self.STORAGE_SIZE = 1000000.0 # in m^3 - size of hot water storage tank (up to now a random variable) self.STORAGE_HEIGHT = 3.0 # in m - height of hot water storage tank self.A_storage_outside = self.STORAGE_SIZE/self.STORAGE_HEIGHT + 2 * np.pi * \ (self.STORAGE_SIZE/self.STORAGE_HEIGHT / np.pi)**0.5 #neglecting ground area for heat losses self.alpha_loss = 0.0111 # EnergyPRO: 0.3 * 0.037 ; \ # Saplamidis: 0.293542 # Wh / h= 0( .005 / (math.log10(26/25.0) ) , # from Vassilis-Storage Optimization Code ** ACHTUNG !! CHANGE - SCALES WITH SIZE (?!) self.Storage_conv_loss = 0.0111 # losses due to energy conversion from and to storage self.T_storage_initial = 10 + 273.0 # initial Storage Temperature self.T_storage_zero = 10 + 273.0 # Reference Temperature Storage self.Q_in_storage_zero = self.STORAGE_SIZE * 1/ 3600 * 983.21 * 4185 * (self.T_storage_zero - self.T_storage_initial) self.dT_buffer = 5 # maintain a buffer for "uncertainties", never go below this temperature # Storage is initially empty self.T_ST_MAX = 90 + 273.0 # Maximum Temperature of storage allowed self.T_ST_MIN = 10 + 273.0 # Solar self.SOLCOL_TYPE_PVT = "PVT_35.csv" # file used as PVT type of collectors self.SOLCOL_TYPE_SC = "SC_75.csv" self.SOLCOL_TYPE_PV = "Pv.csv" self.SOLAR_PART_PVT = 0.0 # [%] How much of the total area is available for PVT self.SOLAR_PART_SC = 0.0 # How much of the total area is available for Solar Collectors self.SOLAR_PART_PV = 0.0 # How much of the total area is available for PV (no thermal output, selling electricity) self.nPVT_installations = 2 # number of PVT installations, required for PVT average size, which goes into KEV remuneration self.nPV_installations = 2 # number of PVT 
installations, required for PVT average size, which goes into KEV remuneration # declare, which power plants will be used : USED = 1 ; NOT USED = 0 self.Boiler_on = 0 self.BoilerPeak_on = 0 self.Furnace_on = 0 self.GHP_on = 0 self.HP_Lake_on = 0 self.HP_Sew_on = 0 self.CC_on = 0 self.WasteServersHeatRecovery = 0 # server heat self.WasteCompressorHeatRecovery = 0
[ "fonseca@arch.ethz.ch" ]
fonseca@arch.ethz.ch
c15199f76236b5e1a4aa7c00237e9015dab2015a
9c9512d92f4693a40e80e2dc8df9a74ef34a9b02
/archive/fibcoll_cmass_pm.py
0bf73edf6ea47071ec8819e7d74dcb28dacd6166
[]
no_license
changhoonhahn/FiberCollisions
9184600bbd596f861755425c46b311b2ab342af5
ee0bfab26cc0167982822d8bc5c5654eaccbe2ef
refs/heads/master
2021-01-18T21:19:12.457250
2017-01-31T20:33:29
2017-01-31T20:33:29
34,329,426
0
0
null
null
null
null
UTF-8
Python
false
false
3,587
py
import numpy as np import pylab as py from scipy.integrate import simps from scipy.optimize import curve_fit from matplotlib import rc rc('text', usetex=True) rc('font', family='serif') prismdir = '/global/data/scr/chh327/powercode/data/' disp_los = np.loadtxt(prismdir+'cmass-dr11v2-N-Anderson-disp_los_pm.dat') disp_perp = np.loadtxt(prismdir+'cmass-dr11v2-N-Anderson-disp_perp.dat') disp_los_tail_red = np.loadtxt(prismdir+'cmass-dr11v2-N-Anderson-tail_red.dat') disp_los_disttail_red = disp_los_tail_red[ (disp_los_tail_red < 0.7) & (disp_los_tail_red > 0.43) ] data = np.loadtxt('/global/data/scr/chh327/powercode/data/cmass-dr11v2-N-Anderson-nzw-zlim.dat') mpc_bin = -1000.0+0.1*np.array(range(20001)) mpc_bin_perp = 0.05*np.array(range(21)) red_bin = 0.01*np.array(range(101)) fig4 = py.figure(4) dump = fig4.add_subplot(111) fig1 = py.figure(1) ax1 = fig1.add_subplot(111) fig3 = py.figure(3) ax12 = fig3.add_subplot(111) hist_disp_los = dump.hist(disp_los,mpc_bin, label='Line of Sight Displacement Histogram') hist_disp_perp = ax12.hist(disp_perp,mpc_bin_perp, log='True',label=r'Histogram of $d_{\perp}$') disp_los_x = [ (hist_disp_los[1][i] + hist_disp_los[1][i+1])/2.0 for i in range(len(hist_disp_los[1])-1) ] disp_perp_x = [ (hist_disp_perp[1][i] + hist_disp_perp[1][i+1])/2.0 for i in range(len(hist_disp_perp[1])-1) ] def gauss(x,sig): return np.max(hist_disp_los[0])*np.exp(-0.5*x**2/sig**2) def expon(x,sig): return np.max(hist_disp_los[0])*np.exp(-x/sig) popt, pcov = curve_fit(expon, np.array(disp_los_x[10000:10500]), hist_disp_los[0][10000:10500]) print popt ax1.plot(disp_los_x, hist_disp_los[0],linewidth=3, label=r'Histogram of $d_{LOS}$') ax1.plot(np.array(disp_los_x[10000:10500]), expon(np.array(disp_los_x[10000:10500]), popt[0]), 'r', linewidth=3, label=r'Exponential distribution with $\sigma=$'+str(popt)) #ax1.set_yscale('log') ax1.set_xlim([-50,50]) ax1.set_ylim([0,300]) ax1.set_title(r'$d_{\rm{LOS}}$ Distribution of CMASS DR11v2 North Fiber Collided Pairs') 
ax1.set_xlabel('Displacement (Mpc)') ax1.set_ylabel('Number of Galaxies') ax1.legend(loc='best') # Writing the normalized histogram to file: hist_disp_los_normed = dump.hist( disp_los, mpc_bin, normed=1 ) output = np.zeros(2*len(disp_los_x)).reshape((len(disp_los_x),2)) output[:,0] = disp_los_x output[:,1] = hist_disp_los_normed[0] np.savetxt(prismdir+'cmass-dr11v2-N-Anderson-disp_los_hist_normed_pm.dat', output) #for d in [20, 30, 40]: # RMSfrac = float(len(disp_los[(disp_los<d) & (disp_los>0)]))/float(len(disp_los))*100.0 # caption = r''+str(np.int(RMSfrac))+"$\%$" # ax1.annotate(caption, (float(d),200), xycoords='data', xytext=(float(d), 500), textcoords='data', # arrowprops=dict(arrowstyle="fancy", facecolor='black', connectionstyle="angle3,angleA=0,angleB=-90"), # fontsize=20, horizontalalignment='center', verticalalignment='top') fig2 = py.figure(2,figsize=(10,10)) ax2 = fig2.add_subplot(111) hist_disttail_red = ax2.hist( disp_los_disttail_red, red_bin, normed=1, label=r'Redshift Distribution of Galaxies with $d_{LOS} > 30$') hist_data_red = dump.hist(data[:,2], red_bin, normed=1,label='Redshift Distribution of Galaxies for data') hist_data_red_x = [ (hist_data_red[1][i] + hist_data_red[1][i+1])/2.0 for i in range(len(hist_data_red[1])-1) ] ax2.plot(hist_data_red_x, hist_data_red[0],'r', linewidth=3, label='Redshift Distribution of Galaxies for CMASS dr11v2 NGC') ax2.set_xlim([0.4, 0.8]) ax2.set_ylim([0.0, 8.0]) ax2.set_xlabel('Redshift (z)') ax2.set_ylabel('Galaxies') ax2.legend(loc='best') py.show()
[ "chh327@nyu.edu" ]
chh327@nyu.edu
02256d1be416fd37e092a3e263c29dcedad1ef63
78a15793be1ba71ea7eecee33abef4ecbe11d8f2
/apps/users/migrations/0016_auto_20151102_1457.py
5a3e982e5c71bdaae3706ce02e4d2db9cbd42842
[]
no_license
teresaylin/my2009
f5df9c62492d4c88931f6aa45af31ee88dbe3a1a
2486750ad73df313d596497b0eb7f4c47518e6a6
refs/heads/master
2021-03-21T23:53:55.581074
2016-06-01T18:13:44
2016-06-01T18:13:44
23,392,283
0
0
null
null
null
null
UTF-8
Python
false
false
870
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ('users', '0015_auto_20151102_1456'), ] operations = [ migrations.AddField( model_name='commentthread', name='content_type', field=models.ForeignKey(to='contenttypes.ContentType', null=True), preserve_default=True, ), migrations.AddField( model_name='commentthread', name='object_id', field=models.PositiveIntegerField(null=True), preserve_default=True, ), migrations.AlterUniqueTogether( name='commentthread', unique_together=set([('content_type', 'object_id')]), ), ]
[ "jam.hann@gmail.com" ]
jam.hann@gmail.com
b68b7e14f09c7cdcf1d4e14991aaea2461b218bd
e986ebbf73a6dff7ccc58feb886e54afa57e49d9
/sdk/python/pulumi_awsx/_utilities.py
83985732e9f67955c5ab392bc27761660f9323b9
[ "BSD-3-Clause", "Apache-2.0" ]
permissive
pulumi/pulumi-awsx
5a5bdd77afaa674e9a5dd9f26540ddea5a1cde1c
45136c540f29eb3dc6efa5b4f51cfe05ee75c7d8
refs/heads/master
2023-09-01T21:47:40.877155
2023-08-24T04:14:12
2023-08-24T04:14:12
132,053,036
186
107
Apache-2.0
2023-09-13T07:28:54
2018-05-03T21:46:28
TypeScript
UTF-8
Python
false
false
8,056
py
# coding=utf-8 # *** WARNING: this file was generated by pulumi-gen-awsx. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import importlib.util import inspect import json import os import pkg_resources import sys import typing import pulumi import pulumi.runtime from semver import VersionInfo as SemverVersion from parver import Version as PEP440Version def get_env(*args): for v in args: value = os.getenv(v) if value is not None: return value return None def get_env_bool(*args): str = get_env(*args) if str is not None: # NOTE: these values are taken from https://golang.org/src/strconv/atob.go?s=351:391#L1, which is what # Terraform uses internally when parsing boolean values. if str in ["1", "t", "T", "true", "TRUE", "True"]: return True if str in ["0", "f", "F", "false", "FALSE", "False"]: return False return None def get_env_int(*args): str = get_env(*args) if str is not None: try: return int(str) except: return None return None def get_env_float(*args): str = get_env(*args) if str is not None: try: return float(str) except: return None return None def _get_semver_version(): # __name__ is set to the fully-qualified name of the current module, In our case, it will be # <some module>._utilities. <some module> is the module we want to query the version for. root_package, *rest = __name__.split('.') # pkg_resources uses setuptools to inspect the set of installed packages. We use it here to ask # for the currently installed version of the root package (i.e. us) and get its version. # Unfortunately, PEP440 and semver differ slightly in incompatible ways. The Pulumi engine expects # to receive a valid semver string when receiving requests from the language host, so it's our # responsibility as the library to convert our own PEP440 version into a valid semver string. 
pep440_version_string = pkg_resources.require(root_package)[0].version pep440_version = PEP440Version.parse(pep440_version_string) (major, minor, patch) = pep440_version.release prerelease = None if pep440_version.pre_tag == 'a': prerelease = f"alpha.{pep440_version.pre}" elif pep440_version.pre_tag == 'b': prerelease = f"beta.{pep440_version.pre}" elif pep440_version.pre_tag == 'rc': prerelease = f"rc.{pep440_version.pre}" elif pep440_version.dev is not None: prerelease = f"dev.{pep440_version.dev}" # The only significant difference between PEP440 and semver as it pertains to us is that PEP440 has explicit support # for dev builds, while semver encodes them as "prerelease" versions. In order to bridge between the two, we convert # our dev build version into a prerelease tag. This matches what all of our other packages do when constructing # their own semver string. return SemverVersion(major=major, minor=minor, patch=patch, prerelease=prerelease) # Determine the version once and cache the value, which measurably improves program performance. _version = _get_semver_version() _version_str = str(_version) def get_version(): return _version_str def get_resource_opts_defaults() -> pulumi.ResourceOptions: return pulumi.ResourceOptions( version=get_version(), plugin_download_url=get_plugin_download_url(), ) def get_invoke_opts_defaults() -> pulumi.InvokeOptions: return pulumi.InvokeOptions( version=get_version(), plugin_download_url=get_plugin_download_url(), ) def get_resource_args_opts(resource_args_type, resource_options_type, *args, **kwargs): """ Return the resource args and options given the *args and **kwargs of a resource's __init__ method. """ resource_args, opts = None, None # If the first item is the resource args type, save it and remove it from the args list. if args and isinstance(args[0], resource_args_type): resource_args, args = args[0], args[1:] # Now look at the first item in the args list again. 
# If the first item is the resource options class, save it. if args and isinstance(args[0], resource_options_type): opts = args[0] # If resource_args is None, see if "args" is in kwargs, and, if so, if it's typed as the # the resource args type. if resource_args is None: a = kwargs.get("args") if isinstance(a, resource_args_type): resource_args = a # If opts is None, look it up in kwargs. if opts is None: opts = kwargs.get("opts") return resource_args, opts # Temporary: just use pulumi._utils.lazy_import once everyone upgrades. def lazy_import(fullname): import pulumi._utils as u f = getattr(u, 'lazy_import', None) if f is None: f = _lazy_import_temp return f(fullname) # Copied from pulumi._utils.lazy_import, see comments there. def _lazy_import_temp(fullname): m = sys.modules.get(fullname, None) if m is not None: return m spec = importlib.util.find_spec(fullname) m = sys.modules.get(fullname, None) if m is not None: return m loader = importlib.util.LazyLoader(spec.loader) spec.loader = loader module = importlib.util.module_from_spec(spec) m = sys.modules.get(fullname, None) if m is not None: return m sys.modules[fullname] = module loader.exec_module(module) return module class Package(pulumi.runtime.ResourcePackage): def __init__(self, pkg_info): super().__init__() self.pkg_info = pkg_info def version(self): return _version def construct_provider(self, name: str, typ: str, urn: str) -> pulumi.ProviderResource: if typ != self.pkg_info['token']: raise Exception(f"unknown provider type {typ}") Provider = getattr(lazy_import(self.pkg_info['fqn']), self.pkg_info['class']) return Provider(name, pulumi.ResourceOptions(urn=urn)) class Module(pulumi.runtime.ResourceModule): def __init__(self, mod_info): super().__init__() self.mod_info = mod_info def version(self): return _version def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource: class_name = self.mod_info['classes'].get(typ, None) if class_name is None: raise Exception(f"unknown resource type {typ}") 
TheClass = getattr(lazy_import(self.mod_info['fqn']), class_name) return TheClass(name, pulumi.ResourceOptions(urn=urn)) def register(resource_modules, resource_packages): resource_modules = json.loads(resource_modules) resource_packages = json.loads(resource_packages) for pkg_info in resource_packages: pulumi.runtime.register_resource_package(pkg_info['pkg'], Package(pkg_info)) for mod_info in resource_modules: pulumi.runtime.register_resource_module( mod_info['pkg'], mod_info['mod'], Module(mod_info)) _F = typing.TypeVar('_F', bound=typing.Callable[..., typing.Any]) def lift_output_func(func: typing.Any) -> typing.Callable[[_F], _F]: """Decorator internally used on {fn}_output lifted function versions to implement them automatically from the un-lifted function.""" func_sig = inspect.signature(func) def lifted_func(*args, opts=None, **kwargs): bound_args = func_sig.bind(*args, **kwargs) # Convert tuple to list, see pulumi/pulumi#8172 args_list = list(bound_args.args) return pulumi.Output.from_input({ 'args': args_list, 'kwargs': bound_args.kwargs }).apply(lambda resolved_args: func(*resolved_args['args'], opts=opts, **resolved_args['kwargs'])) return (lambda _: lifted_func) def get_plugin_download_url(): return None
[ "noreply@github.com" ]
pulumi.noreply@github.com
c4674a7fdc765d2349e6a916e2744d418ebef5eb
97f2852420d6fdc98e5a4a0321c35920ff070d41
/examples/scripts/csc/cbpdn_ams_clr.py
9b7714d8854ec1722a6998cd4d1d3d001830f1ee
[ "BSD-3-Clause" ]
permissive
eglxiang/sporco
93595f3afb6acda758425f7332513eeb892fa51f
e4a716b32b675d6e23ba0bfc3b2d7c6f9bc5d7a3
refs/heads/master
2021-05-04T23:03:57.430340
2018-01-19T19:03:01
2018-01-19T19:03:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,460
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # This file is part of the SPORCO package. Details of the copyright # and user license can be found in the 'LICENSE.txt' file distributed # with the package. """ CSC with a Spatial Mask ======================= This example demonstrates the use of :class:`.cbpdn.AddMaskSim` for convolutional sparse coding with a spatial mask :cite:`wohlberg-2016-boundary`. The example problem is inpainting of randomly distributed corruption of a colour image :cite:`wohlberg-2016-convolutional`. """ from __future__ import print_function from builtins import input from builtins import range import numpy as np from sporco.admm import tvl2 from sporco.admm import cbpdn from sporco import util from sporco import metric from sporco import plot """ Load a reference image. """ img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True, idxexp=np.s_[:, 160:672]) """ Create random mask and apply to reference image to obtain test image. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.) """ t = 0.5 np.random.seed(12345) msk = np.random.randn(*(img.shape)) msk[np.abs(msk) > t] = 1; msk[np.abs(msk) < t] = 0; imgw = msk * img """ Define pad and crop functions. """ pn = 8 spad = lambda x: np.pad(x, ((pn, pn), (pn, pn), (0, 0)), mode='symmetric') zpad = lambda x: np.pad(x, ((pn, pn), (pn, pn), (0, 0)), mode='constant') crop = lambda x: x[pn:-pn, pn:-pn] """ Construct padded mask and test image. """ mskp = zpad(msk) imgwp = spad(imgw) """ :math:`\ell_2`-TV denoising with a spatial mask as a non-linear lowpass filter. """ lmbda = 0.05 opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 200, 'DFidWeight': mskp, 'gEvalY': False, 'AutoRho': {'Enabled': True}}) b = tvl2.TVL2Denoise(imgwp, lmbda, opt, caxis=2) sl = b.solve() sh = imgwp - sl """ Load dictionary. """ D = util.convdicts()['RGB:8x8x3x64'] """ Set up :class:`.admm.cbpdn.ConvBPDN` options. 
""" lmbda = 2e-2 opt = cbpdn.ConvBPDN.Options({'Verbose': True, 'MaxMainIter': 250, 'HighMemSolve': True, 'RelStopTol': 1e-3, 'AuxVarObj': False, 'RelaxParam': 1.8, 'rho': 5e1*lmbda + 1e-1, 'AutoRho': {'Enabled': False, 'StdResiduals': True}}) """ Construct :class:`.admm.cbpdn.AddMaskSim` wrapper for :class`.admm.cbpdn.ConvBPDN` and solve via wrapper. This example could also have made use of :class`.admm.cbpdn.ConvBPDNMaskDcpl`, which has very similar performance in this application, but :class:`.admm.cbpdn.AddMaskSim` has the advantage of greater flexibility in that the wrapper can be applied to a variety of CSC solver objects. """ ams = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, sh, mskp, lmbda, opt=opt) X = ams.solve() """ Reconstruct from representation. """ imgr = crop(sl + ams.reconstruct().squeeze()) """ Display solve time and reconstruction performance. """ print("AddMaskSim wrapped ConvBPDN solve time: %.2fs" % ams.timer.elapsed('solve')) print("Corrupted image PSNR: %5.2f dB" % metric.psnr(img, imgw)) print("Recovered image PSNR: %5.2f dB" % metric.psnr(img, imgr)) """ Display reference, test, and reconstructed image """ fig = plot.figure(figsize=(21, 7)) plot.subplot(1, 3, 1) plot.imview(img, fgrf=fig, title='Reference image') plot.subplot(1, 3, 2) plot.imview(imgw, fgrf=fig, title='Corrupted image') plot.subplot(1, 3, 3) plot.imview(imgr, fgrf=fig, title='Reconstructed image') fig.show() """ Display lowpass component and sparse representation """ fig = plot.figure(figsize=(14, 7)) plot.subplot(1, 2, 1) plot.imview(sl, fgrf=fig, cmap=plot.cm.Blues, title='Lowpass component') plot.subplot(1, 2, 2) plot.imview(np.squeeze(np.sum(abs(X), axis=ams.cri.axisM)), fgrf=fig, cmap=plot.cm.Blues, title='Sparse representation') fig.show() """ Plot functional value, residuals, and rho """ its = ams.getitstat() fig = plot.figure(figsize=(21, 7)) plot.subplot(1, 3, 1) plot.plot(its.ObjFun, fgrf=fig, xlbl='Iterations', ylbl='Functional') plot.subplot(1, 3, 2) 
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, fgrf=fig, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual']) plot.subplot(1, 3, 3) plot.plot(its.Rho, fgrf=fig, xlbl='Iterations', ylbl='Penalty Parameter') fig.show() # Wait for enter on keyboard input()
[ "brendt@ieee.org" ]
brendt@ieee.org
951f433acec27ae7970c718810065a209f4c17b4
c2602ec4c504914c0831ab061b6cee779be344a2
/python/aead/kms_envelope_aead.py
ba9b8f25dc2ed14a763e30b393d4095f7add5406
[ "Apache-2.0" ]
permissive
shigakio/tink
85f8da6033139154528bf00bdadd8f3f800f6223
a61aaeeb5da223357b5ec3513231bc8e493a4ac1
refs/heads/master
2021-02-06T14:41:38.074226
2020-02-20T06:18:02
2020-02-20T06:18:30
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,463
py
# Copyright 2020 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for envelope encryption with KMS.""" from __future__ import absolute_import from __future__ import division # Placeholder for import for type annotations from __future__ import print_function import struct from tink.proto import tink_pb2 from tink.python import core from tink.python.aead import aead from tink.python.core import tink_error # Defines in how many bytes the DEK length will be encoded. DEK_LEN_BYTES = 4 class KmsEnvelopeAead(aead.Aead): """Implements envelope encryption. Envelope encryption generates a data encryption key (DEK) which is used to encrypt the payload. The DEK is then send to a KMS to be encrypted and the encrypted DEK is attached to the ciphertext. In order to decrypt the ciphertext, the DEK first has to be decrypted by the KMS, and then the DEK can be used to decrypt the ciphertext. For further information see https://cloud.google.com/kms/docs/envelope-encryption. 
The ciphertext structure is as follows: * Length of the encrypted DEK: 4 bytes (big endian) * Encrypted DEK: variable length, specified by the previous 4 bytes * AEAD payload: variable length """ def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template = key_template self.remote_aead = remote def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes: # Get new key from template dek = core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek, aead.Aead) # Encrypt plaintext ciphertext = dek_aead.encrypt(plaintext, associated_data) # Wrap DEK key values with remote encrypted_dek = self.remote_aead.encrypt(dek.value, b'') # Construct ciphertext, DEK length encoded as big endian enc_dek_len = struct.pack('>I', len(encrypted_dek)) return enc_dek_len + encrypted_dek + ciphertext def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes: ct_len = len(ciphertext) # Recover DEK length if ct_len < DEK_LEN_BYTES: raise tink_error.TinkError dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic check if DEK length can be valid. if dek_len > (ct_len - DEK_LEN_BYTES) or dek_len < 0: raise tink_error.TinkError # Decrypt DEK with remote AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get AEAD primitive based on DEK dek = tink_pb2.KeyData() dek.type_url = self.key_template.type_url dek.value = dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek, aead.Aead) # Extract ciphertext payload and decrypt ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:] return dek_aead.decrypt(ct_bytes, associated_data)
[ "copybara-worker@google.com" ]
copybara-worker@google.com
5cc22cbff16ea64707ce2511eef96003aec9056c
53fab060fa262e5d5026e0807d93c75fb81e67b9
/backup/user_173/ch161_2020_06_15_19_32_40_555261.py
a28363ec0ee02b8ca4432c1e1eb5d32ebf08b4a3
[]
no_license
gabriellaec/desoft-analise-exercicios
b77c6999424c5ce7e44086a12589a0ad43d6adca
01940ab0897aa6005764fc220b900e4d6161d36b
refs/heads/main
2023-01-31T17:19:42.050628
2020-12-16T05:21:31
2020-12-16T05:21:31
306,735,108
0
0
null
null
null
null
UTF-8
Python
false
false
286
py
def PiWallis(elementos):
    """Approximate pi via the Wallis product.

    Multiplies the first *elementos* factors of the Wallis product
    (2/1 * 2/3 * 4/3 * 4/5 * ...) and returns twice the partial
    product, which converges (slowly) to pi as *elementos* grows.
    """
    num, den = 2, 1
    produto = 1
    for passo in range(elementos):
        produto *= num / den
        # Even steps advance the denominator, odd steps the numerator,
        # generating the factor sequence 2/1, 2/3, 4/3, 4/5, 4/7, ...
        if passo % 2 == 0:
            den += 2
        else:
            num += 2
    return produto * 2
[ "you@example.com" ]
you@example.com
0042cd7ad97726820014cb8b6a3f087d560913d2
b41da6f351f27bf0d45a4e4d0e1be8f3a86f4b64
/itsybitsy/leetcode/sliding_window.py
2ddaef9a489cf95fe6af1b0a16bd1c8ef0f3cf44
[]
no_license
santoshr1016/WeekendMasala
a5adbabe0b78cde567667376d7ddf05bb505a0ff
e099f9ac9677f7acb8faf620af94a06d76cae044
refs/heads/master
2020-03-26T00:26:32.649429
2019-08-30T07:32:24
2019-08-30T07:32:24
144,320,624
0
0
null
2019-06-03T23:08:00
2018-08-10T18:36:38
Python
UTF-8
Python
false
false
278
py
def sliding_window(nums, k): size = len(nums) - (k-1) op_list = [] for i in range(size): op_list.append(max(nums[i: i+k])) print(op_list) return op_list nums = [1, 3, -1, -3, 5, 3, 6, 7, 7, 8, 1, 34, -9] k = 3 rv = sliding_window(nums, k) print(rv)
[ "santosh.ratnala@acquia.com" ]
santosh.ratnala@acquia.com
3dd7edb585e13d632ba412a0b12b8b9348c2948a
578db86c51d44ebddd0dc7b1738985b3dc69eb74
/corehq/apps/hqadmin/migrations/0015_rename_sqlhqdeploy.py
0b35e450b0a336e0722dad15f4ee9786d7198ab2
[ "BSD-3-Clause" ]
permissive
dimagi/commcare-hq
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
e7391ddae1af1dbf118211ecb52c83fc508aa656
refs/heads/master
2023-08-16T22:38:27.853437
2023-08-16T19:07:19
2023-08-16T19:07:19
247,278
499
203
BSD-3-Clause
2023-09-14T19:03:24
2009-07-09T17:00:07
Python
UTF-8
Python
false
false
512
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.27 on 2020-01-31 20:24 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('hqadmin', '0014_remove_sqlhqdeploy_couch_id'), ] operations = [ migrations.RenameModel( old_name='SQLHqDeploy', new_name='HqDeploy', ), migrations.AlterModelTable( name='hqdeploy', table=None, ), ]
[ "orange.jenny@gmail.com" ]
orange.jenny@gmail.com
41003197a8029e2bdd2fb2389695572510a70bda
76e9afdf16eabcc9e1a3facd308e56362112efc4
/plot/lossplot3.py
22ff012e1ffa7ab7c93816de25e1713f6ecdc95c
[]
no_license
rerejii/pwb_work_2021
c65c5e787ad98b7d847cb63ebadc24a02f001e90
8ecfb2a98d9d396ed505ecc939e384cf6400412d
refs/heads/main
2023-03-30T10:43:18.115386
2021-03-24T05:38:41
2021-03-24T05:38:41
350,954,969
0
0
null
null
null
null
UTF-8
Python
false
false
755
py
# Plot training-loss curves for a set of experiment runs.
#
# For each run suffix in `markset`, reads the run's train_loss.csv from a
# fixed network-share path, plots the 'loss' column, clamps the y-axis to
# [0, 0.05] and saves the figure as train_loss-<mark>.png in the CWD.
import os
import glob
import csv
import sys
import numpy as np
import pandas as pd
from natsort import natsorted
import matplotlib.pyplot as plt
# import matplotlib

# One plot per experiment variant A..F.
markset = ['A', 'B', 'C', 'D', 'E', 'F']
for mark in markset:
    # NOTE(review): hard-coded Windows share path — only runs on the
    # author's machine; parameterize if reused.
    csv_path = 'Z:/hayakawa/binary/20210227/unet_use-bias_beta/unet_use-bias_beta-'+mark+'/CsvDatas/train_loss.csv'
    # csv_path = 'Z:/hayakawa/binary/20210227/unet_use-bias_beta_loss/unet_use-bias_beta_loss-A/CsvDatas/train_loss.csv'
    df = pd.read_csv(csv_path)
    # print(df)
    # DataFrame.plot creates the figure; pyplot state then styles/saves it.
    df.plot(y=['loss'])
    # print(df['loss'].values)
    # print(df.index.values)
    # x = df.index.values
    # y = df['loss'].values
    # plt.plot(x, y)
    plt.ylim([0.00, 0.05])
    plt.savefig('train_loss-'+mark+'.png')
    plt.show()
[ "hayakawa.shinya.kochi@gmail.com" ]
hayakawa.shinya.kochi@gmail.com
4255372facaf9a7101262b53db5d91bb11fa70e9
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/103/usersdata/222/50669/submittedfiles/av1_3.py
a078720d7ccf91267555d632f67ab1c0ab99e466
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
188
py
# -*- coding: utf-8 -*- import math a=int(input('a:')) b=int(input('b:')) r=1 resto=1 cont=0 while r>0: r=a%b a=b b=r cont=cont+1 resto=resto+1 print(resto) print(cont)
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
700c496a84a90d5c782ada2ec88467c3c5ab4266
a913309bda87feee7f0637cb73901b4bcdca44bd
/0x0C-python-input_output/2-read_lines.py
a37333dfcb5deb5dd181b3116277abe6647cebde
[]
no_license
KamalTaleb/holbertonschool-python
e77b4f88a7ae60db158c0defa6c3f7737ad96562
29ffbccf1d02c7cf76a5df04d9386105dc149a81
refs/heads/master
2023-01-20T20:24:32.388729
2020-11-26T12:22:57
2020-11-26T12:22:57
292,331,337
0
1
null
null
null
null
UTF-8
Python
false
false
436
py
#!/usr/bin/python3
"""Print a limited number of lines from a text file."""


def read_lines(filename="", nb_lines=0):
    """Print the first *nb_lines* lines of *filename* to stdout.

    When *nb_lines* is zero or negative, every line of the file is
    printed instead.  Lines keep their own trailing newlines; nothing
    extra is appended.
    """
    with open(filename, 'r', encoding='utf-8') as handle:
        for count, current in enumerate(handle, start=1):
            # Unlimited mode, or still within the requested prefix.
            if nb_lines <= 0 or count <= nb_lines:
                print(current, end="")
            else:
                break
[ "kamal.talebb@gmail.com" ]
kamal.talebb@gmail.com
0e365224111f952db09c0c48889ec831a0dc1b5c
d7b4e2e391e1f15fd7cb4fbf4d9aee598131b007
/models/BiLSTM1d.py
38efbe949140c842932c9d98b0a6c2aa0f9616d9
[ "MIT" ]
permissive
wuyou33/DL-based-Intelligent-Diagnosis-Benchmark
eba2ce6f948b5abe68069e749f64501a32e1d7ca
e534f925cf454d07352f7ef82d75a8d6dac5355c
refs/heads/master
2021-01-02T15:06:29.041349
2019-12-28T21:47:21
2019-12-28T21:47:21
239,673,952
1
0
MIT
2020-02-11T04:15:21
2020-02-11T04:15:20
null
UTF-8
Python
false
false
1,692
py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class BiLSTM(nn.Module):
    """1-D CNN feature extractor followed by a bidirectional LSTM classifier.

    Two Conv1d blocks embed the raw signal, the sequence (length V=25) is
    fed to a 2-layer BiLSTM, and two fully connected layers map the
    flattened LSTM outputs to `out_channel` class logits.
    """

    def __init__(self, in_channel=1, out_channel=10):
        super(BiLSTM, self).__init__()
        self.hidden_dim = 64     # LSTM hidden size per direction
        self.kernel_num = 16     # conv channels of the first block
        self.num_layers = 2      # stacked LSTM layers
        self.V = 25              # sequence length after adaptive pooling
        # Conv -> BN -> ReLU -> halve the length.
        self.embed1 = nn.Sequential(
            nn.Conv1d(in_channel, self.kernel_num, kernel_size=3, padding=1),
            nn.BatchNorm1d(self.kernel_num),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2))
        # Conv -> BN -> ReLU -> force the length to exactly V.
        self.embed2 = nn.Sequential(
            nn.Conv1d(self.kernel_num, self.kernel_num*2, kernel_size=3, padding=1),
            nn.BatchNorm1d(self.kernel_num*2),
            nn.ReLU(inplace=True),
            nn.AdaptiveMaxPool1d(self.V))
        # Classifier head over the flattened BiLSTM outputs
        # (V timesteps * 2 directions * hidden_dim features).
        self.hidden2label1 = nn.Sequential(nn.Linear(self.V * 2 * self.hidden_dim, self.hidden_dim * 4), nn.ReLU(), nn.Dropout())
        self.hidden2label2 = nn.Linear(self.hidden_dim * 4, out_channel)
        self.bilstm = nn.LSTM(self.kernel_num*2, self.hidden_dim, num_layers=self.num_layers,
                              bidirectional=True, batch_first=True, bias=False)

    def forward(self, x):
        # assumes x is (batch, in_channel, length) — TODO confirm with caller
        x = self.embed1(x)
        x = self.embed2(x)                          # -> (batch, 2*kernel_num, V)
        x = x.view(-1, self.kernel_num*2, self.V)
        # LSTM is batch_first, so move time to dim 1: (batch, V, features).
        x = torch.transpose(x, 1, 2)
        bilstm_out, _ = self.bilstm(x)              # -> (batch, V, 2*hidden_dim)
        bilstm_out = torch.tanh(bilstm_out)
        bilstm_out = bilstm_out.view(bilstm_out.size(0), -1)  # flatten per sample
        logit = self.hidden2label1(bilstm_out)
        logit = self.hidden2label2(logit)
        return logit
[ "646032073@qq.com" ]
646032073@qq.com
e54a254830aa0ee6382ff75d8b3544e326d9c316
3b7d82cc23bb9a760e897e881a2cbfb1d4cb954f
/labcoat/attributes.py
720be9a8f39e048d6a4c02aa7c26741a4478da53
[]
no_license
gulopine/labcoat
af972a17e7f165d49e9333c7d1a779d6f616d608
4a6bfe05f97ad5f63d6c4d097553ae659bad2312
refs/heads/master
2021-01-17T06:25:21.033563
2011-03-13T15:30:34
2011-03-13T15:30:34
1,448,589
10
2
null
null
null
null
UTF-8
Python
false
false
4,523
py
class AttributeTester(object):
    """Base for specimen attribute assertions built on attribute access.

    Subclasses define `test(instance, name)` plus a `test.output` format
    string.  Simply *accessing* an attribute on an instance of a subclass
    (via __getattr__) runs the test against the wrapped specimen's
    instance and records (result, message-template, data) in the shared
    results list.  State is written through self.__dict__ directly to
    avoid re-triggering __getattr__/__setattr__ hooks.
    """
    def __init__(self, specimen):
        self.__dict__['specimen'] = specimen
        self.__dict__['instance'] = specimen.instance
        self.__dict__['results'] = specimen.results

    def __getattr__(self, name):
        # Attribute access *is* the assertion: run the test and log it.
        self.__dict__['name'] = name
        result = self.test(self.__dict__['instance'], name)
        self.__dict__['results'].append((result, self.test.output, self.__dict__))


class S(AttributeTester):
    """Comparison entry point: `S(spec).attr <op> value` records a check;
    assigning `S(spec).attr = value` sets the attribute on the instance."""
    def __getattr__(self, name):
        self.__dict__['name'] = name
        return AttributeComparison(self, name)

    def __setattr__(self, name, value):
        self.__dict__['name'] = name
        setattr(self.instance, name, value)


class AttributeComparison:
    """Rich-comparison proxy for one attribute of the specimen's instance.

    Each comparison operator evaluates the predicate against the current
    attribute value and appends (success, display-template, data) to the
    shared results list instead of returning a boolean.
    """
    def __init__(self, specimen, name):
        self.instance = specimen.instance
        self.results = specimen.results
        self.name = name

    def test(self, func, other, display):
        value = getattr(self.instance, self.name)
        success = func(value, other)
        # Copy of our state plus the compared-against value, for reporting.
        data = dict(self.__dict__, value=other)
        return (success, display, data)

    def __le__(self, other):
        self.results.append(self.test(lambda a, b: a <= b, other, '%s is at most %r'))

    def __lt__(self, other):
        self.results.append(self.test(lambda a, b: a < b, other, '%s is less than %r'))

    def __eq__(self, other):
        self.results.append(self.test(lambda a, b: a == b, other, '%s is equal to %r'))

    def __ne__(self, other):
        self.results.append(self.test(lambda a, b: a != b, other, '%s is different from %r'))

    def __gt__(self, other):
        self.results.append(self.test(lambda a, b: a > b, other, '%s is greater than %r'))

    def __ge__(self, other):
        self.results.append(self.test(lambda a, b: a >= b, other, '%s is at least %r'))


class Has(AttributeTester):
    def test(self, instance, name):
        # Passing requires that the attribute exist and evaluate to True
        return hasattr(instance, name) and bool(getattr(instance, name))
    test.output = 'has %(name)s'

    def __call__(self, num):
        # `has(3).things` form: delegate to the counted variant.
        return HasNum(self.specimen, num)


class HasNum(AttributeTester):
    """Passes when the attribute's length equals the stored count."""
    def __init__(self, specimen, num, **kwargs):
        super(HasNum, self).__init__(specimen, **kwargs)
        self.__dict__['num'] = num

    def test(self, instance, name):
        # Attribute must exist and have exactly `num` items.
        return hasattr(instance, name) and len(getattr(instance, name)) == self.__dict__['num']
    test.output = 'has %(num)s %(name)s'

    @property
    def or_less(self):
        return HasNumOrLess(self.specimen, self.num)

    @property
    def or_more(self):
        return HasNumOrMore(self.specimen, self.num)


class HasNumOrMore(HasNum):
    def test(self, instance, name):
        return hasattr(instance, name) and len(getattr(instance, name)) >= self.num
    test.output = 'has %(num)s or more %(name)s'


class HasNumOrLess(HasNum):
    def test(self, instance, name):
        return hasattr(instance, name) and len(getattr(instance, name)) <= self.num
    test.output = 'has %(num)s or less %(name)s'


class Lacks(AttributeTester):
    def test(self, instance, name):
        # Passing requires that the attribute evaluate to False or not exist
        return not (hasattr(instance, name) and bool(getattr(instance, name)))
    test.output = 'lacks %(name)s'

    def __call__(self, num):
        return LacksNum(self.specimen, num)


class LacksNum(Lacks):
    """Passes when the attribute is missing or its length differs from num."""
    def __init__(self, specimen, num, **kwargs):
        super(LacksNum, self).__init__(specimen, **kwargs)
        self.__dict__['num'] = num

    def test(self, instance, name):
        return not hasattr(instance, name) or len(getattr(instance, name)) != self.num
    test.output = 'lacks %(num)s %(name)s'

    @property
    def or_less(self):
        return LacksNumOrLess(self.specimen, self.num)

    @property
    def or_more(self):
        return LacksNumOrMore(self.specimen, self.num)


class LacksNumOrMore(LacksNum):
    # NOTE(review): unlike LacksNum.test, this requires the attribute to
    # exist — lacking "num or more" means having strictly fewer.
    def test(self, instance, name):
        return hasattr(instance, name) and len(getattr(instance, name)) < self.num
    test.output = 'lacks %(num)s or more %(name)s'


class LacksNumOrLess(LacksNum):
    def test(self, instance, name):
        return hasattr(instance, name) and len(getattr(instance, name)) > self.num
    test.output = 'lacks %(num)s or less %(name)s'
[ "marty@martyalchin.com" ]
marty@martyalchin.com
1885e133317728b591d8e215221e805903af13f0
867b5d6efc6761e97412613c19d41c4fbe927238
/demos/callLineEdit.py
b58032c82c1b8cca29274ff8e02ab12d8525c398
[]
no_license
LouisLu78/pyqt5
516bdcd35a6678b1add300a4a14854ef61165a08
10e0ab9b186c88131180dba19ded483431c6966f
refs/heads/master
2020-09-26T13:55:54.350566
2020-04-23T14:33:49
2020-04-23T14:33:49
226,268,644
0
0
null
null
null
null
UTF-8
Python
false
false
649
py
# -*- coding: utf-8 -*- # author: Guangqiang Lu time:2019/12/2 #The codes below are copied from textbook. import sys from PyQt5.QtWidgets import QDialog, QApplication from demos.demoLineEdit import * class MyForm(QDialog): def __init__(self): super().__init__() self.ui = Ui_Dialog() self.ui.setupUi(self) self.ui.ButtonClickMe.clicked.connect(self.dispmessage) self.show() def dispmessage(self): self.ui.labelResponse.setText("Hello " +self.ui.lineEditName.text()) if __name__=="__main__": app = QApplication(sys.argv) w = MyForm() w.show() sys.exit(app.exec_())
[ "lewgq@yahoo.com" ]
lewgq@yahoo.com
8e6086f8659c6e50223d4ade9b087dd525b2aabc
6294e1613c812612d4463da83cfc24d8c213d3f6
/arjuna/interact/gui/dispatcher/driver/driver_commands.py
449247a61e551959f2f3511cbf38eef4b541f5b8
[ "Apache-2.0" ]
permissive
prabhudatta22/arjuna
52747bc2d9600f1cd04457b29c919221464a7b88
37a9afe3f8f2b2e82da854c3e497a67a77c6749f
refs/heads/master
2022-12-06T04:10:21.346896
2020-08-31T06:03:13
2020-08-31T06:03:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,897
py
# This file is a part of Arjuna
# Copyright 2015-2020 Rahul Verma

# Website: www.RahulVerma.net

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from selenium.webdriver.common.action_chains import ActionChains


class DriverCommands:
    """Stateless classmethod wrappers around a Selenium WebDriver.

    Each method takes the driver (and any extra arguments) explicitly,
    so the class is a namespace of browser operations rather than an
    object with state.
    """

    @classmethod
    def go_to_url(cls, driver, url):
        driver.get(url)

    @classmethod
    def refresh_browser(cls, driver):
        driver.refresh()

    @classmethod
    def go_back_in_browser(cls, driver):
        driver.back()

    @classmethod
    def go_forward_in_browser(cls, driver):
        driver.forward()

    @classmethod
    def quit(cls, driver):
        driver.quit()

    @classmethod
    def get_page_title(cls, driver):
        return driver.title

    @classmethod
    def get_url(cls, driver):
        # NOTE(review): returns the driver object itself, not a URL —
        # looks like it should be `driver.current_url`; confirm callers.
        return driver

    @classmethod
    def get_source(cls, driver):
        return driver.page_source

    @classmethod
    def send_keys(cls, driver, key_str):
        # NOTE(review): debug print left in — consider removing/logging.
        print(key_str)
        ActionChains(driver).send_keys(key_str).perform()

    @classmethod
    def is_web_alert_present(cls, driver):
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as EC
        # Poll up to 1 second for a JS alert; absence raises a timeout.
        try:
            WebDriverWait(driver, 1).until(EC.alert_is_present(), 'Timed out.')
            return True
        except Exception as e:
            return False

    @classmethod
    def confirm_web_alert(cls, driver):
        driver.switch_to.alert.accept()

    @classmethod
    def dismiss_web_alert(cls, driver):
        driver.switch_to.alert.dismiss()

    @classmethod
    def send_text_to_web_alert(cls, driver, text):
        driver.switch_to.alert.send_keys(text)

    @classmethod
    def get_text_from_web_alert(cls, driver):
        return driver.switch_to.alert.text

    @classmethod
    def focus_on_frame(cls, driver, element):
        driver.switch_to.frame(element)

    @classmethod
    def focus_on_dom_root(cls, driver):
        return driver.switch_to.default_content()

    @classmethod
    def focus_on_parent_frame(cls, driver):
        driver.switch_to.parent_frame()

    @classmethod
    def execute_javascript(cls, driver, script, *args):
        from arjuna import log_debug
        log_debug("Executing JavaScript {} with args {}.".format(script, args))
        return driver.execute_script(script, *args)

    @classmethod
    def take_screenshot(cls, driver, file_path):
        return driver.save_screenshot(file_path)

    @classmethod
    def take_screenshot_as_base64(cls, driver):
        return driver.get_screenshot_as_base64()

    @classmethod
    def set_window_size(cls, driver, width, height):
        driver.set_window_size(width, height)

    @classmethod
    def maximize_window(cls, driver):
        driver.maximize_window()

    @classmethod
    def get_current_window_handle(cls, driver):
        return driver.current_window_handle

    @classmethod
    def focus_on_window(cls, driver, window_handle):
        driver.switch_to.window(window_handle)

    @classmethod
    def close_current_window(cls, driver):
        driver.close()

    @classmethod
    def get_window_title(cls, driver):
        return driver.title

    @classmethod
    def get_current_window_size(cls, driver):
        return driver.get_window_size()

    @classmethod
    def get_all_winodw_handles(cls, driver):
        # NOTE(review): "winodw" typo is part of the public name — keep
        # for compatibility; fix would require updating all callers.
        return driver.window_handles

    @classmethod
    def replace_with_element(cls, setu_driver, value_tuple):
        # value_tuple is (value, is_element_id): resolve element ids back
        # to WebElements, pass plain values through.
        if value_tuple[1] == True:
            return setu_driver.get_element_for_setu_id(value_tuple[0])
        else:
            return value_tuple[0]

    @classmethod
    def perform_action_chain(cls, setu_driver, driver, action_chain):
        # action_chain is a list of (method_name, kwargs) applied in
        # order to a single ActionChains object, then performed once.
        chain = ActionChains(driver)
        for action in action_chain:
            kwargs = {k: cls.replace_with_element(setu_driver, v) for k, v in action[1].items()}
            getattr(chain, action[0])(**kwargs)
        chain.perform()

    @classmethod
    def hover_on_element(cls, driver, webelement):
        chain = ActionChains(driver).move_to_element(webelement).perform()

    @classmethod
    def mouse_click_on_element(cls, driver, webelement):
        chain = ActionChains(driver).click(webelement).perform()

    @classmethod
    def scroll_to_element(cls, driver, webelement):
        cls.execute_javascript(driver, "arguments[0].scrollIntoView(true);", webelement)
[ "rahulverma81@gmail.com" ]
rahulverma81@gmail.com
9b48c9385f3523743bd3f869ee21796c098b6f19
728871b962f2a5ec8d8ec7d5b607def074fb8864
/W261/HW13-Questions/PageRank.py
e2a57722c38f075390050316f7fe32acfb01088f
[]
no_license
leiyang-mids/MIDS
0191ffbaf9f7f6ec0e77522241c3e76d012850f1
918b0d8afc395840626eb31c451ad6c4b2f3bc39
refs/heads/master
2020-05-25T15:46:56.480467
2019-03-28T16:16:17
2019-03-28T16:16:17
35,463,263
1
2
null
null
null
null
UTF-8
Python
false
false
1,302
py
# PageRank helper functions, apparently for a Spark-style map/reduce job.
# NOTE(review): Python 2 code — `exec 'adj = %s' % adj` is a py2 exec
# statement, and several functions rely on driver-defined globals
# (nDangling, p_dangling, damping, alpha, lossMass) — confirm they are
# set up by the calling script before use.
from time import time
from datetime import datetime


def initialize(line):
    # parse line: "<node-id>\t<adjacency-dict-literal>"
    nid, adj = line.strip().split('\t', 1)
    # NOTE(review): exec on raw input text — trusted-input assumption;
    # a py3 port should use ast.literal_eval instead.
    exec 'adj = %s' %adj
    # initialize node struct: adjacency list plus zero PageRank mass
    node = {'a':adj.keys(), 'p':0}
    rankMass = 1.0/len(adj)
    # emit pageRank mass and node
    return [(m, rankMass) for m in node['a']] + [(nid.strip('"'), node)]


def accumulateMass(a, b):
    # Reducer: floats are mass contributions, dicts are node structs.
    # Sum two masses, or fold a mass into the node's 'p' field.
    if isinstance(a, float) and isinstance(b, float):
        return a+b
    if isinstance(a, float) and not isinstance(b, float):
        b['p'] += a
        return b
    else:
        a['p'] += b
        return a


def getDangling(node):
    # A bare float value means the node had no struct of its own, i.e.
    # it is dangling (no outgoing links); wrap it and count it.
    global nDangling
    if isinstance(node[1], float):
        nDangling += 1
        return (node[0], {'a':[], 'p':node[1]})
    else:
        return node


def redistributeMass(node):
    # Standard PageRank update: share of dangling mass, damping factor,
    # plus teleport term.  p_dangling presumably a Spark broadcast —
    # TODO confirm in the driver script.
    node[1]['p'] = (p_dangling.value+node[1]['p'])*damping + alpha
    return node


def distributeMass(node):
    # Spread this node's mass evenly over its out-links; dangling nodes
    # contribute their mass to the global loss accumulator instead.
    global lossMass
    mass, adj = node[1]['p'], node[1]['a']
    node[1]['p'] = 0
    if len(adj) == 0:
        lossMass += mass
        return [node]
    else:
        rankMass = mass/len(adj)
        return [(x, rankMass) for x in adj]+[node]


def getIndex(line):
    # Swap the first two tab-separated fields into a (key, value) pair.
    elem = line.strip().split('\t')
    return (elem[1], elem[0])


def logTime():
    # Wall-clock timestamp for log lines.
    return str(datetime.now())
[ "ynglei@gmail.com" ]
ynglei@gmail.com
9bc3b610843612d19d76e61bd47db7d4bfb9af9d
08b74293c409086681eda77310f61831552478f1
/instafilter/model.py
8861ab8fc61abd65f2855c66af97c9cde85038f8
[]
no_license
jiaxinwang/instafilter
d895928a3c311edf8ce14f49e716334842a51acf
cdc84b1b1055fd3d8b5ba81db69f9abeef5346e7
refs/heads/master
2023-07-21T19:57:38.976494
2021-09-06T14:17:42
2021-09-06T14:17:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
450
py
import torch
from torch import nn


class ColorNet(nn.Module):
    """Small fully connected network mapping 5 inputs to 5 outputs.

    Three hidden layers of width 25; every layer, including the output
    layer, is followed by a tanh nonlinearity.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(5, 25)
        self.fc2 = nn.Linear(25, 25)
        self.fc3 = nn.Linear(25, 25)
        self.fc4 = nn.Linear(25, 5)

    def forward(self, x):
        # Apply each affine layer followed by tanh, in registration order.
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            x = torch.tanh(layer(x))
        return x
[ "travis.hoppe@gmail.com" ]
travis.hoppe@gmail.com
4f872c0b06702838fd0288d6dcc363e5aa3b0c8a
f07a42f652f46106dee4749277d41c302e2b7406
/Data Set/bug-fixing-2/4c735f24ea954a5ced87bd0bd1573fa3eb914c18-<save_inference_model>-fix.py
f762eaaca815726528bd1b1c9533f60f924fc3cf
[]
no_license
wsgan001/PyFPattern
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
cc347e32745f99c0cd95e79a18ddacc4574d7faa
refs/heads/main
2023-08-25T23:48:26.112133
2021-10-23T14:11:22
2021-10-23T14:11:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,018
py
def save_inference_model(self, executor, dirname, feeded_var_names, target_vars, main_program=None, export_for_deployment=True):
    """Prune the given `main_program` to build a new program especially
    for inference, and then save it and all related parameters to the
    given `dirname` by the `executor`.

    When `main_program` is None, the resolver's `_origin_program` is
    saved instead, and the persisted program description is re-read so
    distributed parameter info can be copied in before saving the
    persistable variables.
    """
    if (main_program is not None):
        io.save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program, None, None, export_for_deployment)
    else:
        io.save_inference_model(dirname, feeded_var_names, target_vars, executor, self._origin_program, None, None, export_for_deployment, True)
        # Reload the just-saved program description from disk...
        model_basename = '__model__'
        model_filename = os.path.join(dirname, model_basename)
        with open(model_filename, 'rb') as f:
            program_desc_str = f.read()
        program = Program.parse_from_string(program_desc_str)
        # ...attach distributed-parameter metadata, then save params.
        program._copy_dist_param_info_from(self.main_program)
        self.save_persistables(executor, dirname, program)
[ "dg1732004@smail.nju.edu.cn" ]
dg1732004@smail.nju.edu.cn
80b3def3345e608e8f51501194c5d23249ed50dc
634fb5fe10e8f944da44ab31896acc8471ec5f18
/hq_env/bin/sphinx-autogen
42f0972c1ff3e67f0b39e10ebd4c8b542116b016
[]
no_license
dimagi/commcarehq-venv
277d0b6fada24f2edd54f74850267201153412a7
2c52e3fb0f974cae5c5feaea1d5de851fe530c80
refs/heads/master
2021-01-18T14:05:47.931306
2015-07-20T10:10:41
2015-07-20T10:10:41
11,513,855
1
1
null
2015-07-20T10:10:41
2013-07-18T21:09:22
Python
UTF-8
Python
false
false
331
#!/home/travis/virtualenv/bin/python # EASY-INSTALL-ENTRY-SCRIPT: 'Sphinx==1.2b1','console_scripts','sphinx-autogen' __requires__ = 'Sphinx==1.2b1' import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('Sphinx==1.2b1', 'console_scripts', 'sphinx-autogen')() )
[ "droberts@dimagi.com" ]
droberts@dimagi.com
276413737c57b3c74b11ccee0d0df56f0c65692a
d802a0793a4a4af0336912932c35499edac16845
/Python/Python/Regex and Parsing/regex_substitution.py
c7a8c4e9aca66eb624f877498c6606e23f9b07fa
[]
no_license
butterflylady/hackerrank
fb1ca6801855b3956bbfb72a2f7a95db4513aca4
c42e2c80c41a95eb10d9a061eb8e7132e52a71ac
refs/heads/master
2021-08-27T21:02:50.512292
2021-08-05T08:40:37
2021-08-05T08:40:37
167,610,834
0
0
null
null
null
null
UTF-8
Python
false
false
322
py
import re

# Matches '&&' or '||' only when surrounded by single spaces, so
# operators glued to other text (e.g. "a&&b") are left untouched.
# Raw string avoids invalid-escape warnings; hoisted so it is not
# rebuilt on every input line.
PATTERN = r'(?<= )(&&|\|\|)(?= )'


def change_symb(match):
    """Map a matched C-style boolean operator to its Python keyword.

    *match* is the re.Match passed by re.sub; returns 'and' for '&&'
    and 'or' for '||'.
    """
    symb = match.group(0)
    if symb == "&&":
        return "and"
    elif symb == "||":
        return "or"


if __name__ == '__main__':
    # Guarded so importing this module does not block on stdin:
    # first line is the count, then that many lines to rewrite.
    n = int(input())
    for _ in range(n):
        line = input()
        # Ex. s="A && && && && && && B"
        print(re.sub(PATTERN, change_symb, line))
[ "mpaluyanava@gmail.com" ]
mpaluyanava@gmail.com
3df929b6a508a9d626634464b85f4d50299530ae
e88c152d699cef4af64fa5aa4b9c61631c03c8b6
/Solutions/0949.Largest-Time-for-Given-Digits.py
816118cebc673cecb74253fdf254dfeec22a97db
[]
no_license
arnabs542/Leetcode-3
062af047b1f828b9def2a6e2a4d906e77090b569
7e10ff62981db88053b511c3ef8bd284d728d2fc
refs/heads/master
2023-02-26T19:21:05.037170
2021-01-20T07:04:48
2021-01-20T07:04:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,939
py
""" 949. Largest Time for Given Digits Given an array arr of 4 digits, find the latest 24-hour time that can be made using each digit exactly once. 24-hour times are formatted as "HH:MM", where HH is between 00 and 23, and MM is between 00 and 59. The earliest 24-hour time is 00:00, and the latest is 23:59. Return the latest 24-hour time in "HH:MM" format. If no valid time can be made, return an empty string. Example 1: Input: A = [1,2,3,4] Output: "23:41" Explanation: The valid 24-hour times are "12:34", "12:43", "13:24", "13:42", "14:23", "14:32", "21:34", "21:43", "23:14", and "23:41". Of these times, "23:41" is the latest. Example 2: Input: A = [5,5,5,5] Output: "" Explanation: There are no valid 24-hour times as "55:55" is not valid. Example 3: Input: A = [0,0,0,0] Output: "00:00" Example 4: Input: A = [0,0,1,0] Output: "10:00" """ """ step 1: find all possible permutations - O(4!). step 2: update max_possible time that can be constructed from the permutations. """ class Solution: def largestTimeFromDigits(self, arr: List[int]) -> str: def backtrack(curr_comb): if len(curr_comb) == 4: permut.append(curr_comb) return for idx in range(4): if idx not in visited: visited.add(idx) backtrack(curr_comb + str(arr[idx])) visited.remove(idx) permut = [] visited = set() backtrack("") max_time = -1 res = "" for a, b, c, d in permut: hour = int(a) * 10 + int(b) minute = int(c) * 10 + int(d) if 0 <= hour < 24 and 0 <= minute < 60: time = hour * 60 + minute if time > max_time: max_time = time res = str(a) + str(b) + ":" + str(c) + str(d) return res
[ "noreply@github.com" ]
arnabs542.noreply@github.com
70e3ea01aca468d440c1dbfa13b939fad9364327
67bdebd561b19af9bf759b6ed5de8556b93ea91f
/lower_priority.py
ba5a00b47b30c1d9e0248d2c3e339ba751417911
[]
no_license
rlowrance/re-avm
91371ec79f6b6f48e17643da4dfb7a4894d0a0ca
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
refs/heads/master
2021-01-17T07:34:16.876133
2017-02-06T21:04:59
2017-02-06T21:04:59
42,865,972
31
10
null
null
null
null
UTF-8
Python
false
false
638
py
'lower priority of current process'
import os
import pdb


def lower_priority():
    """Drop the scheduling priority of the current process one notch.

    On POSIX the nice value is raised by 1; on Windows the process is
    moved into the BELOW_NORMAL priority class via the win32 API.
    """
    # ref: http://stackoverflow.com/questions/1023038/change-process-priority-in-python-cross-platform
    assert os.name in ('nt', 'posix'), os.name
    if os.name == 'posix':
        os.nice(1)
    else:
        import win32api
        import win32process
        import win32con
        pid = win32api.GetCurrentProcessId()
        handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
        win32process.SetPriorityClass(handle, win32process.BELOW_NORMAL_PRIORITY_CLASS)


if __name__ == '__main__':
    lower_priority()
[ "roy.lowrance@gmail.com" ]
roy.lowrance@gmail.com
d839256a8d0dfd778bd501e3df9f2c3f253c6d65
e23512edf95ea66640eab85adb8ca0c24ae6e3f7
/tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver.py
2e75ac226ea74e879edda5e03dff3d53c8a76569
[ "Apache-2.0" ]
permissive
snuspl/tensorflow
755ac46c3163adb119de0755ed706b1c960991fb
212d4e9e5f4093ecb90e5b7837d4e02da7506228
refs/heads/r1.6
2021-06-25T18:03:17.625202
2018-12-30T09:35:50
2018-12-30T09:35:50
134,066,972
1
3
Apache-2.0
2020-06-10T06:12:19
2018-05-19T14:02:25
C++
UTF-8
Python
false
false
5,508
py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves.urllib.request import Request
from six.moves.urllib.request import urlopen

from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec

# googleapiclient/oauth2client are optional; their absence is only an
# error if the caller relies on the default credentials/service path.
_GOOGLE_API_CLIENT_INSTALLED = True
try:
  from googleapiclient import discovery  # pylint: disable=g-import-not-at-top
  from oauth2client.client import GoogleCredentials  # pylint: disable=g-import-not-at-top
except ImportError:
  _GOOGLE_API_CLIENT_INSTALLED = False


class TPUClusterResolver(ClusterResolver):
  """Cluster Resolver for Google Cloud TPUs.

  This is an implementation of cluster resolvers for the Google Cloud TPU
  service. As Cloud TPUs are in alpha, you will need to specify a API
  definition file for this to consume, in addition to a list of Cloud TPUs
  in your Google Cloud Platform project.
  """

  def _requestComputeMetadata(self, path):
    # Query the GCE metadata server; the Metadata-Flavor header is
    # required or the server rejects the request.
    req = Request('http://metadata/computeMetadata/v1/%s' % path,
                  headers={'Metadata-Flavor': 'Google'})
    resp = urlopen(req)
    return resp.read()

  def __init__(self,
               tpu_names,
               zone=None,
               project=None,
               job_name='tpu_worker',
               credentials='default',
               service=None):
    """Creates a new TPUClusterResolver object.

    The ClusterResolver will then use the parameters to query the Cloud TPU
    APIs for the IP addresses and ports of each Cloud TPU listed.

    Args:
      tpu_names: A list of names of the target Cloud TPUs.
      zone: Zone where the TPUs are located. If omitted or empty, we will
        assume that the zone of the TPU is the same as the zone of the GCE
        VM, which we will try to discover from the GCE metadata service.
      project: Name of the GCP project containing Cloud TPUs. If omitted or
        empty, we will try to discover the project name of the GCE VM from
        the GCE metadata service.
      job_name: Name of the TensorFlow job the TPUs belong to.
      credentials: GCE Credentials. If None, then we use default credentials
        from the oauth2client
      service: The GCE API object returned by the googleapiclient.discovery
        function. If you specify a custom service object, then the
        credentials parameter will be ignored.

    Raises:
      ImportError: If the googleapiclient is not installed.
    """
    # Fall back to the metadata service for project/zone when not given.
    if not project:
      project = self._requestComputeMetadata('/project/project-id')

    if not zone:
      zone_path = self._requestComputeMetadata('/instance/zone')
      zone = zone_path.split('/')[-1]

    self._project = project
    self._zone = zone
    self._tpu_names = tpu_names
    self._job_name = job_name
    self._credentials = credentials

    if credentials == 'default':
      if _GOOGLE_API_CLIENT_INSTALLED:
        self._credentials = GoogleCredentials.get_application_default()

    if service is None:
      if not _GOOGLE_API_CLIENT_INSTALLED:
        raise ImportError('googleapiclient must be installed before using the '
                          'TPU cluster resolver')

      self._service = discovery.build(
          'tpu', 'v1alpha1',
          credentials=self._credentials)
    else:
      self._service = service

  def get_master(self):
    """Get the ClusterSpec grpc master path.

    This returns the grpc path (grpc://1.2.3.4:8470) of first instance in the
    ClusterSpec returned by the cluster_spec function. This is suitable for use
    for the `master` argument in tf.Session() when you are using one TPU.

    Returns:
      string, the grpc path of the first instance in the ClusterSpec.

    Raises:
      ValueError: If none of the TPUs specified exists.
    """
    job_tasks = self.cluster_spec().job_tasks(self._job_name)
    if not job_tasks:
      raise ValueError('No TPUs exists with the specified names exist.')

    return 'grpc://' + job_tasks[0]

  def cluster_spec(self):
    """Returns a ClusterSpec object based on the latest TPU information.

    We retrieve the information from the GCE APIs every time this method is
    called.

    Returns:
      A ClusterSpec containing host information returned from Cloud TPUs.
    """
    worker_list = []

    # One API call per configured TPU name; each response carries the
    # node's ipAddress and port.
    for tpu_name in self._tpu_names:
      full_name = 'projects/%s/locations/%s/nodes/%s' % (
          self._project, self._zone, tpu_name)
      request = self._service.projects().locations().nodes().get(name=full_name)
      response = request.execute()

      instance_url = '%s:%s' % (response['ipAddress'], response['port'])
      worker_list.append(instance_url)

    return ClusterSpec({self._job_name: worker_list})
[ "gardener@tensorflow.org" ]
gardener@tensorflow.org
860f1791698bd78cf19dfd6b510dded8bfc3d7e6
ccbfc7818c0b75929a1dfae41dc061d5e0b78519
/aliyun-openapi-python-sdk-master/aliyun-python-sdk-dds/aliyunsdkdds/request/v20151201/ModifyDBInstanceSSLRequest.py
fa202b9965c36683da32f4164ac33488935281df
[ "Apache-2.0" ]
permissive
P79N6A/dysms_python
44b634ffb2856b81d5f79f65889bfd5232a9b546
f44877b35817e103eed469a637813efffa1be3e4
refs/heads/master
2020-04-28T15:25:00.368913
2019-03-13T07:52:34
2019-03-13T07:52:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,252
py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest


class ModifyDBInstanceSSLRequest(RpcRequest):
    """RPC request for the Dds (MongoDB) ModifyDBInstanceSSL action.

    SDK-generated boilerplate: each get_/set_ pair reads or writes one
    query parameter of the 2015-12-01 Dds API call.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Dds', '2015-12-01', 'ModifyDBInstanceSSL', 'dds')

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_SSLAction(self):
        return self.get_query_params().get('SSLAction')

    def set_SSLAction(self, SSLAction):
        self.add_query_param('SSLAction', SSLAction)

    def get_DBInstanceId(self):
        return self.get_query_params().get('DBInstanceId')

    def set_DBInstanceId(self, DBInstanceId):
        self.add_query_param('DBInstanceId', DBInstanceId)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
[ "1478458905@qq.com" ]
1478458905@qq.com
23d627ec0997959cf1212df3ad37627b2530ced6
c8abf01fb77b526a0a6af1f7ed5b740d8aec65ba
/user_profile/migrations/0001_initial.py
486a7471308c7ad448471795d084c1ecd6dd824b
[]
no_license
bitapardaz/diabet
f1cc6e039792c91bfb67754f5c7e18141f2573cc
8a9b38d81c512148be43ea9cf4d09acbd07c3af0
refs/heads/master
2021-07-16T17:08:24.502219
2017-10-23T21:56:27
2017-10-23T21:56:27
108,029,858
0
0
null
null
null
null
UTF-8
Python
false
false
880
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ], ), migrations.CreateModel( name='UserType', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ], ), migrations.AddField( model_name='userprofile', name='title', field=models.ForeignKey(to='user_profile.UserType'), ), ]
[ "pourranjbar.ar@gmail.com" ]
pourranjbar.ar@gmail.com
b4825e7c09ac027db28bf8dd543d8e729e4955c0
f34d3948b707e461151ee33296a61fb23a6d3f44
/month01/day05/exercise04.py
594a4d052bb6f40071f72eab8c844809892fe23a
[]
no_license
xiao-a-jian/python-study
f9c4e3ee7a2f9ae83bec6afa7c7b5434e8243ed8
c8e8071277bcea8463bf6f2e8cd9e30ae0f1ddf3
refs/heads/master
2022-06-09T17:44:41.804228
2020-05-05T07:48:07
2020-05-05T07:48:07
256,927,969
0
0
null
null
null
null
UTF-8
Python
false
false
416
py
""" 在终端中录入10个疫情省份的确诊人数 最后打印人数最多的、最少的、平均人数.(使用内置函数实现) """ list_confirmed = [] for item in range(10): number = int(input("请输入第%d个省份的疫情人数:" % (item + 1))) list_confirmed.append(number) print(max(list_confirmed)) print(min(list_confirmed)) print(sum(list_confirmed) / len(list_confirmed))
[ "1261247299@qq.com" ]
1261247299@qq.com
a6824d7e85b0264d31b1561314abad8654470d27
401fc99cefe615f8ebefb6dd9c2b043c506f5bd0
/tests/conftest.py
640a86e0d9e1741295eded78699d2d9877f23fe9
[ "MIT" ]
permissive
atviriduomenys/spinta
0f85496860ebbcecfccd8dde2bf219564ee66baa
1fac5b6b75ec65188d815078fd135bc05d49b31c
refs/heads/master
2023-09-02T13:22:58.411937
2023-08-18T12:59:17
2023-08-18T12:59:17
168,724,854
12
4
MIT
2023-09-14T13:29:39
2019-02-01T16:16:11
Python
UTF-8
Python
false
false
4,119
py
# pytest conftest: installs a global `pp` debug-print helper (and objprint's
# `op`) into builtins so tests can call them without imports.
import builtins
import inspect
import os
import re
import sys
import time as time_module
from itertools import chain
from itertools import islice
from traceback import format_stack
from typing import Any
from typing import Dict
from typing import Iterator
from typing import TextIO
from typing import Type

import objprint
import pprintpp
import sqlparse
from pygments import highlight
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.lexers.python import Python3Lexer
from pygments.lexers.python import Python3TracebackLexer
from pygments.lexers.sql import PostgresLexer
from sqlalchemy.sql import ClauseElement

objprint.config(honor_existing=False, depth=1)


def formatter():
    """Shared pygments terminal formatter used for all highlighted output."""
    return Terminal256Formatter(style='vim')


def ppsql(qry):
    """Pretty-print a SQLAlchemy query as reindented, highlighted SQL."""
    sql = str(qry) % qry.compile().params
    sql = sqlparse.format(sql, reindent=True, keyword_case='upper')
    sql = highlight(sql, PostgresLexer(), formatter())
    print(sql)


# Sentinel meaning "argument not given" (None is a legitimate value to print).
na = object()

# NOTE(review): compiled but unused below — the arg name is extracted with a
# hand-rolled bracket scanner instead; kept for compatibility.
arg_re = re.compile(r'pp\(([^,)]+)')


def pp(
    obj: Any = na,
    *args,
    v: Any = na,            # only print if obj is identical to v
    t: Type = na,           # only print if obj is an instance of t
    on: bool = True,        # print if on condition is true
    st: bool = False,       # also print a filtered stack trace
    tb: bool = False,       # raise RuntimeError after printing (traceback aid)
    time: bool = False,     # treat obj as a callable and time obj(*args, **kwargs)
    file: TextIO = sys.__stderr__,
    prefix: str = '\n',
    suffix: str = '',
    kwargs: Dict[str, Any] = None,
) -> Any:
    """Debug-print `obj` (highlighted, with its source-level argument name)
    and return it, so `pp(...)` can be dropped into any expression."""
    if obj is na:
        ret = None
    else:
        ret = obj

    if not on:
        return ret

    # `pp(...)` with Ellipsis prints a visual separator line.
    if obj is Ellipsis:
        print(file=file)
        print('_' * 72, file=file)
        return ret

    if time:
        start = time_module.time()
        ret = obj(*args, **kwargs)
        delta = time_module.time() - start
    else:
        delta = None

    if v is not na and obj is not v:
        return ret

    if t is not na and not isinstance(obj, t):
        return ret

    # Choose a textual rendering and a lexer depending on what obj is.
    if obj is na:
        out = ''
        lexer = None
    elif isinstance(obj, Iterator):
        # Peek at the first 10 items, then re-chain them so the caller's
        # iterator is not consumed.
        out = list(islice(obj, 10))
        ret = chain(out, obj)
        out = '<generator> ' + pprintpp.pformat(out)
        lexer = Python3Lexer()
    elif isinstance(obj, ClauseElement):
        out = str(obj.compile(compile_kwargs={"literal_binds": True}))
        out = sqlparse.format(out, reindent=True, keyword_case='upper')
        out = '\n' + out
        lexer = PostgresLexer()
    else:
        out = pprintpp.pformat(obj)
        lexer = Python3Lexer()

    if obj is not na:
        # Recover the literal first-argument text from the caller's source
        # line, scanning brackets/quotes so nested commas don't cut it short.
        frame = inspect.currentframe()
        frame = inspect.getouterframes(frame)[1]
        line = inspect.getframeinfo(frame[0]).code_context[0].strip()
        _, line = line.split('pp(', 1)
        arg = []
        stack = []
        term = {
            '(': ')',
            '[': ']',
            '{': '}',
            '"': '"',
            "'": "'",
        }
        for c in line:
            if (c == '\\' and (not stack or stack[-1] != '\\')) or c in term:
                stack.append(c)
            elif stack:
                if stack[-1] == '\\' or c == term[stack[-1]]:
                    stack.pop()
            elif c in ',)':
                # End of the first argument at top nesting level.
                break
            arg.append(c)
        arg = ''.join(arg)
        out = f'{arg} = {out}'

    if lexer:
        out = highlight(out, lexer, formatter())

    if prefix:
        print(prefix, end='', file=file)

    if st:
        # Print the current stack, dropping pytest/pluggy/dispatch frames.
        stack = ["Stack trace (pp):\n"]
        cwd = os.getcwd() + '/'
        for item in format_stack():
            if '/_pytest/' in item:
                continue
            if '/site-packages/pluggy/' in item:
                continue
            if '/multipledispatch/dispatcher.py' in item:
                continue
            item = item.replace(cwd, '')
            stack.append(item)
        stack = ''.join(stack)
        stack = highlight(stack, Python3TracebackLexer(), formatter())
        print(stack, end='', file=file)

    print(out.strip(), file=file)

    if suffix:
        print(suffix, end='', file=file)

    if time:
        print(f'Time: {delta}s', file=file)

    if tb:
        raise RuntimeError('pp')

    return ret


builtins.pp = pp
builtins.op = objprint.op

pytest_plugins = ['spinta.testing.pytest']
[ "sirexas@gmail.com" ]
sirexas@gmail.com
1ced5c6e0a04b057cee4c476830695a129dc95c5
6dd400fec6f302bd0dcf309e2deec5de906d205c
/django_test6maria/myguest/urls.py
9f02e4a7aca1f6b490e241f6a970eacb04b2142f
[]
no_license
Leo-hw/psou
aa938b7cfaa373a0980649125270c48d816202b0
70379156a623257d412bcccbac72986a61226bd4
refs/heads/master
2023-02-21T19:00:02.902510
2021-01-25T07:03:26
2021-01-25T07:03:26
332,616,685
1
0
null
null
null
null
UTF-8
Python
false
false
230
py
from django.contrib import admin
from django.urls import path
from myguest import views

# URL routes for the myguest guestbook app:
#   ''           -> list page
#   'insert/'    -> entry form
#   'insertok/'  -> form submission handler
urlpatterns = [
    path('', views.ListFunc),
    path('insert/', views.InsertFunc),
    path('insertok/', views.InsertFuncOk),
]
[ "Bonghwan@DESKTOP-60LSTNL" ]
Bonghwan@DESKTOP-60LSTNL
b540bb335ba07eb23f656de01bc48f3e0888a51e
7fc678c2b1a0ef8849364e9c3e272b4509003796
/py/merge-us-tracts-sql-code-gen.py
482fce872848f536f179fddb32fdcccd07dcc606
[]
no_license
nygeog/postgis_reference
ee7a599c2b60d713b5ae67039b5e5f4cfef9d7e8
d3da41fa91dcd6e667d62cb3cc2439aed99f90a9
refs/heads/master
2020-04-05T23:28:14.942786
2017-08-12T16:51:29
2017-08-12T16:51:29
42,251,833
3
1
null
null
null
null
UTF-8
Python
false
false
856
py
# Code generator (Python 2 -- uses print statements): emits the SQL that
# merges all per-state 2010 census tract tables into one nationwide table
# via a chain of SELECT ... UNION statements, then registers the geometry
# column with PostGIS.

mergeTableName = 'tracts_2010_us'

# Columns copied from every per-state table into the merged table.
attrList = ['geoid10', 'aland10', 'awater10', 'intptlat10', 'intptlon10', 'shape_leng', 'shape_area', 'geom']
attrListString = ", ".join(attrList)

# Two-digit state FIPS codes (50 states, DC "11", Puerto Rico "72").
statesList = ["01","02","04","05","06","08","09","10","11","12","13","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31","32","33","34","35","36","37","38","39","40","41","42","44","45","46","47","48","49","50","51","53","54","55","56","72"]

e1 = """CREATE TABLE """+mergeTableName+""" AS("""
print e1

e3 = """UNION"""

statesListLen = len(statesList)
for i, item in enumerate(statesList):
    e2 = """SELECT """+attrListString+""" FROM tracts_2010_state_"""+item
    print e2
    # Emit UNION between SELECTs but not after the last one.
    if i < (statesListLen - 1):
        print e3

e4 = """);"""
e5 = """SELECT Populate_Geometry_Columns('""" + mergeTableName + """'::regclass);"""
print e4
print e5
[ "daniel.martin.sheehan@gmail.com" ]
daniel.martin.sheehan@gmail.com
dd0492a4e1c8d9c5c1695bf08f02984c8d021074
b71a6e7050b0a4368007350d91ee078288a7318c
/examples/issues/issue189_img.py
cb2817ea11871f73a6164e09d73305c788330037
[ "Apache-2.0" ]
permissive
jarvisteach/appJar
2dfd0da6cb85ea3535379ed000efd97fb42fe4f8
0b59ce041da2197dcff3410e20f298676f1f7266
refs/heads/appJar
2023-08-29T09:42:01.812005
2019-09-28T18:34:06
2019-09-28T18:34:06
39,996,518
696
103
NOASSERTION
2023-02-20T01:01:16
2015-07-31T08:59:20
Python
UTF-8
Python
false
false
252
py
# Minimal reproduction for appJar issue #189: image/icon buttons with an
# explicit align argument.
import sys
sys.path.append("../../")

from appJar import gui

with gui(useTtk=False) as app:
    # app.addImageButton("button2", None, "Capture 2.PNG", align=None) # Uncomment this
    app.addIconButton("button", None, "md-play", align="none") # Or this
[ "jarvisteach@gmail.com" ]
jarvisteach@gmail.com
de6921dce3a160ae4dad9ee43b7a29ee9f4d8404
95a2568c20993bd423791f6796ecff36d6a71d26
/utils.py
46c959bcd0ae74ab2155a51327b036899514e5fe
[ "MIT" ]
permissive
kugooer/nazurin-1
15dff321436eaf9ca75c79f3be9e41cc958063a2
092da0b77e50e3f81cc99ae7d86523efbf691baf
refs/heads/master
2023-01-19T08:32:11.973482
2020-12-01T15:24:23
2020-12-01T15:24:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,979
py
# Helpers for the Nazurin Telegram bot: chat-action decorators, media/document
# senders with flood-control retry, image downloading, and filename sanitizing.
from requests.adapters import HTTPAdapter
from mimetypes import guess_type
from shutil import copyfileobj
from functools import wraps
from pathlib import Path
from html import escape
from time import sleep
import requests
import logging
import re
import os

from config import DOWNLOAD_DIR, UA, RETRIES
from telegram import ChatAction, InputMediaPhoto
from telegram.error import RetryAfter

# Logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger('bot')


def send_action(action):
    """Sends `action` while processing func command."""
    def decorator(func):
        @wraps(func)
        def command_func(update, context, *args, **kwargs):
            context.bot.send_chat_action(chat_id=update.effective_message.chat_id, action=action)
            return func(update, context, *args, **kwargs)
        return command_func
    return decorator


# Ready-made decorators for the chat actions used by the handlers below.
typing = send_action(ChatAction.TYPING)
uploading_video = send_action(ChatAction.UPLOAD_VIDEO)
uploading_photo = send_action(ChatAction.UPLOAD_PHOTO)
uploading_document = send_action(ChatAction.UPLOAD_DOCUMENT)


@uploading_photo
def sendPhotos(update, context, imgs, details=None):
    """Send `imgs` as a Telegram media group, with `details` rendered into
    the caption of the first item. Retries on flood-control (RetryAfter)."""
    if details is None:
        details = dict()
    bot = context.bot
    message = update.message
    chat_id = message.chat_id
    message_id = message.message_id
    media = list()
    # Telegram media groups are limited to 10 items.
    if len(imgs) > 10:
        imgs = imgs[:10]
        message.reply_text('Notice: Too many pages, sending only 10 of them')
    caption = str()
    for key, value in details.items():
        caption += str(key) + ': ' + str(value) + '\n'
    # Telegram caption limit is 1024 characters.
    if len(caption) > 1024:
        caption = caption[:1024]
        message.reply_text('Notice: Caption too long, trimmed')
    caption = escape(caption, quote=False)
    for img in imgs:
        filetype = str(guess_type(img.url)[0])
        if filetype.startswith('image'):
            media.append(InputMediaPhoto(img.display_url, parse_mode='HTML'))
        else:
            message.reply_text('File is not image, try download option.')
            return
    media[0].caption = caption
    while True:
        try:
            bot.sendMediaGroup(chat_id, media, reply_to_message_id=message_id)
        except RetryAfter as error:
            # Flood control: wait the server-requested delay, then retry.
            sleep(error.retry_after)
            continue
        break


@uploading_document
def sendDocuments(update, context, imgs, chat_id=None):
    """Send each image file as a document; retries on flood control.

    When `chat_id` is given the message goes to that chat (e.g. a channel)
    without replying to anything.
    """
    bot = context.bot
    message_id = update.message.message_id
    if not chat_id:
        chat_id = update.message.chat_id
    else:
        message_id = None  # Sending to channel, no message to reply
    for img in imgs:
        while True:
            try:
                bot.sendDocument(chat_id, open(img.path, 'rb'), filename=img.name, reply_to_message_id=message_id)
            except RetryAfter as error:
                sleep(error.retry_after)
                continue
            break


def handleBadRequest(update, context, error):
    """Translate known Telegram BadRequest messages into user-facing hints;
    re-raise anything unrecognized."""
    logger.info('BadRequest exception: ' + str(error))
    if 'Wrong file identifier/http url' in error.message or 'Failed to get http url content' in error.message:
        update.message.reply_text(
            'Failed to send image as photo, maybe the size is too big, '
            'consider using download option or try again.\n'
            f'Error: {error.message}'
        )
    elif 'Group send failed' in error.message:
        update.message.reply_text(
            'Failed to send images because one of them is too large, '
            'consider using download option or try again.\n'
            f'Error: {error.message}'
        )
    else:
        raise error


def downloadImages(imgs, headers=None):
    """Download every image to its `img.path` using one pooled session with
    retry-enabled HTTPS transport."""
    if headers is None:
        headers = dict()
    if not os.path.exists(DOWNLOAD_DIR):
        os.makedirs(DOWNLOAD_DIR)
    with requests.Session() as session:
        session.headers.update({'User-Agent': UA})
        session.mount('https://', HTTPAdapter(max_retries=RETRIES))
        for img in imgs:
            response = session.get(img.url, stream=True, timeout=5).raw
            with open(img.path, 'wb') as f:
                copyfileobj(response, f)


def sanitizeFilename(name):
    """Make `name` safe as a Windows filename (reserved chars, control
    whitespace, RTL marks, reserved device names, 255-char limit)."""
    # https://docs.microsoft.com/zh-cn/windows/win32/fileio/naming-a-file
    name = re.sub(r"[\"*/:<>?\\|]+", '_', name)  # reserved characters
    name = re.sub(r"[\t\n\r\f\v]+", ' ', name)
    name = re.sub(r"\u202E|\u200E|\u200F", '', name)  # RTL marks
    filename, ext = os.path.splitext(name)
    filename = filename.strip()
    if Path(filename).is_reserved():
        filename = '_' + filename
    name = filename + ext
    if len(name) > 255:
        # Keep the extension; trim the stem to fit.
        name = filename[:255 - len(ext)] + ext
    return name


class NazurinError(Exception):
    def __init__(self, msg):
        """Initialize with error message."""
        super().__init__(msg)
        self.msg = str(msg)

    def __str__(self):
        """Returns the string representation of this exception."""
        return self.msg
[ "yyoung2001@gmail.com" ]
yyoung2001@gmail.com
c0b1823a5549769b3efe0b1033816e1186aca332
c7e765a9bed33d3bfb21774e3995bf4a09e04add
/adminmgr/media/code/A2/python/task/BD_174_261_754_XxLnqgI.py
e6c151bc96e99f32096cdee7f9cd74743a158637
[ "Apache-2.0" ]
permissive
IamMayankThakur/test-bigdata
13dd2ac7fb76c9baed6c3a0aa943057a22e2d237
7f507918c7bec31c92eedcd94491a83486623049
refs/heads/master
2022-05-03T00:59:44.127494
2022-02-10T19:50:16
2022-02-10T19:50:16
201,585,028
10
4
Apache-2.0
2022-04-22T23:39:45
2019-08-10T05:34:09
Python
UTF-8
Python
false
false
2,960
py
# Weighted PageRank over a CSV edge list using PySpark RDDs.
# Input rows: "url,neighbor,numerator,denominator" -- the last two fields
# seed the initial rank of `url` as numerator/denominator (floored at 1.0).
from __future__ import print_function

import re
import sys
from operator import add

from pyspark.sql import SparkSession


def computeContribs(urls, rank):
    """Calculates URL contributions to the rank of other URLs."""
    num_urls = len(urls)
    for url in urls:
        yield (url, rank / num_urls)


def parseNeighbors(urls):
    """Parses a urls pair string into urls pair."""
    parts = re.split(r',', urls)
    return parts[0], parts[1]


def getValues(urls):
    """Return (url, initial_rank) where initial_rank = field3 / field4."""
    parts = re.split(r',', urls)
    return parts[0], int(parts[2])/int(parts[3])


if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Usage: pagerank <file> <iterations> <weights>", file=sys.stderr)
        sys.exit(-1)

    # Initialize the spark context.
    spark = SparkSession\
        .builder\
        .appName("PythonPageRank")\
        .getOrCreate()

    # Loads in input file. It should be in format of:
    #     URL neighbor URL
    #     ...
    lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])

    # Loads all URLs from input file and initialize their neighbors.
    links = lines.map(lambda urls: parseNeighbors(urls)).distinct().groupByKey().cache()

    # Loads all URLs with other URL(s) link to from input file and
    # initialize ranks of them (floored at 1.0).
    ranks = lines.map(lambda x: getValues(x)).distinct().reduceByKey(add)
    ranks = ranks.mapValues(lambda rank: rank if rank > 1.0 else 1.0)
    N = ranks.count()
    iterations = int(sys.argv[2])
    # Damping factor: argv[3] is a percentage; 0 means "use default 0.8".
    weight = float(sys.argv[3])/100 if int(sys.argv[3])!=0 else 0.8
    if(iterations==0):
        # iterations == 0 means "iterate until convergence": stop when every
        # rank changed by less than 1e-4 between consecutive passes.
        while(1):
            cnt = 0
            oldRanks = ranks
            contribs = links.join(ranks).flatMap(
                lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
            ranks = contribs.reduceByKey(add).mapValues(
                lambda rank: rank * weight + (1-weight))
            s = 0
            test = oldRanks.join(ranks).map(lambda r: abs(r[1][0]-r[1][1]))
            for i in test.collect():
                if(i < 0.0001):
                    cnt += 1
            if(cnt == test.count()):
                break
    else:
        # Fixed number of iterations.
        for iteration in range(int(sys.argv[2])):
            contribs = links.join(ranks).flatMap(
                lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
            ranks = contribs.reduceByKey(add).mapValues(
                lambda rank: rank * weight + (1-weight))

    # Collects all URL ranks (descending rank, then URL) and dump to console.
    for (link, rank) in ranks.sortBy(lambda x: (-x[1],x[0])).collect():
        print("%s,%s" % (link, round(rank,12)))

    spark.stop()
[ "ubuntu@ip-172-31-18-251.ap-south-1.compute.internal" ]
ubuntu@ip-172-31-18-251.ap-south-1.compute.internal
0d9d45d96dd79d9e1c3bc2408a3f391808380dce
e874e3b4312b2beebaa42fa1489b50c618055190
/Aula 2 Semana - Turtle - Preenchendo as formas.py
2c20ce02817224506dde0a83bf5b80e026b47a64
[]
no_license
CarlosDinart/PUC-SP
611a9acb6a82b7db2174d2d439b5666db48a530e
5f5f1ea4b9c55c7d20b2dcd92c461b3d8ebbb664
refs/heads/master
2023-01-23T06:46:42.492764
2020-12-09T19:41:01
2020-12-09T19:41:01
320,058,535
0
0
null
null
null
null
UTF-8
Python
false
false
852
py
# Turtle demo: draw a square with a thick black outline filled purple.
from turtle import *

# fillcolor() - sets (or returns) the fill colour used between
# begin_fill()/end_fill().
fillcolor('purple')

# pensize() - sets the line thickness. If resizemode is "auto" and the
# turtle shape is a polygon, the polygon is drawn with this thickness;
# with no argument the current pensize is returned.
pensize(10)

# pencolor() - sets the pen (outline) colour.
pencolor('black')

forward(100)
# begin_fill() - must be called before drawing a shape that is to be filled.
begin_fill()
forward(100)
left(90)
forward(100)
left(90)
forward(100)
left(90)
forward(100)
left(90)
# end_fill() - fills the shape drawn since the last call to begin_fill().
end_fill()

done()
[ "cdinart@hotmail.com" ]
cdinart@hotmail.com
9bd0e691b2cf9e835167a6fa49536ee50961c4f6
fcf4b584795dbdbb24bfa5e68028f4c9ac070b69
/useraccount/models.py
5dc7dd17acbc76bebc82627ebb3fb0ecb9fbeae0
[]
no_license
vineethjpalatty/testproject
3e86ae3f030349f4c633a6ac5ef17814bb373ff6
1087ca2ecbd5e2fe72a4a5c628e674eeaa4d2b2f
refs/heads/master
2022-12-01T14:33:30.155041
2020-08-13T17:48:27
2020-08-13T17:48:27
287,338,410
0
0
null
null
null
null
UTF-8
Python
false
false
1,092
py
from django.contrib.auth.models import AbstractUser
from django.db import models
import pytz

# Create your models here.


class DateBaseModel(models.Model):
    """Abstract base that stamps creation/modification times on every row."""
    # Set once on INSERT.
    created = models.DateTimeField(auto_now_add=True)
    # Refreshed on every save().
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True


class UserDetail(AbstractUser, DateBaseModel):
    """Custom user model with an external user id and a preferred timezone."""
    # Choices are (name, name) pairs built from every pytz timezone.
    TIMEZONES = tuple(zip(pytz.all_timezones, pytz.all_timezones))
    user_id = models.CharField(max_length=20, verbose_name="User ID", unique=True)
    timezone = models.CharField(max_length=50, verbose_name="Time Zone", choices=TIMEZONES, default='UTC')
    # Overrides AbstractUser.password to allow blank/null passwords.
    password = models.CharField(verbose_name="password", max_length=128, null=True, blank=True)

    def __str__(self):
        return self.username


class ActivityPeriod(DateBaseModel):
    """One contiguous [start_time, end_time] activity span for a user."""
    user = models.ForeignKey('UserDetail', on_delete=models.CASCADE, related_name='get_related_activity_period')
    start_time = models.DateTimeField(verbose_name='Start Time')
    end_time = models.DateTimeField(verbose_name='End Time')

    def __str__(self):
        return self.user.username
[ "you@example.com" ]
you@example.com
5e3f9f87ef9fec750e839eda115dfd7bb06d500a
5ee5e19a42417fdfb5248c070d41b61b86465eaf
/abc_243/b.py
8de66992b8f504c12ab2caefd7c905d55910e476
[]
no_license
w40141/atcoder
2e98cfe9fcb33aca8ac4567afecf603084964897
3ad74ca71ab77b929a097730047f4cf59ac38604
refs/heads/master
2023-08-08T21:37:42.744860
2023-08-07T00:50:34
2023-08-07T00:50:34
179,308,336
0
0
null
null
null
null
UTF-8
Python
false
false
263
py
# AtCoder ABC243 B: given two integer sequences, report
#   1) how many positions hold the same value in both, and
#   2) how many values appear in both sequences but at different positions.
n = int(input())
a_li = list(map(int, input().split()))
b_li = list(map(int, input().split()))

# Positional matches.
same_num = sum(1 for a, b in zip(a_li, b_li) if a == b)

# Values common to both sequences, regardless of position.
shared = set(a_li) & set(b_li)

print(same_num)
print(len(shared) - same_num)
[ "w.40141@gmail.com" ]
w.40141@gmail.com
cf5fddbf61aeb35918e576d1a1ac9e69f4b6b4c1
ea5762e8754d6b039963b0125822afb261844cc8
/src/compas_rhino/geometry/__init__.py
443b7cf51d3a306b59d12a71d80dd9480b9c6d76
[ "MIT" ]
permissive
gonzalocasas/compas
787977a4712fbfb9e230c4f433b6e2be509e4855
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
refs/heads/master
2020-03-23T20:17:55.126856
2018-07-24T22:30:08
2018-07-24T22:30:08
142,033,431
0
0
MIT
2018-07-31T14:54:52
2018-07-23T15:27:19
Python
UTF-8
Python
false
false
597
py
""" .. _compas_rhino.geometry: ******************************************************************************** geometry ******************************************************************************** .. module:: compas_rhino.geometry Object-oriented wrappers for native Rhino geometry. .. autosummary:: :toctree: generated/ RhinoCurve RhinoMesh RhinoPoint RhinoSurface """ from .point import RhinoPoint from .curve import RhinoCurve from .mesh import RhinoMesh from .surface import RhinoSurface __all__ = ['RhinoPoint', 'RhinoCurve', 'RhinoMesh', 'RhinoSurface', ]
[ "vanmelet@ethz.ch" ]
vanmelet@ethz.ch
f02f0c07768583e5d8cf8ec015a786ade7c11d29
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/LQgpGFMK9t9MELvph_9.py
6cae0722546debd006fb3cb38f4989346c5cb5e9
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
829
py
""" Given a square list ( _n_ * _n_ size) implement a function that returns a new list containing two lists equal to the two diagonals, in the following order: diagonal 1 = from upper-left to lower-right corner diagonal 2 = from upper-right to lower-left corner ### Examples get_diagonals([ [1, 2], [3, 4] ]) ➞ [ [1, 4], [2, 3] ] get_diagonals([ ["a", "b", "c"], ["d", "e", "f"], ["g", "h", "i"] ]) ➞ [ ["a", "e", "i"], ["c", "e", "g"] ] get_diagonals([ [True] ]) ➞ [ [True], [True] ] ### Notes * Your function must also work with single elements or empty lists. * Try to build both diagonals with a single loop. """ def get_diagonals(lst): ll = [] lr = [] for i in range(1, len(lst) + 1): ll.append(lst[i - 1][i - 1]) lr.append(lst[i - 1][-i]) return [ll, lr]
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
abd5014fe7f609414f56f0c5502e2ffe8eb72e7b
bc441bb06b8948288f110af63feda4e798f30225
/user_service_sdk/model/metadata_center/stream_metric_states_pb2.pyi
9e2b0174cd0abac74c50c620d020df01b5ca5d95
[ "Apache-2.0" ]
permissive
easyopsapis/easyops-api-python
23204f8846a332c30f5f3ff627bf220940137b6b
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
refs/heads/master
2020-06-26T23:38:27.308803
2020-06-16T07:25:41
2020-06-16T07:25:41
199,773,131
5
0
null
null
null
null
UTF-8
Python
false
false
2,399
pyi
# @generated by generate_proto_mypy_stubs.py.  Do not edit!
# Type stub for the StreamMetricStates protobuf message. The long
# underscore-joined aliases are how the generator avoids name collisions
# between imported modules.
import sys
from google.protobuf.descriptor import (
    Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
    RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
    Message as google___protobuf___message___Message,
)
from typing import (
    Iterable as typing___Iterable,
    Optional as typing___Optional,
    Text as typing___Text,
    Union as typing___Union,
)
from typing_extensions import (
    Literal as typing_extensions___Literal,
)
from user_service_sdk.model.metadata_center.stream_metric_schema_pb2 import (
    StreamMetricSchema as user_service_sdk___model___metadata_center___stream_metric_schema_pb2___StreamMetricSchema,
)

# Aliases so the stub can reference builtins even if a message field shadows them.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
    builtin___buffer = buffer
    builtin___unicode = unicode


class StreamMetricStates(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    org = ...  # type: builtin___int
    command = ...  # type: typing___Text

    @property
    def payload(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[user_service_sdk___model___metadata_center___stream_metric_schema_pb2___StreamMetricSchema]: ...

    def __init__(self,
        *,
        org : typing___Optional[builtin___int] = None,
        command : typing___Optional[typing___Text] = None,
        payload : typing___Optional[typing___Iterable[user_service_sdk___model___metadata_center___stream_metric_schema_pb2___StreamMetricSchema]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> StreamMetricStates: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> StreamMetricStates: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"command",b"command",u"org",b"org",u"payload",b"payload"]) -> None: ...
[ "service@easyops.cn" ]
service@easyops.cn
8ba61262c059e952b15e3587e40bdf3fe82a14b6
db575f3401a5e25494e30d98ec915158dd7e529b
/BIO_Stocks/BCRX.py
33bc27bd3c2d590575f2a523774f25c8f3d0e061
[]
no_license
andisc/StockWebScraping
b10453295b4b16f065064db6a1e3bbcba0d62bad
41db75e941cfccaa7043a53b0e23ba6e5daa958a
refs/heads/main
2023-08-08T01:33:33.495541
2023-07-22T21:41:08
2023-07-22T21:41:08
355,332,230
0
0
null
null
null
null
UTF-8
Python
false
false
2,070
py
import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging


def main(id_control=None):
    """Scrape the newest BioCryst (BCRX) press release and store it.

    Fetches the investor-relations press-release table, looks only at the
    first (most recent) row, and inserts it into the database when it was
    published today. On any failure a message is printed and logged via
    Insert_Logging, then the error is swallowed (best-effort scraper).

    id_control: identifier passed to Insert_Logging on error. It now
    defaults to None because the ``__main__`` guard previously called
    ``main()`` with no argument, which raised TypeError before any
    scraping happened.
    """
    try:
        url = 'https://ir.biocryst.com/press-releases'
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        result = requests.get(url, headers=headers)
        html_content = result.content.decode()
        soup = BeautifulSoup(html_content, 'html.parser')

        table = soup.find('table', attrs={'class': 'nirtable news collapse-table'})
        table_body = table.find('tbody')
        rows = table_body.find_all('tr')

        # Only the first row (most recent release) is of interest.
        FIRST_ROW_columns = rows[0].find_all('td')
        v_article_date = FIRST_ROW_columns[0].text.lstrip().rstrip()
        article_desc = FIRST_ROW_columns[1]

        # Store the article only when it was published today.
        istoday, v_art_date = validateday(v_article_date)
        if istoday == True:
            # Ticker symbol is derived from this file's name (BCRX.py).
            v_ticker = os.path.basename(__file__).replace(".py", "")
            v_url = article_desc.a.get('href')
            v_description = article_desc.text.lstrip().rstrip()
            now = datetime.now()
            print("URL: " + v_url)
            print("DESCRIPTION: " + v_description)
            print("ARTICLE_DATE: " + str(now))

            # Relative article links fall back to the listing-page URL.
            if "https://" in v_url:
                InsertData(v_ticker, v_description, v_url, v_art_date)
            else:
                InsertData(v_ticker, v_description, url, v_art_date)

    except Exception:
        error_message = "Entrou na excepção ao tratar " + os.path.basename(__file__) + "..."
        print(error_message)
        Insert_Logging(id_control, 'Detail', error_message)
        pass


if __name__ == "__main__":
    # Previously main() was called without the mandatory id_control
    # argument and crashed immediately; the default above fixes that.
    main()
[ "andisc_3@hotmail.com" ]
andisc_3@hotmail.com
b2038b79165d9711f194bb613dda99871eb2eb4d
d3b77550a40b860970450e702b6bcd28d5f9b3e4
/LeetCode/1464_maximum_prod_of_two_elements_in_array.py
0a66f0faf3a2108ddb4d63c69809049704a12e67
[]
no_license
CateGitau/Python_programming
47bc9277544814ad853b44a88f129713f1a40697
6ae42b3190134c4588ad785d62e08b0763cf6b3a
refs/heads/master
2023-07-08T03:08:46.236063
2021-08-12T09:38:03
2021-08-12T09:38:03
228,712,021
1
0
null
null
null
null
UTF-8
Python
false
false
463
py
""" Given the array of integers nums, you will choose two different indices i and j of that array. Return the maximum value of (nums[i]-1)*(nums[j]-1) """ nums = [10,2,5,2] def maxProduct(nums): maxim = 0 for i in range(len(nums)): for j in range(len(nums)): if i != j: ans = ((nums[i]-1)*(nums[j]-1)) if ans > maxim: maxim = ans return maxim print(maxProduct(nums))
[ "catherinegitau94@gmail.com" ]
catherinegitau94@gmail.com
595f1092a393032fbfe3530084a64011e38ba1be
bbd69601912a3361d788efd03a47f9d4e3bac09e
/demo/agw/HyperLinkCtrl.py
61df943f9642ce956aea9500436b0dd59655b898
[]
no_license
wxWidgets/Phoenix
56929484460a0399a8f1d9582bc77c20aa14748d
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
refs/heads/master
2023-09-01T07:10:17.437093
2023-08-31T05:38:01
2023-08-31T05:38:01
5,078,061
2,268
677
null
2023-09-09T17:06:59
2012-07-17T06:22:25
Python
UTF-8
Python
false
false
4,894
py
#!/usr/bin/env python

# Demo for wx.lib.agw.hyperlink.HyperLinkCtrl: four links showing default
# behaviour, rollover styling, right-click popup menus, and manual GotoURL.

import wx

import os
import sys

try:
    dirName = os.path.dirname(os.path.abspath(__file__))
except:
    dirName = os.path.dirname(os.path.abspath(sys.argv[0]))

sys.path.append(os.path.split(dirName)[0])

try:
    from agw import hyperlink as hl
except ImportError: # if it's not there locally, try the wxPython lib.
    import wx.lib.agw.hyperlink as hl

#----------------------------------------------------------------------

class TestPanel(wx.Panel):
    """Panel hosting four HyperLinkCtrl examples with different options."""

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        self.SetFont(wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False))

        sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(sizer)

        # Creator credits
        text1 = wx.StaticText(self, -1, "HyperLinkCtrl Example By Andrea Gavana")
        text1.SetFont(wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, 'Verdana'))
        sizer.Add((0,10))
        sizer.Add(text1, 0, wx.LEFT | wx.TOP | wx.BOTTOM, 10)

        text2 = wx.StaticText(self, -1, "Latest Revision: 11 May 2005")
        text2.SetFont(wx.Font(8, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, 'Verdana'))
        sizer.Add(text2, 0, wx.LEFT, 10)

        sizer.Add((0,25))

        # Default Web links:
        self._hyper1 = hl.HyperLinkCtrl(self, wx.ID_ANY, "wxPython Main Page",
                                        URL="http://www.wxpython.org/")
        sizer.Add(self._hyper1, 0, wx.ALL, 10)

        # Web link with underline rollovers, opens in window
        self._hyper2 = hl.HyperLinkCtrl(self, wx.ID_ANY, "My Home Page",
                                        URL="http://xoomer.virgilio.it/infinity77/")
        sizer.Add(self._hyper2, 0, wx.ALL, 10)
        self._hyper2.Bind(hl.EVT_HYPERLINK_MIDDLE, self.OnMiddleLink)
        self._hyper2.AutoBrowse(False)
        self._hyper2.SetColours("BLUE", "BLUE", "BLUE")
        self._hyper2.EnableRollover(True)
        self._hyper2.SetUnderlines(False, False, True)
        self._hyper2.SetBold(True)
        self._hyper2.OpenInSameWindow(True) # middle click to open in window
        self._hyper2.SetToolTip(wx.ToolTip("Middle-click to open in browser window"))
        self._hyper2.UpdateLink()

        # Intense link examples..
        self._hyper3 = hl.HyperLinkCtrl(self, wx.ID_ANY, "wxPython Mail Archive",
                                        URL="http://lists.wxwidgets.org/")
        sizer.Add(self._hyper3, 0, wx.ALL, 10)
        self._hyper3.Bind(hl.EVT_HYPERLINK_RIGHT, self.OnRightLink)

        self._hyper3.SetLinkCursor(wx.CURSOR_QUESTION_ARROW)
        self._hyper3.SetColours("DARK GREEN", "RED", "NAVY")
        self._hyper3.SetUnderlines(False, False, False)
        self._hyper3.EnableRollover(True)
        self._hyper3.SetBold(True)
        self._hyper3.DoPopup(False)
        self._hyper3.UpdateLink()

        self._hyper4 = hl.HyperLinkCtrl(self, wx.ID_ANY, "Open Google In Current Browser Window?",
                                        URL="http://www.google.com")
        sizer.Add(self._hyper4, 0, wx.ALL, 10)
        self._hyper4.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLink)
        self._hyper4.SetToolTip(wx.ToolTip("Click link for yes, no, cancel dialog"))
        self._hyper4.AutoBrowse(False)

    def OnLink(self, event):
        # Goto URL, demonstrates attempt to open link in current window:
        strs = "Open Google In Current Browser Window "
        strs = strs + "(NO Opens Google In Another Browser Window)?"
        nResult = wx.MessageBox(strs, "HyperLinkCtrl", wx.YES_NO | wx.CANCEL | wx.ICON_QUESTION, self)
        if nResult == wx.YES:
            self._hyper4.GotoURL("http://www.google.com", True, True)
        elif nResult == wx.NO:
            self._hyper4.GotoURL("http://www.google.com", True, False)

    def OnRightLink(self, event):
        # Show a throwaway popup menu at the click position.
        pos = self._hyper3.GetPosition() + event.GetPosition()
        menuPopUp = wx.Menu("Having a nice day?")
        ID_MENU_YES = wx.NewIdRef()
        ID_MENU_NO = wx.NewIdRef()
        menuPopUp.Append(ID_MENU_YES, "Yes, absolutely!")
        menuPopUp.Append(ID_MENU_NO, "I've had better")
        self.PopupMenu(menuPopUp)
        menuPopUp.Destroy()

    def OnMiddleLink(self, event):
        self._hyper2.GotoURL("http://xoomer.virgilio.it/infinity77/", True, True)

#----------------------------------------------------------------------

def runTest(frame, nb, log):
    # Entry point used by the wxPython demo framework.
    win = TestPanel(nb, log)
    return win

#----------------------------------------------------------------------

overview = hl.__doc__

if __name__ == '__main__':
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
[ "robin@alldunn.com" ]
robin@alldunn.com
4f54d925f1dd8a37f173fcd6da68ed5f39fd2e46
909762751929e2fed02311953e15f8a6316efbd0
/tests/test_oskar/plot_antpos.py
6987aa1365419f0706b0b734bae1ec105e2b156a
[]
no_license
telegraphic/interfits
a166258459deaeb831d49787952a3e08d2aaaf40
0ee46e94b84d405c8381772be05b42e0b9c41158
refs/heads/master
2021-01-19T01:57:32.397157
2016-07-02T01:38:06
2016-07-02T01:38:06
10,858,699
0
0
null
null
null
null
UTF-8
Python
false
false
881
py
import numpy as np
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

from test_main import *


def plot3d(x, y, z, xl='X', yl='Y', zl='Z', c='#cc0000'):
    """Scatter one (x, y, z) point cloud on the module-level 3D axes.

    NOTE: relies on the global ``ax`` created below before each call.
    """
    ax.scatter(x, y, z, c=c)
    ax.set_xlabel(xl)
    ax.set_ylabel(yl)
    ax.set_zlabel(zl)


# Antenna station positions (STABXYZ column) read from the UV-FITS file.
l = LedaFits('vis_00.uvfits')
xyz = l.d_array_geometry["STABXYZ"]
x, y, z = np.split(xyz, 3, axis=1)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plot3d(x, y, z, 'X', 'Y', 'Z', c='#00cc00')
plt.show()

# Baseline vectors and their UVW projections at two declinations.
bls = coords.computeBaselineVectors(xyz)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# NOTE(review): np.rad2deg(0) == 0 so behavior is unaffected, but
# np.deg2rad was presumably intended here (cf. the call below) — confirm.
uvw = coords.computeUVW(bls, H=0, d=np.rad2deg(0)) * 1e6
u, v, w = np.split(uvw, 3, axis=1)
plot3d(u, v, w, 'U', 'V', 'W')
uvw = coords.computeUVW(bls, H=0, d=np.deg2rad(34.07)) * 1e6
u, v, w = np.split(uvw, 3, axis=1)
plot3d(u, v, w, 'U', 'V', 'W', c='#00cc00')
plt.show()
[ "dan@thetelegraphic.com" ]
dan@thetelegraphic.com
0a0481107bc945c99cf9933d981d631dccb91e5d
3fc01457951a956d62f5e8cc0a8067f6796ee200
/misago/threads/api/postingendpoint/subscribe.py
c158eccbf462feee7a1f7aa449d37b93a4b7ce7f
[]
no_license
kinsney/education
8bfa00d699a7e84701a8d49af06db22c384e0e8d
48f832f17c2df7b64647b3db288abccf65868fe6
refs/heads/master
2021-05-04T01:15:03.078130
2016-12-04T03:18:20
2016-12-04T03:18:20
71,164,542
3
1
null
null
null
null
UTF-8
Python
false
false
1,599
py
from misago.users.models import (
    AUTO_SUBSCRIBE_NONE, AUTO_SUBSCRIBE_NOTIFY, AUTO_SUBSCRIBE_NOTIFY_AND_EMAIL)

from ...models import Subscription
from . import PostingEndpoint, PostingMiddleware


class SubscribeMiddleware(PostingMiddleware):
    """Auto-subscribe a user to threads they start or reply to.

    Whether email notifications are enabled follows the user's
    ``subscribe_to_*`` preference (NONE / NOTIFY / NOTIFY_AND_EMAIL).
    """

    def use_this_middleware(self):
        # Edits never change subscriptions; only new threads and replies do.
        return self.mode != PostingEndpoint.EDIT

    def post_save(self, serializer):
        self.subscribe_new_thread()
        self.subscribe_replied_thread()

    def subscribe_new_thread(self):
        """Subscribe the author to a thread they have just started."""
        if self.mode != PostingEndpoint.START:
            return

        if self.user.subscribe_to_started_threads == AUTO_SUBSCRIBE_NONE:
            return

        self.user.subscription_set.create(
            category=self.thread.category,
            thread=self.thread,
            send_email=self.user.subscribe_to_started_threads == AUTO_SUBSCRIBE_NOTIFY_AND_EMAIL
        )

    def subscribe_replied_thread(self):
        """Subscribe the author to a thread they replied to, unless already subscribed."""
        if self.mode != PostingEndpoint.REPLY:
            return

        if self.user.subscribe_to_replied_threads == AUTO_SUBSCRIBE_NONE:
            return

        try:
            # Fix: the fetched object was assigned to an unused local;
            # we only care whether the subscription exists.
            self.user.subscription_set.get(thread=self.thread)
            return
        except Subscription.DoesNotExist:
            pass

        # Replying to the same thread again? The first reply already subscribed us.
        if self.user.post_set.filter(thread=self.thread).count() > 1:
            return

        self.user.subscription_set.create(
            category=self.thread.category,
            thread=self.thread,
            send_email=self.user.subscribe_to_replied_threads == AUTO_SUBSCRIBE_NOTIFY_AND_EMAIL
        )
[ "kinsney@bupt.edu.cn" ]
kinsney@bupt.edu.cn
e7880e161d34d1259ef590b01aac6ce92b5c121b
ff0b9fad40af0f7a792033884db728756ea83756
/forge/apps/OptiML/src/NeuralNetwork/examples/mnist/visualize.py
f98383826111dc0c413d51ed09d67edf53958b49
[]
no_license
das-projects/Grothendieck
721d7ca60c4838385c43bdc17894cb154507c302
4effc374f8050655db4820db3a7deaf63effb2a4
refs/heads/master
2020-04-02T06:16:56.494997
2016-08-12T10:05:55
2016-08-12T10:05:55
65,486,230
2
0
null
null
null
null
UTF-8
Python
false
false
732
py
import numpy
import scipy.misc
import sys

# Get the image number to display.
if len(sys.argv) < 2:
    # Fix: py2-only `print` statements replaced with py3-compatible calls.
    print('Specify the training image number to show as an input argument')
    print('Requires that train_data.txt be generated.')
    print('Example: >>> python visualize.py 100')
    sys.exit(0)
img_num = int(sys.argv[1])

# Read the requested image row from file.
# Fix: message previously said "training_data.txt" but the file opened
# is "train_data.txt"; also the handle was never closed.
print('Loading train_data.txt...')
img_str = ''
with open('train_data.txt') as train_data_file:
    for i, l in enumerate(train_data_file):
        # (i - 1): presumably the first line is a header/offset — TODO confirm.
        if (i - 1) == img_num:
            img_str = l
            break

# Decode the tab-separated pixel row, rescale to 0-255, and save as 28x28 PNG.
img_1D = numpy.fromstring(img_str, dtype=float, sep="\t") * 256
img = numpy.reshape(img_1D, (28, 28))
name = 'img_' + str(img_num) + '.png'
# NOTE(review): scipy.misc.imsave was removed in modern SciPy; kept here
# since swapping it would add a new dependency (imageio).
scipy.misc.imsave(name, img)
print('Saved ' + name)
[ "jonathansick@mac.com" ]
jonathansick@mac.com
3068a726127a93c4aa99d3ed3646918aec29708e
4389d3bfa4ded480caf5083f410bdd2253fae767
/20_Project/01_simple_linear_model/test2.py
74871ccda42383f1a4616b8ad17191981aa2a1ef
[]
no_license
Freshield/LEARN_TENSORFLOW
4fb7fec0bc7929697549ee52e453b137a24c3383
87be0362d24b748c841e5c9e185d2061ffae9272
refs/heads/master
2021-01-17T08:06:14.186041
2018-06-03T12:42:46
2018-06-03T12:42:46
83,846,167
0
2
null
null
null
null
UTF-8
Python
false
false
313
py
import numpy as np
import tensorflow as tf
import pandas as pd

# TF1-style interactive session so Tensor.eval() works below.
sess = tf.InteractiveSession()

# NumPy shuffle demo (in place).
a = np.arange(10)
# Fix: py2-only `print` statements replaced with py3-compatible calls.
print(a)
np.random.shuffle(a)
print(a)

# TensorFlow 1.x equivalent of the same shuffle.
ta = tf.range(0, 10)
tb = tf.random_shuffle(ta)

print(ta.eval())
print(tb.eval())

# Quick shape check on the local CSV (no header row expected).
dataset = pd.read_csv('ciena_test.csv', header=None)
print(dataset.shape)
[ "zxdsw199182@gmail.com" ]
zxdsw199182@gmail.com
400886929db0a77432f8a35f412a27327f231b80
3784495ba55d26e22302a803861c4ba197fd82c7
/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/api/_v2/keras/applications/__init__.py
dad149e01c0fae0a0684161a8a1002e405429ccc
[ "MIT" ]
permissive
databill86/HyperFoods
cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789
9267937c8c70fd84017c0f153c241d2686a356dd
refs/heads/master
2021-01-06T17:08:48.736498
2020-02-11T05:02:18
2020-02-11T05:02:18
241,407,659
3
0
MIT
2020-02-18T16:15:48
2020-02-18T16:15:47
null
UTF-8
Python
false
false
1,872
py
# This file is MACHINE GENERATED! Do not edit. # Generated by: tensorflow/python/tools/api/generator/create_python_api.py script. """Keras Applications are canned architectures with pre-trained weights. """ from __future__ import print_function as _print_function import sys as _sys from . import densenet from . import imagenet_utils from . import inception_resnet_v2 from . import inception_v3 from . import mobilenet from . import mobilenet_v2 from . import nasnet from . import resnet from . import resnet50 from . import resnet_v2 from . import vgg16 from . import vgg19 from . import xception from tensorflow.python.keras.applications.densenet import DenseNet121 from tensorflow.python.keras.applications.densenet import DenseNet169 from tensorflow.python.keras.applications.densenet import DenseNet201 from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2 from tensorflow.python.keras.applications.inception_v3 import InceptionV3 from tensorflow.python.keras.applications.mobilenet import MobileNet from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2 from tensorflow.python.keras.applications.nasnet import NASNetLarge from tensorflow.python.keras.applications.nasnet import NASNetMobile from tensorflow.python.keras.applications.resnet import ResNet101 from tensorflow.python.keras.applications.resnet import ResNet152 from tensorflow.python.keras.applications.resnet import ResNet50 from tensorflow.python.keras.applications.resnet_v2 import ResNet101V2 from tensorflow.python.keras.applications.resnet_v2 import ResNet152V2 from tensorflow.python.keras.applications.resnet_v2 import ResNet50V2 from tensorflow.python.keras.applications.vgg16 import VGG16 from tensorflow.python.keras.applications.vgg19 import VGG19 from tensorflow.python.keras.applications.xception import Xception del _print_function
[ "luis20dr@gmail.com" ]
luis20dr@gmail.com
42e0505d50651f07009cd94e32945d6b14075fcd
3354e6bdd4aeb2ddec84e6a8036c90cd24b6577a
/(구)자료구조와 알고리즘/(구)Quizes/backjoon/back_5397.py
86476955d445acfe52b6e2d808c0b39f9700d356
[]
no_license
hchayan/Data-Structure-and-Algorithms
1125d7073b099d8c6aae4b14fbdb5e557dcb9412
be060447e42235e94f93a0b2f94f84d2fd560ffe
refs/heads/master
2023-01-05T10:15:02.862700
2020-11-04T08:16:56
2020-11-04T08:16:56
209,513,516
1
0
null
null
null
null
UTF-8
Python
false
false
982
py
import sys ''' for n in range(int(sys.stdin.readline().rstrip())): word=[] cursor = 0 for i in sys.stdin.readline().rstrip(): if i == "<": if cursor != 0: cursor-=1 elif i == ">": if cursor != len(word): cursor+=1 elif i =="-" and len(word) !=0: word.pop() else: word = word[:cursor]+[i]+word[cursor:] cursor+=1 print(''.join(word)) ''' for _ in range(int(input())): left_stack = [] right_stack = [] for i in input(): if i == '-': if left_stack: left_stack.pop() elif i == '<': if left_stack: right_stack.append(left_stack.pop()) elif i == '>': if right_stack: left_stack.append(right_stack.pop()) else: left_stack.append(i) left_stack.extend(reversed(right_stack)) print(''.join(left_stack))
[ "k852012@naver.com" ]
k852012@naver.com
9d09fcf610c797bccf89f3f24ef9afffc8933042
67d8173a716da10a7350213d98938aae9f2115ce
/LeetCode/LC_PY_ANSWERS/sort-an-array.py
0cecad74707c151f8b38b2b110494d2e85eee7de
[ "MIT" ]
permissive
jxie0755/Learning_Python
94490d41bdf93acf8396f843328e38b6da310b0f
143422321cbc3715ca08f6c3af8f960a55887ced
refs/heads/master
2021-11-02T22:47:35.790239
2021-09-26T04:26:23
2021-09-26T04:26:23
101,445,132
0
2
null
2019-02-19T15:48:44
2017-08-25T22:00:16
Python
UTF-8
Python
false
false
2,412
py
# Time:  O(nlogn)
# Space: O(n)


# merge sort solution
class Solution(object):
    def sortArray(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Sorts nums in place with a bottom-up merge into a temp buffer
        and returns it.  Fix: ported from Python 2 — integer `/` would
        yield a float index on py3 and `xrange` no longer exists.
        """
        def mergeSort(start, end, nums):
            # Sort the half-open range nums[start:end).
            if end - start <= 1:
                return
            mid = start + (end - start) // 2
            mergeSort(start, mid, nums)
            mergeSort(mid, end, nums)
            right = mid
            tmp = []
            for left in range(start, mid):
                # Pull in right-half elements smaller than the next left one.
                while right < end and nums[right] < nums[left]:
                    tmp.append(nums[right])
                    right += 1
                tmp.append(nums[left])
            # Remaining nums[right:end) are already in place and >= tmp.
            nums[start:start + len(tmp)] = tmp

        mergeSort(0, len(nums), nums)
        return nums


# Time:  O(nlogn), on average
# Space: O(logn)
import random


# quick sort solution
class Solution2(object):
    def sortArray(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Randomized quicksort built on a quickselect partition.
        Fix: same Python 3 port as above (`//`, `range`).
        """
        def kthElement(nums, left, mid, right, compare):
            # Rearrange nums[left:right) so index mid-1 holds the element
            # that belongs there in sorted order (quickselect).
            def PartitionAroundPivot(left, right, pivot_idx, nums, compare):
                new_pivot_idx = left
                nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
                for i in range(left, right):
                    if compare(nums[i], nums[right]):
                        nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                        new_pivot_idx += 1
                nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
                return new_pivot_idx

            right -= 1
            while left <= right:
                pivot_idx = random.randint(left, right)
                new_pivot_idx = PartitionAroundPivot(left, right, pivot_idx, nums, compare)
                if new_pivot_idx == mid - 1:
                    return
                elif new_pivot_idx > mid - 1:
                    right = new_pivot_idx - 1
                else:  # new_pivot_idx < mid - 1.
                    left = new_pivot_idx + 1

        def quickSort(start, end, nums):
            if end - start <= 1:
                return
            mid = start + (end - start) // 2
            kthElement(nums, start, mid, end, lambda a, b: a < b)
            quickSort(start, mid, nums)
            quickSort(mid, end, nums)

        quickSort(0, len(nums), nums)
        return nums
[ "30805062+jxie0755@users.noreply.github.com" ]
30805062+jxie0755@users.noreply.github.com
e58e9401d8429723764c02edf926adfbbd8758ca
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/verbs/_drafts.py
6da1b4da4e3df04fbf81a9bba113d2c83e568862
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
231
py
from xai.brain.wordbase.verbs._draft import _DRAFT #calss header class _DRAFTS(_DRAFT, ): def __init__(self,): _DRAFT.__init__(self) self.name = "DRAFTS" self.specie = 'verbs' self.basic = "draft" self.jsondata = {}
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
1920724bea68c7268d4dc99408f617f42c248858
6b2a8dd202fdce77c971c412717e305e1caaac51
/solutions_5636311922769920_1/Python/aelg/prob4.py
4fbd0b4d1fa1da615948253f8fc70a6d1a53b3c0
[]
no_license
alexandraback/datacollection
0bc67a9ace00abbc843f4912562f3a064992e0e9
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
refs/heads/master
2021-01-24T18:27:24.417992
2017-05-23T09:23:38
2017-05-23T09:23:38
84,313,442
2
4
null
null
null
null
UTF-8
Python
false
false
694
py
#!/usr/bin/python3


def solve():
    """Read one test case "K C S" from stdin and return the answer string.

    Student t checks the tile whose C base-K digits are all t, i.e.
    position t * (K**C - 1) // (K - 1); after C expansions that tile's
    state reveals original tile t. Once tile K-1 gets a checker every
    original tile is covered. If the S allowed students run out first,
    the case is IMPOSSIBLE.
    """
    k, c, s = map(int, input().split())

    positions = []
    tile = 0
    students_left = s
    while students_left > 0:
        # Position with all C digits equal to `tile` in base K.
        pos = 0
        for i in range(c - 1, -1, -1):
            pos += tile * (k ** i)
        if tile == k - 1:
            positions.append(pos)
            # Output is 1-based.
            return " ".join(str(p + 1) for p in positions)
        tile += 1
        positions.append(pos)
        students_left -= 1
    return 'IMPOSSIBLE'


def main():
    cases = int(input())
    for i in range(0, cases):
        print("Case #%d: %s" % (i + 1, solve()))


# Fix: main() previously ran unconditionally at import time.
if __name__ == '__main__':
    main()
[ "alexandra1.back@gmail.com" ]
alexandra1.back@gmail.com
74f545ddd8aead850b286517ff15de1cb279c2a1
cbda89443b351bb2047180dad4e300c13dc3df7f
/Crystals/Morpurgo_all_atoms_Polbinds_qsplit_fittedscreens/Jobs_chelpg/PDIF-CN2/PDIF-CN2_cation_neut_inner3_outer0/PDIF-CN2_cation_neut_inner3_outer0.py
0529c196b51dd6089ab4ee51b1366f451a48a8aa
[]
no_license
sheridanfew/pythonpolarisation
080f52979f98d26360a46412a10c8e3f51ee4549
178e2684e9a239a8e60af5f7b1eb414ac5f31e92
refs/heads/master
2021-07-10T01:07:40.978790
2021-03-11T16:56:37
2021-03-11T16:56:37
96,101,351
0
0
null
2017-07-03T13:37:06
2017-07-03T10:54:52
null
UTF-8
Python
false
false
7,012
py
import sys sys.path.append('../../../../../') from BasicElements import * from BasicElements.Register import GetRegister from BasicElements.MoleculeFactory import ReadMoleculeType from BasicElements.MoleculeFactory import GetMolecule from BasicElements.Crystal import * from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms from Polarizability import * from Polarizability.GetEnergyFromDips import * from Polarizability.JMatrix import JMatrix import numpy as np from math import * from time import gmtime, strftime import os print strftime("%a, %d %b %Y %X +0000", gmtime()) qdict={"anion": -1.0, "neut": 0.0, "cation": 1.0} name='PDIF-CN2_cation_neut_inner3_outer0' #For crystals here, all cubic and centred at centre insize=3 #number of TVs in each dir central mol is from edge of inner region outsize=0 state='cation' mols_cen=['PDIF_CN2_mola_cation_aniso_cifstruct_chelpg.xyz'] mols_sur=['PDIF_CN2_mola_neut_aniso_cifstruct_chelpg.xyz'] mols_outer=['sp_PDIFCN2_neut.xyz'] screenradius=2.5533199878 #From cif: ''' PDIF-CN2 _cell_length_a 5.2320(14) _cell_length_b 7.638(2) _cell_length_c 18.819(5) _cell_angle_alpha 92.512(5) _cell_angle_beta 95.247(5) _cell_angle_gamma 104.730(4) _cell_volume 722.5(3) ''' #Get translation vectors: a=5.232014/0.5291772109217 b=7.6382/0.5291772109217 c=18.8195/0.5291772109217 alpha=92.5125*(pi/180) beta=95.2475*(pi/180) gamma=104.7304*(pi/180) cif_unit_cell_volume=722.53/(a*b*c*(0.5291772109217**3)) cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma))) #Converts frac coords to carts matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)], [0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)], [0, 0, c*cell_volume/sin(gamma)]]) #carts to frac matrix_to_fractional=matrix_to_cartesian.I #TVs, TV[0,1,2] are the three translation vectors. 
TV=matrix_to_cartesian.T cut=8.0 totsize=insize+outsize #number of TVs in each dir nearest c inner mol is from edge of outer region cenpos=[totsize,totsize,totsize] length=[2*totsize+1,2*totsize+1,2*totsize+1] maxTVs=insize outer_maxTVs=insize+outsize #for diamond outer, don't specify for cube and will fill to cube edges. print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV # Place Molecules crystal=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs) crystal().ModifyPolarizabilityCry(jmtype='TholeExp',fittype='empirical') #crystal._mols contains all molecules. #mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc. #mols[0][x,y,z] contains molecule a in position x,y,z #mols may as such be iterated over in a number of ways to consider different molecules. print 'state',state #print 'q: ', qdict[state] #for atom in crystal()._mols[0][crystal()._cenpos[0]][crystal()._cenpos[1]][crystal()._cenpos[2]](): # atom()._crg=qdict[state] crystal().print_posns() #Calculate Properties: print strftime("%a, %d %b %Y %X +0000", gmtime()) E0 = np.matrix([0.,0.,0.]) print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'Calc jm' #screenradius=1.6623/(Natoms**2) jm = JMatrix(jmtype='TholeExp',screenradius=screenradius) print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'Calc dips:' d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut) print strftime("%a, %d %b %Y %X +0000", gmtime()) Efield = get_electric_field(E0) potential = get_potential() print strftime("%a, %d %b %Y %X +0000", gmtime()) #print 'dips', d print 'splitting dips onto atoms' split_d = split_dipoles_onto_atoms(d) print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'summing dips:' tot = np.matrix([0.,0.,0.]) for dd in split_d: tot += dd print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'total dip moment', tot Uqq = 
np.multiply(get_U_qq(potential=potential),27.211) print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'Uqq', Uqq Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211) print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'Uqd', Uqd Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211) print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'Udd', Udd energyev = Udd+Uqd+Uqq print 'energyev', energyev energy=energyev/27.211 print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'Making .dat cross sections for gnuplot' # print TVs if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs') f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w') TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n') f.write(TVstr) f.flush() f.close() # print dipoles if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs') f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w') for dd in split_d: dstr=str(dd) f.write(dstr) f.write('\n') f.flush() f.close() # print properties for charge in centrepos time=strftime("%a, %d %b %Y %X +0000", gmtime()) f = open('%s_properties.csv' % name, 'w') f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z') f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2])) f.flush() f.close() # print header for polbinds f = open('polbind_energies_%s_properties.csv' % name, 'w') f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tpolbind(eV)') f.flush() f.close() # POL BIND ENERGIES #Note that this assumes a cube, and values for which for dist in range(0,(length[0]/2)+1,1): print '\n\nDIST: ', dist, '\n' for a in 
range(crystal()._cenpos[0]-dist,crystal()._cenpos[0]+dist+1,1): for b in range(crystal()._cenpos[1]-dist,crystal()._cenpos[1]+dist+1,1): for c in range(crystal()._cenpos[2]-dist,crystal()._cenpos[2]+dist+1,1): print strftime("%a, %d %b %Y %X +0000", gmtime()) print 'a,b,c',a,b,c for molincell in range(0,len(crystal()._mols),1): crystal().calc_polbind(a1=crystal()._cenpos[0],b1=crystal()._cenpos[1],c1=crystal()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,jm=jm._m,oldUqd=Uqd) print 'polbind: ', crystal()._polbinds[molincell][a][b][c] f = open('polbind_energies_%s_properties.csv' % name, 'a') f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,crystal()._polbinds[molincell][a][b][c])) f.flush() f.close() # Redo this and overwrite after each set to ensure we have some even if not all polbinds complete crystal().print_polbinds() print 'Job Completed Successfully.'
[ "sheridan.few@gmail.com" ]
sheridan.few@gmail.com
ea5b9b937f47326657b9da399ad06bdf9c9d3f9f
857d2653df85eec7b740a782005da2872d532bff
/training/reco/k_means/k_means.py
d2dc9088e57ba424bc95239998544ce1abec4e40
[]
no_license
calzonelover/CMS_DC_ANOMALY
1621924dc66ec2a80a2aa3af3bb29762bb558073
5a02ab59ec52c462c37111f83e286149dd86754b
refs/heads/master
2020-05-31T15:12:50.074681
2019-08-22T15:05:42
2019-08-22T15:05:42
190,348,831
2
2
null
null
null
null
UTF-8
Python
false
false
6,406
py
import matplotlib.pyplot as plt import numpy as np import pandas as pd import os from sklearn import svm from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler, normalize, MinMaxScaler # customize from data.prompt_reco.setting import REDUCED_FEATURES, FEATURES, SELECT_PD import data.prompt_reco.utility as utility from model.reco.autoencoder import ( VanillaAutoencoder, SparseAutoencoder, ContractiveAutoencoder, VariationalAutoencoder ) COLORS = ('green', 'blue') GROUP_LABELS = ('A', 'B') HUMAN_LABELS = ('Good', 'Bad') def main(): # Setting is_reduced_data = True Autoencoder = VanillaAutoencoder test_model = "Vanilla" number_model = 1 BS = 256 N_FEATURES = len(REDUCED_FEATURES*7) if is_reduced_data else 2807 # data files = utility.get_file_list(chosed_pd=SELECT_PD) # choosing only ZeroBias feature_names = utility.get_feature_name(features=FEATURES) reduced_feature_names = utility.get_feature_name(features=REDUCED_FEATURES) data = pd.DataFrame(utility.get_data(files), columns=feature_names) data["run"] = data["run"].astype(int) data["lumi"] = data["lumi"].astype(int) data.drop(["_foo", "_bar", "_baz"], axis=1, inplace=True) if is_reduced_data: not_reduced_column = feature_names for intersected_elem in reduced_feature_names: not_reduced_column.remove(intersected_elem) data.drop(not_reduced_column, axis=1, inplace=True) data = data.sort_values(["run", "lumi"], ascending=[True,True]) data = data.reset_index(drop=True) data["label"] = data.apply(utility.add_flags, axis=1) # training print("Preparing dataset...") split = int(0.8*len(data)) # train set df_train = data.iloc[:split].copy() X_train = df_train.iloc[:, 0:N_FEATURES] y_train = df_train["label"] # test set df_test = data.iloc[split:].copy() X_test = df_test.iloc[:, 0:N_FEATURES] y_test = df_test["label"] X_test = pd.concat([X_train[y_train == 1], X_test]) y_test = pd.concat([y_train[y_train == 1], y_test]) X_train = X_train[y_train == 0] y_train = 
y_train[y_train == 0] print("Training KMeans") # standardize data # transformer = StandardScaler() transformer = MinMaxScaler(feature_range=(0,1)) transformer.fit(X_train.values) X_train = transformer.transform(X_train.values) X_test = transformer.transform(X_test.values) # X_train = normalize(X_train, norm='l1') ## combine X = np.concatenate((X_train, X_test)) y = np.concatenate((y_train, y_test)) # training kmeans_model = KMeans(n_clusters=2).fit(X) y_pred = kmeans_model.predict(X) # PCA pca = PCA(n_components=2) principal_components = pca.fit_transform(X) # visualzie K-means fig, ax = plt.subplots() for i, group_label in enumerate(GROUP_LABELS): scat_data = principal_components[y_pred == i] ax.scatter( scat_data[:, 0], scat_data[:, 1], alpha=0.8, c = COLORS[i if i == 0 else 1], label = GROUP_LABELS[i] ) ax.legend() plt.title('Clustering by K-Means, visual in Principal Basis (JetHT)') plt.savefig('JetHT_kmeans.png') # visual labeld fig, ax = plt.subplots() for i, group_label in enumerate(GROUP_LABELS): scat_data = principal_components[y == i] ax.scatter( scat_data[:, 0], scat_data[:, 1], alpha=0.8, c = COLORS[i], label = HUMAN_LABELS[i] ) ax.legend() plt.xlabel("Principal component 1") plt.ylabel("Principal component 2") plt.title('Labeled by Human, visual in Principal Basis (JetHT)') plt.savefig('JetHT_label.png') # visual One-Class SVM cutoff svm_model = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) svm_model.fit(X_train) sampling_svm_dvs = -svm_model.decision_function(X)[:, 0] min_sampling_svm_dvs, max_sampling_svm_dvs = min(sampling_svm_dvs), max(sampling_svm_dvs) colors_svm_dvs = list(map(lambda x: [0.2, 1.0-((x-min_sampling_svm_dvs)/(max_sampling_svm_dvs-min_sampling_svm_dvs)), (x-min_sampling_svm_dvs)/(max_sampling_svm_dvs-min_sampling_svm_dvs)], sampling_svm_dvs)) colors_svm_cutoff = list(map(lambda x: [0, 0, 0.8] if x > 20.0 else [0, 1.0, 0], sampling_svm_dvs)) fig, ax = plt.subplots() ax.scatter( principal_components[:, 0], principal_components[:, 
1], alpha=0.8, c = colors_svm_dvs ) plt.title('Decision Value from SVM, visual in Principal Basis (JetHT)') plt.savefig('SVM_DCs.png') fig, ax = plt.subplots() ax.scatter( principal_components[:, 0], principal_components[:, 1], alpha=0.8, c = colors_svm_cutoff ) plt.xlabel("Principal component 1") plt.ylabel("Principal component 2") plt.title('Applying cutoff in SVM, visual in Principal Basis (JetHT)') plt.savefig('SVM_cutoff.png') # visual autoencoder loss autoencoder = Autoencoder( input_dim = [N_FEATURES], summary_dir = "model/reco/summary", model_name = "{} model {}".format(test_model, number_model), batch_size = BS ) autoencoder.restore() sampling_totalsd = autoencoder.get_sd(X, scalar=True) max_totalsd = max(sampling_totalsd) min_totalsd = min(sampling_totalsd) colors_cutoff = list(map(lambda x: [0, 0, 0.8] if x > 10.0 else [0, 1.0, 0], sampling_totalsd)) colors_loss = list(map(lambda x: [0.2, 1.0-((x-min_totalsd)/(max_totalsd-min_totalsd)), (x-min_totalsd)/(max_totalsd-min_totalsd)], sampling_totalsd)) fig, ax = plt.subplots() ax.scatter( principal_components[:, 0], principal_components[:, 1], alpha=0.8, c = np.log10(sampling_totalsd) ) plt.xlabel("Principal component 1") plt.ylabel("Principal component 2") plt.title('Loss from AE data, testing set visual in Principal Basis (JetHT)') plt.savefig('JetHT_AE_loss.png') fig, ax = plt.subplots() ax.scatter( principal_components[:, 0], principal_components[:, 1], alpha=0.8, c = colors_cutoff, ) plt.xlabel("Principal component 1") plt.ylabel("Principal component 2") plt.title('Applying cutoff in AE, testing set visual in Principal Basis (JetHT)') plt.savefig('JetHT_AE_cutoff.png')
[ "patomporn.pay@gmail.com" ]
patomporn.pay@gmail.com
260ff632015d6f5932bddcb9cfb80d61bc74add3
1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5
/codingBat/python/warmup2/stringMatch.py
885e6c9cdd2dd38ba959c80188a62fe641e07e7d
[ "MIT" ]
permissive
sagarnikam123/learnNPractice
f0da3f8acf653e56c591353ab342765a6831698c
1b3b0cb2cff2f478006626a4c37a99102acbb628
refs/heads/master
2023-02-04T11:21:18.211654
2023-01-24T14:47:52
2023-01-24T14:47:52
61,184,927
2
1
MIT
2022-03-06T11:07:18
2016-06-15T06:57:19
Python
UTF-8
Python
false
false
1,024
py
#######################################################################################################################
#
#   stringMatch
#
#   Given 2 strings, a and b, return the number of the positions where they contain
#   the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the "xx", "aa",
#   and "az" substrings appear in the same place in both strings.
#
#######################################################################################################################
#
#   stringMatch("xxcaazz", "xxbaaz") → 3
#   stringMatch("abc", "abc") → 2
#   stringMatch("abc", "axc") → 0
#   stringMatch("hello", "he") → 1
#   stringMatch("he", "hello") → 1
#   stringMatch("h", "hello") → 0
#   stringMatch("", "hello") → 0
#   stringMatch("aabbccdd", "abbbxxd") → 1
#   stringMatch("aaxxaaxx", "iaxxai") → 3
#
#######################################################################################################################


# Fix: the file contained only the problem specification; implementation added.
def stringMatch(a, b):
    """Count positions i where a[i:i+2] == b[i:i+2].

    Only indices valid in both strings are compared, so any string
    shorter than 2 characters contributes 0 matches.
    """
    overlap = min(len(a), len(b))
    return sum(1 for i in range(overlap - 1) if a[i:i + 2] == b[i:i + 2])
[ "sagarnikam123@gmail.com" ]
sagarnikam123@gmail.com
cd9b2621354b41dab2657e8f0bae14493858399f
6725ff7ad5cbcc1413654c7fbe4d9795a35e50b5
/L4_task3.py
9e2c5e6f7a3ba2cb1e763f102fa57d46d7e29e8d
[]
no_license
MaksimKulya/PythonCourse
59e5a2e67378bfdddf5bd96db8e25782489b7db1
40b5559e2fac76d3fb3221ba4b90478dd10f442c
refs/heads/main
2023-05-03T05:13:05.238092
2021-05-18T14:44:44
2021-05-18T14:44:44
321,064,262
0
0
null
2021-01-20T12:28:47
2020-12-13T12:52:01
Python
UTF-8
Python
false
false
414
py
# Task (translated from Russian): among the numbers from 20 to 240, find
# those divisible by 20 or by 21. The solution must be a single line.
# Hint: use the range() function and a comprehension.

# Fix: the previous version sampled 100 *random* values with random.randint,
# which neither enumerates the requested range nor uses range() as the task
# explicitly demands; the unused `import random` was dropped with it.
result = [n for n in range(20, 241) if n % 20 == 0 or n % 21 == 0]
print(result)
[ "maxk2350@yandex.ru" ]
maxk2350@yandex.ru
6dd0be9d6b07dba30423d4ecfba393cefadaf205
5234bc430c83d616a8214d7f77c2c081543b6b26
/src/Python/1-100/96.UniqueBinarySearchTrees.py
6c59f9a19ecaa76aa1eae5a23d0e85ffde46d062
[ "Apache-2.0" ]
permissive
AveryHuo/PeefyLeetCode
3e749b962cadfdf10d7f7b1ed21c5fafc4342950
92156e4b48ba19e3f02e4286b9f733e9769a1dee
refs/heads/master
2022-04-26T06:01:18.547761
2020-04-25T09:55:46
2020-04-25T09:55:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
349
py
class Solution:
    def numTrees(self, n: int) -> int:
        """Count structurally unique BSTs storing values 1..n (Catalan number).

        dp[i] = number of unique BSTs with i nodes; picking node j as the
        root splits the rest into a left subtree of j-1 nodes and a right
        subtree of i-j nodes, so dp[i] = sum_j dp[j-1] * dp[i-j].

        Fix: the original unconditionally assigned dp[1] = 1, raising
        IndexError for n == 0; the empty tree counts as one shape, and
        starting the loop at i = 1 computes dp[1] the same way.
        """
        dp = [0] * (n + 1)
        dp[0] = 1
        for i in range(1, n + 1):
            for j in range(1, i + 1):
                dp[i] += dp[j - 1] * dp[i - j]
        return dp[n]


if __name__ == "__main__":
    solution = Solution()
    print(solution.numTrees(3))
[ "xpf6677@163.com" ]
xpf6677@163.com
53fe24223fbffd0f694c4f4c0faf15c15b2809c4
fdce456e2f0ea12f854e98583cfda95955b9a36b
/manageusers/apps.py
dca42b37dd05993510c3c0ba37e95721d15238d8
[]
no_license
atifasr/jobportal
e5fdc8058759311e8d4ca2c0291066ad86059fb6
3fe211598daa66f2a76c2b3d4d26d73459ac7457
refs/heads/master
2023-08-05T02:01:00.870360
2021-09-29T11:59:29
2021-09-29T11:59:29
388,807,519
0
0
null
null
null
null
UTF-8
Python
false
false
249
py
from django.apps import AppConfig


class ManageusersConfig(AppConfig):
    """Django application configuration for the `manageusers` app."""

    # Default primary-key field type for models declared in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'manageusers'

    # Scheduler startup kept disabled on purpose — presumably to avoid
    # double-starting under the autoreloader; confirm before re-enabling.
    # def ready(self):
    #     from .schedulers import scheduler
    #     scheduler.start()
[ "atifshafi63@gmail.com" ]
atifshafi63@gmail.com
fea6f964339fb23f6f9a008d7407e6133306cc04
3740de0d6e43ea140fc09ab314e4c492603ba185
/scripts/sources/S_EllipsoidTestWaitingTimesACDres.py
af9ab8366b01c94bbb1df7f27d3b20251e3c79dd
[ "MIT" ]
permissive
s0ap/arpmRes
29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
refs/heads/master
2022-02-16T05:01:22.118959
2019-08-20T16:45:02
2019-08-20T16:45:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,056
py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_EllipsoidTestWaitingTimesACDres [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EllipsoidTestWaitingTimesACDres&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=IIDHFACDdTres). # ## Prepare the environment # + import os import os.path as path import sys sys.path.append(path.abspath('../../functions-legacy')) from numpy import where, diff, linspace from scipy.io import loadmat import matplotlib.pyplot as plt from matplotlib.pyplot import figure plt.style.use('seaborn') from CONFIG import GLOBAL_DB, TEMPORARY_DB from ARPM_utils import save_plot, struct_to_dict, date_mtop from autocorrelation import autocorrelation from TradeQuoteProcessing import TradeQuoteProcessing from InvarianceTestEllipsoid import InvarianceTestEllipsoid # - # ## Upload the database try: db = loadmat(os.path.join(GLOBAL_DB, 'db_US_10yr_Future_quotes_and_trades'), squeeze_me=True) except FileNotFoundError: db = loadmat(os.path.join(TEMPORARY_DB, 'db_US_10yr_Future_quotes_and_trades'), squeeze_me=True) # ## Process the time series, refining the raw data coming from the database # + quotes = struct_to_dict(db['quotes']) trades = struct_to_dict(db['trades']) dates_quotes = quotes.time_names # t = quotes.time # time vector of quotes p_bid = quotes.bid # bid prices p_ask = quotes.ask # ask prices q_bid = quotes.bsiz # bid volumes q_ask = quotes.asiz # ask volumes dates_trades = trades.time_names # t_k = trades.time # time vector of trades p_last = trades.price # last transaction prices delta_q = trades.siz # flow of traded contracts' volumes delta_sgn = trades.aggress # trade sign flow match 
= trades.mtch # match events: - the "1" value indicates the "start of a match event" while zeros indicates the "continuation of a match event" # - the db is ordered such that the start of a match event is in the last column corresponding to that event t, _, _, _, _, _, t_k, _, _, _, _, _ = TradeQuoteProcessing(t, dates_quotes, q_ask, p_ask, q_bid, p_bid, t_k, dates_trades, p_last, delta_q, delta_sgn, match) t = t.flatten() t_k = t_k.flatten() # ## Compute the gaps between subsequent events k_0 = where(t_k >= t[0])[0][0] # index of the first trade within the time window k_1 = where(t_k <= t[-1])[0][-1] # index of the last trade within the time window ms = (date_mtop(t_k[k_1]) - date_mtop(t_k[k_0])).seconds * 1000 + (date_mtop(t_k[k_1]) - date_mtop(t_k[k_0])).microseconds / 1000 t_k = linspace(t_k[k_0],t_k[k_1], int(ms)) # time window's wall-clock-time vector expressed in milliseconds delta_t_k = diff(t_k) # waiting times # - # ## ACD fit (Requires the external package ACD_Models_FEX) # + q = 1 # maximum lag for the duration p = 1 # maximum lag for the volatility stdMethod = 1 tmp_dt_n = [0, delta_t_k] specOut = ACD_Fit(tmp_dt_n.T,'exp', q, p, stdMethod) # fitting # estimated parameters c = specOut.w b = specOut.p a = specOut.q # estimated sigma_n sigma_n = specOut.h.T # residuals ACD_epsi = delta_t_k / sigma_n[1:] # - # ## Compute autocorrelations at different lags lag_ = 10 acf = autocorrelation(ACD_epsi, lag_) # ## Plot the results of the IID test # + lag = 10 # lag to be printed ell_scale = 1.6 # ellipsoid radius scale fit = 2 # exponential fit f = figure(figsize=(12,6)) InvarianceTestEllipsoid(delta_t_k, acf[0,1:], lag_, fit, ell_scale, [], 'Invariance test on the residuals of an ACD fit on arrival times', [-4, 19]); # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
[ "dario.popadic@yahoo.com" ]
dario.popadic@yahoo.com
255997393c11703c927617a467958a7455c0b86b
c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105
/vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/third_party/2and3/Crypto/Hash/SHA256.pyi
0469b7cb2b4e995bbd9b60c74f5c7d1c887412a6
[ "MIT", "Apache-2.0" ]
permissive
ryangniadek/.dotfiles
ddf52cece49c33664b56f01b17d476cf0f1fafb1
be272baf6fb7d7cd4f4db1f6812b710196511ffe
refs/heads/master
2021-01-14T07:43:12.516127
2020-03-22T20:27:22
2020-03-22T20:27:22
242,632,623
0
0
MIT
2020-09-12T17:28:01
2020-02-24T02:50:06
Python
UTF-8
Python
false
false
399
pyi
from typing import Any, Optional from Crypto.Hash.hashalgo import HashAlgo class SHA256Hash(HashAlgo): oid = ... # type: Any digest_size = ... # type: int block_size = ... # type: int def __init__(self, data: Optional[Any] = ...) -> None: ... def new(self, data: Optional[Any] = ...): ... def new(data: Optional[Any] = ...): ... digest_size = ... # type: Any
[ "ryan@gniadek.net" ]
ryan@gniadek.net
47bfc9032bf7353361b1818c44b2797b13363154
04d8f0b5a291ec6c3470f4498dd64ab9c1845f96
/library/third-party/file_formats/pypdf2/pdf_file_merger/info.py
50f3b155bd7f0b74923a56b302593acd731e7e98
[]
no_license
volitilov/Python_learn
8c0f54d89e0ead964320d17eeddeacd5b704b717
f89e52655f83a9f1105689f0302ef5b0ee30a25c
refs/heads/master
2022-01-10T13:39:59.237716
2019-07-17T11:39:10
2019-07-17T11:39:10
70,601,503
3
2
null
null
null
null
UTF-8
Python
false
false
1,223
py
from PyPDF2.PdfFileMerger import * # Инициализирует объект PdfFileMerger. PdfFileMerger объединяет # несколько PDF-файлов в один PDF-файл. Он может конкатенировать, # нарезать, вставить или любую комбинацию из вышеперечисленного # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: PdfFileMerger( strict=True # Определяет, следует ли предупреждать пользователя обо всех # проблемах, а также приводит к тому, что некоторые # исправляемые проблемы являются фатальными. По умолчанию # используется значение True. ) # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: addBookmark(title, pagenum, parent=None) # addMetadata(infos) # addNamedDestination(title, pagenum) # append(fileobj, bookmark=None, pages=None, import_bookmarks=True) # close() # merge(position, fileobj, bookmark=None, pages=None, import_bookmarks=True) # setPageLayout(layout) # setPageMode(mode) # write(fileobj) #
[ "volitilov@gmail.com" ]
volitilov@gmail.com
f6c63ede371e0271643d28518bd43b3d85636c61
0547d1826e99eedb959a3463520d73985a3b844e
/Data Scientist with Python Track Github/22-Statistical Thinking in Python (Part 2)/05-Putting it all together a case study/08-Beak length to depth ratio.py
ee9d31d8f9a75fbdc075f31112ca552fb4842913
[]
no_license
abhaysinh/Data-Camp
18031f8fd4ee199c2eff54a408c52da7bdd7ec0f
782c712975e14e88da4f27505adf4e5f4b457cb1
refs/heads/master
2022-11-27T10:44:11.743038
2020-07-25T16:15:03
2020-07-25T16:15:03
282,444,344
4
1
null
null
null
null
UTF-8
Python
false
false
1,654
py
""" Beak length to depth ratio The linear regressions showed interesting information about the beak geometry. The slope was the same in 1975 and 2012, suggesting that for every millimeter gained in beak length, the birds gained about half a millimeter in depth in both years. However, if we are interested in the shape of the beak, we want to compare the ratio of beak length to beak depth. Let's make that comparison. Remember, the data are stored in bd_1975, bd_2012, bl_1975, and bl_2012. Instructions 100 XP 1 Make arrays of the beak length to depth ratio of each bird for 1975 and for 2012. 2 Compute the mean of the length to depth ratio for 1975 and for 2012. 3 Generate 10,000 bootstrap replicates each for the mean ratio for 1975 and 2012 using your draw_bs_reps() function. 4 Get a 99% bootstrap confidence interval for the length to depth ratio for 1975 and 2012. 5 Print the results. """ # Compute length-to-depth ratios ratio_1975 = bl_1975 / bd_1975 ratio_2012 = bl_2012 / bd_2012 # Compute means mean_ratio_1975 = np.mean(ratio_1975) mean_ratio_2012 = np.mean(ratio_2012) # Generate bootstrap replicates of the means bs_replicates_1975 = draw_bs_reps(ratio_1975, np.mean, size=10000) bs_replicates_2012 = draw_bs_reps(ratio_2012, np.mean, size=10000) # Compute the 99% confidence intervals conf_int_1975 = np.percentile(bs_replicates_1975, [0.5, 99.5]) conf_int_2012 = np.percentile(bs_replicates_2012, [0.5, 99.5]) # Print the results print('1975: mean ratio =', mean_ratio_1975, 'conf int =', conf_int_1975) print('2012: mean ratio =', mean_ratio_2012, 'conf int =', conf_int_2012)
[ "abhaysinh.surve@gmail.com" ]
abhaysinh.surve@gmail.com
e1b06b52231c9bcd3a40ec3a68471706dc7781ed
b471470126befc48d61bf3e17c8231b33e8d3e33
/1117-mid-term-6.py
ff3fe16f7e1178ab27682245ea8b118fe673391a
[]
no_license
Xi-Plus/KUAS-DIIWS-Code
5ccd5ff512b3aad5fcf9ca37c7ca095e796aca9d
60e0e6c8c80847b5270d4d0f45028becabd08230
refs/heads/master
2021-09-03T21:48:31.755611
2018-01-12T08:37:51
2018-01-12T08:37:51
105,234,371
0
0
null
null
null
null
UTF-8
Python
false
false
750
py
from urllib.request import urlopen from bs4 import BeautifulSoup url = "http://www.books.com.tw/activity/gold66_day/?loc=P_021_1_more_001" f = urlopen(url) html = f.read() obj = BeautifulSoup(html, "html.parser") days = [] for i in obj.findAll("div", {"class":"day"}): days.append(i.get_text()) names = [] for i in obj.findAll("div", {"class":"sec_day"}): names.append(i.findAll("a")[1].get_text()) publishs = [] prices = [] prices2 = [] for i in obj.findAll("div", {"class":"sec_day"}): temp = i.findAll("h2") publishs.append(temp[0].get_text()) prices.append(temp[1].get_text()) prices2.append(temp[2].get_text()) for i in range(len(days)): print(days[i]) print(names[i]) print(prices[i]) print(prices[i]) print(prices2[i]) print()
[ "huangxuanyuxiplus@gmail.com" ]
huangxuanyuxiplus@gmail.com
72fd0fbf13da481b25826bed1e553261679655c5
c340835e4444c664fc2b261238e3738cf5bf7196
/combination_sum.py
58093ee3ed445d253ce8b136bea5e6b489626c29
[]
no_license
z-o-e/LeetCode_OJ_Python
49f2a7378eb98f707c97d7757cc19ef19622db42
ad7f5152fe404bdd4e91710d9a719f392bec7a96
refs/heads/master
2021-03-12T22:39:44.898502
2014-10-16T05:30:25
2014-10-16T05:30:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
670
py
class Solution: # @param candidates, a list of integers # @param target, integer # @return a list of lists of integers def combinationSum(self, candidates, target): candidates = sorted(candidates) self.res = [] self.dfs(candidates, [], target) return self.res def dfs(self, candidates, cur, target): if target==0: if sorted(cur) not in self.res: self.res.append(cur) return for c in candidates: if target-c>=0: tmp = cur[:] tmp.append(c) self.dfs(candidates, tmp, target-c)
[ "hyzhang22@gmail.com" ]
hyzhang22@gmail.com
9b329bf9c03eb7fd5e50cebfbbc7e2fca726f262
fbc0d5c081dbfead483a1e1e226a380446bcd56e
/TDT4117 - Informasjonsgjenfinning/exercises/2/subtask2.2.py
88a2dba3adc75dd1a6e69012d2e012ad9648eba4
[]
no_license
blauks/ntnu-2
58eec17a8f6db7424a6cb44d74e029b68256320a
38fa0ddfaa726408f087d1792fd0e00810f9243c
refs/heads/master
2020-08-16T11:01:10.946232
2019-08-08T11:28:48
2019-08-08T11:28:48
215,494,235
0
1
null
2019-10-16T08:15:48
2019-10-16T08:15:48
null
UTF-8
Python
false
false
242
py
#!/usr/bin/python3 d1 = 'an apple a day keeps the doctor away.' d2 = 'the best doctor is the one you run to and can’t find.' d3 = 'one rotten apple spoils the whole barrel.' q1 = 'doctor' q2 = 'apple orange' q3 = 'doctor apple' λ = 0.5
[ "nikasmh@hotmail.com" ]
nikasmh@hotmail.com
96cd5e74289dadeea430e3920b049093288eb0a1
fcb628087b05031f2ffec5d6719714d210a9ebd2
/sukonbu/json_schema_parser.py
53b2420adf760f6b16937f06af94f854c676e54e
[ "MIT" ]
permissive
ousttrue/sukonbu
165b4aa0dcbb416367fa51bd2cfb0724dcaa475f
aca6121c3afa1fe404e6208553070895829df780
refs/heads/master
2023-08-29T17:30:28.099703
2022-03-15T17:21:01
2022-03-15T17:21:01
224,555,772
1
0
null
null
null
null
UTF-8
Python
false
false
6,741
py
from typing import Optional, NamedTuple, List import json import pathlib from .json_schema import JsonSchema class JsonSchemaItem(NamedTuple): key: str item: JsonSchema parent: Optional[JsonSchema] = None class JsonSchemaParser: def __init__(self, dir: Optional[pathlib.Path] = None): self.root: Optional[JsonSchema] = None self.path_map = {} self.dir: Optional[pathlib.Path] = dir def from_dict(self, root: dict) -> 'JsonSchema': ''' replace dict to JsonSchema by depth first ''' def traverse(node: dict, parent: Optional[dict] = None, key: Optional[str] = None) -> JsonSchema: # pass replace leaf to JsonSchema props = node.get('properties') if props: node['properties'] = { key: traverse(prop, node, key) for key, prop in props.items() } items = node.get('items') if items: node['items'] = traverse(items, node) additionalProperties = node.get('additionalProperties') if additionalProperties: node['additionalProperties'] = traverse( additionalProperties, node) if node.get('anyOf') and parent and key: # enum node['title'] = parent['title'].replace( ' ', '') + key[0:1].upper() + key[1:] if 'properties' not in node: node['properties'] = {} if 'dependencies' not in node: node['dependencies'] = [] if 'required' not in node: node['required'] = [] return JsonSchema(**node) return traverse(root) def get_or_read_ref(self, dir: pathlib.Path, filename: str, current: List[str]) -> dict: path = dir / filename if not path.exists(): assert(self.dir) path = self.dir / filename # ref = self.path_map.get(path) # if ref: # return ref text = path.read_text(encoding='utf-8') ref_parsed = json.loads(text) ref = self.preprocess(ref_parsed, path.parent, current) self.path_map[path] = ref ref['path'] = path return ref def preprocess(self, parsed: dict, directory: pathlib.Path, current: List[str]): ''' * `$ref` などを展開して1つの json に連結する * allOf を継承と見なして親 JsonSchema の属性を展開する * anyOf はひとつめの type と見なす(gltf では enum的に使われる) * properties は class として階層化する * items は list として階層化する * additionalProperties は dict 
として階層化する ''' if '$schema' in parsed: del parsed['$schema'] if '$ref' in parsed: # replace # print(path) ref = self.get_or_read_ref(directory, parsed['$ref'], current) for k, v in ref.items(): parsed[k] = v del parsed['$ref'] if 'allOf' in parsed: # inherited ref = self.get_or_read_ref(directory, parsed['allOf'][0]['$ref'], current) for k, v in ref.items(): if k in parsed: if k == 'properties': for pk, pv in ref[k].items(): parsed[k][pk] = pv continue elif k in ['title']: continue parsed[k] = v del parsed['allOf'] if 'anyOf' in parsed: for x in parsed['anyOf']: if 'type' in x: parsed['type'] = x['type'] break for key in ['not']: # skip if key in parsed: del parsed[key] keys = [key for key in parsed.keys()] for key in keys: if key == 'properties': for k, v in parsed[key].items(): self.preprocess(v, directory, current + [k]) elif key == 'items': parsed[key] = self.preprocess(parsed[key], directory, current + ['Item']) # array item elif key == 'additionalProperties': tmp = parsed[key] if tmp is False: # do nothing continue parsed[key] = self.preprocess(tmp, directory, current + ['Value']) # kv value elif key in [ 'path', 'title', 'type', 'description', 'gltf_detailedDescription', 'gltf_webgl', 'gltf_uriType', 'default', # 'enum', # 'additionalProperties', 'minProperties', # 'uniqueItems', 'minItems', 'maxItems', # 'minimum', 'maximum', 'multipleOf', 'exclusiveMinimum', 'pattern', 'format', # 'anyOf', 'oneOf', # 'required', 'dependencies', ]: pass else: raise Exception(f'unknown {key}') if 'title' not in parsed: parsed['title'] = '.'.join(current) if parsed['title'] == 'Extension': # set name to extension if current: parsed['title'] = '.'.join(current[0:-1] + [parsed['title']]) elif parsed['title'] == 'Extras': # set name to extras if current: parsed['title'] = '.'.join(current[0:-1] + [parsed['title']]) return parsed def process(self, entry_point: pathlib.Path): text = entry_point.read_text() parsed = json.loads(text) processed = self.preprocess(parsed, 
entry_point.parent, []) self.root = self.from_dict(processed)
[ "ousttrue@gmail.com" ]
ousttrue@gmail.com
df840439989d5650df6e6a7988fa5b59caa9850d
88994e2e840a70ec702cee09e1a13813aa6f800c
/tests/meta/upload/scout/test_scout_config_builder.py
811298718aac85b422b13148020a410369823810
[]
no_license
Clinical-Genomics/cg
1e9eb0852f742d555a48e8696914ebe177f7d436
d2ec6d25b577dd6938bbf92317aeff1d6b3c5b08
refs/heads/master
2023-09-01T02:04:04.229120
2023-08-31T13:50:31
2023-08-31T13:50:31
82,567,026
19
8
null
2023-09-14T15:24:13
2017-02-20T14:29:43
Python
UTF-8
Python
false
false
7,857
py
"""Tests for the file handlers.""" import logging from housekeeper.store.models import Version from cg.meta.upload.scout.balsamic_config_builder import BalsamicConfigBuilder from cg.meta.upload.scout.hk_tags import CaseTags from cg.meta.upload.scout.mip_config_builder import MipConfigBuilder from cg.meta.upload.scout.rnafusion_config_builder import RnafusionConfigBuilder from cg.store.models import Analysis from tests.mocks.limsmock import MockLimsAPI from tests.mocks.madeline import MockMadelineAPI from tests.mocks.mip_analysis_mock import MockMipAnalysis def test_mip_config_builder( hk_version: Version, mip_dna_analysis: Analysis, lims_api: MockLimsAPI, mip_analysis_api: MockMipAnalysis, madeline_api: MockMadelineAPI, ): """Test MIP config builder class.""" # GIVEN a MIP analysis # WHEN instantiating config_builder = MipConfigBuilder( hk_version_obj=hk_version, analysis_obj=mip_dna_analysis, lims_api=lims_api, mip_analysis_api=mip_analysis_api, madeline_api=madeline_api, ) # THEN assert that the correct case tags was used assert isinstance(config_builder.case_tags, CaseTags) def test_balsamic_config_builder( hk_version: Version, balsamic_analysis_obj: Analysis, lims_api: MockLimsAPI ): """Test Balsamic config builder class.""" # GIVEN a balsamic file handler # WHEN instantiating file_handler = BalsamicConfigBuilder( hk_version_obj=hk_version, analysis_obj=balsamic_analysis_obj, lims_api=lims_api ) # THEN assert that the correct case tags was used assert isinstance(file_handler.case_tags, CaseTags) def test_rnafusion_config_builder( hk_version: Version, rnafusion_analysis_obj: Analysis, lims_api: MockLimsAPI, ): """Test RNAfusion config builder class.""" # GIVEN a rnafusion file handler # WHEN instantiating file_handler = RnafusionConfigBuilder( hk_version_obj=hk_version, analysis_obj=rnafusion_analysis_obj, lims_api=lims_api ) # THEN assert that the correct case tags was used assert isinstance(file_handler.case_tags, CaseTags) def 
test_include_delivery_report_mip(mip_config_builder: MipConfigBuilder): """Test include delivery report.""" # GIVEN a config builder with data # GIVEN a config without a delivery report assert mip_config_builder.load_config.delivery_report is None # WHEN including the delivery report mip_config_builder.include_delivery_report() # THEN assert that the delivery report was added assert mip_config_builder.load_config.delivery_report is not None def test_include_synopsis(mip_config_builder: MipConfigBuilder): """Test include synopsis.""" # GIVEN a config builder with some data # GIVEN a config without synopsis assert mip_config_builder.load_config.synopsis is None # WHEN including the synopsis mip_config_builder.build_load_config() # THEN assert that the synopsis was added assert mip_config_builder.load_config.synopsis def test_include_phenotype_groups(mip_config_builder: MipConfigBuilder): """Test include phenotype groups.""" # GIVEN a config builder with some data # GIVEN a config without a phenotype groups assert mip_config_builder.load_config.phenotype_groups is None # WHEN including the phenotype groups mip_config_builder.include_phenotype_groups() # THEN assert that the phenotype groups were added assert mip_config_builder.load_config.phenotype_groups is not None def test_include_phenotype_terms(mip_config_builder: MipConfigBuilder): """Test include phenotype terms.""" # GIVEN a config builder with some data # GIVEN a config without a phenotype terms assert mip_config_builder.load_config.phenotype_terms is None # WHEN including the phenotype terms mip_config_builder.include_phenotype_terms() # THEN assert that the phenotype terms were added assert mip_config_builder.load_config.phenotype_terms is not None def test_include_alignment_file_individual(mip_config_builder: MipConfigBuilder, sample_id: str): """Test include alignment files.""" # GIVEN a mip config builder with some information # WHEN building the scout load config mip_config_builder.build_load_config() # 
THEN assert that the alignment file was added to sample id file_found = False for sample in mip_config_builder.load_config.samples: if sample.sample_id == sample_id: assert sample.alignment_path is not None file_found = True assert file_found def test_include_mip_case_files(mip_config_builder: MipConfigBuilder): """Test include MIP case files.""" # GIVEN a Housekeeper version bundle with MIP analysis files # GIVEN a case load object # GIVEN a MIP file handler # WHEN including the case level files mip_config_builder.build_load_config() # THEN assert that the mandatory SNV VCF was added assert mip_config_builder.load_config.vcf_snv def test_include_mip_sample_files(mip_config_builder: MipConfigBuilder, sample_id: str): """Test include MIP sample files.""" # GIVEN a Housekeeper version bundle with MIP analysis files # GIVEN a case load object # GIVEN that there are no sample level mt_bam # GIVEN a MIP file handler # WHEN including the case level files mip_config_builder.build_load_config() # THEN assert that the mandatory SNV VCF was added file_found = False for sample in mip_config_builder.load_config.samples: if sample.sample_id == sample_id: assert sample.mt_bam is not None file_found = True assert file_found def test_include_mip_sample_subject_id( mip_config_builder: MipConfigBuilder, sample_id: str, caplog ): """Test include MIP sample subject id.""" # GIVEN subject_id on the sample caplog.set_level(level=logging.DEBUG) # WHEN building the config mip_config_builder.build_load_config() # THEN the subject_id was added to the scout sample subject_id_found = False for sample in mip_config_builder.load_config.samples: if sample.sample_id == sample_id: subject_id_found = True assert sample.subject_id is not None assert subject_id_found def test_include_balsamic_case_files(balsamic_config_builder: BalsamicConfigBuilder): """Test include Balsamic case files.""" # GIVEN a Housekeeper version bundle with balsamic analysis files # GIVEN a case load object # WHEN including 
the case level files balsamic_config_builder.build_load_config() # THEN assert that the mandatory snv vcf was added assert balsamic_config_builder.load_config.vcf_cancer def test_include_balsamic_delivery_report(balsamic_config_builder: BalsamicConfigBuilder): """Test include Balsamic delivery report.""" # GIVEN a Housekeeper version bundle with balsamic analysis files # GIVEN a case load object # WHEN including the case level files balsamic_config_builder.build_load_config() # THEN assert that the delivery_report exists assert balsamic_config_builder.load_config.delivery_report def test_extract_generic_filepath(mip_config_builder: MipConfigBuilder): """Test that parsing of file path.""" # GIVEN files paths ending with file_path1 = "/some/path/gatkcomb_rhocall_vt_af_chromograph_sites_X.png" file_path2 = "/some/path/gatkcomb_rhocall_vt_af_chromograph_sites_12.png" # THEN calling extracting the generic path will remove numeric id and fuffix generic_path = "/some/path/gatkcomb_rhocall_vt_af_chromograph_sites_" # THEN assert mip_config_builder.extract_generic_filepath(file_path1) == generic_path assert mip_config_builder.extract_generic_filepath(file_path2) == generic_path
[ "noreply@github.com" ]
Clinical-Genomics.noreply@github.com
1461437e9ebeb4c81603608e67f1504f0e628c17
ad5b72656f0da99443003984c1e646cb6b3e67ea
/tools/mo/openvino/tools/mo/back/offline_transformations.py
d4615aeb524e518a1b292b6c45fd267a8d1ac306
[ "Apache-2.0" ]
permissive
novakale/openvino
9dfc89f2bc7ee0c9b4d899b4086d262f9205c4ae
544c1acd2be086c35e9f84a7b4359439515a0892
refs/heads/master
2022-12-31T08:04:48.124183
2022-12-16T09:05:34
2022-12-16T09:05:34
569,671,261
0
0
null
null
null
null
UTF-8
Python
false
false
3,184
py
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import argparse from typing import List from openvino.tools.mo.front.extractor import create_params_with_custom_types from openvino.tools.mo.utils.cli_parser import parse_transform from openvino.tools.mo.utils.error import Error from openvino.runtime import Model def get_available_transformations(): try: from openvino._offline_transformations import apply_low_latency_transformation # pylint: disable=import-error,no-name-in-module from openvino._offline_transformations import apply_make_stateful_transformation # pylint: disable=import-error,no-name-in-module from openvino._offline_transformations import apply_pruning_transformation # pylint: disable=import-error,no-name-in-module return { 'MakeStateful': apply_make_stateful_transformation, 'LowLatency2': apply_low_latency_transformation, 'Pruning': apply_pruning_transformation, } except Exception as e: return {} # net should be openvino.inference_engine.IENetwork type, but IE Engine is still optional dependency def apply_user_transformations(func: object, transforms: list): available_transformations = get_available_transformations() for name, args in transforms: if name not in available_transformations.keys(): raise Error("Transformation {} is not available.".format(name)) available_transformations[name](func, **args) def apply_moc_transformations(func: object): from openvino._offline_transformations import apply_moc_transformations # pylint: disable=import-error,no-name-in-module apply_moc_transformations(func, cf=False, smart_reshape=True) def apply_moc_legacy_transformations(func: object, params_with_custom_types: List[str]): from openvino._offline_transformations import apply_moc_legacy_transformations # pylint: disable=import-error,no-name-in-module apply_moc_legacy_transformations(func, params_with_custom_types) def compress_model(func: object): from openvino._offline_transformations import compress_model_transformation # pylint: 
disable=import-error,no-name-in-module compress_model_transformation(func) def apply_fused_names_cleanup(func: object): from openvino._offline_transformations import apply_fused_names_cleanup # pylint: disable=import-error,no-name-in-module apply_fused_names_cleanup(func) def apply_offline_transformations(func: Model, argv: argparse.Namespace): from openvino.tools.mo.back.preprocessing import apply_preprocessing # pylint: disable=no-name-in-module,import-error # Apply preprocessing (mean/scale/reverse_channels/convert_layout/etc) apply_preprocessing(ov_function=func, argv=argv) apply_moc_transformations(func) params_with_custom_types = create_params_with_custom_types(argv.packed_user_shapes) apply_moc_legacy_transformations(func, params_with_custom_types) apply_user_transformations(func, parse_transform(argv.transform)) if "compress_fp16" in argv and argv.compress_fp16: compress_model(func) apply_fused_names_cleanup(func) return func
[ "noreply@github.com" ]
novakale.noreply@github.com
c2fb539bc071787bca42c1d5bdc7550f71769d0f
7def22f9e61a125a8a02d85018fdc3fa34f4d060
/superlists/urls.py
7ba2fb600869500c7397a9c0af482c74b3afb792
[]
no_license
uglyboxer/superlists
188e7c659f97e77ebddeba3b07dc1b5bc03c928a
e0cf2e828991f04c4050170c13f9c4b6cc2be0e8
refs/heads/master
2021-01-10T02:24:48.031207
2015-11-25T00:24:23
2015-11-25T00:24:23
46,455,052
0
0
null
null
null
null
UTF-8
Python
false
false
975
py
"""superlists URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url from django.contrib import admin from lists import views urlpatterns = [ url(r'^$', views.home_page, name='home'), url(r'^lists/the-only-list-in-the-world/$', views.view_list, name='view_list'), url(r'^lists/new$', views.new_list, name='new_list'), # url(r'^admin/', include(admin.site.urls)), ]
[ "uglyboxer@gmail.com" ]
uglyboxer@gmail.com
8384fef2b16fdeea0adf1819f3c0122330a5d974
e828c22a75dfaa7f3643352d4f1b08559a12edca
/ScrapyProject/TenxunSpider/TenxunSpider/pipelines.py
8c9505521ec8a1ee4c05f3cdfcfa153491efb9f5
[]
no_license
csgvsjay1000/spider
9c545fac9c63f89e7503a7c045ce2b83df044e49
10f584440d23b0b17a3486cde6cbc39c9d13692e
refs/heads/master
2022-02-16T20:29:53.384245
2019-08-12T03:06:37
2019-08-12T03:06:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
551
py
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html import json,codecs class TenxunspiderPipeline(object): def __init__(self): self.file=codecs.open('duty_file.json','w',encoding='utf-8') def process_item(self, item, spider): line=json.dumps(dict(item),ensure_ascii=False)+'\n' self.file.write(line) return item def close_file(self,spider): self.file.close()
[ "593956670@qq.com" ]
593956670@qq.com
4093735ca3af4a42d6bb8b85700aa046e5c1677c
52243c4a05a296e7c042663b5942faa47eb66aee
/common_nlp/classifier_legal_phrases_regex.py
e2b84d6ec884fb5abb3de73d4361f5b499d1dbe2
[ "MIT" ]
permissive
joaoppadua/Pesquisas
fbe0311b59340c041732d6d1f7f4862fa6c53198
808d8b0ef9e432e05a4f284ce18778ed8b3acd96
refs/heads/master
2023-07-16T02:50:30.846205
2021-09-03T13:34:54
2021-09-03T13:34:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,048
py
import re from regex_classifier_legal_phrases import palavras_interesse def break_sentences(text, nlp): # return re.split(r'\w\.\s',text) text = re.sub(r"\s+", " ", text) text = re.sub(r"art\.", "art ", text) text = re.sub(r"fls?\.", "fls ", text) text = re.sub(r"inc\.", "inc ", text) doc = nlp(text) return [sent.text for sent in doc.sents] def dicionario_frases_tipos(): dic_tipos_frases = {} for frase, tipo in palavras_interesse.items(): if tipo not in dic_tipos_frases: dic_tipos_frases[tipo] = [] dic_tipos_frases[tipo].append(r"{}".format(frase)) return dic_tipos_frases def classifier_legal_phrases_regex(phrase, dic_tipos_frases): for conj_exp in dic_tipos_frases["decisao"]: for exp in conj_exp: if re.search(exp, phrase, re.I): return "decisao" for tipo, conj_exp in dic_tipos_frases.items(): for exp in conj_exp: if re.search(exp, phrase, re.I): return tipo return "argumento"
[ "danilopcarlotti@gmail.com" ]
danilopcarlotti@gmail.com
0624acb274bdaacc13d24078b701c3efd0584ce4
43ab8c000781c073e6723b3e93013e5f509b84ea
/attractors/3D/Pickover.py
b1953fcdf4020eec7d0559a9c3155b2e2c0695d3
[ "MIT" ]
permissive
tisnik/fractals
e6e25dbbf5675be5a78f15a2504b25f7de504cf6
2e852489cb473394e4dd9103d12d717fed53a51d
refs/heads/master
2023-04-29T03:17:28.897352
2023-04-17T14:01:59
2023-04-17T14:01:59
202,321,272
3
1
null
null
null
null
UTF-8
Python
false
false
3,098
py
"""Výpočet a vykreslení Pickoverova podivného atraktoru v 3D.""" # MIT License # # Copyright (c) 2020 Pavel Tišnovský # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# Pickover 3D attractor # In[1]: # import všech potřebných knihoven - Numpy a Matplotlibu from mpl_toolkits.mplot3d import axes3d import matplotlib.pyplot as plt import numpy as np from math import sin, cos # In[2]: def pickover(x, y, z, a=2.24, b=0.43, c=-0.65, d=-2.43, e=1.0): """Výpočet dalšího bodu Pickoverova atraktoru.""" x_dot = sin(a * y) - z * cos(b * x) y_dot = z * sin(c * x) - cos(d * y) z_dot = e * sin(x) return x_dot, y_dot, z_dot # celkový počet vypočtených bodů na Lorenzově atraktoru n = 100000 # prozatím prázdné pole připravené pro výpočet x = np.zeros((n,)) y = np.zeros((n,)) z = np.zeros((n,)) # počáteční hodnoty x[0], y[0], z[0] = (0.0, 0.0, 0.0) # vlastní výpočet atraktoru for i in range(n - 1): x_dot, y_dot, z_dot = pickover(x[i], y[i], z[i], 2.24, 0.43, -0.65, -2.43, 0.8) x[i + 1] = x_dot y[i + 1] = y_dot z[i + 1] = z_dot fig = plt.figure() ax = fig.gca(projection="3d") # vykreslení grafu ax.plot(x, y, z, "o", markersize=0.1) # zobrazení grafu plt.tight_layout() # plt.show() ch_3d = np.stack((x, y, z)) lim_xyz = [(np.min(ch_3d[ii]), np.max(ch_3d[ii])) for ii in range(3)] fig2 = plt.figure("3D Coordinates") plt.subplot(2, 2, 1) plt.plot(y, x, "o", linewidth=0.75, markersize=0.1) plt.grid() plt.xlabel("X") plt.ylabel("Y") plt.xlim(lim_xyz[1]) plt.ylim(lim_xyz[0]) plt.subplot(2, 2, 2) plt.plot(y, z, "o", linewidth=0.75, markersize=0.1) plt.grid() plt.xlabel("Z") plt.ylabel("Y") plt.xlim(lim_xyz[1]) plt.ylim(lim_xyz[2]) plt.subplot(2, 2, 3) plt.plot(z, x, "o", linewidth=0.75, markersize=0.1) plt.grid() plt.xlabel("X") plt.ylabel("Z") plt.xlim(lim_xyz[2]) plt.ylim(lim_xyz[0]) ax = fig2.add_subplot(2, 2, 4, projection="3d") ax.plot(x, y, z, "o", linewidth=0.7, markersize=0.1) ax.set_xlabel("X") ax.set_ylabel("Y") ax.set_zlabel("Z") plt.tight_layout() plt.tight_layout() plt.show()
[ "ptisnovs@redhat.com" ]
ptisnovs@redhat.com
c69b864f21b100901da859214899a8f7346b41a7
1796043fc26c958b8fc45d9c058e382473c4f3af
/Fabio 01 Parte 02/f1_p2_q2_metro_km.py
9fa23256bbcf9cb486f2779b637e721651f19242
[]
no_license
Lucakurotaki/ifpi-ads-algoritmos2020
a69adec27dbb10aceab1bc7038a0b56a760f99d1
34d5fedd5825a85404cf9340e42be618981679c1
refs/heads/master
2022-03-22T04:44:14.211359
2022-02-19T18:48:36
2022-02-19T18:48:36
246,585,463
0
0
null
null
null
null
UTF-8
Python
false
false
175
py
# Read the measurement in metres from the user.
meters = int(input("Digite a medida em metro: "))

# Convert metres to kilometres (1 km = 1000 m).
kilometers = meters / 1000

# Report the conversion result.
print("O equivalente de {} m em kilômetros é: {} km.".format(meters, kilometers))
[ "noreply@github.com" ]
Lucakurotaki.noreply@github.com
48d254577be2dfe57ef8a4d19ba7c41709aced4e
007ae03cfe5abf41a0ad864eade451141c267cca
/auto-docs/executables/python/legend_style.py
6f09c9c36aef49e5259e1ae46fe2bd16241aee10
[]
no_license
VukDukic/documentation
ca96eb1994eeb532fe60c542960b017354bcede1
8e5aefdc38788956cfe31d8fe8b4b77cdf790e57
refs/heads/master
2021-01-18T09:02:27.034396
2015-01-20T23:46:58
2015-01-20T23:46:58
30,007,728
2
0
null
null
null
null
UTF-8
Python
false
false
825
py
# Learn about API authentication here: {{BASE_URL}}/python/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api

import plotly.plotly as py
from plotly.graph_objs import *

py.sign_in('TestBot', 'r1neazxo9w')

# Two scatter traces over the same x positions.
xs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
trace1 = Scatter(x=xs, y=[0, 3, 6, 4, 5, 2, 3, 5, 4])
trace2 = Scatter(x=xs, y=[0, 4, 7, 8, 3, 6, 3, 3, 4])
data = Data([trace1, trace2])

# Legend styling: anchored to the top-left corner, grey box with a
# white border, traces listed in their natural order.
legend_font = Font(
    family='sans-serif',
    size=12,
    color='#000'
)
legend_style = Legend(
    x=0,
    y=1,
    traceorder='normal',
    font=legend_font,
    bgcolor='#E2E2E2',
    bordercolor='#FFFFFF',
    borderwidth=2
)
layout = Layout(legend=legend_style)

fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='legend-style', auto_open=False)
[ "andseier@gmail.com" ]
andseier@gmail.com
7041bb1082d5baec910b892c4c325173285c5c89
572c828b5ef8c17f48cceada08f7a373c2d31e91
/DES_1_2020/quizG.py
87ebfa5c0eceb5a8ccb76bec213b13d12a69e9da
[ "MIT" ]
permissive
refeed/PAlgoritmaTRPLA
4262387011a4942e137674f92c5606eacfec4c1e
e0c79c1d57bee0869e2344651718e8cf053c035f
refs/heads/master
2023-02-03T19:19:43.210447
2020-12-17T03:46:13
2020-12-17T03:46:13
297,596,722
0
0
null
null
null
null
UTF-8
Python
false
false
2,430
py
'''
PROBLEM (translated): N marching-drill participants line up sorted by
height, shortest first, and are numbered 1..N. Adi is 165 cm tall and
all heights are distinct. Read N and the N heights, then print Adi's
1-based position number.

Sample: heights 170 158 165 168 -> output 2.
'''
import sys


def merge_sort(list_awal):
    """Sort list_awal in place, ascending, using recursive merge sort.

    Fixed: the old guard ``== 1`` made merge_sort([]) recurse forever;
    lists of length 0 or 1 are already sorted.
    """
    list_awal_length = len(list_awal)
    if list_awal_length <= 1:
        return

    # Split into halves and sort each half recursively.
    tengah = list_awal_length // 2
    list_kiri = list_awal[:tengah]
    list_kanan = list_awal[tengah:]

    merge_sort(list_kiri)
    merge_sort(list_kanan)

    # Merge the two sorted halves back into list_awal.
    i = 0  # iterator for the left half
    j = 0  # iterator for the right half
    k = 0  # iterator for the original list
    while i < len(list_kiri) and j < len(list_kanan):
        if list_kiri[i] < list_kanan[j]:
            list_awal[k] = list_kiri[i]
            i += 1
        else:
            list_awal[k] = list_kanan[j]
            j += 1
        k += 1

    # Copy whichever half still has elements left.
    while i < len(list_kiri):
        list_awal[k] = list_kiri[i]
        i += 1
        k += 1

    while j < len(list_kanan):
        list_awal[k] = list_kanan[j]
        j += 1
        k += 1


def binary_search(list_awal, wanted_value, lo, hi):
    """Return the index of wanted_value in sorted list_awal[lo..hi]
    (both bounds inclusive), or -1 when it is absent.

    Bug fix: the previous version returned -1 as soon as the window
    shrank to two elements, before ever testing the element at ``hi``,
    so e.g. searching 165 in [160, 165] reported "not found". The window
    is now narrowed with mid_index +/- 1 and the only termination
    condition is an empty window (lo > hi).
    """
    if lo > hi:  # empty window: value not present
        return -1

    mid_index = (lo + hi) // 2
    mid_value = list_awal[mid_index]

    if mid_value == wanted_value:
        return mid_index
    elif wanted_value > mid_value:
        return binary_search(list_awal, wanted_value, mid_index + 1, hi)
    else:
        return binary_search(list_awal, wanted_value, lo, mid_index - 1)


if __name__ == "__main__":
    input_list = []
    list_length = int(input())
    for _ in range(list_length):
        input_list.append(int(input()))

    merge_sort(input_list)
    # Positions are 1-based, hence the +1.
    sys.stdout.write(str(binary_search(input_list, 165, 0, list_length - 1) + 1))
[ "rafidteam@gmail.com" ]
rafidteam@gmail.com
e882be5c62481dfe01e05dc2076dc494788b242a
577ab02be20b264023c86af0b1f7598611b1d3bc
/mysite/urls.py
98af82e60031a42f6647e718f392a3be3cbfc95f
[]
no_license
yoongyo/festigo
323316d79796e4fc5a6ad42f26c0c8f181100e1e
73c3f6c619acb70d8031efb62a90fb8d60acbc66
refs/heads/master
2020-05-18T09:49:35.673560
2019-04-30T21:56:49
2019-04-30T21:56:49
184,336,479
0
0
null
null
null
null
UTF-8
Python
false
false
1,252
py
# URL configuration for the mysite project: routes the root page, the
# admin, summernote, the accounts app and the festival app, and serves
# static/media files directly through Django (development-style serving).
from django.contrib import admin
from django.urls import re_path, include, path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns

urlpatterns = [
    re_path(r'^$', views.main, name='main'),
    re_path(r'^admin/', admin.site.urls),
    path('summernote/', include('django_summernote.urls')),
    path('accounts/', include(('accounts.urls', 'accounts'), namespace='accounts')),
    re_path(r'^festival/', include(('festival.urls', 'festival'), namespace='festival')),
]

# Let Django itself serve collected static files.
urlpatterns += staticfiles_urlpatterns()

if settings.DEBUG:
    # Serve user-uploaded media while DEBUG is on.
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# NOTE(review): this import rebinds the name ``static`` - previously the
# static() URL helper imported above - to the django.views.static module.
# The helper has already been used above, so behaviour is unchanged, but
# the shadowing is fragile; confirm before reordering these lines.
from django.conf import settings
from django.views import static

# (URL prefix, filesystem root) pairs to expose via static.serve.
static_list = [
    (settings.STATIC_URL, settings.STATIC_ROOT),
    (settings.MEDIA_URL, settings.MEDIA_ROOT),
]

for (prefix_url, root) in static_list:
    if '://' not in prefix_url:  # unless the files are served from an external server
        prefix_url = prefix_url.lstrip('/')
        url_pattern = r'^' + prefix_url + r'(?P<path>.+)'
        pattern = re_path(url_pattern, static.serve, kwargs={'document_root': root})
        urlpatterns.append(pattern)
[ "jyg0172@naver.com" ]
jyg0172@naver.com
5dbfdb24f29942128a5169a17742ce17ac9fd1e3
89de54a5ee1628bceb08d804407e4d43434fa2e0
/backend/home/migrations/0002_load_initial_data.py
83735bcf6368729c0f275f5250287e0dd41ff7e4
[]
no_license
crowdbotics-apps/tkabs-podcast-20287
430f4896c2b5ad2ec2c65ad145151569304469d6
a3980e01490f67f3bcd2af18e970ba949780c3bb
refs/heads/master
2022-12-13T02:10:38.199166
2020-09-15T15:25:22
2020-09-15T15:25:22
295,769,623
0
0
null
null
null
null
UTF-8
Python
false
false
1,312
py
from django.db import migrations


def create_customtext(apps, schema_editor):
    # Seed the CustomText row whose title is shown on the generated home page.
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "TKABS - Podcast"
    CustomText.objects.create(title=customtext_title)


def create_homepage(apps, schema_editor):
    # Seed the HomePage body rendered by the Crowdbotics scaffold.
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
    <h1 class="display-4 text-center">TKABS - Podcast</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)


def create_site(apps, schema_editor):
    # Point django.contrib.sites (site id 1) at the deployed custom domain.
    Site = apps.get_model("sites", "Site")
    custom_domain = "tkabs-podcast-20287.botics.co"
    site_params = {
        "name": "TKABS - Podcast",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)


class Migration(migrations.Migration):
    # Data migration: runs after the home app's initial schema and the
    # sites framework migration it updates.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
[ "team@crowdbotics.com" ]
team@crowdbotics.com
b366a560cc962b8a35af64cdeb311111005dbad4
4dade4f29881e99d8602144744e09ed870bd1034
/Python/path_algorithms/greedy_best_first.py
d4296e908f40cc690cd6ffa05663905c49b40e48
[]
no_license
alexbaryzhikov/codebase-archive
9795347c19a82c098983c6d0fe4959c3162ca868
c78c189002a26296a552f30078578cc0cf72e426
refs/heads/master
2023-02-19T21:54:21.310865
2021-01-11T15:47:50
2021-01-11T15:47:50
106,846,461
1
0
null
null
null
null
UTF-8
Python
false
false
5,406
py
from tkinter import *
import numpy as np
from queue import PriorityQueue

# Shared registry linking the grid, the pathfinder and the canvas.
G = {}

MOVE_COST = 10        # cost of an axis-aligned step
MOVE_COST_DIAG = 14   # cost of a diagonal step (~10 * sqrt(2))


class MyCanvas(Canvas):
    """Tile-grid canvas; mouse buttons place the start/goal and toggle walls."""

    def __init__(self, master, shape):
        self.cwidth = 50  # pixel size of one tile
        self.shape = shape
        Canvas.__init__(self, master, width=shape[0]*self.cwidth,
                        height=shape[1]*self.cwidth)
        self.pack()
        # left click: start, middle click: toggle wall, right click: goal
        self.bind("<Button-1>", self.on_mclick)
        self.bind("<Button-2>", self.on_mclick)
        self.bind("<Button-3>", self.on_mclick)
        self.tiles = {}
        self.labels = {}
        for y in range(0, self.shape[1]):
            for x in range(0, self.shape[0]):
                x_, y_ = x*self.cwidth, y*self.cwidth
                tile = self.create_rectangle(x_+1, y_+1, x_+self.cwidth,
                                             y_+self.cwidth,
                                             fill='white', outline='')
                self.tiles[(x, y)] = tile
                label = self.create_text((x_+self.cwidth//2, y_+self.cwidth//2),
                                         fill='black', text='')
                self.labels[(x, y)] = label
        for node in G['grid'].walls:
            self.itemconfig(self.tiles[node], fill='gray')

    def on_mclick(self, event):
        # Any click restarts the search from scratch.
        start, goal = G['pathf'].start, G['pathf'].goal
        G['pathf'].reset()
        self.reset()
        x, y = event.x//self.cwidth, event.y//self.cwidth
        if event.num == 1:
            G['pathf'].set_start((x, y))
            if goal:
                G['pathf'].set_goal(goal)
                update()  # only search once both endpoints exist
        elif event.num == 3:
            G['pathf'].set_goal((x, y))
            if start:
                G['pathf'].set_start(start)
                update()
        elif event.num == 2:
            # Toggle a wall under the cursor.
            if (x, y) in G['grid'].walls:
                G['grid'].walls.remove((x, y))
            else:
                G['grid'].walls.append((x, y))
            self.reset()

    def reset(self):
        """Repaint every tile white, clear labels, then redraw the walls."""
        for y in range(0, self.shape[1]):
            for x in range(0, self.shape[0]):
                self.itemconfig(self.tiles[(x, y)], fill='white')
                self.itemconfig(self.labels[(x, y)], text='')
        for node in G['grid'].walls:
            self.itemconfig(self.tiles[node], fill='gray')


class Grid:
    """Rectangular grid with a wall list and an 8-connected neighbourhood."""

    def __init__(self, x, y):
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        self.nodes = np.zeros((x, y), dtype=int)
        self.walls = []
        # demo obstacle: a horizontal wall segment on row 4
        for i in range(10):
            self.walls.append((i, 4))

    def neighbors(self, node):
        """Return the in-bounds, non-wall neighbours of node (8 directions)."""
        res = [(node[0]+x, node[1]+y)
               for x in range(-1, 2) for y in range(-1, 2)
               if (node[0]+x >= 0) and (node[0]+x < self.nodes.shape[0])
               and (node[1]+y >= 0) and (node[1]+y < self.nodes.shape[1])
               and (x != 0 or y != 0)]
        res = [node for node in res if node not in self.walls]
        return res


class Pathfinder:
    """Greedy best-first search, expanded one node per update() tick."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.start = None
        self.goal = None
        self.frontier = PriorityQueue()  # (heuristic, node) pairs
        self.came_from = {}              # node -> predecessor in the search tree
        self.done = False

    def set_start(self, node):
        self.start = node
        self.frontier.put((0, node))
        self.came_from[node] = None
        G['c'].itemconfig(G['c'].tiles[node], fill='#ef7373')

    def set_goal(self, node):
        self.goal = node
        G['c'].itemconfig(G['c'].tiles[node], fill='#785bef')

    def expand(self):
        """Pop the best frontier node and push its unseen neighbours."""
        if self.frontier.empty():
            return  # there is no path
        current = self.frontier.get()[1]
        if current == self.goal:
            self.get_path()
            return
        for next_node in G['grid'].neighbors(current):
            if next_node not in self.came_from:
                # greedy best-first: priority is the heuristic alone
                priority = manhattan_dist(next_node, self.goal)
                self.frontier.put((priority, next_node))
                self.came_from[next_node] = current
                text = '{}'.format(priority)
                G['c'].itemconfig(G['c'].labels[next_node], text=text)
        # coloring pass: frontier in blue, visited in green
        for x in range(G['grid'].nodes.shape[0]):
            for y in range(G['grid'].nodes.shape[1]):
                if (x, y) == self.start or (x, y) == self.goal:
                    pass
                elif (x, y) in [item[1] for item in self.frontier.queue]:
                    G['c'].itemconfig(G['c'].tiles[(x, y)], fill='#62aac9')
                elif (x, y) in self.came_from.keys():
                    G['c'].itemconfig(G['c'].tiles[(x, y)], fill='#bee2c8')

    def get_path(self):
        """Walk came_from back from the goal and highlight the path."""
        current = self.goal
        path = []
        while current != self.start:
            path.append(current)
            current = self.came_from[current]
        path.reverse()
        # coloring pass (the goal tile keeps its own colour)
        for node in path[:-1]:
            G['c'].itemconfig(G['c'].tiles[node], fill='#82ef5b')
        self.done = True


def manhattan_dist(p1, p2):
    # NOTE: despite the name this is the diagonal ("octile") distance -
    # diagonal steps cost MOVE_COST_DIAG, remaining straight steps cost
    # MOVE_COST - which matches the 8-connected movement above. The name
    # is kept so external callers keep working.
    dx, dy = abs(p1[0]-p2[0]), abs(p1[1]-p2[1])
    if dx > dy:
        dx, dy = dy, dx
    return dx*MOVE_COST_DIAG+(dy-dx)*MOVE_COST


def update():
    """Animation tick: expand one node and reschedule until done."""
    if not G['pathf'].done:
        G['pathf'].expand()
        root.after(20, update)


def main():
    global root
    root = Tk()
    root.title('Greedy best first search')
    root.resizable(0, 0)
    G['grid'] = Grid(15, 15)
    G['pathf'] = Pathfinder()
    G['c'] = MyCanvas(root, G['grid'].nodes.shape)
    mainloop()


if __name__ == '__main__':
    main()
[ "aleksiarts@gmail.com" ]
aleksiarts@gmail.com
046e48003c5bea511c67085711ae1a3269a8078e
c6fa53212eb03017f9e72fad36dbf705b27cc797
/SimG4CMS/Calo/test/python/runPhase0_cfg.py
aa9513f0a903b7fd6a2c67f3f7d9056b01e5ae00
[]
no_license
gem-sw/cmssw
a31fc4ef2233b2157e1e7cbe9a0d9e6c2795b608
5893ef29c12b2718b3c1385e821170f91afb5446
refs/heads/CMSSW_6_2_X_SLHC
2022-04-29T04:43:51.786496
2015-12-16T16:09:31
2015-12-16T16:09:31
12,892,177
2
4
null
2018-11-22T13:40:31
2013-09-17T10:10:26
C++
UTF-8
Python
false
false
3,839
py
# cmsRun configuration: single-particle-gun Geant4 simulation with the
# Phase-0 HCAL test geometry and the QGSP_FTFP_BERT_EML physics list.
import FWCore.ParameterSet.Config as cms

process = cms.Process("PROD")
# Geometry, vertex smearing, magnetic field, event content and the
# Geant4 simulation module.
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load("Geometry.HcalCommonData.testPhase0GeometryXML_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.HcalCommonData.hcalSimNumberingInitialization_cfi")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")

# Verbose DEBUG logging to stdout for the calorimeter/Geant4 message
# categories; limit -1 means unlimited messages, 0 suppresses a category.
process.MessageLogger = cms.Service("MessageLogger",
    destinations = cms.untracked.vstring('cout'),
    categories = cms.untracked.vstring('CaloSim', 'G4cout', 'G4cerr',
                                       'HCalGeom', 'HcalSim', 'HFShower',
                                       'SimTrackManager', 'SimG4CoreGeometry'),
    debugModules = cms.untracked.vstring('*'),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('DEBUG'),
        INFO = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
        DEBUG = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
        G4cerr = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
        G4cout = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
        SimTrackManager = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
        SimG4CoreGeometry = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
        HCalGeom = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
        CaloSim = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
        HFShower = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
        HcalSim = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
    )
)

# Fixed random seeds so runs are reproducible.
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789

# Only two events - this is a debugging/validation configuration.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(2)
)

process.source = cms.Source("EmptySource",
    firstRun = cms.untracked.uint32(1),
    firstEvent = cms.untracked.uint32(1)
)

# Particle gun: single pi+ (PDG 211) at fixed pT = 100 GeV,
# flat in eta (-3, 3) and in the full phi range.
process.generator = cms.EDProducer("FlatRandomPtGunProducer",
    PGunParameters = cms.PSet(
        PartID = cms.vint32(211),
        MinEta = cms.double(-3.0),
        MaxEta = cms.double(3.0),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinPt = cms.double(100.),
        MaxPt = cms.double(100.)
    ),
    Verbosity = cms.untracked.int32(0),
    AddAntiParticle = cms.bool(False)
)

# Full simulated event content written to a ROOT file.
process.o1 = cms.OutputModule("PoolOutputModule",
    process.FEVTSIMEventContent,
    fileName = cms.untracked.string('simevent_QGSP_FTFP_BERT_EML.root')
)

# Timing / memory bookkeeping services.
process.Timing = cms.Service("Timing")

process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
    oncePerEventMode = cms.untracked.bool(True),
    showMallocInfo = cms.untracked.bool(True),
    dump = cms.untracked.bool(True),
    ignoreTotal = cms.untracked.int32(1)
)

process.Tracer = cms.Service("Tracer")

# Track time limits (empty name/time lists: only the global cut applies).
process.common_maximum_timex = cms.PSet(
    MaxTrackTime = cms.double(1000.0),
    MaxTimeNames = cms.vstring(),
    MaxTrackTimes = cms.vdouble()
)

# Processing path: generate -> smear vertex -> Geant4 simulation.
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.g4SimHits)
process.outpath = cms.EndPath(process.o1)

# Select the physics list used by the Geant4 step.
process.g4SimHits.Physics.type = 'SimG4Core/Physics/QGSP_FTFP_BERT_EML'
process.g4SimHits.Physics.Verbosity = 0
[ "sunanda.banerjee@cern.ch" ]
sunanda.banerjee@cern.ch
08a025d81f80adffff746115fa919a21066a3bdd
a0fb29f99a852089193e4cc9a11e7263dc3f8b5f
/mayan/apps/documents/serializers/document_file_serializers.py
e8d6fe80d51c51303bbc2cb48ba61d4ae2e18e07
[ "Apache-2.0" ]
permissive
ikang9712/Mayan-EDMS
0e22a944d63657cea59c78023b604a01a622b52a
d6e57e27a89805329fe0c5582caa8e17882d94e6
refs/heads/master
2023-07-28T19:41:55.269513
2021-09-07T14:16:14
2021-09-07T14:16:14
402,884,683
1
0
NOASSERTION
2021-09-03T20:00:09
2021-09-03T20:00:09
null
UTF-8
Python
false
false
4,403
py
from django.utils.translation import ugettext_lazy as _

from rest_framework import serializers

from mayan.apps.rest_api.relations import MultiKwargHyperlinkedIdentityField
from mayan.apps.rest_api.serializer_mixins import CreateOnlyFieldSerializerMixin

from ..literals import DOCUMENT_FILE_ACTION_PAGE_CHOICES
from ..models.document_file_models import DocumentFile
from ..models.document_file_page_models import DocumentFilePage


class DocumentFileSerializer(
    CreateOnlyFieldSerializerMixin, serializers.HyperlinkedModelSerializer
):
    """REST serializer for DocumentFile.

    The ``*_url`` fields are hyperlinks assembled from the parent document
    id plus the file id; ``action`` and ``file_new`` are accepted only at
    creation time (see ``Meta.create_only_fields``).
    """

    # How the new file's pages are combined with the document's existing
    # pages on upload.
    action = serializers.ChoiceField(
        choices=DOCUMENT_FILE_ACTION_PAGE_CHOICES
    )
    # Link back to the parent document.
    document_url = serializers.HyperlinkedIdentityField(
        lookup_url_kwarg='document_id', view_name='rest_api:document-detail'
    )
    # Download endpoint needs both the document id and the file id.
    download_url = MultiKwargHyperlinkedIdentityField(
        view_kwargs=(
            {
                'lookup_field': 'document_id',
                'lookup_url_kwarg': 'document_id',
            },
            {
                'lookup_field': 'pk',
                'lookup_url_kwarg': 'document_file_id',
            },
        ),
        view_name='rest_api:documentfile-download'
    )
    # Upload payload; write-only in practice via create_only_fields.
    file_new = serializers.FileField(
        help_text=_('Binary content for the new file.'), use_url=False
    )
    page_list_url = MultiKwargHyperlinkedIdentityField(
        view_kwargs=(
            {
                'lookup_field': 'document_id',
                'lookup_url_kwarg': 'document_id',
            },
            {
                'lookup_field': 'pk',
                'lookup_url_kwarg': 'document_file_id',
            },
        ),
        view_name='rest_api:documentfilepage-list'
    )
    # Computed by get_size() below.
    size = serializers.SerializerMethodField()
    url = MultiKwargHyperlinkedIdentityField(
        view_kwargs=(
            {
                'lookup_field': 'document_id',
                'lookup_url_kwarg': 'document_id',
            },
            {
                'lookup_field': 'pk',
                'lookup_url_kwarg': 'document_file_id',
            },
        ),
        view_name='rest_api:documentfile-detail'
    )

    class Meta:
        create_only_fields = ('action', 'file_new',)
        extra_kwargs = {
            'file': {'use_url': False},
        }
        fields = (
            'action', 'checksum', 'comment', 'document_url', 'download_url',
            'encoding', 'file', 'filename', 'file_new', 'id', 'mimetype',
            'page_list_url', 'size', 'timestamp', 'url'
        )
        model = DocumentFile
        read_only_fields = ('document', 'file', 'size')

    def get_size(self, instance):
        # Expose the stored file size for the read-only ``size`` field.
        return instance.size


class DocumentFilePageSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer for DocumentFilePage.

    Every hyperlink needs three URL kwargs: the grandparent document id,
    the parent file id and the page id.
    """

    document_file_url = MultiKwargHyperlinkedIdentityField(
        view_kwargs=(
            {
                'lookup_field': 'document_file.document.pk',
                'lookup_url_kwarg': 'document_id',
            },
            {
                'lookup_field': 'document_file_id',
                'lookup_url_kwarg': 'document_file_id',
            }
        ),
        view_name='rest_api:documentfile-detail'
    )
    image_url = MultiKwargHyperlinkedIdentityField(
        view_kwargs=(
            {
                'lookup_field': 'document_file.document.pk',
                'lookup_url_kwarg': 'document_id',
            },
            {
                'lookup_field': 'document_file_id',
                'lookup_url_kwarg': 'document_file_id',
            },
            {
                'lookup_field': 'pk',
                'lookup_url_kwarg': 'document_file_page_id',
            }
        ),
        view_name='rest_api:documentfilepage-image'
    )
    url = MultiKwargHyperlinkedIdentityField(
        view_kwargs=(
            {
                'lookup_field': 'document_file.document.pk',
                'lookup_url_kwarg': 'document_id',
            },
            {
                'lookup_field': 'document_file_id',
                'lookup_url_kwarg': 'document_file_id',
            },
            {
                'lookup_field': 'pk',
                'lookup_url_kwarg': 'document_file_page_id',
            }
        ),
        view_name='rest_api:documentfilepage-detail'
    )

    class Meta:
        fields = (
            'document_file_url', 'id', 'image_url', 'page_number', 'url'
        )
        model = DocumentFilePage
[ "roberto.rosario@mayan-edms.com" ]
roberto.rosario@mayan-edms.com
8348c1ef6bf74078986bbe932cf0607094123add
f7f58aa4ea9ec78b20532971ddebe1e3d985dc23
/practica11/demo/apps/home/migrations/0001_initial.py
8e2f4d3cff30b21377b2d4c6bd257cf6e022d8e4
[]
no_license
guille1194/Django-Practices
10b9ff4817d41cb086e198c07bb82aee201fb049
738cbfdd4a12089d93cd68a0cde8653c490e7fd9
refs/heads/master
2021-03-08T19:30:11.229921
2016-05-23T05:38:53
2016-05-23T05:38:53
59,388,217
0
0
null
null
null
null
UTF-8
Python
false
false
4,806
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.utils.timezone
from django.conf import settings


class Migration(migrations.Migration):
    # Initial schema for the home app: courses, schedules, patients,
    # professionals and their relations. Field names are Spanish; they are
    # part of the database schema and must not be translated.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Cursos',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('numero_curso', models.IntegerField(unique=True)),
                ('curso', models.CharField(max_length=50)),
            ],
        ),
        # Join model; its relation fields are added via AddField below
        # because they reference models created later in this list.
        migrations.CreateModel(
            name='HorarioProfesionista',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
        ),
        migrations.CreateModel(
            name='Horarios',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('turno', models.CharField(max_length=1, choices=[(b'M', b'Matutino 6:00 - 14:00'), (b'V', b'Vespertino 14:00 - 22:00'), (b'N', b'Nocturno 22:00 - 6:00')])),
                ('curso', models.ForeignKey(to='home.Cursos')),
            ],
            options={
                'ordering': ['curso__numero_curso'],
            },
        ),
        migrations.CreateModel(
            name='Paciente',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre_paciente', models.CharField(max_length=99)),
                ('apellido_paciente', models.CharField(max_length=99)),
                ('num_expediente', models.IntegerField()),
                ('area', models.CharField(max_length=30)),
                ('fecha_ingreso', models.DateField(default=django.utils.timezone.now)),
                ('fecha_conclusion', models.DateField(default=django.utils.timezone.now)),
                ('evaluacion_completa', models.CharField(max_length=2)),
                ('reportes', models.CharField(max_length=2)),
                ('diagnostico', models.CharField(max_length=45)),
                ('fecha_nacimiento', models.DateField(default=django.utils.timezone.now)),
                ('edad_ingreso', models.IntegerField()),
                ('telefono', models.IntegerField()),
                ('email', models.EmailField(max_length=254)),
                ('genero', models.CharField(max_length=1, choices=[(b'M', b'Masculino'), (b'F', b'Femenino')])),
                ('perfil_usuario', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['perfil_usuario'],
                'permissions': (('puede_ser_paciente', 'Puede ser paciente'),),
            },
        ),
        migrations.CreateModel(
            name='Profesionista',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre_profesionista', models.CharField(max_length=68)),
                ('apellido_profesionista', models.CharField(max_length=68)),
                ('reportes', models.CharField(max_length=2)),
                ('horario', models.CharField(max_length=50)),
                ('telefono', models.IntegerField()),
                ('email', models.EmailField(max_length=254)),
                ('slug', models.SlugField(null=True, blank=True)),
                ('curso', models.ForeignKey(to='home.Cursos')),
                ('perfil_usuario', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'permissions': (('puede_hacer_cosas', 'Puede hacer cosas'),),
            },
        ),
        migrations.CreateModel(
            name='ProfesionistaPaciente',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('pacientes', models.ManyToManyField(to='home.Paciente', blank=True)),
                ('profesionista', models.OneToOneField(to='home.Profesionista')),
            ],
        ),
        migrations.AddField(
            model_name='horarioprofesionista',
            name='horario',
            field=models.OneToOneField(to='home.Horarios'),
        ),
        migrations.AddField(
            model_name='horarioprofesionista',
            name='profesionista',
            field=models.ForeignKey(to='home.Profesionista'),
        ),
        migrations.AlterUniqueTogether(
            name='horarios',
            unique_together=set([('curso', 'turno')]),
        ),
    ]
[ "guille1194@gmail.com" ]
guille1194@gmail.com
35c3ec42b0bed2b22113637433c0ccd79369c7e1
b08b5932c92824b592ac15e73fdffc79a7da18f3
/cauldron/cli/sync/comm.py
c66d111478c4100ba8854e7e5bc701845c5a0ee9
[ "MIT" ]
permissive
mlund01/cauldron
1de7426484d429703382c068c9704929e6c25bad
9a51cad2e5d528727151e9b60fd5be6a37b70273
refs/heads/master
2021-07-10T20:35:14.376399
2017-09-30T13:53:31
2017-09-30T13:53:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,019
py
import requests
from requests import Response as HttpResponse

from cauldron import environ


def assemble_url(
        endpoint: str,
        remote_connection: 'environ.RemoteConnection' = None
) -> str:
    """
    Assembles a fully-resolved remote connection URL from the given endpoint
    and remote_connection structure. If the remote_connection is omitted, the
    global remote_connection object stored in the environ module will be used
    in its place.

    :param endpoint:
        The endpoint for the API call
    :param remote_connection:
        The remote connection definition data structure
    :return:
        The fully-resolved URL for the given endpoint
    """
    url_root = (
        remote_connection.url
        if remote_connection else
        environ.remote_connection.url
    )
    # Fall back to the default local kernel address when no URL is configured.
    url_root = url_root if url_root else 'localhost:5010'

    parts = [
        # Prepend a scheme only when the configured root does not already
        # carry one (covers both http:// and https://).
        'http://' if not url_root.startswith('http') else '',
        url_root.rstrip('/'),
        '/',
        endpoint.lstrip('/')
    ]
    return ''.join(parts)


def parse_http_response(http_response: HttpResponse) -> 'environ.Response':
    """
    Returns a Cauldron response object parsed from the serialized JSON data
    specified in the http_response argument. If the response doesn't contain
    valid Cauldron response data, an error Cauldron response object is
    returned instead.

    :param http_response:
        The response object from an http request that contains a JSON
        serialized Cauldron response object as its body
    :return:
        The Cauldron response object for the given http response
    """
    try:
        response = environ.Response.deserialize(http_response.json())
    except Exception as error:
        # The body was not valid JSON (or not a serialized Response); wrap the
        # failure in an error Response instead of raising to the caller.
        response = environ.Response().fail(
            code='INVALID_REMOTE_RESPONSE',
            error=error,
            message='Invalid HTTP response from remote connection'
        ).console(
            whitespace=1
        ).response

    # Keep the raw HTTP response attached for callers that need status codes
    # or headers in addition to the parsed Cauldron payload.
    response.http_response = http_response
    return response


def get_request_function(data: dict = None, method: str = None):
    """
    Returns the requests-library function to use for a call: the explicitly
    requested HTTP method when one is given, otherwise POST when a JSON body
    is present and GET when it is not.

    :param data:
        Optional JSON-serializable body for the request; its presence selects
        the default method.
    :param method:
        Optional explicit HTTP method name (case-insensitive), e.g. 'put'.
    :return:
        The matching ``requests`` module function (e.g. ``requests.post``).
    """
    default_method = 'post' if data else 'get'
    return getattr(requests, method.lower() if method else default_method)


def send_request(
        endpoint: str,
        data: dict = None,
        remote_connection: 'environ.RemoteConnection' = None,
        method: str = None,
        **kwargs
) -> 'environ.Response':
    """
    Sends an HTTP request to the given endpoint on the remote connection and
    returns the parsed Cauldron response. Connection-level failures are
    converted into an error Response rather than raised.

    :param endpoint:
        The endpoint for the API call
    :param data:
        Optional dictionary sent as the JSON body of the request
    :param remote_connection:
        The remote connection definition data structure; defaults to the
        global one stored in the environ module
    :param method:
        Optional explicit HTTP method; defaults to POST when data is present,
        GET otherwise
    :param kwargs:
        Additional keyword arguments forwarded to the requests call
    :return:
        The Cauldron response object for the call
    """
    url = assemble_url(endpoint, remote_connection)
    func = get_request_function(data, method)

    try:
        http_response = func(url, json=data, **kwargs)
    except Exception as error:
        return environ.Response().fail(
            code='CONNECTION_ERROR',
            error=error,
            message='Unable to communicate with the remote connection'
        ).console(
            whitespace=1
        ).response

    return parse_http_response(http_response)


def download_file(
        filename: str,
        save_path: str,
        remote_connection: 'environ.RemoteConnection' = None
) -> 'environ.Response':
    """
    Streams the named file from the remote connection's ``/download``
    endpoint into ``save_path`` on the local filesystem.

    :param filename:
        Name of the remote file to fetch
    :param save_path:
        Local path where the downloaded bytes are written
    :param remote_connection:
        The remote connection definition data structure; defaults to the
        global one stored in the environ module
    :return:
        An empty success Response, or a failed Response on connection or
        write errors. NOTE(review): HTTP error statuses (e.g. 404) are not
        checked here, so an error body would be written to save_path —
        confirm whether that is intended.
    """
    url = assemble_url(
        '/download/{}'.format(filename),
        remote_connection=remote_connection
    )

    try:
        # stream=True avoids loading the whole file into memory at once.
        http_response = requests.get(url, stream=True)
    except Exception as error:
        return environ.Response().fail(
            code='CONNECTION_ERROR',
            error=error,
            message='Unable to communicate with the remote download connection'
        ).console(
            whitespace=1
        ).response

    try:
        with open(save_path, 'wb') as f:
            # Write in 2 KB chunks; skip keep-alive chunks, which are empty.
            for chunk in http_response.iter_content(2048):
                if chunk:
                    f.write(chunk)
    except Exception as error:
        return environ.Response().fail(
            code='WRITE_ERROR',
            error=error,
            message='Unable to write data to "{}"'.format(save_path)
        ).console(
            whitespace=1
        ).response

    return environ.Response()
[ "swernst@gmail.com" ]
swernst@gmail.com
2f2580af3e6b347cac1c59f041da72e745bea421
8fe440deb4eb66d2fcb222a7c43680dc516394c1
/src/api/bkuser_core/categories/utils.py
ce77a59d4005ae077ec7e093e24c60a0a708802c
[ "MIT" ]
permissive
robert871126/bk-user
780e163db76a8a997ed94a1a83389fa4f81ad6a4
8c633e0a3821beb839ed120c4514c5733e675862
refs/heads/master
2023-08-20T11:05:46.317044
2021-10-22T08:44:06
2021-10-22T08:44:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,313
py
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import time
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import Callable, ContextManager, Iterator, Optional

from bkuser_core.categories.models import ProfileCategory

logger = logging.getLogger(__name__)


def change_category_type(category_id: int, target_type: str):
    """Convert a profile category to another category type.

    :param category_id: primary key of the ProfileCategory to convert
    :param target_type: the new type value to assign to the category
    """
    ins = ProfileCategory.objects.get(pk=category_id)
    logger.info(
        "going to change type of Category<%s> from %s to %s",
        ins.display_name,
        ins.type,
        target_type,
    )
    ins.type = target_type
    ins.save()


@dataclass
class TimeContext:
    """Tracks elapsed wall-clock time and consumed CPU time for a code span."""

    # FIX: time.clock() was deprecated since Python 3.3 and *removed* in
    # Python 3.8, so the original defaults raised AttributeError on modern
    # interpreters. time.process_time() is the documented CPU-time
    # replacement and preserves the "CPU clock consumed" semantics.
    start_time: float = field(default_factory=time.time)
    start_clock: float = field(default_factory=time.process_time)
    end_time: Optional[float] = None
    end_clock: Optional[float] = None

    @property
    def time_delta(self):
        """Elapsed wall-clock seconds; a live value until close() is called."""
        if self.end_time is None:
            return time.time() - self.start_time
        return self.end_time - self.start_time

    @property
    def clock_delta(self):
        """Consumed CPU seconds; a live value until close() is called."""
        if self.end_clock is None:
            return time.process_time() - self.start_clock
        return self.end_clock - self.start_clock

    def close(self):
        """Freeze both end timestamps so the deltas stop advancing."""
        self.end_time = time.time()
        self.end_clock = time.process_time()


def __catch_time__() -> Iterator[TimeContext]:
    """Yield a running TimeContext and close it when the with-block exits."""
    context = TimeContext()
    try:
        yield context
    finally:
        # Always freeze the timers, even if the body raised.
        context.close()


# Usage: ``with catch_time() as t: ...`` then read t.time_delta / t.clock_delta.
catch_time: Callable[..., ContextManager[TimeContext]] = contextmanager(__catch_time__)
[ "bluesedenyu@gmail.com" ]
bluesedenyu@gmail.com
318d389b6772abd1e5773d73bf64ff401cfbfb8d
28f088b5356e66780c4bad204564bff92f910f02
/src/python/pants/backend/docker/subsystems/dockerfile_parser_test.py
533f9646fc8c9d82356bf8538864bf6be0332c24
[ "Apache-2.0" ]
permissive
wonlay/pants
57dcd99f82cdb2e37fcb7c563ec2bccf797ee7b7
53c66503b6898e83c9c9596e56cde5ad9ed6a0d3
refs/heads/master
2023-03-06T03:23:08.602817
2022-05-05T23:41:32
2022-05-05T23:41:32
24,695,709
0
0
Apache-2.0
2023-03-01T11:59:58
2014-10-01T21:15:29
Python
UTF-8
Python
false
false
6,330
py
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

from textwrap import dedent

import pytest

from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest
from pants.backend.docker.subsystems.dockerfile_parser import rules as parser_rules
from pants.backend.docker.subsystems.dockerfile_parser import split_iterable
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
from pants.backend.docker.util_rules.dockerfile import rules as dockerfile_rules
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.engine.addresses import Address
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.rule_runner import QueryRule, RuleRunner


@pytest.fixture
def rule_runner() -> RuleRunner:
    """Engine sandbox wired with the dockerfile parser, dockerfile and pex rules."""
    rule_runner = RuleRunner(
        rules=[
            *dockerfile_rules(),
            *parser_rules(),
            *pex_rules(),
            QueryRule(DockerfileInfo, (DockerfileInfoRequest,)),
        ],
        target_types=[DockerImageTarget, PexBinary],
    )
    rule_runner.set_options(
        [],
        # Inherited so the pex subsystem can locate interpreters on the host.
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )
    return rule_runner


# Run each case twice: once with the Dockerfile as a source file, once
# generated from `instructions=[...]` on the docker_image target.
@pytest.mark.parametrize(
    "files",
    [
        pytest.param(
            [
                ("test/BUILD", "docker_image()"),
                ("test/Dockerfile", "{dockerfile}"),
            ],
            id="source Dockerfile",
        ),
        pytest.param(
            [
                ("test/BUILD", "docker_image(instructions=[{dockerfile!r}])"),
            ],
            id="generate Dockerfile",
        ),
    ],
)
def test_putative_target_addresses(files: list[tuple[str, str]], rule_runner: RuleRunner) -> None:
    """COPY'd .pex paths map to target addresses; `--from=` stages are ignored."""
    dockerfile_content = dedent(
        """\
        FROM base
        COPY some.target/binary.pex some.target/tool.pex /bin
        COPY --from=scratch this.is/ignored.pex /opt
        COPY binary another/cli.pex tool /bin
        """
    )

    rule_runner.write_files(
        {filename: content.format(dockerfile=dockerfile_content) for filename, content in files}
    )

    addr = Address("test")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    # Dotted pex paths become path/to:name addresses; non-pex COPY sources
    # ("binary", "tool") and --from= sources do not contribute addresses.
    assert info.putative_target_addresses == (
        "some/target:binary",
        "some/target:tool",
        "another:cli",
    )


def test_split_iterable() -> None:
    # The separator element partitions the iterable into tuples.
    assert [("a", "b"), ("c",)] == list(split_iterable("-", ("a", "b", "-", "c")))


def test_build_args(rule_runner: RuleRunner) -> None:
    """ARG instructions (with and without defaults, before and after FROM) are collected."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image()",
            "test/Dockerfile": dedent(
                """\
                ARG registry
                FROM ${registry}/image:latest
                ARG OPT_A
                ARG OPT_B=default_b_value
                ENV A=${OPT_A:-A_value}
                ENV B=${OPT_B}
                """
            ),
        }
    )

    addr = Address("test")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    assert info.build_args == DockerBuildArgs.from_strings(
        "registry",
        "OPT_A",
        "OPT_B=default_b_value",
    )


def test_from_image_build_arg_names(rule_runner: RuleRunner) -> None:
    """A build ARG used as the FROM image is surfaced by name."""
    rule_runner.write_files(
        {
            "test/upstream/BUILD": "docker_image(name='image')",
            "test/upstream/Dockerfile": "FROM upstream",
            "test/downstream/BUILD": "docker_image(name='image')",
            "test/downstream/Dockerfile": dedent(
                """\
                ARG BASE_IMAGE=test/upstream:image
                FROM ${BASE_IMAGE} AS base
                """
            ),
        }
    )
    addr = Address("test/downstream", target_name="image")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    assert info.from_image_build_arg_names == ("BASE_IMAGE",)


def test_inconsistent_build_args(rule_runner: RuleRunner) -> None:
    """The same ARG declared with different defaults across stages is an error."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image(name='image')",
            "test/Dockerfile": dedent(
                """\
                FROM image1:latest
                ARG OPT_A=default_1

                FROM image2:latest
                ARG OPT_A=default_2
                """
            ),
        }
    )

    addr = Address("test", target_name="image")
    err_msg = (
        r"Error while parsing test/Dockerfile for the test:image target: DockerBuildArgs: "
        r"duplicated 'OPT_A' with different values: 'default_1' != 'default_2'\."
    )
    with pytest.raises(ExecutionError, match=err_msg):
        rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])


def test_copy_source_references(rule_runner: RuleRunner) -> None:
    """All COPY sources are collected; ADD and --from= COPY sources are not."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image()",
            "test/Dockerfile": dedent(
                """\
                FROM base
                COPY a b /
                COPY --option c/d e/f/g /h
                ADD ignored
                COPY j k /
                COPY
                """
            ),
        }
    )

    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(Address("test"))])
    assert info.copy_sources == ("a", "b", "c/d", "e/f/g", "j", "k")


def test_baseimage_tags(rule_runner: RuleRunner) -> None:
    """Each FROM yields a 'stageN tag' entry; digest-only bases have no tag."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image()",
            "test/Dockerfile": (
                "FROM untagged\n"
                "FROM tagged:v1.2\n"
                "FROM digest@sha256:d1f0463b35135852308ea815c2ae54c1734b876d90288ce35828aeeff9899f9d\n"
                "FROM gcr.io/tekton-releases/github.com/tektoncd/operator/cmd/kubernetes/operator:"
                "v0.54.0@sha256:d1f0463b35135852308ea815c2ae54c1734b876d90288ce35828aeeff9899f9d\n"
            ),
        }
    )
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(Address("test"))])
    assert info.version_tags == (
        "stage0 latest",
        "stage1 v1.2",
        # Stage 2 is not pinned with a tag.
        "stage3 v0.54.0",
    )
[ "noreply@github.com" ]
wonlay.noreply@github.com