Dataset columns (name, dtype, observed range):

  blob_id                string          length 40
  directory_id           string          length 40
  path                   string          length 3 to 288
  content_id             string          length 40
  detected_licenses      list            length 0 to 112
  license_type           string          2 distinct values
  repo_name              string          length 5 to 115
  snapshot_id            string          length 40
  revision_id            string          length 40
  branch_name            string          684 distinct values
  visit_date             timestamp[us]   2015-08-06 10:31:46 to 2023-09-06 10:44:38
  revision_date          timestamp[us]   1970-01-01 02:38:32 to 2037-05-03 13:00:00
  committer_date         timestamp[us]   1970-01-01 02:38:32 to 2023-09-06 01:08:06
  github_id              int64           4.92k to 681M
  star_events_count      int64           0 to 209k
  fork_events_count      int64           0 to 110k
  gha_license_id         string          22 distinct values
  gha_event_created_at   timestamp[us]   2012-06-04 01:52:49 to 2023-09-14 21:59:50
  gha_created_at         timestamp[us]   2008-05-22 07:58:19 to 2023-08-21 12:35:19
  gha_language           string          147 distinct values
  src_encoding           string          25 distinct values
  language               string          1 distinct value
  is_vendor              bool            2 classes
  is_generated           bool            2 classes
  length_bytes           int64           128 to 12.7k
  extension              string          142 distinct values
  content                string          length 128 to 8.19k
  authors                list            length 1
  author_id              string          length 1 to 132
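Each block below is one sample row, with its fields listed in the column order above and the file contents reproduced inline. As a rough illustration of how records with this schema could be consumed, here is a minimal, hypothetical sketch using the Hugging Face datasets library; the dataset identifier "org/python-code-corpus" is a placeholder, not the real name:

    # Hypothetical usage sketch -- the dataset path below is a placeholder.
    from datasets import load_dataset

    # Stream the rows rather than downloading the full corpus up front.
    ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

    # Keep small, human-written files with a permissive license,
    # using the license_type, is_generated and length_bytes columns above.
    wanted = (
        row for row in ds
        if row["license_type"] == "permissive"
        and not row["is_generated"]
        and row["length_bytes"] < 4096
    )

    for row in wanted:
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # show just the first match

The filter keys mirror the metadata columns in the schema table; any other column (for example star_events_count or extension) could be used the same way.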
7862f8b3d8719a8e3d7345b5a55b8d124eabfb15
1902858ae52dcaff33ad6056670d2df933744122
/Sample_Python_File.py
5ea4ff989e81614a62b7c184e148129e9c2b5b8f
[]
no_license
gsudarshan1990/codilitypython
220dabe82917cdab955c9bf7446c7752d5b9474d
73d601291c86824925daf52c4fee369765a0d176
refs/heads/master
2022-11-29T02:11:14.901476
2020-08-17T06:01:14
2020-08-17T06:01:14
288,095,600
0
0
null
null
null
null
UTF-8
Python
false
false
3,264
py
print("Hello World") string1 = 'Good morning' print(string1[:3]) print(string1[4:]) message='A kong string with a silly typo' new_message=message[:2]+'l'+message[3:] print(new_message) pets="Dogs & Cats" print(pets.index('&')) email="shashank@gmail.com" old_domain = "gmail.com" new_domain = "yahoo.com" def replace(email,old_domain,new_domain): with_symbol="@"+old_domain for with_symbol in email: index=email.index('@') new_email=email[:index]+new_domain return new_email print(replace(email,old_domain,new_domain)) string2=" ".join(["This","is","joined"]) print(string2) print("...".join(["This","is","a","string","joined","by","three","dots"])) string3="This is the string splitted based on the split method" print(string3.split()) print(string3.count('s')) print(string3.endswith('method')) name="Manny" lucky_number=len(name)*3 print("Your name is {} and your lucky number is {}".format(name,lucky_number)) print("Your name is {name} and your lucky number is {number}".format(name=name,number=lucky_number)) print("Your name is {0} and your lucky number {1}".format(name,lucky_number)) price =7.5 with_tax = price*3.33 print(price,with_tax) print("base price {:.2f} and tax is {:>50.2f}".format(price,with_tax)) list1=['Now','we','are','cooking'] print(type(list1)) print(len(list1)) print(list1) print(list1[0]) print(list1[1]) fruits=["Pineapple","Apples","Grapes","Oranges"] fruits.append("Jack Fruit") fruits.insert(0,"Kiwi") fruits.insert(len(fruits)+10,"peach") print(fruits) fruits.remove("Grapes") print(fruits) print(fruits.pop(2)) print(fruits) fruits[2]='Strawberry' print(fruits) animals=['lion','monkey','zebra','dolphin'] len_chars=0 for animal in animals: len_chars+=len(animal) print("Total length:{} and average:{}".format(len_chars,len_chars/len(animals))) names=['laxman','rama','hanuman','sita'] for index,person in enumerate(names): print('{}-{}'.format(index+1,person)) email_list=[('laxman','laxman@gmail.com'),('rama','rama@gmail.com')] def re_order(people): for name,email in people: print("{}<{}>".format(name,email)) re_order(email_list) multiples=[] for x in range(71): if x%7 == 0: multiples.append(x) print(multiples) multiples=[] for x in range(1,11): multiples.append(x*7) print(multiples) multiples=[x*7 for x in range(1,11)] print(multiples) programs=["Python","Java","C","C++","COBOL","VB"] length_program=[len(x) for x in programs] print(length_program) multiples_of_three=[x for x in range(0,101) if x%3 == 0] print(multiples_of_three) file_count={"jpeg":20,"csv":24,"exe":33,"txt":52,"py":100} print(file_count) print(file_count["jpeg"]) print("jpeg" in file_count) file_count["cfg"] =12 print(file_count) file_count["csv"]=17 print(file_count) del file_count["csv"] print(file_count) for key in file_count: print(key) for key,value in file_count.items(): print("There are {} and {} ".format(value,key)) for key in file_count.keys(): print(key) for value in file_count.values(): print(value) def count_letters(text): result = {} for letter in text: if letter not in result: result[letter]=0 result[letter]+=1 print(result) count_letters("Hello")
[ "sudarshan2009@live.in" ]
sudarshan2009@live.in
1e2ddbfb4d384d155cac17158825e2a855f673cc
536538af28cfe40e10ff1ce469cd0f81e8b3a8fe
/longest_increasing_path_in_a_matrix.py
55beead6cedf6fdd763cbdc064334a7ba3d1b00f
[]
no_license
ShunKaiZhang/LeetCode
7e10bb4927ba8581a3a7dec39171eb821c258c34
ede2a2e19f27ef4adf6e57d6692216b8990cf62b
refs/heads/master
2021-09-01T07:41:03.255469
2017-12-25T19:22:18
2017-12-25T19:22:18
104,136,129
0
0
null
null
null
null
UTF-8
Python
false
false
1,549
py
# python3
# Given an integer matrix, find the length of the longest increasing path.
# From each cell, you can either move to four directions: left, right, up or down.
# You may NOT move diagonally or move outside of the boundary (i.e. wrap-around is not allowed).
# Example 1:
# nums = [
#   [9,9,4],
#   [6,6,8],
#   [2,1,1]
# ]
# Return 4
# The longest increasing path is [1, 2, 6, 9].
# Example 2:
# nums = [
#   [3,4,5],
#   [3,2,6],
#   [2,2,1]
# ]
# Return 4
# The longest increasing path is [3, 4, 5, 6]. Moving diagonally is not allowed.

# Solution
class Solution(object):
    def search(self, matrix, i, j):
        directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        if not self.res[i][j]:
            cur = matrix[i][j]
            self.res[i][j] = 1
            for it in directions:
                if (i + it[0] >= 0 and j + it[1] >= 0
                        and i + it[0] < len(matrix) and j + it[1] < len(matrix[0])
                        and cur > matrix[i + it[0]][j + it[1]]):
                    self.res[i][j] = max(self.res[i][j], 1 + self.search(matrix, i + it[0], j + it[1]))
        return self.res[i][j]

    def longestIncreasingPath(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: int
        """
        if matrix == [] or matrix == [[]]:
            return 0
        self.res = [[0 for j in range(len(matrix[0]))] for j in range(len(matrix))]
        return max(self.search(matrix, i, j) for i in range(len(matrix)) for j in range(len(matrix[0])))
[ "noreply@github.com" ]
ShunKaiZhang.noreply@github.com
36444e5712a6f02ffbc71f329bf1d966fd9abe0a
ee6ec654937a5f592b373630f58de1a41138ad70
/nosocod/nodes/node.py
83d0ba40b2c92132e4cc44d91623886984861afc
[]
no_license
atykhonov/nosocod
eae05b857b5e88f0a590c632d013cb4deea715d4
2af67d7280af3344a0b44a8e6ccd1c1fd7a9eb88
refs/heads/master
2016-09-16T14:44:33.519300
2014-03-10T20:14:51
2014-03-10T20:14:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,682
py
class Node(object): name = '' value = '' actions = [] children = [] parent = None params = [] linked_with = None anchored = False def __init__(self, name, value=None): self.name = name # TODO do we realy need this if? if value: self.value = value else: # something is wrong with that value assigning self.value = name self.children = [] self.params = [] self.actions = [] def assign_action(self, action): action.set_node(self) self.append(action) def add_node(self, node): self.children.append(node) node.parent = self def get_children(self): result = self.children if self.linked_with is not None: result = self.linked_with.get_children() return result def has_children(self): return len(self.children) > 0 def add_param(self, param): self.params.append(param) def has_params(self): return len(self.params) > 0 def get_param(self, name): result = None for param in self.params: if param.name == name: result = param break return result def accept(self, visitor): if visitor.visitEnter(self): for child in self.children: if child.accept(visitor): break return visitor.visitLeave(self) def is_linked(self): return self.linked_with is not None def __add__(self, other): if isinstance(other, (list, tuple)): self.children.extend(other) else: self.children.append(other) return self def __sub__(self, other): for child in self.children: if child.name == other.name: self.children.remove(child) return self def is_valid(self, value): result = False if self.value == value: result = True return result def __str__(self): return self.name def __unicode__(self): return self.name """ def get_path(self, with_root=True): parents = [] parent = self.parent while parent != None: parents.append(parent) parent = parent.parent parents.sort(reverse=True) path = '' for parent in parents: if not with_root and parent.name == 'root': continue path += '/' + parent.name path += '/' + self.name return path def get_relative_path(self, path): # TODO pass """
[ "atykhonov@gmail.com" ]
atykhonov@gmail.com
9b07f7e39167b54c0126332b3d41fccbbaba53b5
b175a3abfa14992d9b07d53adc12700ded3b1c02
/BasicLibs/learnPySpark/ml/random_forest_classifier_example.py
15bd8a95d0a503d9c4e17628250736e88d0a0344
[]
no_license
BarryZM/Python-AI
d695a1569e5497da391b578e6638cc11479bfbaa
251dc4002f9d7e5dd789e62b813651f2006f6ab6
refs/heads/master
2023-03-17T15:33:35.530258
2020-04-01T10:32:47
2020-04-01T10:32:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,306
py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Random Forest Classifier Example. """ from __future__ import print_function # $example on$ from pyspark.ml import Pipeline from pyspark.ml.classification import RandomForestClassifier from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer from pyspark.ml.evaluation import MulticlassClassificationEvaluator # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("RandomForestClassifierExample")\ .getOrCreate() # $example on$ # Load and parse the data file, converting it to a DataFrame. data = spark.read.format("libsvm").load("file:///Users/hushiwei/PycharmProjects/Python-AI/BasicLibs/learnPySpark/data/mllib/sample_libsvm_data.txt") # Index labels, adding metadata to the label column. # Fit on whole dataset to include all labels in index. labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data) # Automatically identify categorical features, and index them. # Set maxCategories so features with > 4 distinct values are treated as continuous. featureIndexer =\ VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data) # Split the data into training and test sets (30% held out for testing) (trainingData, testData) = data.randomSplit([0.7, 0.3]) # Train a RandomForest model. rf = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", numTrees=10) # Convert indexed labels back to original labels. labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=labelIndexer.labels) # Chain indexers and forest in a Pipeline pipeline = Pipeline(stages=[labelIndexer, featureIndexer, rf, labelConverter]) # Train model. This also runs the indexers. model = pipeline.fit(trainingData) # Make predictions. predictions = model.transform(testData) # Select example rows to display. predictions.select("predictedLabel", "label", "features").show(5) # Select (prediction, true label) and compute test error evaluator = MulticlassClassificationEvaluator( labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy") accuracy = evaluator.evaluate(predictions) print("Test Error = %g" % (1.0 - accuracy)) rfModel = model.stages[2] print(rfModel) # summary only # $example off$ spark.stop()
[ "hsw.time@gmail.com" ]
hsw.time@gmail.com
c7bdea3d3ab6180d2b789fd140f141eb2e07ac06
b1fe502cfd4832b9ddc24f5dccc9c5d6f3e1ef4a
/code/emails2.py
21dd2dcb13da5666f3fdbebe753f99c95384c365
[]
no_license
hanxiaoshun/steam_cui
73fa3273fc057811e76ed5a4f0b65b858d5536d7
768b660e27806a90bf43351526b88aa36552877b
refs/heads/master
2023-01-22T08:15:33.851246
2020-11-22T23:47:08
2020-11-22T23:47:08
309,525,747
2
0
null
null
null
null
UTF-8
Python
false
false
570
py
# %3A%2F%2F=://
# %2F=/
# %3D= =
# 'https%3A%2F%2Fstore.steampowered.com%2Faccount%2Fnewaccountverification%3F
# stoken
# %3D385313a17af59bf057c9254f1d024ea2c87193ac543cb821540ed83539596a944d188e14361d8315e303d3833e78a6a7
# %3D385313a17af59bf057c9254f1d024ea2c87193ac543cb821540ed83539596a944d188e14361d8315e303d3833e78a6a7
# %26creationid
# %3D5304319067267826436
'https://store.steampowered.com/account/newaccountverification?stoken=385313a17af59bf057c9254f1d024ea2c87193ac543cb821540ed83539596a944d188e14361d8315e303d3833e78a6a7&amp;creationid=5304319067267826436'
[ "18301513217@sina.cn" ]
18301513217@sina.cn
2792bbe02ccbe16552038a958ca64abd2b9a6c45
b708cf2c5145d68fbc45c736568fe152e7dec27e
/apps/store/views.py
8dda31fbfd77fc5dcb60bd45c6728e7a32db3fa1
[]
no_license
stillnurs/dropshipper-django-vue
56036ebf705d91aea58492344d57c2d94ee6c368
becad05d44342598b4c1f8fba7d19bc6223d6c57
refs/heads/main
2023-05-03T08:34:35.771909
2021-05-23T12:15:15
2021-05-23T12:15:15
369,894,968
0
0
null
null
null
null
UTF-8
Python
false
false
2,720
py
import random from datetime import datetime from django.shortcuts import render, get_object_or_404, redirect from django.db.models import Q from apps.cart.cart import Cart from .models import Product, Category, ProductReview def search(request): query = request.GET.get('query') instock = request.GET.get('instock') price_from = request.GET.get('price_from', 0) price_to = request.GET.get('price_to', 100000) sorting = request.GET.get('sorting', '-date_added') products = Product.objects.filter(Q(title__icontains=query) | Q(description__icontains=query)).filter(price__gte=price_from).filter(price__lte=price_to) if instock: products = products.filter(num_available__gte=1) context = { 'query': query, 'products': products.order_by(sorting), 'instock': instock, 'price_from': price_from, 'price_to': price_to, 'sorting': sorting } return render(request, 'search.html', context) def product_detail(request, category_slug, slug): product = get_object_or_404(Product, slug=slug) product.num_visits = product.num_visits + 1 product.last_visit = datetime.now() product.save() # Add review if request.method == 'POST' and request.user.is_authenticated: stars = request.POST.get('stars', 3) content = request.POST.get('content', '') review = ProductReview.objects.create(product=product, user=request.user, stars=stars, content=content) return redirect('product_detail', category_slug=category_slug, slug=slug) # related_products = list(product.category.products.filter(parent=None).exclude(id=product.id)) if len(related_products) >= 3: related_products = random.sample(related_products, 3) if product.parent: return redirect('product_detail', category_slug=category_slug, slug=product.parent.slug) imagesstring = "{'thumbnail': '%s', 'image': '%s'}," % (product.thumbnail.url, product.image.url) for image in product.images.all(): imagesstring = imagesstring + ("{'thumbnail': '%s', 'image': '%s'}," % (image.thumbnail.url, image.image.url)) cart = Cart(request) product.in_cart = bool(cart.has_product(product.id)) context = { 'product': product, 'imagesstring': imagesstring, 'related_products': related_products } return render(request, 'product_detail.html', context) def category_detail(request, slug): category = get_object_or_404(Category, slug=slug) products = category.products.filter(parent=None) context = { 'category': category, 'products': products } return render(request, 'category_detail.html', context)
[ "noorsultan.mamataliev@gmail.com" ]
noorsultan.mamataliev@gmail.com
1e83527a90853e686356fcf1282cefab339f8905
c9e2bb165d0cc60e47f4d658a05bd77bb5f6fa9a
/unittests/test_wsgi_jinja.py
6d2b9eea3f7bcddd96ac3abda9a6e82dcc56edce
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
swl10/pyslet
c1eb8a505972e6c16bd01d6f9e7c6cd83f75f123
b30e9a439f6c0f0e2d01f1ac80986944bed7427b
refs/heads/master
2021-01-25T09:31:55.480250
2018-03-05T18:50:11
2018-03-05T18:50:11
20,192,341
67
39
NOASSERTION
2020-03-31T05:51:24
2014-05-26T16:37:45
Python
UTF-8
Python
false
false
1,661
py
#! /usr/bin/env python

import logging
import optparse
import os.path
import unittest

from pyslet.wsgi_jinja import JinjaApp


def suite(prefix='test'):
    loader = unittest.TestLoader()
    loader.testMethodPrefix = prefix
    return unittest.TestSuite((
        loader.loadTestsFromTestCase(JinjaAppTests),
    ))


SETTINGS_FILE = os.path.join(
    os.path.join(os.path.split(__file__)[0], 'data_jinja'),
    'settings.json')


class JinjaAppTests(unittest.TestCase):

    def setUp(self):  # noqa
        self.settings_file = os.path.abspath(SETTINGS_FILE)

    def tearDown(self):  # noqa
        pass

    def test_debug_option(self):
        class DebugApp(JinjaApp):
            settings_file = self.settings_file

        p = optparse.OptionParser()
        DebugApp.add_options(p)
        options, args = p.parse_args([])
        self.assertTrue(options.debug is False)
        DebugApp.setup(options=options, args=args)
        # check setting value
        self.assertTrue(DebugApp.debug is False)

        class DebugApp(JinjaApp):
            settings_file = self.settings_file

        options, args = p.parse_args(['-d'])
        self.assertTrue(options.debug is True)
        DebugApp.setup(options=options, args=args)
        self.assertTrue(DebugApp.debug is True)

        class DebugApp(JinjaApp):
            settings_file = self.settings_file

        options, args = p.parse_args(['--debug'])
        self.assertTrue(options.debug is True)
        DebugApp.setup(options=options, args=args)
        self.assertTrue(DebugApp.debug is True)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    unittest.main()
[ "steve.w.lay@gmail.com" ]
steve.w.lay@gmail.com
66ce0e5e6607fb88403194d1679ab6f2ffdbf620
4b198f184cbe79abf0c7f9e53054b19a2100c908
/tests/test_base.py
04890bf888e42d74bc966f8c7d586d82af3402b1
[ "Apache-2.0" ]
permissive
zzzz123321/GerapyAutoExtractor
03095a693770d0f2a9919272db9134466ac314bc
8d6f44d3d55a590fe971a68a425f20bc792a3947
refs/heads/master
2022-11-15T23:19:20.491401
2020-07-10T03:19:16
2020-07-10T03:19:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
372
py
import unittest
from os.path import join


class TestBase(unittest.TestCase):
    samples_dir = None

    def html(self, file_name):
        """
        get html content of file
        :param file_name:
        :return:
        """
        file_path = join(self.samples_dir, file_name)
        with open(file_path, encoding='utf-8') as f:
            return f.read()
[ "cqc@cuiqingcai.com" ]
cqc@cuiqingcai.com
694a05ea0a76734cee806539f4d944a677e2ea02
24fe1f54fee3a3df952ca26cce839cc18124357a
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/rtctrl/settagdef.py
d227f1c2fb007ad7995a709552fb6968c7f8a4bc
[]
no_license
aperiyed/servicegraph-cloudcenter
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
9eb7975f2f6835e1c0528563a771526896306392
refs/heads/master
2023-05-10T17:27:18.022381
2020-01-20T09:18:28
2020-01-20T09:18:28
235,065,676
0
0
null
2023-05-01T21:19:14
2020-01-20T09:36:37
Python
UTF-8
Python
false
false
5,881
py
# coding=UTF-8 # ********************************************************************** # Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved # written by zen warriors, do not modify! # ********************************************************************** from cobra.mit.meta import ClassMeta from cobra.mit.meta import StatsClassMeta from cobra.mit.meta import CounterMeta from cobra.mit.meta import PropMeta from cobra.mit.meta import Category from cobra.mit.meta import SourceRelationMeta from cobra.mit.meta import NamedSourceRelationMeta from cobra.mit.meta import TargetRelationMeta from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory from cobra.model.category import MoCategory, PropCategory, CounterCategory from cobra.mit.mo import Mo # ################################################## class SetTagDef(Mo): """ The set tag definition. """ meta = ClassMeta("cobra.model.rtctrl.SetTagDef") meta.moClassName = "rtctrlSetTagDef" meta.rnFormat = "tag" meta.category = MoCategory.REGULAR meta.label = "None" meta.writeAccessMask = 0x1 meta.readAccessMask = 0x1 meta.isDomainable = False meta.isReadOnly = True meta.isConfigurable = False meta.isDeletable = False meta.isContextRoot = False meta.childClasses.add("cobra.model.fault.Delegate") meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-")) meta.parentClasses.add("cobra.model.rtctrl.AttrDef") meta.superClasses.add("cobra.model.pol.Comp") meta.superClasses.add("cobra.model.rtctrl.ASetRule") meta.superClasses.add("cobra.model.fabric.L3ProtoComp") meta.superClasses.add("cobra.model.fabric.ProtoComp") meta.superClasses.add("cobra.model.pol.Obj") meta.superClasses.add("cobra.model.naming.NamedObject") meta.superClasses.add("cobra.model.rtctrl.ASetTag") meta.rnPrefixes = [ ('tag', False), ] prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("deleteAll", "deleteall", 16384) prop._addConstant("deleteNonPresent", "deletenonpresent", 8192) prop._addConstant("ignore", "ignore", 4096) meta.props.add("childAction", prop) prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR) prop.label = "Description" prop.isConfig = True prop.isAdmin = True prop.range = [(0, 128)] prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+'] meta.props.add("descr", prop) prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN) prop.label = "None" prop.isDn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("dn", prop) prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "local" prop._addConstant("implicit", "implicit", 4) prop._addConstant("local", "local", 0) prop._addConstant("policy", "policy", 1) prop._addConstant("replica", "replica", 2) prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3) meta.props.add("lcOwn", prop) prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "never" prop._addConstant("never", "never", 0) meta.props.add("modTs", prop) prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR) prop.label = "Name" prop.isConfig = True prop.isAdmin = True prop.range = [(0, 64)] prop.regex = ['[a-zA-Z0-9_.:-]+'] meta.props.add("name", prop) prop = PropMeta("str", "nameAlias", "nameAlias", 28417, 
PropCategory.REGULAR) prop.label = "Name alias" prop.isConfig = True prop.isAdmin = True prop.range = [(0, 63)] prop.regex = ['[a-zA-Z0-9_.-]+'] meta.props.add("nameAlias", prop) prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN) prop.label = "None" prop.isRn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("rn", prop) prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("created", "created", 2) prop._addConstant("deleted", "deleted", 8) prop._addConstant("modified", "modified", 4) meta.props.add("status", prop) prop = PropMeta("str", "tag", "tag", 790, PropCategory.REGULAR) prop.label = "Route Tag" prop.isConfig = True prop.isAdmin = True meta.props.add("tag", prop) prop = PropMeta("str", "type", "type", 789, PropCategory.REGULAR) prop.label = "None" prop.isConfig = True prop.isAdmin = True prop.defaultValue = 2 prop.defaultValueStr = "rt-tag" prop._addConstant("as-path", "as-path", 11) prop._addConstant("community", "community", 1) prop._addConstant("dampening-pol", "dampening-type", 10) prop._addConstant("ip-nh", "ip-nexthop", 8) prop._addConstant("local-pref", "local-preference", 4) prop._addConstant("metric", "metric", 5) prop._addConstant("metric-type", "metric-type", 9) prop._addConstant("ospf-fwd-addr", "ospf-fowarding-address", 7) prop._addConstant("ospf-nssa", "ospf-nssa-area", 6) prop._addConstant("rt-tag", "route-tag", 2) prop._addConstant("rt-weight", "route-weight", 3) meta.props.add("type", prop) def __init__(self, parentMoOrDn, markDirty=True, **creationProps): namingVals = [] Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps) # End of package file # ##################################################
[ "rrishike@cisco.com" ]
rrishike@cisco.com
a224e09ae49b8d796c3870bb6502820e4dc8fd10
ab0585893c7ac6ee649de2359f6c4c64bf76dd32
/venv/lib/python2.7/site-packages/flask_bootstrap/__init__.py
1581420430d26107f0e6f7c5bdd8e31121a39421
[]
no_license
bmcharek/flaskapp
fc8fd8c6cedd2af022a97d40f9013b44b9e4af75
943a5c05525c189fa86406998c9f2a32243d4e1e
refs/heads/master
2021-01-19T11:16:09.165869
2014-04-24T00:18:18
2014-04-24T00:18:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,526
py
#!/usr/bin/env python # coding=utf8 __version__ = '3.0.2.3' import re from flask import Blueprint, current_app, url_for try: from wtforms.fields import HiddenField except ImportError: def is_hidden_field_filter(field): raise RuntimeError('WTForms is not installed.') else: def is_hidden_field_filter(field): return isinstance(field, HiddenField) class CDN(object): """Base class for CDN objects.""" def get_resource_url(self, filename): """Return resource url for filename.""" raise NotImplementedError class StaticCDN(object): """A CDN that serves content from the local application. :param static_endpoint: Endpoint to use. :param rev: If ``True``, honor ``BOOTSTRAP_QUERYSTRING_REVVING``. """ def __init__(self, static_endpoint='static', rev=False): self.static_endpoint = static_endpoint self.rev = rev def get_resource_url(self, filename): extra_args = {} if self.rev and current_app.config['BOOTSTRAP_QUERYSTRING_REVVING']: extra_args['bootstrap'] = __version__ return url_for(self.static_endpoint, filename=filename, **extra_args) class WebCDN(object): """Serves files from the Web. :param baseurl: The baseurl. Filenames are simply appended to this URL. """ def __init__(self, baseurl): self.baseurl = baseurl def get_resource_url(self, filename): return self.baseurl + filename class ConditionalCDN(object): """Serves files from one CDN or another, depending on whether a configuration value is set. :param confvar: Configuration variable to use. :param primary: CDN to use if the configuration variable is ``True``. :param fallback: CDN to use otherwise. """ def __init__(self, confvar, primary, fallback): self.confvar = confvar self.primary = primary self.fallback = fallback def get_resource_url(self, filename): if current_app.config[self.confvar]: return self.primary.get_resource_url(filename) return self.fallback.get_resource_url(filename) def bootstrap_find_resource(filename, cdn, use_minified=None, local=True): """Resource finding function, also available in templates. Tries to find a resource, will force SSL depending on ``BOOTSTRAP_CDN_FORCE_SSL`` settings. :param filename: File to find a URL for. :param cdn: Name of the CDN to use. :param use_minified': If set to ``True``/``False``, use/don't use minified. If ``None``, honors ``BOOTSTRAP_USE_MINIFIED``. :param local: If ``True``, uses the ``local``-CDN when ``BOOTSTRAP_SERVE_LOCAL`` is enabled. If ``False``, uses the ``static``-CDN instead. :return: A URL. 
""" config = current_app.config if None == use_minified: use_minified = config['BOOTSTRAP_USE_MINIFIED'] if use_minified: filename = '%s.min.%s' % tuple(filename.rsplit('.', 1)) cdns = current_app.extensions['bootstrap']['cdns'] resource_url = cdns[cdn].get_resource_url(filename) if resource_url.startswith('//') and config['BOOTSTRAP_CDN_FORCE_SSL']: resource_url = 'https:%s' % resource_url return resource_url class Bootstrap(object): def __init__(self, app=None): if app is not None: self.init_app(app) def init_app(self, app): BOOTSTRAP_VERSION = re.sub(r'^(\d+\.\d+\.\d+).*', r'\1', __version__) JQUERY_VERSION = '2.0.3' HTML5SHIV_VERSION = '3.7' RESPONDJS_VERSION = '1.3.0' app.config.setdefault('BOOTSTRAP_USE_MINIFIED', True) app.config.setdefault('BOOTSTRAP_CDN_FORCE_SSL', False) app.config.setdefault('BOOTSTRAP_QUERYSTRING_REVVING', True) app.config.setdefault('BOOTSTRAP_SERVE_LOCAL', False) blueprint = Blueprint( 'bootstrap', __name__, template_folder='templates', static_folder='static', static_url_path=app.static_url_path + '/bootstrap') app.register_blueprint(blueprint) app.jinja_env.globals['bootstrap_is_hidden_field'] =\ is_hidden_field_filter app.jinja_env.globals['bootstrap_find_resource'] =\ bootstrap_find_resource if not hasattr(app, 'extensions'): app.extensions = {} local = StaticCDN('bootstrap.static', rev=True) static = StaticCDN() def lwrap(cdn, primary=static): return ConditionalCDN('BOOTSTRAP_SERVE_LOCAL', primary, cdn) bootstrap = lwrap( WebCDN('//cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/%s/' % BOOTSTRAP_VERSION), local) jquery = lwrap( WebCDN('//cdnjs.cloudflare.com/ajax/libs/jquery/%s/' % JQUERY_VERSION), local) html5shiv = lwrap( WebCDN('//cdnjs.cloudflare.com/ajax/libs/html5shiv/%s/' % HTML5SHIV_VERSION)) respondjs = lwrap( WebCDN('//cdnjs.cloudflare.com/ajax/libs/respond.js/%s/' % RESPONDJS_VERSION)) app.extensions['bootstrap'] = { 'cdns': { 'local': local, 'static': static, 'bootstrap': bootstrap, 'jquery': jquery, 'html5shiv': html5shiv, 'respond.js': respondjs, }, }
[ "bmcharek@gmail.com" ]
bmcharek@gmail.com
e7959ef076b4880b872bfab62cafa5623aa515bf
215fd5c4f9893d9f38e4e48199ea16d7d6ef9430
/3.Binary_Tree_DC/3.11_596_Minimum_Subtree.py
5b834db4fb3005e8aaca52226012ecd8ff9fec06
[]
no_license
fztest/Classified
fd01622c097ca21b2e20285b06997ff0e9792dd1
b046d94657c0d04f3803ca15437dfe9a6f6f3252
refs/heads/master
2020-03-25T06:34:07.885108
2017-05-04T17:22:36
2017-05-04T17:22:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,156
py
""" Description _________________ Given a binary tree, find the subtree with minimum sum. Return the root of the subtree. Example _________________ Given a binary tree: 1 / \ -5 2 / \ / \ 0 2 -4 -5 return the node 1. Appraoch ___________ Same as 589 Complexity ___________ Same as 589 """ """ Definition of TreeNode: class TreeNode: def __init__(self, val): this.val = val this.left, this.right = None, None """ class Solution: # @param {TreeNode} root the root of binary tree # @return {TreeNode} the root of the minimum subtree minimum = None minimum_node = None def findSubtree(self, root): # Write your code here self.traverse_dc(root) return self.minimum_node def traverse_dc(self, root): if root == None: return 0 left_sum = self.traverse_dc(root.left) right_sum = self.traverse_dc(root.right) result_sum = left_sum + right_sum + root.val if self.minimum is None or result_sum < self.minimum: self.minimum_node = root self.minimum = result_sum return result_sum
[ "cdzengpeiyun@gmail.com" ]
cdzengpeiyun@gmail.com
567be9d7609520005a28e920c0fe51f55a8a4a0b
801f367bd19b8f2ab08669fd0a85aad7ace961ac
/dataset/walker_toy_v5/env/walker2d_vanilla.py
cfe02f70b7b9e5235092d44ffd07be1e35e0d731
[ "MIT" ]
permissive
Wendong-Huo/thesis-bodies
d91b694a6b1b6a911476573ed1ed27eb27fb000d
dceb8a36efd2cefc611f6749a52b56b9d3572f7a
refs/heads/main
2023-04-17T18:32:38.541537
2021-03-12T19:53:23
2021-03-12T19:53:23
623,471,326
1
0
null
2023-04-04T12:45:48
2023-04-04T12:45:47
null
UTF-8
Python
false
false
2,080
py
# This is not 100% vanilla, because still we need to replace the xml of new body. # import numpy as np import pybullet from pybullet_envs.gym_locomotion_envs import WalkerBaseBulletEnv, Walker2DBulletEnv from pybullet_envs.robot_locomotors import WalkerBase, Walker2D from pybullet_envs.scene_stadium import MultiplayerStadiumScene import pybullet_data from pathlib import Path class _Walker2D(Walker2D): def __init__(self, xml, param, render=False): self.param = param WalkerBase.__init__(self, xml, "torso", action_dim=6, obs_dim=22, power=0.40) def robot_specific_reset(self, bullet_client): super().robot_specific_reset(bullet_client) # power coefficient should be proportional to the min possible volume of that part. (Avoid pybullet fly-away bug.) self.jdict["thigh_joint"].power_coef = 65 self.jdict["leg_joint"].power_coef = 31 self.jdict["foot_joint"].power_coef = 18 self.jdict["thigh_left_joint"].power_coef = 65 self.jdict["leg_left_joint"].power_coef = 31 self.jdict["foot_left_joint"].power_coef = 18 # I deleted ignore_joints in mujoco xml files, so i need to place the robot at an appropriate initial place manually. robot_id = self.objects[0] # is the robot pybullet_id bullet_client.resetBasePositionAndOrientation( bodyUniqueId=robot_id, posObj=[0, 0, self.param["torso_center_height"] + 0.1], ornObj=[0, 0, 0, 1]) # Lift the robot higher above ground class Walker2DEnv(Walker2DBulletEnv): def __init__(self, xml, param, render=False, max_episode_steps=1000): self.robot = _Walker2D(xml=xml, param=param) self.max_episode_steps = max_episode_steps WalkerBaseBulletEnv.__init__(self, self.robot, render) def reset(self): self.step_num = 0 return super().reset() def step(self, a): self.step_num += 1 obs, r, done, info = super().step(a) if self.step_num > self.max_episode_steps: done = True return obs, r, done, info
[ "sliu1@uvm.edu" ]
sliu1@uvm.edu
b1706d90df624d71e0582505bf071f5ee0114199
1ef536d93c6616f9793e57a9ebc6b44248d50202
/check_managementtttt/check_managementtttt/models/report_check_cash_payment_receipt.py
68d6f94e2bc4753e85a4150adf9e14ffc5d38c3c
[]
no_license
mohamed4185/Express
157f21f8eba2b76042f4dbe09e4071e4411342ac
604aa39a68bfb41165549d605d40a27b9251d742
refs/heads/master
2022-04-12T17:04:05.407820
2020-03-09T14:02:17
2020-03-09T14:02:17
246,014,712
1
3
null
null
null
null
UTF-8
Python
false
false
839
py
# -*- coding: utf-8 -*-
import datetime
from odoo import tools
from odoo import models, fields, api
import logging

_logger = logging.getLogger(__name__)


class receipt_cash_check(models.AbstractModel):
    _name = 'report.check_managementtttt.receipt_check_cash_payment'

    @api.model
    def _get_report_values(self, docids, data=None):
        _logger.info('docids')
        _logger.info(docids)
        report_obj = self.env['ir.actions.report']
        report = report_obj._get_report_from_name('check_managementtttt.receipt_check_cash_payment')
        docargs = {
            'doc_ids': docids,
            'doc_model': 'normal.payments',
            'docs': self.env['normal.payments'].browse(docids),
            # 'payment_info': self._payment_info,
            # 'convert': self._convert,
        }
        return docargs
[ "mohamed.abdelrahman@businessborderlines.com" ]
mohamed.abdelrahman@businessborderlines.com
0d82031e876cc589dc3a35e3b6c95624c3a3dd6c
1207e317fa2837fa4cdb49150b9b2ca99dada2f3
/sdfs/newReporting/dashboard/migrations/0010_auto_20191217_1714.py
4123d095b20050153016a3357a6e69aa24865d6f
[]
no_license
ericniyon/all_in_one_repo
d14cb715776f5c23851d23930145fcb707aaca1d
9080315fbe9e8226a21bf35c49ff7662b4b095b4
refs/heads/master
2022-12-16T17:04:48.602534
2020-01-12T00:40:54
2020-01-12T00:40:54
233,317,032
0
0
null
2022-12-08T01:50:51
2020-01-12T00:30:03
Python
UTF-8
Python
false
false
511
py
# Generated by Django 2.2.2 on 2019-12-17 15:14

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('dashboard', '0009_auto_20191217_1434'),
    ]

    operations = [
        migrations.AlterField(
            model_name='umuryango',
            name='kpi',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='kpi_name', to='dashboard.KPI'),
        ),
    ]
[ "niyoeri6@gmail.com" ]
niyoeri6@gmail.com
ff59d6e66a856d739eb56ea53d9d8223bfe43bf7
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2127/60833/274342.py
5e551b298d0556ce07128eab0c947da0b1d6fb95
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122011
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
184
py
lines = []
while True:
    try:
        lines.append(input())
    except:
        break
dishu=int(lines.pop(0))
zhishu=int(lines.pop(0).replace(",",""))
mi=dishu**zhishu
print(mi%1337)
[ "1069583789@qq.com" ]
1069583789@qq.com
5202b14467cdb9567b35f0d1c4849e1aba84e860
caba6ab9fc9a4528e58930adc7e0eb44a096bec4
/product/migrations/0011_auto_20190801_1647.py
53ea336dd7405720fd222573449c2ab507674993
[]
no_license
prabaldeshar/productwebsite
9159edacfb9c8ac5b9fbe50bd700f2a35e0553c7
f36fd52ec1b239f65c941bd0b5af44f1f8530231
refs/heads/master
2020-06-29T19:10:38.478856
2019-08-17T04:06:22
2019-08-17T04:06:22
200,599,983
0
0
null
null
null
null
UTF-8
Python
false
false
363
py
# Generated by Django 2.2.2 on 2019-08-01 11:02

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('product', '0010_comment_rating'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='rating',
            new_name='polaratiy',
        ),
    ]
[ "prabaldeshar@gmail.com" ]
prabaldeshar@gmail.com
4f4340a42ad423c4e6730bf0066a175e8764d45a
b683c8f1942a1ab35062620c6013b1e223c09e92
/txt-Files/C22-Day-22/Question-92.txt
ec1f12d94f4baed689708e13ca358cf3f73ea0f2
[]
no_license
nihathalici/Break-The-Ice-With-Python
601e1c0f040e02fe64103c77795deb2a5d8ff00a
ef5b9dd961e8e0802eee171f2d54cdb92f2fdbe8
refs/heads/main
2023-07-18T01:13:27.277935
2021-08-27T08:19:44
2021-08-27T08:19:44
377,414,827
0
0
null
null
null
null
UTF-8
Python
false
false
367
txt
""" Question 92 Question Please write a program which accepts a string from console and print the characters that have even indexes. Example: If the following string is given as input to the program: H1e2l3l4o5w6o7r8l9d Then, the output of the program should be: Helloworld Hints Use list[::2] to iterate a list by step 2. """ s = input() s = s[::2] print(s)
[ "noreply@github.com" ]
nihathalici.noreply@github.com
ba8bb00ef9370173431788113e4ce73b679b9500
fa824aa50ea827d95eacadaa939680db3f36138e
/backend/accounts/urls.py
a735642f6a5c1ce21751aae014c2789e3dfc6b5e
[]
no_license
samlex20/djavue-iptv
606401c262c15793087e36de6d5d504571fb233f
10bcf515d858e1d32518cf8bee09d10f69df0811
refs/heads/master
2023-03-15T01:27:21.329673
2021-02-13T15:51:08
2021-02-13T15:51:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
648
py
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from .views import UserProfileListCreateView, UserProfileDetailView, UserFavouritesView, UserFavouritesListView

urlpatterns = [
    # gets all user profiles and create a new profile
    path("all-profiles", UserProfileListCreateView.as_view(), name="all-profiles"),
    # retrieves profile details of the currently logged in user
    path("profile", UserProfileDetailView.as_view(), name="profile"),
    path("favourites/", UserFavouritesView.as_view(), name="favourites"),
    path("favourites/ids/", UserFavouritesListView.as_view(), name="favourites-ids"),
]
[ "pawanpaudel93@gmail.com" ]
pawanpaudel93@gmail.com
02624e3c9a582e9c59c03d7a00158087380af871
f60b0c051d8ba5088dc4246679b870f577646bb0
/58 Wed, 21 Mar 2012 23:59:08.py
ea420daac6eb14e2a31ce05c3584ebc9cd0b4f6d
[]
no_license
joopeed/lp1
bbd11fe7749356828a16fc45703e010db5d35464
117bf769a048ec1dff53f779b26c9e7adec052ba
refs/heads/master
2021-01-02T22:50:08.600553
2014-04-03T21:15:40
2014-04-03T21:15:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
311
py
# Date: Wed, 21 Mar 2012 23:59:08 +0000
# Question 58
# JOAO PEDRO FERREIRA 21211940
meses = ["jan","fev","mar","abr","mai","jun","jul","ago","set","out","nov","dez"]
for i in range(12):
    n,m = map(float,raw_input().split())
    lucro = n-m
    if lucro <0:
        print meses[i],"%.1f" % lucro
[ "joopeeds@gmail.com" ]
joopeeds@gmail.com
3b13b3fa6ada044f550736bff33731b5005c6970
59c875269e0865ccdf0ed89f46f5e0cdf2b23d73
/btree_level_avgs.py
5f659ca67649fd9306add74e2c8be573131e2980
[]
no_license
mike-jolliffe/Learning
e964f12e33c2013f04510935477f3b00534fd206
308889e57e71c369aa8516fba8a2064f6a26abee
refs/heads/master
2020-12-06T17:20:21.225587
2018-07-31T20:22:23
2018-07-31T20:22:23
95,589,406
0
0
null
null
null
null
UTF-8
Python
false
false
1,722
py
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    # Create dictionary w/key as level, vals as vals
    def __init__(self):
        self.level_dict = {}

    def averageOfLevels(self, root):
        """
        Return the average of values at each level of binary tree
        :type root: TreeNode
        :rtype: List[float]
        """
        # Build dict for tracking level values
        self.build_leveldict(root, 1)
        print(self.level_dict)
        # Create list for holding level avgs
        level_avgs = []
        for level in self.level_dict.values():
            # Calculate the average for each level's values
            total = sum(level)
            level_avgs.append(total / float(len(level)))
        return level_avgs

    def build_leveldict(self, root, level):
        """
        Return dictionary of values by btree level
        :type root: TreeNode
        :rtype: Dict[int]
        """
        # if self.val is None, you are at a leaf node
        if root == None:
            return None
        else:
            self.level_dict.setdefault(level, []).append(root.val)
            return self.build_leveldict(root.left, level+1), self.build_leveldict(root.right, level+1)


if __name__ == '__main__':
    node1 = TreeNode(3)
    node1.right = TreeNode(20)
    node1.left = TreeNode(9)
    node1.right.left = TreeNode(15)
    node1.right.right = TreeNode(7)

    node2 = TreeNode(5)
    node2.left = TreeNode(2)
    node2.right = TreeNode(-3)

    sol = Solution()
    print(sol.averageOfLevels(node1))
    sol2 = Solution()
    print(sol2.averageOfLevels(node2))
[ "michael.a.jolliffe@gmail.com" ]
michael.a.jolliffe@gmail.com
5be6b8178a4dfc8e0e81bb0f031629338ed91631
6c4303314d727ff34f6dc65b8df5a44c133b6660
/bin/add_subproblem_info_to_annotations.py
986443f94fb774f41bf41c6503543547c01bd59d
[ "BSD-2-Clause" ]
permissive
mtholder/propinquity
266bfcaad71470a80d31d7eba53930c0a74cfb1a
22ec326dbdb9d06e665d0b853f8fdb67ddfc593b
refs/heads/master
2020-04-06T06:10:36.258830
2019-02-06T21:25:00
2019-02-06T21:25:00
52,227,503
2
0
null
2017-03-15T15:19:33
2016-02-21T20:54:21
Python
UTF-8
Python
false
false
955
py
#!/usr/bin/env python
from peyotl import read_as_json
import codecs
import json
import sys

try:
    subproblem_ids_file, in_annotations_file, out_annotations_file = sys.argv[1:]
except:
    sys.exit('Expecting 3 arguments:\n subproblem_ids_file, in_annotations_file, out_annotations_file')
import os
bin_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.append(os.path.join(bin_dir))
from document_outputs import stripped_nonempty_lines

subproblems = []
for s in stripped_nonempty_lines(subproblem_ids_file):
    assert s.endswith('.tre')
    subproblems.append(s[:-4])

jsonblob = read_as_json(in_annotations_file)
nodes_dict = jsonblob['nodes']
for ott_id in subproblems:
    d = nodes_dict.setdefault(ott_id, {})
    d['was_constrained'] = True
    d['was_uncontested'] = True

with codecs.open(out_annotations_file, 'w', encoding='utf-8') as out_stream:
    json.dump(jsonblob, out_stream, indent=2, sort_keys=True, separators=(',', ': '))
[ "mtholder@gmail.com" ]
mtholder@gmail.com
439ed1e473b23c914296be1445a3ce4c51ac8f1c
1e0355b293100873cedfcac789655a35180781db
/BOJ16493.py
5d1038b921fd6a9bf1a4226731345769f521cd07
[ "MIT" ]
permissive
INYEONGKIM/BOJ
47dbf6aeb7a0f1b15208866badedcd161c00ee49
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
refs/heads/master
2021-06-14T13:50:04.124334
2021-03-09T14:04:14
2021-03-09T14:04:14
168,840,573
2
0
null
null
null
null
UTF-8
Python
false
false
462
py
__import__('sys').setrecursionlimit(100000)
W,n=map(int,__import__('sys').stdin.readline().split())
weight=[0]*n;value=[0]*n
for i in range(n):
    w,v=map(int,__import__('sys').stdin.readline().split())
    weight[i]=w;value[i]=v
def knapsack(cap, n):
    if cap==0 or n==0:
        return 0
    if weight[n-1]>cap:
        return knapsack(cap, n-1)
    else:
        return max(value[n-1]+knapsack(cap-weight[n-1],n-1), knapsack(cap,n-1))
print(knapsack(W,n))
[ "noreply@github.com" ]
INYEONGKIM.noreply@github.com
241cb32f5ed9618aa63ae1e8fdae43169081b506
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
/PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/mmcv/mmcv/device/mlu/data_parallel.py
26f74dc6b3ca61208125f7dc60216fac588f26d3
[ "GPL-1.0-or-later", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference" ]
permissive
Ascend/ModelZoo-PyTorch
4c89414b9e2582cef9926d4670108a090c839d2d
92acc188d3a0f634de58463b6676e70df83ef808
refs/heads/master
2023-07-19T12:40:00.512853
2023-07-17T02:48:18
2023-07-17T02:48:18
483,502,469
23
6
Apache-2.0
2022-10-15T09:29:12
2022-04-20T04:11:18
Python
UTF-8
Python
false
false
4,805
py
# -*- coding: utf-8 -*- # BSD 3-Clause License # # Copyright (c) 2017 # All rights reserved. # Copyright 2022 Huawei Technologies Co., Ltd # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ========================================================================== # -*- coding: utf-8 -*- # BSD 3-Clause License # # Copyright (c) 2017 # All rights reserved. # Copyright 2022 Huawei Technologies Co., Ltd # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ========================================================================== # Copyright (c) OpenMMLab. All rights reserved. 
import torch from mmcv.parallel import MMDataParallel from .scatter_gather import scatter_kwargs class MLUDataParallel(MMDataParallel): """The MLUDataParallel module that supports DataContainer. MLUDataParallel is a class inherited from MMDataParall, which supports MLU training and inference only. The main differences with MMDataParallel: - It only supports single-card of MLU, and only use first card to run training and inference. - It uses direct host-to-device copy instead of stream-background scatter. .. warning:: MLUDataParallel only supports single MLU training, if you need to train with multiple MLUs, please use MLUDistributedDataParallel instead. If you have multiple MLUs, you can set the environment variable ``MLU_VISIBLE_DEVICES=0`` (or any other card number(s)) to specify the running device. Args: module (:class:`nn.Module`): Module to be encapsulated. dim (int): Dimension used to scatter the data. Defaults to 0. """ def __init__(self, *args, dim=0, **kwargs): super().__init__(*args, dim=dim, **kwargs) self.device_ids = [0] self.src_device_obj = torch.device('mlu:0') def scatter(self, inputs, kwargs, device_ids): return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
[ "wangjiangben@huawei.com" ]
wangjiangben@huawei.com
83ec28298fc8649b0e87555dd5b7e847615479bd
34f365117eb1d846fa922c24f3fc650188ce9746
/bin/bed2scoreToExpandBed.py
47a35ac1a5dacf9926cd21daed5f5ecf8f6a9b8a
[ "MIT" ]
permissive
PinarSiyah/NGStoolkit
53ac6d87a572c498414a246ae051785b40fbc80d
b360da965c763de88c9453c4fd3d3eb7a61c935d
refs/heads/master
2021-10-22T04:49:51.153970
2019-03-08T08:03:28
2019-03-08T08:03:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
776
py
#!/usr/bin/env python
import os
import sys
import argparse
from itertools import izip
import bed

parser = argparse.ArgumentParser(description='expanding bed files with scores')
parser.add_argument('-i', required= True, help='input')
parser.add_argument('-o', required= True, help='output')
args = parser.parse_args()

bedFile = args.i
out = open(args.o, 'w')

normalizationValue = 20000000
totalScore = 0
for bedline in bed.bed(bedFile).read():
    totalScore += float(min(0,bedline.score()))

for bedline in bed.bed(bedFile).read():
    expandedScore = max(0, normalizationValue * (float(bedline.score()) / totalScore))
    newScore = '.'
    fields = bedline.fields()
    fields[4] = newScore
    out.write(int(round(expandedScore)) * (bedline.fields2line(fields) + '\n'))
out.close()
[ "adebali@users.noreply.github.com" ]
adebali@users.noreply.github.com
1fa0fb672f7462963fe48cc954c50fe8db869ad6
8b576f16cfd9202f756611001e684657dde3e812
/01_hellopython/hn_15_whileStar.py
2b31aaec992654c1d0c429441fc4b6fbc68e37eb
[]
no_license
WenhaoChen0907/Python_Demo
26cc4d120aaa990c2d26fd518dfe6bcb622b1d77
136b8ced40623b0970c2a5bd47852425dcac3e86
refs/heads/master
2023-02-23T23:58:09.584321
2021-01-30T11:49:50
2021-01-30T11:49:50
334,374,597
0
0
null
null
null
null
UTF-8
Python
false
false
509
py
# Print a small star triangle
# Method 1
"""
row = 1
while row <= 5:
    print("*" * row)
    row += 1
"""

# Method 2: nested loops
row = 1
while row <= 5:
    # The number of stars printed on each row equals the current row number
    # Add an inner loop that handles each column (each star) in the current row
    col = 1
    while col <= row:
        # print() appends a newline by default; use end="" to suppress it
        print("*", end="")
        col += 1
    # Once a row is finished, print a newline
    print("")
    row += 1
[ "18738127274@163.com" ]
18738127274@163.com
9621ec41d9db1cd973fd5b6bcd3dceda3b721d04
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02722/s643080348.py
12af358f61bf63984b21ca359bf673d793d1dadf
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
def make_divisors(n):
    divisors = []
    for i in range(1, int(n**0.5)+1):
        if n % i == 0:
            divisors.append(i)
            if i != n // i:
                divisors.append(n//i)
    return divisors

n=int(input())
l1=make_divisors(n-1)
l2=make_divisors(n)[2:]
for i in l2:
    j=n/i
    while j%i==0:
        j/=i
    if j%i==1 and n/i not in l1:
        l1.append(n/i)
print(len(l1))
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
99d1b2fac8e0f299ee585ed4eebd0b11a45183b3
5df9cd8ed8b2ac02606e8f760bb098a85ac40dd6
/AQI/AQI/spiders/aqi_crawl.py
ecfcf2eed5e27c302ba774c16c202cb361e5750f
[]
no_license
ShaoLay/AQICrawl
270352d31a41d7b925a4c899642cda21ad12abea
5abf872fc0495d310514abc029dae799487299e5
refs/heads/master
2020-08-14T19:48:56.982460
2019-10-23T05:30:24
2019-10-23T05:30:24
215,224,771
0
0
null
null
null
null
UTF-8
Python
false
false
1,669
py
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from AQI.items import AqiItem


class AqiSpider(CrawlSpider):
    name = 'aqi_crawl'
    allowed_domains = ['aqistudy.cn']
    start_urls = ['https://www.aqistudy.cn/historydata/']

    rules = (
        Rule(LinkExtractor(allow='monthdata\.php')),
        Rule(LinkExtractor(allow='daydate\.php'), callback='parse_day', follow=False)
    )

    def parse_day(self, response):
        item = AqiItem()
        title = response.xpath('//*[@id="title"]/text()').extract_first()
        item['city_name'] = title[8:-11]

        # 1. Collect all tr elements
        tr_list = response.xpath('//tr')
        # 2. Drop the table header row
        tr_list.pop(0)

        for tr in tr_list:
            # Date
            item['date'] = tr.xpath('./td[1]/text()').extract_first()
            # AQI
            item['aqi'] = tr.xpath('./td[2]/text()').extract_first()
            # Air quality level
            item['level'] = tr.xpath('./td[3]//text()').extract_first()
            # PM2.5
            item['pm2_5'] = tr.xpath('./td[4]/text()').extract_first()
            # PM10
            item['pm10'] = tr.xpath('./td[5]/text()').extract_first()
            # Sulphur dioxide
            item['so_2'] = tr.xpath('./td[6]/text()').extract_first()
            # Carbon monoxide
            item['co'] = tr.xpath('./td[7]/text()').extract_first()
            # Nitrogen dioxide
            item['no_2'] = tr.xpath('./td[8]/text()').extract_first()
            # Ozone
            item['o_3'] = tr.xpath('./td[9]/text()').extract_first()
            # Hand the item to the engine --> pipeline
            yield item
[ "javs_shao@163.com" ]
javs_shao@163.com
53b6f8fc41440ab6256d1334cf60ed629b0fce31
9381d2a25adac95fab9fc4b8015aadd6c7bed6ca
/ITP1/2_D.py
c35c8e9af2a5a9165517debe9f1a651a661f4d22
[]
no_license
kazuma104/AOJ
e3ca14bd31167656bcd203d4f92a43fd4045434c
d91cc3313cbfa575928787677e5ed6be63aa8acf
refs/heads/master
2023-03-20T22:16:22.764351
2021-03-18T10:38:08
2021-03-18T10:38:08
262,047,590
0
0
null
null
null
null
UTF-8
Python
false
false
141
py
W,H,x,y,r = map(int, input().split())
if x-r < 0 or y-r < 0:
    print("No")
elif x+r > W or y+r > H:
    print("No")
else:
    print("Yes")
[ "kazuma@info.nara-k.ac.jp" ]
kazuma@info.nara-k.ac.jp
1dd0526d70e5ef6e0a57ded59b165fd2f43842fa
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2412/60715/301485.py
69220007da8305e6488cfa5d20eabb5c5fb9f53a
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122011
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
152
py
n,s=map(int,input().split())
if(n==2 and s==0):
    print(0)
elif(n==4 and s==3):
    print(-1)
else:
    print(1)
    print(5)
    print("1 4 2 3 5 ")
[ "1069583789@qq.com" ]
1069583789@qq.com
f2229bd1270be63072c3f36c5e4cc704b0eb8c3c
73ee941896043f9b3e2ab40028d24ddd202f695f
/external/chromium_org/chrome/tools/DEPS
86737fb82392847aa02de3cf27f22ebea02cf380
[ "BSD-3-Clause" ]
permissive
CyFI-Lab-Public/RetroScope
d441ea28b33aceeb9888c330a54b033cd7d48b05
276b5b03d63f49235db74f2c501057abb9e79d89
refs/heads/master
2022-04-08T23:11:44.482107
2016-09-22T20:15:43
2016-09-22T20:15:43
58,890,600
5
3
null
null
null
null
UTF-8
Python
false
false
185
include_rules = [
  "+breakpad",
  "+chrome/browser",
  "+chrome/third_party/hunspell/google",
  "+chrome/utility/local_discovery",
  "+content/browser",
  "+content/public/browser",
]
[ "ProjectRetroScope@gmail.com" ]
ProjectRetroScope@gmail.com
3836037d938bd2f19011c61605f118cd7ccee3ef
c045416b1f5c5bc4f0fbfa7d64a7cfa5afe1c3a4
/magentaT/magenta/pipelines/lead_sheet_pipelines.py
9fd8562fef751fb1354bf91f555c168f3e618ff7
[ "Apache-2.0" ]
permissive
Tiasa/CreativeComposer
e037055c232dc6b261b4fd7542cab3865e7ec1c4
b2ea3d1570b367068021726f1858587a0ad4d09f
refs/heads/master
2021-06-27T06:59:37.351749
2020-10-29T21:54:19
2020-10-29T21:54:19
143,206,847
0
0
null
null
null
null
UTF-8
Python
false
false
2,732
py
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data processing pipelines for lead sheets.""" from magenta.music import chord_symbols_lib from magenta.music import events_lib from magenta.music import lead_sheets_lib from magenta.pipelines import pipeline from magenta.pipelines import statistics from magenta.protobuf import music_pb2 import tensorflow as tf class LeadSheetExtractor(pipeline.Pipeline): """Extracts lead sheet fragments from a quantized NoteSequence.""" def __init__(self, min_bars=7, max_steps=512, min_unique_pitches=5, gap_bars=1.0, ignore_polyphonic_notes=False, filter_drums=True, require_chords=True, all_transpositions=True, name=None): super(LeadSheetExtractor, self).__init__( input_type=music_pb2.NoteSequence, output_type=lead_sheets_lib.LeadSheet, name=name) self._min_bars = min_bars self._max_steps = max_steps self._min_unique_pitches = min_unique_pitches self._gap_bars = gap_bars self._ignore_polyphonic_notes = ignore_polyphonic_notes self._filter_drums = filter_drums self._require_chords = require_chords self._all_transpositions = all_transpositions def transform(self, quantized_sequence): try: lead_sheets, stats = lead_sheets_lib.extract_lead_sheet_fragments( quantized_sequence, min_bars=self._min_bars, max_steps_truncate=self._max_steps, min_unique_pitches=self._min_unique_pitches, gap_bars=self._gap_bars, ignore_polyphonic_notes=self._ignore_polyphonic_notes, filter_drums=self._filter_drums, require_chords=self._require_chords, all_transpositions=self._all_transpositions) except events_lib.NonIntegerStepsPerBarError as detail: tf.logging.warning('Skipped sequence: %s', detail) lead_sheets = [] stats = [statistics.Counter('non_integer_steps_per_bar', 1)] except chord_symbols_lib.ChordSymbolError as detail: tf.logging.warning('Skipped sequence: %s', detail) lead_sheets = [] stats = [statistics.Counter('chord_symbol_exception', 1)] self._set_stats(stats) return lead_sheets
[ "tiasa.ap.10@gmail.com" ]
tiasa.ap.10@gmail.com
ad260b2ad0e4ba83bb002b471b76f420bf3405ba
4be5c172c84e04c35677f5a327ab0ba592849676
/python/design/queue_using_stacks/queue_using_stacks2.py
1253bf3342a1290b6fd12967079eda6c6b4ac9b5
[]
no_license
niranjan-nagaraju/Development
3a16b547b030182867b7a44ac96a878c14058016
d193ae12863971ac48a5ec9c0b35bfdf53b473b5
refs/heads/master
2023-04-06T20:42:57.882882
2023-03-31T18:38:40
2023-03-31T18:38:40
889,620
9
2
null
2019-05-27T17:00:29
2010-09-05T15:58:46
Python
UTF-8
Python
false
false
3,921
py
''' Implement a queue using stacks Approach: An enqueue-efficient implementation using two stacks Enqueue is O(1), dequeue is O(n) Enqueue(x): Push to S1 Dequeue(): if S2 has elements (they are already in queue-order) return S2.pop() Otherwise (S2 is empty) Push all items from S1 to S2, S2 now has queue order return S2.pop() Test runs: Enqueue(1): S1: 1 S2: Enqueue(2): S1: 2 1 S2: Enqueue(3): S1: 3 2 1 S2: Dequeue(): 1 S1: 3 2 1 S2: <-- S1: S2: 1 2 3 <-- x = S2.pop() == 1 S1: S2: 2 3 Enqueue(4) S1: 4 S2: 2 3 Enqueue(5) S1: 5 4 S2: 2 3 Dequeue() : 2 S1: 5 4 S2: 2 3 <-- S2.pop() == 2 S1: 5 4 S2: 3 Dequeue(): 3 S1: 5 4 S2: 3 <-- S2.pop() == 3 S1: 5 4 S2: Dequeue(): 4 S1: 5 4 S2: <-- S1: S2: 4 5 Dequeue(): 5 ''' from data_structures.sll.stack import Stack class Queue(object): def __init__(self): self.s1 = Stack() self.s2 = Stack() # There can be newly pushed elements onto S1 # while S2 has earlier elements in queue-order # Total items in queue would be the sum total in both the stacks def __len__(self): return self.s1.size + self.s2.size #NOTE: front() cannot be implemented efficiently # if S2 has elements in them, front of the queue would just be S2.top() # as S2 order is the queue-order # otherwise, we have items in S1 which are in the reverse-queue-order, # The bottom of the stack would be the front in this case def front(self): if self.s2: return self.s2.top() else: for x in self.s1: pass return x #NOTE: back() cannot be implemented efficiently # If S1 is empty, tail(S2) (which is in queue order) is the back of the queue # If S2 has elements in them, # back of the queue is in S1.top() as that would be where the last item was enqueued def back(self): if not self.s1: for x in self.s2: pass return x else: return self.s1.top() def length(self): return self.s1.size + self.s2.size # Enqueue is efficient, Just push to S1 and return def enqueue(self, x): self.s1.push(x) # if S2 has items in it, we have atleast one item in actual queue-order, so we can just return S2.pop() # Otherwise, move everything from S1 into S2, essentially ordering all current items in queue-order, and then return S2.pop() def dequeue(self): # Helper function to move all items from stack a to stack b def move(a, b): while a: b.push(a.pop()) # s2 is empty, move everything from s1 to s2 so everything in s1 now # is stored into s2 in queue-order if not self.s2: move(self.s1, self.s2) # S2 already has elements in queue-order # or everything in S1 was just moved into S2 # Either ways, Item to dequeue will be in S2 top return self.s2.pop() # NOTE: Queue elements is S2: top->bottom followed by S1: bottom->top def __str__(self): s1_contents = [] for x in self.s1: s1_contents.insert(0, str(x)) s2_contents = [] for x in self.s2: s2_contents.append(str(x)) return '[%d]: ' %(self.__len__()) + ' '.join(s2_contents) + ' ' + ' '.join(s1_contents) def __repr__(self): return "{%r , %r}" %(self.s1, self.s2) # Basic testcases if __name__ == "__main__": queue = Queue() for i in range(1, 4): queue.enqueue(i) assert(queue.front() == 1) assert(queue.back() == i) assert(queue.length() == 3) assert(queue.dequeue() == 1) queue.enqueue(4) queue.enqueue(5) assert(str(queue) == "[4]: 2 3 4 5") assert("%r" %(queue) == "{[2]: 5 4 , [2]: 2 3}") for i in range(2, 6): assert(i == queue.dequeue()) assert(len(queue) == (5-i)) q2 = Queue() q2.enqueue('+') q2.enqueue('a') assert('+' == q2.dequeue()) q2.enqueue('b') assert('a' == q2.dequeue()) assert('b' == q2.dequeue()) q3 = Queue() q3.enqueue(('a', 1)) q3.enqueue(('b', 2)) assert(q3.dequeue() == ('a',1)) 
q3.enqueue(('c', 3)) print 'Queue testcases passed'
[ "vinithepooh@gmail.com" ]
vinithepooh@gmail.com
9ca63456aabc1b850ca2345a6625a7699eae6610
71d4fafdf7261a7da96404f294feed13f6c771a0
/mainwebsiteenv/lib/python2.7/site-packages/psycopg2/tests/test_bug_gc.py
fcad0f599090b4ba1dabca0b3a0599da294da7c6
[]
no_license
avravikiran/mainwebsite
53f80108caf6fb536ba598967d417395aa2d9604
65bb5e85618aed89bfc1ee2719bd86d0ba0c8acd
refs/heads/master
2021-09-17T02:26:09.689217
2018-06-26T16:09:57
2018-06-26T16:09:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,773
py
#!/usr/bin/env python

# bug_gc.py - test for refcounting/GC bug
#
# Copyright (C) 2010-2011 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.

import psycopg2
import psycopg2.extensions
import unittest
import gc

from testutils import ConnectingTestCase, skip_if_no_uuid


class StolenReferenceTestCase(ConnectingTestCase):
    @skip_if_no_uuid
    def test_stolen_reference_bug(self):
        def fish(val, cur):
            gc.collect()
            return 42

        UUID = psycopg2.extensions.new_type((2950,), "UUID", fish)
        psycopg2.extensions.register_type(UUID, self.conn)
        curs = self.conn.cursor()
        curs.execute("select 'b5219e01-19ab-4994-b71e-149225dc51e4'::uuid")
        curs.fetchone()


def test_suite():
    return unittest.TestLoader().loadTestsFromName(__name__)

if __name__ == "__main__":
    unittest.main()
[ "me15btech11039@iith.ac.in.com" ]
me15btech11039@iith.ac.in.com
0c4c6b2d2404135d8229d93244b95ea8ac0bf89f
870599f68f29701f6dc0c9911183ee1127a0e8f1
/attend/util.py
de85823500fe59f84b6604edb5067152ca94ff88
[]
no_license
rubenvereecken/attend
dc8168df8fde45421ed1f02a8dc519fc07b836ee
d5fedeec589956ba86c11a93709ad2c7d4f01fcf
refs/heads/master
2021-03-27T20:50:00.543071
2017-09-29T09:42:16
2017-09-29T09:42:16
95,661,311
0
0
null
null
null
null
UTF-8
Python
false
false
2,130
py
import inspect
import numpy as np
import attend
import time
from functools import partial

# parse_timestamp = partial(time.strptime, format=attend.TIMESTAMP_FORMAT)
parse_timestamp = lambda x: time.strptime(x, attend.TIMESTAMP_FORMAT)


def pick(dic, l):
    '''
    Pick from dictionary all the keys present in a list of keywords
    After the spirit of Lodash pick
    '''
    return { k: v for k, v in dic.items() if k in l }


def params_for(fun):
    return list(inspect.signature(fun).parameters)


def call_with(fun, d):
    return fun(**pick(d, params_for(fun)))


def init_with(cls, d, *args, **kwargs):
    return cls(*args, **pick(d, params_for(cls.__init__)), **kwargs)


def notify(title, text='', duration=5000):
    import subprocess as s
    s.call(['notify-send', title, text])


import contextlib

@contextlib.contextmanager
def noop():
    yield None


def pad(x, pad_width, axis=0):
    padding = [[0, 0] for _ in range(x.ndim)]
    padding[axis][1] = pad_width
    return np.pad(x, padding, mode='constant')


def pad_and_stack(arrays, length=None, axis=0):
    if length is None:
        length = max(v.shape[0] for v in arrays)

    def _padding(v):
        padding = np.zeros([len(v.shape), 2], dtype=int)
        assert length >= v.shape[axis]
        padding[axis][1] = length - v.shape[axis]
        return padding

    return np.stack(np.pad(v, _padding(v), 'constant') for v in arrays)


def unstack_and_unpad(arrays, lengths):
    return [arr[:lengths[i]] for i, arr in enumerate(arrays)]


def dict_to_args(d):
    arg_list = []
    for k, v in d.items():
        if v is None:
            # Don't pass None values, just 'none' values
            continue
            # s = 'none'
        elif isinstance(v, bool):
            s = str(int(v))  # Boolean is represented 0 or 1
        elif isinstance(v, list):
            # s = ' '.join(el for el in v)
            s = ' '.join('--{}={}'.format(k, el) for el in v)
            arg_list.append(s)
            continue
        else:
            s = str(v)

        s = '--{}={}'.format(k, s)
        arg_list.append(s)
    return ' '.join(arg_list)
[ "rubenvereecken@gmail.com" ]
rubenvereecken@gmail.com
12ff517076961d3280a395ff8024086c0c6154b7
56fd2d92b8327cfb7d8f95b89c52e1700343b726
/odin/utilities/mixins/strategy_mixins/features_mixins.py
78f57281f4bf3ad86d7fe2d11595c0fc2708bdf0
[ "MIT" ]
permissive
stjordanis/Odin
fecb640ccf4f2e6eb139389d25cbe37da334cdb6
e2e9d638c68947d24f1260d35a3527dd84c2523f
refs/heads/master
2020-04-15T09:13:17.850126
2017-02-09T00:25:55
2017-02-09T00:25:55
null
0
0
null
null
null
null
UTF-8
Python
false
false
538
py
import pandas as pd

from ....strategy import AbstractStrategy


class DefaultFeaturesMixin(AbstractStrategy):
    """Default Features Mixin Class

    This just creates an empty dataframe containing as the index the symbols
    available on each day of trading and no columns.
    """
    def generate_features(self):
        """Implementation of abstract base class method."""
        symbols = self.portfolio.data_handler.bars.ix[
            "adj_price_close", -1, :
        ].dropna().index
        return pd.DataFrame(index=symbols)
[ "jamesbrofos@gmail.com" ]
jamesbrofos@gmail.com
4bdb997b1a9edc8482e96a303102a09e4fd12614
7d5b51afb991787f3b373c3b931a06a5bbd1deaa
/Dynamic Programming/2011.py
f0bbda3160a48b68c40316799b97402cad79f969
[]
no_license
inkyu0103/BOJ
cb7549855702ba7a744d9121d12f5d327469acd6
0d889b9cc36a95e8e802d22a7b0ec87dbf80b193
refs/heads/master
2023-09-02T05:32:50.704738
2023-08-24T08:07:12
2023-08-24T08:07:12
247,743,620
0
0
null
null
null
null
UTF-8
Python
false
false
364
py
# 2011 암호코드
import sys

input = sys.stdin.readline

arr = [0] + list(map(int, list(input().rstrip())))
N = len(arr)
dp = [0] * N
dp[0] = 1
mod = 1000000

for i in range(1, N):
    if arr[i]:
        dp[i] = (dp[i - 1] + dp[i]) % mod
    case = arr[i - 1] * 10 + arr[i]
    if 10 <= case <= 26:
        dp[i] = (dp[i - 2] + dp[i]) % mod

print(dp[N - 1])
[ "inkyu0103@gmail.com" ]
inkyu0103@gmail.com
c31eab571b99a610289be0215b55c0b96a403c9d
92429015d9a1f1cea9b9bf2c9f1a8a7a07586af5
/attack_methods/feat_test_interface.py
9cb5c53ae4fc13ba93a936aded8839b658eb5c61
[]
no_license
arthur-qiu/adv_vis
46a953ce6c3d562137c8e566bc9b523e25bc5bbd
ba46c00cf38ca5186d7db84844892036ed714eaf
refs/heads/master
2021-01-03T23:00:45.065108
2020-04-05T03:47:01
2020-04-05T03:47:01
240,272,320
0
0
null
null
null
null
UTF-8
Python
false
false
5,488
py
import torch import torch.nn as nn import torch.nn.functional as F from attack_methods import pgd, feature_targets class Restart_PGD(nn.Module): def __init__(self, epsilon, num_steps, step_size, num_restart = 10, data_min = -1.0, data_max = 1.0): super().__init__() self.epsilon = epsilon self.num_steps = num_steps self.step_size = step_size self.num_restart = num_restart self.data_min = data_min self.data_max = data_max self.untargeted_pgd = pgd.PGD(epsilon=epsilon, num_steps=num_steps, step_size=step_size, data_min=data_min, data_max=data_max).cuda() def forward(self, model, bx, by): final_results = torch.zeros_like(by).byte() + 1 for re in range(self.num_restart): adv_bx = self.untargeted_pgd(model, bx, by) logits = model(adv_bx) if len(logits) == 2: logits = logits[1] pred = logits.data.max(1)[1] correct = pred.eq(by.data) final_results &= correct if re == 0: single_correct = final_results.sum().item() final_correct = final_results.sum().item() return final_correct, single_correct class Mult_Targets(nn.Module): def __init__(self, epsilon, num_steps, step_size, num_classes = 10, data_min = -1.0, data_max = 1.0): super().__init__() self.epsilon = epsilon self.num_steps = num_steps self.step_size = step_size self.num_classes = num_classes self.data_min = data_min self.data_max = data_max self.targeted_pgd = pgd.PGD_Margin_Target(epsilon=epsilon, num_steps=num_steps, step_size=step_size, data_min=data_min, data_max=data_max).cuda() def forward(self, model, bx, by): final_results = torch.zeros_like(by).byte() + 1 for re in range(1, self.num_classes): adv_bx = self.targeted_pgd(model, bx, by, (by+re)%self.num_classes) logits = model(adv_bx) if len(logits) == 2: logits = logits[1] pred = logits.data.max(1)[1] correct = pred.eq(by.data) final_results &= correct if re == 1: single_correct = final_results.sum().item() final_correct = final_results.sum().item() return final_correct, single_correct class Feature_Attack(nn.Module): def __init__(self, epsilon, num_steps, step_size, num_classes = 10, data_min = -1.0, data_max = 1.0): super().__init__() self.epsilon = epsilon self.num_steps = num_steps self.step_size = step_size self.num_classes = num_classes self.data_min = data_min self.data_max = data_max self.feature_attack = feature_targets.Feature_Targets(epsilon=epsilon, num_steps=num_steps, step_size=step_size, data_min=data_min, data_max=data_max).cuda() def forward(self, model, bx, by, target_bx): final_results = torch.zeros_like(by).byte() + 1 for re in range(target_bx.shape[0]): adv_bx = self.feature_attack(model, bx, by, torch.cat((target_bx[re:], target_bx[:re]),0)) logits = model(adv_bx) if len(logits) == 2: logits = logits[1] pred = logits.data.max(1)[1] correct = pred.eq(by.data) final_results &= correct if re == 0: single_correct = final_results.sum().item() final_correct = final_results.sum().item() return final_correct, single_correct class Test_All(nn.Module): def __init__(self, epsilon, num_steps, step_size, data_min = -1.0, data_max = 1.0): super().__init__() self.epsilon = epsilon self.num_steps = num_steps self.step_size = step_size self.data_min = data_min self.data_max = data_max self.untargeted_pgd = pgd.PGD(epsilon=epsilon, num_steps=num_steps, step_size=step_size, data_min=data_min, data_max=data_max).cuda() self.targeted_pgd = pgd.PGD(epsilon=epsilon, num_steps=num_steps, step_size=step_size, data_min=data_min, data_max=data_max).cuda() self.feature_attack = feature_targets.Feature_Targets(epsilon=epsilon, num_steps=num_steps, step_size=step_size, data_min=data_min, 
data_max=data_max).cuda() def forward(self, model, bx, by, target_bx): """ :param model: the classifier's forward method :param bx: batch of images :param by: true labels :return: perturbed batch of images """ adv_bx = bx.detach().clone() # TODO # if self.random_start: # adv_bx += torch.zeros_like(adv_bx).uniform_(-self.epsilon, self.epsilon) # adv_bx = adv_bx.clamp(self.data_min, self.data_max) # # for i in range(self.num_steps): # adv_bx.requires_grad_() # with torch.enable_grad(): # logits = model(adv_bx) # if len(logits) == 2: # logits = logits[1] # loss = F.cross_entropy(logits, by, reduction='sum') # grad = torch.autograd.grad(loss, adv_bx, only_inputs=True)[0] # # if self.grad_sign: # adv_bx = adv_bx.detach() + self.step_size * torch.sign(grad.detach()) # # adv_bx = torch.min(torch.max(adv_bx, bx - self.epsilon), bx + self.epsilon).clamp(self.data_min, self.data_max) return adv_bx
[ "Arthur" ]
Arthur
75a04e3f175dcf1ab31fe1ed4681128d03f5a5e7
b56eaf7a603cbb850be11dbbed2c33b954dedbcb
/ctools/pysc2/bin/battle_net_maps.py
031964f9d15ee9e1b9c03452b2cab3efdf37a247
[ "Apache-2.0" ]
permissive
LFhase/DI-star
2887d9c5dd8bfaa629e0171504b05ac70fdc356f
09d507c412235a2f0cf9c0b3485ec9ed15fb6421
refs/heads/main
2023-06-20T20:05:01.378611
2021-07-09T16:26:18
2021-07-09T16:26:18
384,499,311
1
0
Apache-2.0
2021-07-09T16:50:29
2021-07-09T16:50:28
null
UTF-8
Python
false
false
1,250
py
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print the list of available maps according to the game."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import app

from ctools.pysc2 import run_configs


def main(unused_argv):
  with run_configs.get().start(want_rgb=False) as controller:
    available_maps = controller.available_maps()

  print("\n")
  print("Local map paths:")
  for m in sorted(available_maps.local_map_paths):
    print(" ", m)
  print()
  print("Battle.net maps:")
  for m in sorted(available_maps.battlenet_map_names):
    print(" ", m)


if __name__ == "__main__":
  app.run(main)
[ "opendilab@gmail.com" ]
opendilab@gmail.com
6736bae8e9a339384d9187263901c8a806725b7c
c76d46dcde554eaed0f79cafde00cce973b481e3
/user_dashboard/templatetags/dash_extra.py
47f23524cacfe2cb23d3a65a74d184822aba44c8
[]
no_license
dharati-code/microapi
26e2e9adb935d68a9ca663b69f0dd9c70e017c0b
8c4848856bd680e7c8493b80fa5938b317d9e474
refs/heads/master
2023-02-04T13:46:25.235108
2020-12-22T14:17:31
2020-12-22T14:17:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
459
py
from django import template

from user_dashboard.models import Configs

register = template.Library()


@register.filter
def get_item(dictionary, key):
    return dictionary.get(key)


@register.filter
def get_configs(confobj, pie):
    pe = confobj.get(konfigd_api=pie)
    return pe


@register.filter
def get_status(onj, attr):
    return getattr(onj, attr)


@register.filter
def addstr(arg1, arg2):
    """concatenate arg1 & arg2"""
    return str(arg1) + str(arg2)
[ "phemmylintry@gmail.com" ]
phemmylintry@gmail.com
6350f193e0270a17e76912867183fe6569906bb1
fbb67b247799b724dd60415026828ec247576f9f
/demo/assignments/word_freq.py
759be0e803b112ce19d0545ee90a83e5bb4c09c4
[]
no_license
srikanthpragada/PYTHON_31_MAY_2021
845afca884cac9a30d3e87068f0dab275c056974
55a4a168521bc93d331fd54b85ab4faa7e4f1f02
refs/heads/master
2023-06-13T03:22:23.946454
2021-07-10T14:17:42
2021-07-10T14:17:42
373,483,617
0
1
null
null
null
null
UTF-8
Python
false
false
130
py
st = "how do you do how did you do last year" words = st.split(" ") for w in set(words): print(f"{w:10} {words.count(w)}")
[ "srikanthpragada@gmail.com" ]
srikanthpragada@gmail.com
e2547859689ea3739bc06cb400f57095ca59c69d
89bcfc45d70a3ca3f0f1878bebd71aa76d9dc5e2
/simpleSpider/xpath_demo/neteasy163.py
05e4e31d9f71d741cf48616cd3548eb9a4f52c92
[]
no_license
lichao20000/python_spider
dfa95311ab375804e0de4a31ad1e4cb29b60c45b
81f3377ad6df57ca877463192387933c99d4aff0
refs/heads/master
2022-02-16T20:59:40.711810
2019-09-10T03:13:07
2019-09-10T03:13:07
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,857
py
# coding: utf-8
from lxml import etree
import requests
import pymysql
import uuid


class NeteasyRank(object):
    def __init__(self):
        self.url = 'http://news.163.com/rank/'
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'
                                      ' Chrome/71.0.3578.98 Safari/537.36'}
        self.news = dict()
        self.news_list = []
        db_params = {
            'host': '127.0.0.1',
            'user': 'root',
            'password': 'QAZ123qaz#',
            'database': 'jianshu',
            'charset': 'utf8'
        }
        self.conn = pymysql.connect(**db_params)
        self.cursor = self.conn.cursor()
        self._sql = None

    @property
    def sql(self):
        if not self._sql:
            self._sql = """
            insert into news163(id,title_id,url,type,title,click_num) values (%s,%s,%s,%s,%s,%s)
            """
            return self._sql
        return self._sql

    def parse(self):
        response = requests.get(self.url, headers=self.headers)
        # response.encoding = 'utf-8'
        content = response.text
        html = etree.HTML(content)
        lis = html.xpath("//div[@id='whole']/following-sibling::div[1]/div[@class='tabBox']/div[@class='title-tab']/ul/li")
        tab_contents = html.xpath("//div[@id='whole']/following-sibling::div[1]/div[@class='tabBox']/div[contains(@class,'tabContents')]")
        for i, li in enumerate(lis):
            tab_text = li.xpath('./text()')[0]
            trs = tab_contents[i].xpath('.//tr')[1:]
            for tr in trs:
                tds = tr.xpath("./td")
                self.news['type'] = tab_text
                self.news['id'] = str(uuid.uuid1())
                self.news['title_id'] = tds[0].xpath('./span/text()')[0]
                self.news['url'] = tds[0].xpath('./a/@href')[0]
                self.news['title'] = tds[0].xpath('./a/text()')[0]
                self.news['click_num'] = tds[1].xpath('./text()')[0]
                ###################
                # id,url,type,title,click_num) values (%s,%s,%s,%s,%s)
                #
                # 需要入库时开启下面代码
                # self.cursor.execute(self.sql, (self.news['id'], self.news['title_id'], self.news['url'],
                #                                self.news['type'], self.news['title'],
                #                                self.news['click_num'])
                #                     )
                #
                # self.conn.commit()
                self.news_list.append(self.news)
                self.news = {}
        return self.news_list


if __name__ == '__main__':
    net163 = NeteasyRank()
    list_news = net163.parse()
    # print(net163.sql)
    for new in list_news:
        print(new)
[ "64174469@qq.com" ]
64174469@qq.com
645f53ecc1c87df1360824cfed175d4a363d1601
985b64cb54d3d36fc6f9c0926485199d08bc35c0
/sortosm.py
f71c73d0a50c3eaf71f966a8af555fc36e37ec03
[ "BSD-2-Clause" ]
permissive
TimSC/osm-to-gps-map
05bef69e8755be4e266dc4e9e7582e0c1fd94d36
2bad808be37404fbef7a1d2fa0ce2db27ffdebe8
refs/heads/master
2022-08-26T04:24:56.270120
2022-08-17T02:30:44
2022-08-17T02:30:44
10,490,170
2
0
null
null
null
null
UTF-8
Python
false
false
1,026
py
import xml.etree.ElementTree as ET
import bz2, sys

def SortOsm(inFina, outFina):
    fi = bz2.BZ2File(inFina)
    root = ET.fromstring(fi.read())
    fi.close()

    objDict = {}
    for obj in root:
        if 'id' in obj.attrib:
            i = int(obj.attrib['id'])
            #print obj.tag, i
            if obj.tag not in objDict:
                objDict[obj.tag] = {}
            objDict[obj.tag][i] = obj

    #for ty in objDict:
    #    print ty, len(objDict[ty]), objDict[ty].keys()

    outRoot = ET.Element("osm")
    outTree = ET.ElementTree(outRoot)
    outRoot.attrib = root.attrib

    if 'node' in objDict:
        keys = objDict['node'].keys()
        keys.sort()
        for i in keys:
            outRoot.append(objDict['node'][i])

    if 'way' in objDict:
        keys = objDict['way'].keys()
        keys.sort()
        for i in keys:
            outRoot.append(objDict['way'][i])

    if 'relation' in objDict:
        keys = objDict['relation'].keys()
        keys.sort()
        for i in keys:
            outRoot.append(objDict['relation'][i])

    fiOut = bz2.BZ2File(outFina,"w")
    outTree.write(fiOut,"utf-8")

if __name__=="__main__":
    SortOsm(sys.argv[1], sys.argv[2])
[ "tim2009@sheerman-chase.org.uk" ]
tim2009@sheerman-chase.org.uk
82e5e9a09fa717768d05f8393a81f380a0bf8834
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
/lib/python3.6/site-packages/tensorflow/python/training/input.py
2304925ca8931d85101039fd238a00588a2a0731
[]
no_license
cronos91/ML-exercise
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
refs/heads/master
2021-05-09T22:02:55.131977
2017-12-14T13:50:44
2017-12-14T13:50:44
118,736,043
0
0
null
2018-01-24T08:30:23
2018-01-24T08:30:22
null
UTF-8
Python
false
false
130
py
version https://git-lfs.github.com/spec/v1
oid sha256:5456569a54cde4607762afc196ac29a5d763a71eab509d9896a77e6bb8d33539
size 60349
[ "seokinj@jangseog-in-ui-MacBook-Pro.local" ]
seokinj@jangseog-in-ui-MacBook-Pro.local
7a4f58d873d4ef63c94868b482e144f66ed6c748
fb28a622b21f5127c83c7fe6193b6312294b2dbe
/apps/car/views.py
34819052aab28c8706a817a794c9556158666698
[]
no_license
laoyouqing/video
0cd608b1f9d3a94da4a537867fafce6f7dcd1297
9aa7ecf17f0145437408a8c979f819bb61617294
refs/heads/master
2022-12-19T11:02:01.343892
2019-08-21T04:00:13
2019-08-21T04:00:13
203,500,521
0
0
null
2022-12-08T06:03:17
2019-08-21T03:40:13
Python
UTF-8
Python
false
false
2,005
py
from django.shortcuts import render

# Create your views here.
from django_redis import get_redis_connection
from rest_framework import viewsets, status
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView

from car.serializers import CartSerializer, CartVideoSerializer
from videos.models import Video


class CarView(GenericAPIView):
    '''购物车'''
    permission_classes = [IsAuthenticated]
    serializer_class = CartSerializer
    queryset = Video.objects.all()

    def post(self, request):
        """
        添加购物车
        """
        ser = CartSerializer(data=request.data)
        ser.is_valid(raise_exception=True)
        video_id = ser.validated_data.get('video_id')

        # 用户已登录,在redis中保存
        redis_conn = get_redis_connection('cart')
        pl = redis_conn.pipeline()
        user=request.user
        bvideo_id = str(video_id).encode('utf-8')
        if bvideo_id in redis_conn.lrange('cart_%s' % user.id, 0, -1):
            return Response({'msg':'已加入购物车'},status=status.HTTP_400_BAD_REQUEST)
        # 记录购物车商品数量
        pl.lpush('cart_%s' % user.id, video_id)
        pl.execute()
        return Response(ser.data, status=status.HTTP_201_CREATED)

    def get(self, request):
        redis_conn = get_redis_connection('cart')
        user = request.user
        # cart_ids=redis_conn.lrange('cart_%s' % user.id,0,-1)
        cart_ids=redis_conn.lrange('cart_%s' % user.id,0,-1)
        videos = Video.objects.filter(id__in=cart_ids)
        ser=CartVideoSerializer(instance=videos,many=True)
        return Response(ser.data)

    def delete(self,request,pk):
        redis_conn = get_redis_connection('cart')
        user = request.user
        a=redis_conn.lrem('cart_%s' % user.id, 0, pk)
        return Response({'msg':'删除成功'},status=status.HTTP_200_OK)
[ "lingo.lin@foxmail.com" ]
lingo.lin@foxmail.com
a63520698af13ffbf16e3bdb315e8a135cc8d278
40804cfe754f4a0c99055e81966367f5a8641fac
/rotate_list.py
38847325d29fa93ffdf8778d1c9c7420ce5b297b
[]
no_license
raochuan/LintCodeInPython
a3731b9a14c623278c8170fedd7be85fd7acfbfb
e57869437287bcc7619411f9d3d965a83e2bfacb
refs/heads/master
2021-01-17T23:57:47.709879
2016-08-02T07:21:36
2016-08-02T07:21:36
64,736,138
1
0
null
2016-08-02T07:48:56
2016-08-02T07:48:54
Python
UTF-8
Python
false
false
815
py
# -*- coding: utf-8 -*-

class Solution:
    # @param head: the list
    # @param k: rotate to the right k places
    # @return: the list after rotation
    def rotateRight(self, head, k):
        # write your code here
        if not head:
            return head

        list_len = 0
        tail = None
        node = head
        while node:  # 找到链表的尾部节点并统计链表长度
            list_len += 1
            tail = node
            node = node.next

        shift = k % list_len
        if shift == 0:
            return head

        new_tail = head
        for i in xrange(list_len - shift-1):
            new_tail = new_tail.next
        new_head = new_tail.next  # 找到新的head
        tail.next = head  # tail指向原来的head
        new_tail.next = None

        return new_head
[ "linying_43151@163.com" ]
linying_43151@163.com
3465049bccb803e10ef5fdde087a7d9139c12762
cccfb7be281ca89f8682c144eac0d5d5559b2deb
/tools/perf/page_sets/desktop_ui/download_shelf_story.py
93cc95aaf9b6b55b32f7984b2d41cee2d80f646c
[ "BSD-3-Clause", "LGPL-2.0-or-later", "MPL-1.1", "APSL-2.0", "MIT", "Zlib", "GPL-2.0-only", "Apache-2.0", "LGPL-2.0-only", "LicenseRef-scancode-unknown", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.1-only" ]
permissive
SREERAGI18/chromium
172b23d07568a4e3873983bf49b37adc92453dd0
fd8a8914ca0183f0add65ae55f04e287543c7d4a
refs/heads/master
2023-08-27T17:45:48.928019
2021-11-11T22:24:28
2021-11-11T22:24:28
428,659,250
1
0
BSD-3-Clause
2021-11-16T13:08:14
2021-11-16T13:08:14
null
UTF-8
Python
false
false
5,621
py
# Copyright 2021 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging from page_sets.desktop_ui.browser_utils import Resize from page_sets.desktop_ui.js_utils import MEASURE_JS_MEMORY from page_sets.desktop_ui.multitab_story import MultiTabStory from page_sets.desktop_ui.ui_devtools_utils import ClickOn, IsMac, PressKey from page_sets.desktop_ui.url_list import TOP_URL from page_sets.desktop_ui.webui_utils import Inspect from telemetry.internal.browser.ui_devtools import MOUSE_EVENT_BUTTON_RIGHT DOWNLOAD_SHELF_BENCHMARK_UMA = [ 'Download.Shelf.Views.FirstDownloadPaintTime', 'Download.Shelf.Views.NotFirstDownloadPaintTime', 'Download.Shelf.Views.ShowContextMenuTime', 'Download.Shelf.WebUI.FirstDownloadPaintTime', 'Download.Shelf.WebUI.LoadCompletedTime', 'Download.Shelf.WebUI.LoadDocumentTime', 'Download.Shelf.WebUI.NotFirstDownloadPaintTime', 'Download.Shelf.WebUI.ShowContextMenuTime', ] DOWNLOAD_URL = 'https://dl.google.com/chrome/mac/stable/GGRO/googlechrome.dmg' WEBUI_DOWNLOAD_SHELF_URL = 'chrome://download-shelf.top-chrome/' class DownloadShelfStory(MultiTabStory): """Base class for stories to download files""" def RunNavigateSteps(self, action_runner): url_list = self.URL_LIST tabs = action_runner.tab.browser.tabs for url in url_list: # Suppress error caused by tab closed before it returns occasionally try: tabs.New(url=url) except Exception: pass if not IsMac(): self._devtools = action_runner.tab.browser.GetUIDevtools() def IsWebUI(self): return 'webui' in self.NAME def RunPageInteractions(self, action_runner): # Wait for download items to show up, this may take quite some time # for lowend machines. action_runner.Wait(10) if self.IsWebUI(): action_runner = Inspect(action_runner.tab.browser, WEBUI_DOWNLOAD_SHELF_URL) action_runner.ExecuteJavaScript(MEASURE_JS_MEMORY % 'download_shelf:used_js_heap_size_begin') self.InteractWithPage(action_runner) if self.IsWebUI(): action_runner.ExecuteJavaScript(MEASURE_JS_MEMORY % 'download_shelf:used_js_heap_size_end') def InteractWithPage(self, action_runner): self.ContextMenu(action_runner) action_runner.Wait(2) browser = action_runner.tab.browser Resize(browser, browser.tabs[0].id, start_width=600, end_width=800) action_runner.Wait(2) def ContextMenu(self, action_runner): if IsMac(): return try: if self.IsWebUI(): action_runner.ClickElement( element_function=DROPDOWN_BUTTON_ELEMENT_FUNCTION) else: ClickOn(self._devtools, 'TransparentButton', button=MOUSE_EVENT_BUTTON_RIGHT) action_runner.Wait(1) node_id = self._devtools.QueryNodes('<Window>')[ -1] # Context menu lives in the last Window. PressKey(self._devtools, node_id, 'Esc') action_runner.Wait(1) except Exception as e: logging.warning('Failed to run context menu. 
Error: %s', e) def WillStartTracing(self, chrome_trace_config): super(DownloadShelfStory, self).WillStartTracing(chrome_trace_config) chrome_trace_config.EnableUMAHistograms(*DOWNLOAD_SHELF_BENCHMARK_UMA) class DownloadShelfStory1File(DownloadShelfStory): NAME = 'download_shelf:1file' URL_LIST = [DOWNLOAD_URL] URL = URL_LIST[0] class DownloadShelfStory5File(DownloadShelfStory): NAME = 'download_shelf:5file' URL_LIST = [DOWNLOAD_URL] * 5 URL = URL_LIST[0] class DownloadShelfStoryTop10Loading(DownloadShelfStory): NAME = 'download_shelf:top10:loading' URL_LIST = TOP_URL[:10] + [DOWNLOAD_URL] URL = URL_LIST[0] WAIT_FOR_NETWORK_QUIESCENCE = False class DownloadShelfStoryMeasureMemory(DownloadShelfStory): NAME = 'download_shelf:measure_memory' URL_LIST = [DOWNLOAD_URL] URL = URL_LIST[0] def WillStartTracing(self, chrome_trace_config): super(DownloadShelfStoryMeasureMemory, self).WillStartTracing(chrome_trace_config) chrome_trace_config.category_filter.AddExcludedCategory('*') chrome_trace_config.category_filter.AddIncludedCategory('blink.console') chrome_trace_config.category_filter.AddDisabledByDefault( 'disabled-by-default-memory-infra') def GetExtraTracingMetrics(self): return super(DownloadShelfStoryMeasureMemory, self).GetExtraTracingMetrics() + ['memoryMetric'] def InteractWithPage(self, action_runner): action_runner.MeasureMemory(deterministic_mode=True) class DownloadShelfWebUIStory1File(DownloadShelfStory): NAME = 'download_shelf_webui:1file' URL_LIST = [DOWNLOAD_URL] URL = URL_LIST[0] class DownloadShelfWebUIStory5File(DownloadShelfStory): NAME = 'download_shelf_webui:5file' URL_LIST = [DOWNLOAD_URL] * 5 URL = URL_LIST[0] class DownloadShelfWebUIStoryTop10Loading(DownloadShelfStory): NAME = 'download_shelf_webui:top10:loading' URL_LIST = TOP_URL[:10] + [DOWNLOAD_URL] URL = URL_LIST[0] WAIT_FOR_NETWORK_QUIESCENCE = False class DownloadShelfWebUIStoryMeasureMemory(DownloadShelfStoryMeasureMemory): NAME = 'download_shelf_webui:measure_memory' URL_LIST = [DOWNLOAD_URL] URL = URL_LIST[0] DROPDOWN_BUTTON_ELEMENT_FUNCTION = ''' document.querySelector('download-shelf-app').shadowRoot. querySelector('download-list').shadowRoot. querySelector('download-item').shadowRoot. getElementById('dropdown-button') '''
[ "chromium-scoped@luci-project-accounts.iam.gserviceaccount.com" ]
chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
4287c198601ca5886f6c18af6d37e2e322a38868
5af1de35dc2b79ecfa823f3ce3bb1097ec29bbd7
/src/map/map_info/tests.py
899ed1151fa1a972ca3d943e25543c19998fe114
[ "BSD-2-Clause" ]
permissive
daonb/oMap
7b79f4e1fc2886523e5a5c1aab249b802bf30505
a904ddee91d2ef4c54cae0ad7ba83fb3cb2150ab
refs/heads/master
2021-01-21T01:52:12.233153
2011-06-22T11:52:18
2011-06-22T11:52:18
1,933,319
0
0
null
null
null
null
UTF-8
Python
false
false
1,799
py
import re
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import *
from django.contrib.auth.models import User, AnonymousUser
from django import template
#from knesset.mks.server_urls import mock_pingback_server
from django.utils import simplejson as json

from models import Layer, Point

just_id = lambda x: x.id

class ViewsTest(TestCase):

    def setUp(self):
        self.jacob = User.objects.create_user('jacob', 'jacob@jacob.org', 'JKM')
        self.layer = Layer.objects.create(name='layer 1', owner=self.jacob)
        self.p1 = Point.objects.create(user = self.jacob,
                                       layer = self.layer,
                                       point = fromstr('POINT(31,31)', srid=4326),
                                       subject = 'p1',
                                       description= 'This is p1')
        self.p2 = Point.objects.create(user = self.jacob,
                                       layer = self.layer,
                                       point = fromstr('POINT(32,32)', srid=4326),
                                       subject = 'p2',
                                       description= 'This is p2',
                                       views_count=4)

    def testMainView(self):
        res = self.client.get(reverse('map-home'))
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'site/index.html')
        self.assertEqual(map(just_id, res.context['points']),
                         [ self.p1.id, self.p2.id, ])
        self.assertEqual(map(just_id, res.context['most_recent']),
                         [ self.p2.id, self.p1.id, ])
        self.assertEqual(map(just_id, res.context['hot_topics']),
                         [ self.p2.id, self.p1.id])

    def tearDown(self):
        self.p1.delete()
        self.p2.delete()
        self.layer.delete()
        self.jacob.delete()
[ "bennydaon@gmail.com" ]
bennydaon@gmail.com
7efa9975210f4ac91ff681aafebfb83b62f65fe5
79359f4814c53a09460cd7c257d17901ba665f7b
/adress/models.py
eab690f225d2bd8ddbf10507db3f22156be3d793
[]
no_license
idelfrides/API_django_REST_framework
77e784bc8d5fd0e282273bb401acae3e7c2dc6e3
06429c9e394a40925850504f6fe233296b0d303a
refs/heads/master
2020-08-12T22:59:12.775204
2019-10-13T17:11:00
2019-10-13T17:11:00
214,859,685
0
0
null
null
null
null
UTF-8
Python
false
false
545
py
from django.db import models


class Adress(models.Model):
    line1 = models.CharField(max_length=150)
    line2 = models.CharField(max_length=150, null=True, blank=True)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=50)
    country = models.CharField(max_length=50)
    latitude = models.IntegerField(null=True, blank=True)
    longitude = models.IntegerField(null=True, blank=True)
    update = models.DateTimeField(auto_now=True, auto_now_add=False)

    def __str__(self):
        return self.line1
[ "idelfridesjorgepapai@gmail.com" ]
idelfridesjorgepapai@gmail.com
bf49b30027e8e76c3e2969b93abaf9b1c89d9e40
beb4d7c16ea8d8da9747b94298891cf01d9466f6
/users/forms.py
8e58828a265c591a8f885ad7435e82918ab2af63
[]
no_license
Chirag-Django/nonstopio_assignment
48985445a19f8d01c1f0565e8058cd032942d903
b1f1561e841857ea64d9a5170974704a347cc0e3
refs/heads/master
2023-03-05T07:05:49.963018
2021-02-20T18:35:29
2021-02-20T18:35:29
340,347,626
0
0
null
null
null
null
UTF-8
Python
false
false
429
py
from .models import Profile
from django import forms
from django.contrib.auth.models import User


class UserForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ('username','password')
        widgets = {
            'password': forms.PasswordInput(),
        }


class ProfileForm(forms.ModelForm):
    age =forms.IntegerField()
    class Meta:
        model = Profile
        fields = ('age', 'address')
[ "chirag.django@gmail.com" ]
chirag.django@gmail.com
2b629008ebebe1c82a9798ac82abd5a380f666de
8d02b867eaa5d7aedb80ae31cec5dfe7b0201d1f
/Ch_06 - Functions/ants_solution.py
c1117f5a2df92b0bc9247374d9ddcc7f38d2443a
[]
no_license
capncrockett/beedle_book
df17f632990edf4dfae82ccedb5f8d2d07385c00
d65315ddff20fb0ef666c610dbe4634dff0a621a
refs/heads/main
2023-07-23T08:33:17.275029
2021-09-01T02:47:08
2021-09-01T02:47:08
401,894,762
0
0
null
null
null
null
UTF-8
Python
false
false
1,193
py
# c06ex02.py
# Prints lyrics for "The Ants Go Marching".

def verse(number, action):
    print(march(number), hurrah())
    print(march(number), hurrah())
    print(march(number))
    print(littleOne(action))
    refrain()

def march(number):
    return "The ants go marching %s by %s," % (number, number)

def hurrah():
    return "hurrah! hurrah!"

def littleOne(action):
    return "The little one stops to " + action + ","

def refrain():
    print("And they all go marching down...")
    print("In the ground...")
    print("To get out...")
    print("Of the rain.")
    print("Boom! " * 3)

def main():
    actions = [("one", "suck his thumb"),
               ("two", "tie his shoe"),
               ("three", "climb a tree"),
               ("four", "shut the door"),
               ("five", "take a dive"),
               ("six", "pick up sticks"),
               ("seven", "talk to Kevin"),
               ("eight", "jump the gate"),
               ("nine", "swing on a vine"),
               ("ten", "say 'The End'")]
    for n, a in actions:
        verse(n, a)
        print()
    input("Press <Enter> to Quit")

main()
[ "root@YOGA-720.localdomain" ]
root@YOGA-720.localdomain
4e083f58b4e80f2947c7cd47ac00d60a37371e07
8be2df0c4508cc5254887b8cccb044032aea5c21
/interview/first/tests/test_first.py
97b686ec7178ed3528336ed4a0d8dfa6763ad96e
[]
no_license
ezhk/python-learning
2d3dad2190ac9ce9299534f0f303e8b76a8eeab2
424ec9ca08541273f9ec39ff25f75a3b78d9dcb7
refs/heads/master
2023-01-05T16:50:08.829169
2020-06-02T18:03:05
2020-06-02T18:03:05
165,482,083
0
1
null
2023-01-04T04:59:43
2019-01-13T08:21:44
Python
UTF-8
Python
false
false
572
py
#!/usr/bin/env python

import sys
import unittest

sys.path.append(".")
from first import *


class TestFirst(unittest.TestCase):
    def test_multiplication_table(self):
        result = """- -- -- -- -- -- -- -- -- --
1 2 3 4 5 6 7 8 9 10
2 4 6 8 10 12 14 16 18 20
3 6 9 12 15 18 21 24 27 30
4 8 12 16 20 24 28 32 36 40
5 10 15 20 25 30 35 40 45 50
- -- -- -- -- -- -- -- -- --"""
        self.assertEqual(multiplication_table(10, 5), result)


if __name__ == "__main__":
    unittest.main()
[ "ezhik@ezhik.info" ]
ezhik@ezhik.info
6db4923a1304a510d7f7b79194c89fae3d3e433d
f7b05ca511d923822ae8519de4c3f35b24a76f5f
/stubs/beancount/core/data.pyi
58f15af63d5704d692508807380a0e2130b72883
[ "MIT" ]
permissive
iEverX/fava
50d4c1214afbc80a01c60841ecd33bc366d2b44b
2c5508038b886b42e13648e3fb8a50bf9ac484cf
refs/heads/main
2023-04-26T23:32:10.516227
2021-05-23T07:08:45
2021-05-23T07:08:45
369,947,837
0
0
MIT
2021-05-23T02:52:55
2021-05-23T02:52:54
null
UTF-8
Python
false
false
3,760
pyi
# pylint: disable=all # flake8: noqa import datetime import enum from typing import Any from typing import Dict from typing import FrozenSet from typing import List from typing import NamedTuple from typing import Optional from typing import Set from typing import Tuple from typing import Type from typing import Union from beancount.core.amount import Amount from beancount.core.number import Decimal from beancount.core.number import MISSING from beancount.core.position import Cost from beancount.core.position import CostSpec Account = str Currency = str Flag = str Meta = Dict[str, Any] Tags = Union[Set[str], FrozenSet[str]] Links = Tags EMPTY_SET: Any class Booking(enum.Enum): STRICT: str = ... NONE: str = ... AVERAGE: str = ... FIFO: str = ... LIFO: str = ... class Close(NamedTuple): meta: Meta date: datetime.date account: Account class Commodity(NamedTuple): meta: Meta date: datetime.date currency: Currency class Open(NamedTuple): meta: Meta date: datetime.date account: Account currencies: List[Currency] booking: Booking class Pad(NamedTuple): meta: Meta date: datetime.date account: Account source_account: Account class Balance(NamedTuple): meta: Meta date: datetime.date account: Account amount: Amount tolerance: Optional[Decimal] diff_amount: Optional[Decimal] class Posting(NamedTuple): account: Account units: Union[Amount, Type[MISSING]] cost: Optional[Union[Cost, CostSpec]] price: Optional[Amount] flag: Optional[Flag] meta: Optional[Meta] class Transaction(NamedTuple): meta: Meta date: datetime.date flag: Flag payee: Optional[str] narration: str tags: Tags links: Links postings: List[Posting] class TxnPosting(NamedTuple): txn: Transaction posting: Posting class Note(NamedTuple): meta: Meta date: datetime.date account: Account comment: str class Event(NamedTuple): meta: Meta date: datetime.date type: str description: str class Query(NamedTuple): meta: Meta date: datetime.date name: str query_string: str class Price(NamedTuple): meta: Meta date: datetime.date currency: Currency amount: Amount class Document(NamedTuple): meta: Meta date: datetime.date account: Account filename: str tags: Optional[Tags] links: Optional[Links] class Custom(NamedTuple): meta: Meta date: datetime.date type: str values: List # ALL_DIRECTIVES: Any Directive = Union[ Open, Close, Commodity, Pad, Balance, Transaction, Note, Event, Query, Price, Document, Custom, ] Entries = List[Directive] def new_metadata(filename: Any, lineno: Any, kvlist: Optional[Any] = ...): ... def create_simple_posting( entry: Any, account: Any, number: Any, currency: Any ): ... def create_simple_posting_with_cost( entry: Any, account: Any, number: Any, currency: Any, cost_number: Any, cost_currency: Any, ): ... NoneType: Any def sanity_check_types( entry: Any, allow_none_for_tags_and_links: bool = ... ) -> None: ... def posting_has_conversion(posting: Any): ... def transaction_has_conversion(transaction: Any): ... def get_entry(posting_or_entry: Any): ... SORT_ORDER: Any def entry_sortkey(entry: Any): ... def sorted(entries: Any): ... def posting_sortkey(entry: Any): ... def filter_txns(entries: Any) -> None: ... def has_entry_account_component(entry: Any, component: Any): ... def find_closest(entries: Any, filename: Any, lineno: Any): ... def remove_account_postings(account: Any, entries: Any): ... def iter_entry_dates(entries: Any, date_begin: Any, date_end: Any): ...
[ "mail@jakobschnitzer.de" ]
mail@jakobschnitzer.de
db7e7c16b31a09e8b6ca45c25861d6d291100c75
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/v34oCTbkrceCZjgRE_13.py
a9172c7f75c4daa5b8039320bd29cd908f79cc2b
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
1,286
py
""" The **right shift** operation is similar to **floor division by powers of two** , thus, the process is _repetitive_ and can be done _recursively_. Sample calculation using the right shift operator ( `>>` ): 80 >> 3 = floor(80/2^3) = floor(80/8) = 10 -24 >> 2 = floor(-24/2^2) = floor(-24/4) = -6 -5 >> 1 = floor(-5/2^1) = floor(-5/2) = -3 Write a function that **mimics** (without the use of **> >**) the right shift operator and returns the result from the two given integers. ### Examples shift_to_right(80, 3) ➞ 10 shift_to_right(-24, 2) ➞ -6 shift_to_right(-5, 1) ➞ -3 shift_to_right(4666, 6) ➞ 72 shift_to_right(3777, 6) ➞ 59 shift_to_right(-512, 10) ➞ -1 ### Notes * There will be no negative values for the second parameter `y`. * This challenge is more like recreating of the **right shift** operation, thus, **the use of the operator directly** is **prohibited**. * You are expected to solve this challenge via **recursion**. * An **iterative** version of this challenge can be found via this [link](https://edabit.com/challenge/noqQNSr5o9qzvXWzL). """ def shift_to_right(x, y): # recursive code here if y == 0: return x else: return shift_to_right(x // 2, y - 1)
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
c8cad4bbe0e65284571d2098ad9eac09162e0c47
fe6e0a2cfb00d34b58f64f164a747e3df08e8a9d
/client/application/controller/quyuxiaoshoutongji.py
bf2221e525b580d1453b72118bd22302a81d672e
[]
no_license
huboqiao/kmvip
c141814666631c35b8adeec3d3beb5aca0d2d1cd
11ae7e1f78943c8425516c4f06acf043a99acdcc
refs/heads/master
2020-02-26T14:58:31.573602
2016-08-03T06:29:41
2016-08-03T06:29:41
64,809,269
0
1
null
null
null
null
UTF-8
Python
false
false
3,712
py
#coding:utf-8 from application.lib.Commethods import * from application.view.quyuxiaoshoutongji import Ui_Dialog class QuYuScale(ControllerAction,Ui_Dialog,PrintAction): def __init__(self,parent = None): ControllerAction.__init__(self, parent) PrintAction.__init__(self,u"区域销售统计表") self.setTable() self.setStyleSheet("""QGroupBox{font-size:18px;margin-top:10px;padding:14px;border:2px solid #6cb479;border-radius:10px;} QGroupBox::indicator{width:130px;height:130px;} QGroupBox::title{font-size:20px;left:15px;margin-top:-15px;} QTableWidget{border:2px solid #6cb479;border-radius:5px;} """) self.tableWidget.setAlternatingRowColors(True); self.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows); #选择整行 【推荐】 self.tableWidget.verticalHeader().hide() # self.connect(self.table, SIGNAL("cellPressed(int,int)"),self.test) self.connect(self.pushButton, SIGNAL("clicked()"),self.testdd) self.connect(self.pushButton_2, SIGNAL("clicked()"),self.generateExcel) self.connect(self.pushButton_3, SIGNAL("clicked()"),self.printTo) self.connect(self.pushButton_5, SIGNAL("clicked()"),self.prePrint) self.connect(self.pushButton_4, SIGNAL("clicked()"),self.configColumn) self.tableWidget.horizontalHeader().setResizeMode(QHeaderView.Stretch) # self.table.horizontalHeader().setResizeMode(QHeaderView.Stretch)#【推荐】 def testdd(self): dlg = KDialog(self) dlg.exec_() def setTable(self): self.tableWidget.setRowCount(10) # alignment,color,format,count self.table_fmt_list = [] self.table_fmt_list.append({"alignment":"left","color":"black","format":"general","count":False}) self.table_fmt_list.append({"alignment":"left","color":"black","format":"general","count":False}) self.table_fmt_list.append({"alignment":"left","color":"black","format":"0","count":True}) self.table_fmt_list.append({"alignment":"right","color":"black","format":"#,##0.00","count":True}) self.table_fmt_list.append({"alignment":"right","color":"black","format":"#,##0.00","count":True}) self.table_data_list = ["苹果","水果",11,123.2,123434321] # countColumn = [key for key,value in enumerate(self.table_fmt_list) if value['count'] == True] print countColumn countList = {} for i in countColumn: countList[str(i)] = 0 for i in range(10): for j in range(5): item = QTableWidgetItem(unicode(str(self.table_data_list[j]))) self.formatTableItem(item,self.table_fmt_list[j]) self.tableWidget.setItem(i,j,item) if j in countColumn: countList[str(j)] += self.table_data_list[j] if len(countColumn)>0: rowCount = self.tableWidget.rowCount() self.tableWidget.setRowCount(rowCount+1) self.tableWidget.setItem(rowCount,0,QTableWidgetItem(u"共计:")) for key,value in countList.items(): item = QTableWidgetItem(str(value)) self.tableWidget.setItem(rowCount,int(key),item) self.formatTableItem(item,self.table_fmt_list[int(key)]) def test(self,x,y): print x,y # self.verticalLayout.addWidget()
[ "42320756@qq.com" ]
42320756@qq.com
4c5b927c53c82fc293f4290aa24ae57772b12da7
930309163b930559929323647b8d82238724f392
/typical90_i.py
ea4c1a1584c4ad62fa2c06098d162e2a63c7da57
[]
no_license
GINK03/atcoder-solvers
874251dffc9f23b187faa77c439b445e53f8dfe1
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
refs/heads/master
2021-11-07T14:16:52.138894
2021-09-12T13:32:29
2021-09-12T13:32:29
11,724,396
3
1
null
null
null
null
UTF-8
Python
false
false
1,077
py
import itertools
import numpy as np
import bisect

def angle(c0, c1):
    cosine_angle = np.dot(c0, c1) / (np.linalg.norm(c0) * np.linalg.norm(c1))
    angle = np.arccos(cosine_angle)
    return np.degrees(angle)

def slow():
    N=int(input())
    XY = []
    for _ in range(N):
        x, y = map(int,input().split())
        XY.append( [x,y] )
    XY=np.array(XY)
    for a,b,c in itertools.permutations(XY,3):
        print(a, b, c, angle(a-c, b-c))
# slow()

def main():
    N=int(input())
    XY = []
    for _ in range(N):
        x, y = map(int,input().split())
        XY.append( [x,y] )
    XY=np.array(XY)

    ans = 0
    for c_pos in range(N):
        c = XY[c_pos]
        xy = [angle(x,c) for x in XY]
        print(c, xy)
        xy.sort()
        for i in range(len(xy)):
            a = xy[i]
            can_b = a + 180
            if can_b >= 360:
                can_b -= 360
            i0 = bisect.bisect_left(xy, can_b)%len(xy)
            print( can_b, i0, xy[i0] - a)
            ans = max(xy[i0] - a, ans)
    print(ans)
main()
[ "gim.kobayashi@gmail.com" ]
gim.kobayashi@gmail.com
d93e485b32559ed9bbeb9095906afc77c3b293b1
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/19/usersdata/102/7266/submittedfiles/jogoDaVelha.py
ca7bac2dab81b9c471ab886a8d0b50f2746278a9
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
900
py
# -*- coding: utf-8 -*-
from __future__ import division
import math

x1 = input('Digite x1: ')
x2 = input('Digite x2: ')
x3 = input('Digite x3: ')
x4 = input('Digite x4: ')
x5 = input('Digite x5: ')
x6 = input('Digite x6: ')
x7 = input('Digite x7: ')
x8 = input('Digite x8: ')
x9 = input('Digite x9: ')
#CONTINUE...
if x1==x2==x3==0:
    print('0')
elif x1==x2==x3==1:
    print('1')
elif x1==x4==x7==0:
    print('0')
elif x1==x4==x7==1:
    print('1')
elif x4==x5==x6==0:
    print('0')
elif x4==x5==x6==1:
    print('1')
elif x7==x8==x9==0:
    print('0')
elif x7==x8==x9==1:
    print('1')
elif x2==x5==x8==0:
    print('0')
elif x2==x5==x8==1:
    print('1')
elif x3==x6==x9==0:
    print('0')
elif x3==x6==x9==1:
    print('1')
elif x1==x5==x9==0:
    print('0')
elif x1==x5==x9==1:
    print('1')
elif x3==x5==x7==0:
    print('0')
elif x3==x5==x7==1:
    print('1')
else:
    print('e')
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
ea1f690bb16b6f3b7c4f574beb17f9754aa3dfa2
3fad7381b03607e908dc06a7f91ae60f10e5be16
/01_tests/05_andrei_repository/2017.08.23_RaportMnist/knn_slider_experiment/slider.py
549c9c6779a84f95aa31a5ece4f596f64525af76
[]
no_license
Cloudifier/CLOUDIFIER_WORK
ea5efe0f8e75315313db5ee145f4cc8092b542fa
e8ce18fad97b1207545e933ed0947347ed09c536
refs/heads/master
2021-12-23T16:41:03.149554
2021-12-13T13:16:51
2021-12-13T13:16:51
108,911,842
0
0
null
null
null
null
UTF-8
Python
false
false
5,605
py
import numpy as np import pandas as pd from utils import sigmoid, softmax, min_max_scaler from sklearn.metrics import pairwise_distances from scipy.stats import mode import multiprocessing from tqdm import tqdm, trange import time import gc import inspect import sys class Slider(): def __init__(self, df_files, sizes, window_size, step_size, classifier, epsilon, logger): self.df_files = df_files self.sizes = sizes self.window_size = window_size self.step_size = step_size self.epsilon = epsilon self.num_df = len(df_files) self.crt_df = None self.crt_idx = 0 self.logger = logger self.classifier = classifier self.X = None self.y = None self.img_pos = None self.results = [[0, 0] for i in range(self.num_df)] np.set_printoptions(precision = 2, suppress = True) def sliding_window(self, image, step_size): for y in range(0, image.shape[0] - self.window_size[1], step_size): for x in range(0, image.shape[1] - self.window_size[0], step_size): yield (x, y, image[y:y + self.window_size[1], x:x + self.window_size[0]]) def slide_over_image(self, image): start_time = time.time() windows = [] positions = [] self.dict_windows = {} for (x, y, window) in self.sliding_window(image, step_size = self.step_size): self.logger.log("\tTaking window at pos = ({},{})".format(y,x), verbosity_level = 0) self.dict_windows[(y,x)] = window window = window.flatten() windows.append(window) positions.append((x, y)) self.windows = np.array(windows) self.positions = np.array(positions) predicted_val, predicted_pos, vals, counts, top_k_sums = self.classifier.predict(self.windows, self.positions, k = 5) self.predicted_val = predicted_val self.predicted_pos = predicted_pos self.vals = vals self.counts = counts self.top_k_sums = top_k_sums self.logger.log("\tScene slided in {:.2f}s".format(time.time()-start_time), verbosity_level = 2) return predicted_val, predicted_pos def read_df(self): self.crt_df = pd.read_pickle(self.df_files[self.crt_idx]) self.X = np.array(self.crt_df.iloc[:, 3:].values, dtype = float) self.y = np.array(self.crt_df.iloc[:, 0].values, dtype = int) self.img_pos = np.array(self.crt_df.iloc[:, 1:3].values, dtype = int) self.X = min_max_scaler(self.X) def check_position(self, i, predicted_pos): return (abs(self.img_pos[i][1] - predicted_pos[1]) < self.epsilon and abs(self.img_pos[i][0] - predicted_pos[0]) < self.epsilon) def slide_over_df(self): self.read_df() self.logger.log("Sliding {} test scenes of size {}x{} with {}x{} windows and step_size={}".format(self.X.shape[0], self.sizes[self.crt_idx][0], self.sizes[self.crt_idx][1], self.window_size[0], self.window_size[1], self.step_size), verbosity_level = 2) old_print = print inspect.builtins.print = tqdm.write t = trange(self.X.shape[0], desc='Slider', leave=True) for i in range(self.X.shape[0]): if self.results[self.crt_idx][0] + self.results[self.crt_idx][1] == 0: crt_accuracy = 0 else: crt_accuracy = float(self.results[self.crt_idx][0]) / (self.results[self.crt_idx][0] + self.results[self.crt_idx][1]) t.set_description("Target {} -- Position ({}, {}) -- corrects = {}, wrongs = {} -- accuracy = {:.2f} %".format(self.y[i], self.img_pos[i][1], self.img_pos[i][0], self.results[self.crt_idx][0], self.results[self.crt_idx][1], crt_accuracy * 100)) t.refresh() t.update(1) sys.stdout.flush() self.logger.log("Start sliding scene #{}; position of the image with target = {} in the scene = ({}, {})".format(i, self.y[i], self.img_pos[i][1], self.img_pos[i][0]), verbosity_level = 2) image = self.X[i].reshape(self.sizes[self.crt_idx][0], self.sizes[self.crt_idx][1]) 
predicted_val, predicted_pos = self.slide_over_image(image) if predicted_val == self.y[i]: if self.check_position(i, predicted_pos): self.results[self.crt_idx][0] += 1 self.logger.log("\tFound {} at pos ({}, {}) ... correct target, correct position" .format(predicted_val, predicted_pos[0], predicted_pos[1]), verbosity_level = 2) else: self.logger.log("\tFound {} at pos ({}, {}) ... correct target, wrong position" .format(predicted_val, predicted_pos[0], predicted_pos[1]), verbosity_level = 2) self.results[self.crt_idx][1] += 1 else: if predicted_val == -1: self.logger.log("\tCould not match a window .. ", verbosity_level = 2) else: self.logger.log("\tFound {} at pos ({}, {}) ... incorrect target" .format(predicted_val, predicted_pos[0], predicted_pos[1]), verbosity_level = 2) self.results[self.crt_idx][1] += 1 self.logger.log("Finished sliding scene #{}".format(i), verbosity_level = 2) inspect.builtins.print = old_print def slide(self): for i in range(1): start_time = time.time() self.slide_over_df() self.logger.log("Test scenes of size {}x{} slided in {:.2f}s; corrects={}, wrongs={}" .format(self.sizes[i][0], self.sizes[i][1], time.time() - start_time, self.results[i][0], self.results[i][1])) self.crt_idx += 1 del self.crt_df gc.collect() if __name__=='__main__': print("Library module. No main function")
[ "damian@cloudifier.net" ]
damian@cloudifier.net
923ca287952b81c4d4382b7af028fdc1393fab6e
5a9a28b79e01a71dae8c92d1c1feaee139a92510
/2022/2022-02/02-07/1405.py
48c8814e0ea1a5ed2f0f7c36808ffbe89f92fc37
[]
no_license
ez4lionky/Leetcode-practices
b81854e0ab9a9b39b6a26df6faf99bcf89860c39
0c28803043ea8196e564dacdbb231f6bb1693226
refs/heads/master
2023-02-08T03:33:42.756691
2023-01-21T03:31:09
2023-01-21T03:31:09
196,699,617
0
0
null
null
null
null
UTF-8
Python
false
false
1,061
py
import heapq


class Solution:
    def longestDiverseString(self, a: int, b: int, c: int) -> str:
        max_heap = []
        if a > 0:
            heapq.heappush(max_heap, (-a, 'a'))
        if b > 0:
            heapq.heappush(max_heap, (-b, 'b'))
        if c > 0:
            heapq.heappush(max_heap, (-c, 'c'))

        res = ''
        while max_heap:
            n, x = heapq.heappop(max_heap)
            n *= -1
            if len(res) < 2 or not (res[-2] == res[-1] == x):
                res += x
                n -= 1
                if n != 0:
                    heapq.heappush(max_heap, (-n, x))
            else:
                if len(max_heap) == 0:
                    break
                n2, y = heapq.heappop(max_heap)
                n2 *= -1
                res += y
                n2 -= 1
                if n2 != 0:
                    heapq.heappush(max_heap, (-n2, y))
                heapq.heappush(max_heap, (-n, x))
        return res


if __name__ == "__main__":
    sol = Solution()
    a = 1
    b = 1
    c = 7
    print(sol.longestDiverseString(a, b, c))
[ "codex.lxy@gmail.com" ]
codex.lxy@gmail.com
a0aa5f38758c6fb400aeb788892f86ccbf5513d2
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02778/s359431231.py
05bef0f491391dffa6e151fc8c6564e36229bb6a
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
168
py
import sys
input = lambda: sys.stdin.readline().rstrip()


def solve():
    n = len(input())
    ans = 'x' * n
    print(ans)


if __name__ == '__main__':
    solve()
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
35587ddb86a9f14e7a0f7fb2e1d56c72f3e4c63b
bc441bb06b8948288f110af63feda4e798f30225
/resource_package_tools_sdk/model/topology/view_pb2.pyi
87c922d2bad9ae781ab1970c8f62c712ab95c8b8
[ "Apache-2.0" ]
permissive
easyopsapis/easyops-api-python
23204f8846a332c30f5f3ff627bf220940137b6b
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
refs/heads/master
2020-06-26T23:38:27.308803
2020-06-16T07:25:41
2020-06-16T07:25:41
199,773,131
5
0
null
null
null
null
UTF-8
Python
false
false
7,522
pyi
# @generated by generate_proto_mypy_stubs.py. Do not edit! import sys from google.protobuf.descriptor import ( Descriptor as google___protobuf___descriptor___Descriptor, ) from google.protobuf.internal.containers import ( RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer, RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer, ) from google.protobuf.message import ( Message as google___protobuf___message___Message, ) from resource_package_tools_sdk.model.topology.area_pb2 import ( Area as resource_package_tools_sdk___model___topology___area_pb2___Area, ) from resource_package_tools_sdk.model.topology.link_pb2 import ( Link as resource_package_tools_sdk___model___topology___link_pb2___Link, ) from resource_package_tools_sdk.model.topology.node_pb2 import ( Node as resource_package_tools_sdk___model___topology___node_pb2___Node, ) from resource_package_tools_sdk.model.topology.note_pb2 import ( Note as resource_package_tools_sdk___model___topology___note_pb2___Note, ) from typing import ( Iterable as typing___Iterable, Optional as typing___Optional, Text as typing___Text, Union as typing___Union, ) from typing_extensions import ( Literal as typing_extensions___Literal, ) builtin___bool = bool builtin___bytes = bytes builtin___float = float builtin___int = int if sys.version_info < (3,): builtin___buffer = buffer builtin___unicode = unicode class View(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... class Diff(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... @property def addNodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___node_pb2___Node]: ... @property def removeNodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___node_pb2___Node]: ... @property def addLinks(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___link_pb2___Link]: ... @property def removeLinks(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___link_pb2___Link]: ... def __init__(self, *, addNodes : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___node_pb2___Node]] = None, removeNodes : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___node_pb2___Node]] = None, addLinks : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___link_pb2___Link]] = None, removeLinks : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___link_pb2___Link]] = None, ) -> None: ... if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> View.Diff: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> View.Diff: ... def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ... def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ... def ClearField(self, field_name: typing_extensions___Literal[u"addLinks",b"addLinks",u"addNodes",b"addNodes",u"removeLinks",b"removeLinks",u"removeNodes",b"removeNodes"]) -> None: ... id = ... 
# type: typing___Text name = ... # type: typing___Text creator = ... # type: typing___Text modifier = ... # type: typing___Text readAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text] writeAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text] version = ... # type: typing___Text ctime = ... # type: builtin___int mtime = ... # type: builtin___int @property def rootNode(self) -> resource_package_tools_sdk___model___topology___node_pb2___Node: ... @property def nodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___node_pb2___Node]: ... @property def links(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___link_pb2___Link]: ... @property def areas(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___area_pb2___Area]: ... @property def notes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___topology___note_pb2___Note]: ... @property def diff(self) -> View.Diff: ... def __init__(self, *, id : typing___Optional[typing___Text] = None, name : typing___Optional[typing___Text] = None, creator : typing___Optional[typing___Text] = None, modifier : typing___Optional[typing___Text] = None, readAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None, writeAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None, version : typing___Optional[typing___Text] = None, ctime : typing___Optional[builtin___int] = None, mtime : typing___Optional[builtin___int] = None, rootNode : typing___Optional[resource_package_tools_sdk___model___topology___node_pb2___Node] = None, nodes : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___node_pb2___Node]] = None, links : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___link_pb2___Link]] = None, areas : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___area_pb2___Area]] = None, notes : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___topology___note_pb2___Note]] = None, diff : typing___Optional[View.Diff] = None, ) -> None: ... if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> View: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> View: ... def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ... def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ... def HasField(self, field_name: typing_extensions___Literal[u"diff",b"diff",u"rootNode",b"rootNode"]) -> builtin___bool: ... def ClearField(self, field_name: typing_extensions___Literal[u"areas",b"areas",u"creator",b"creator",u"ctime",b"ctime",u"diff",b"diff",u"id",b"id",u"links",b"links",u"modifier",b"modifier",u"mtime",b"mtime",u"name",b"name",u"nodes",b"nodes",u"notes",b"notes",u"readAuthorizers",b"readAuthorizers",u"rootNode",b"rootNode",u"version",b"version",u"writeAuthorizers",b"writeAuthorizers"]) -> None: ...
[ "service@easyops.cn" ]
service@easyops.cn
0ecd372f87cdeb4bbb342d2f31042ecb2e05bd41
683876019cad0b0d562ac7f9da8c679cb310cfb2
/2016/day19/part1_ll.py
4a08c2e641a6605382f82513fdc09f18e30d97da
[]
no_license
CoachEd/advent-of-code
d028bc8c21235361ad31ea55922625adf743b5c8
10850d5d477c0946ef73756bfeb3a6db241cc4b2
refs/heads/master
2023-05-11T05:20:26.951224
2023-05-09T18:54:16
2023-05-09T18:54:16
160,375,311
0
0
null
null
null
null
UTF-8
Python
false
false
1,337
py
""" AoC """ import time import sys # A single node of a singly linked list class Node: # constructor def __init__(self, data = None, next=None): self.data = data self.next = next # A Linked List class with a single head node class LinkedList: def __init__(self): self.head = None # insertion method for the linked list def insert(self, data): newNode = Node(data) if(self.head): current = self.head while(current.next): current = current.next current.next = newNode else: self.head = newNode return newNode # print method for the linked list def printLL(self): current = self.head while(current): print(current.data) current = current.next if current == self.head: break print() # main #num_elves = 3014603 # does not scale well! num_elves = 5 # test data ll = LinkedList() elf_num = 1 for i in range(num_elves): n = ll.insert([elf_num,1]) elf_num += 1 n.next = ll.head curr = ll.head while True: arr = curr.data if arr[1] == num_elves: print('Elf ' + str(arr[0]) + ' wins!') break next_elf = curr.next arr[1] += next_elf.data[1] curr.next = next_elf.next curr = curr.next start_secs = time.time() print('') end_secs = time.time() print('--- ' + str(end_secs-start_secs) + ' secs ---')
[ "CoachEd@gmail.com" ]
CoachEd@gmail.com
de63c3e5d529ce92b9c76636a9c9546bef4fc10b
34263dd7d7cb038bd62b50239ba5f7a88d0430ce
/extra_apps/xadmin/plugins/ueditor.py
c95778d810b957813b85d7268919b96c31d85c15
[]
no_license
vevoly/StudyOnline2
c8da53ccc4d58e10d7f58639ffe4a0944757aef1
224a9d21a8600e26945e09ae4240b67267392173
refs/heads/master
2020-04-21T08:01:44.043799
2019-02-12T15:17:46
2019-02-12T15:17:46
169,407,451
0
0
null
null
null
null
UTF-8
Python
false
false
1,394
py
# _*_ coding: utf-8 _*_
__author__ = 'jevoly'
__date__ = '2018/12/13 0013 上午 10:15'
import xadmin
from xadmin.views import BaseAdminPlugin, CreateAdminView, UpdateAdminView
from DjangoUeditor.models import UEditorField
from DjangoUeditor.widgets import UEditorWidget
from django.conf import settings


class XadminUEditorWidget(UEditorWidget):
    def __init__(self, **kwargs):
        self.ueditor_options = kwargs
        self.Media.js = None
        super(XadminUEditorWidget, self).__init__(kwargs)


class UeditorPlugin(BaseAdminPlugin):

    def get_field_style(self, attrs, db_field, style, **kwargs):
        if style == 'ueditor':
            if isinstance(db_field, UEditorField):
                widget = db_field.formfield().widget
                param = {}
                param.update(widget.ueditor_settings)
                param.update(widget.attrs)
                return {'widget': XadminUEditorWidget(**param)}
        return attrs

    def block_extrahead(self, context, nodes):
        js = '<script type="text/javascript" src="%s"></script>' % (settings.STATIC_URL + "ueditor/ueditor.config.js")
        js += '<script type="text/javascript" src="%s"></script>' % (settings.STATIC_URL + "ueditor/ueditor.all.min.js")
        nodes.append(js)


xadmin.site.register_plugin(UeditorPlugin, CreateAdminView)
xadmin.site.register_plugin(UeditorPlugin, UpdateAdminView)
[ "jevoly@163.com" ]
jevoly@163.com
4fb1a32acd37b6b3c8a78123ef3c9402f520f7b1
3c868540c8f5b0b9b46440e9b8e9160de9e8988f
/ch04/items.py
7acd94f1c49dc7b2622e2f46537cd18c9af52490
[]
no_license
sarte3/python
cc8f41b8b22b0a980252d6546358dd212324e2cd
15d984e5df03387950692092b6b5569adab845bb
refs/heads/master
2023-01-18T18:37:40.720326
2020-11-17T08:43:27
2020-11-17T08:43:27
304,824,911
0
0
null
null
null
null
UTF-8
Python
false
false
354
py
example_dictionary = {
    'key A': 'value A',
    'key B': 'value B',
    'key C': 'value C'
}

print("# The dictionary's items() function")
print('items() : ', example_dictionary.items())
print()

print("# Combining the dictionary's items() function with a loop")
for key, element in example_dictionary.items():
    print('dictionary[{}] = {}'.format(key, element))
[ "sarte@outlook.kr" ]
sarte@outlook.kr
0761f644ba1f7580cfe8081b9a120d644abca30f
4ebdc7053d9341ce7ad45f1e859ff86ef1455177
/56_simple_interest.py
4ada91727b9d65ad66eee49e2e59748636e9322d
[]
no_license
daikiante/python
1f4d55e1fd04eef22702b364148b8e1a2beea2d3
9d604b8dcd9e3cbe8b4db24ef16c5c969f6f894f
refs/heads/master
2020-09-17T00:14:24.034179
2019-12-02T09:03:25
2019-12-02T09:03:25
223,928,994
0
0
null
null
null
null
UTF-8
Python
false
false
279
py
# Simple interest calculation
# principal * interest rate * holding period = simple interest
def simple_interest(p, t, r):
    return p * (r / 100) * t


p = int(input('Enter the amount (/Rs):'))
t = int(input('Enter the hold span (/Year):'))
r = int(input('Enter the interest (/%):'))
print(simple_interest(p, t, r))
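A non-interactive worked example of the arithmetic; note that the parameter order is principal, years, rate.

interest = simple_interest(1000, 2, 5)   # 1000 * (5 / 100) * 2
print(interest)                          # 100.0, up to float rounding
assert abs(interest - 100.0) < 1e-9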
[ "daikiante@gmail.com" ]
daikiante@gmail.com
151e343e9c8d0f2441e111d73315a7c1519ef256
4cdb92fddb5082070ea8e25df63a7d43e9259c81
/pubsubpull/migrations/0005_auto_20150520_1121.py
e0bcf42821b5225682cacbac07cad406faa9d580
[ "MIT" ]
permissive
WFP-BKK/django-pubsubpull
50b22604eef82379124f8377188d956e4965a146
ae4341ff9fdbe7493399adb7f2ecb43024cb7961
refs/heads/master
2021-01-20T14:53:32.616023
2016-10-26T03:12:29
2016-10-26T03:12:29
82,781,454
0
0
null
2017-02-22T08:46:10
2017-02-22T08:46:10
null
UTF-8
Python
false
false
698
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import datetime


class Migration(migrations.Migration):

    dependencies = [
        ('pubsubpull', '0004_auto_20150520_1041'),
    ]

    operations = [
        migrations.AddField(
            model_name='request',
            name='duration',
            field=models.FloatField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='request',
            name='started',
            field=models.DateTimeField(default=datetime.date(1970, 1, 1), auto_now_add=True),
            preserve_default=False,
        ),
    ]
[ "k@kirit.com" ]
k@kirit.com
b1613ee7db44819afd8e61048612766198986df2
1dd89e25ae58eded2fc3a5756a706fd654c9b40a
/test/test_analyze.py
652880fcf500bc53bbb28c8567a8a18605915ec9
[ "Apache-2.0" ]
permissive
jiev/chan
9672bd3ac3146248da1da60660a6123ab1fa34b1
a6c57a9f64e37c9cb05cf498f16719dbc2757a4c
refs/heads/master
2022-06-17T21:31:19.680827
2022-06-10T01:52:36
2022-06-10T01:52:36
247,880,355
2
0
MIT
2020-03-17T04:38:18
2020-03-17T04:38:17
null
UTF-8
Python
false
false
4,209
py
# coding: utf-8 import zipfile from tqdm import tqdm import pandas as pd from czsc.analyze import * from czsc.enum import Freq from czsc.signals.signals import get_default_signals, get_s_three_bi, get_s_d0_bi cur_path = os.path.split(os.path.realpath(__file__))[0] def read_1min(): with zipfile.ZipFile(os.path.join(cur_path, 'data/000001.XSHG_1min.zip'), 'r') as z: f = z.open('000001.XSHG_1min.csv') data = pd.read_csv(f, encoding='utf-8') data['dt'] = pd.to_datetime(data['dt']) records = data.to_dict('records') bars = [] for row in tqdm(records, desc='read_1min'): bar = RawBar(**row) bar.freq = Freq.F1 bars.append(bar) return bars def read_daily(): file_kline = os.path.join(cur_path, "data/000001.SH_D.csv") kline = pd.read_csv(file_kline, encoding="utf-8") kline.loc[:, "dt"] = pd.to_datetime(kline.dt) bars = [RawBar(symbol=row['symbol'], id=i, freq=Freq.D, open=row['open'], dt=row['dt'], close=row['close'], high=row['high'], low=row['low'], vol=row['vol']) for i, row in kline.iterrows()] return bars def test_find_bi(): bars = read_daily() # 去除包含关系 bars1 = [] for bar in bars: if len(bars1) < 2: bars1.append(NewBar(symbol=bar.symbol, id=bar.id, freq=bar.freq, dt=bar.dt, open=bar.open, close=bar.close, high=bar.high, low=bar.low, vol=bar.vol, elements=[bar])) else: k1, k2 = bars1[-2:] has_include, k3 = remove_include(k1, k2, bar) if has_include: bars1[-1] = k3 else: bars1.append(k3) fxs = [] for i in range(1, len(bars1) - 1): fx = check_fx(bars1[i - 1], bars1[i], bars1[i + 1]) if isinstance(fx, FX): fxs.append(fx) def get_user_signals(c: CZSC) -> OrderedDict: """在 CZSC 对象上计算信号,这个是标准函数,主要用于研究。 实盘时可以按照自己的需要自定义计算哪些信号 :param c: CZSC 对象 :return: 信号字典 """ s = OrderedDict({"symbol": c.symbol, "dt": c.bars_raw[-1].dt, "close": c.bars_raw[-1].close}) # 倒0,特指未确认完成笔 # 倒1,倒数第1笔的缩写,表示第N笔 # 倒2,倒数第2笔的缩写,表示第N-1笔 # 倒3,倒数第3笔的缩写,表示第N-2笔 # 以此类推 for i in range(1, 3): s.update(get_s_three_bi(c, i)) s.update(get_s_d0_bi(c)) return s def test_czsc_update(): bars = read_daily() # 不计算任何信号 c = CZSC(bars) assert not c.signals # 计算信号 c = CZSC(bars, get_signals=get_default_signals) assert len(c.bi_list) == 50 and not c.last_bi_extend assert isinstance(c.signals, OrderedDict) and len(c.signals) == 38 last_bi = c.bi_list[-1] assert len(last_bi.raw_bars) == 32 and last_bi.power_price == last_bi.power assert len(last_bi.fake_bis) == 11 assert last_bi.fake_bis[0].direction == last_bi.fake_bis[-1].direction == last_bi.direction # 测试自定义信号 c = CZSC(bars, get_signals=get_user_signals, signals_n=20) assert len(c.signals) == 11 assert len(c.signals_list) == 20 assert c.signals_list[-1] == c.signals kline = [x.__dict__ for x in c.bars_raw] bi = [{'dt': x.fx_a.dt, "bi": x.fx_a.fx} for x in c.bi_list] + \ [{'dt': c.bi_list[-1].fx_b.dt, "bi": c.bi_list[-1].fx_b.fx}] chart = kline_pro(kline, bi=bi, title="{} - {}".format(c.symbol, c.freq)) file_html = "x.html" chart.render(file_html) os.remove(file_html) def test_get_signals(): def get_test_signals(c: CZSC) -> OrderedDict: s = OrderedDict({"symbol": c.symbol, "dt": c.bars_raw[-1].dt, "close": c.bars_raw[-1].close}) s.update(get_s_d0_bi(c)) return s bars = read_daily() # 不计算任何信号 c = CZSC(bars, get_signals=get_test_signals) assert c.signals['日线_倒0笔_方向'] == '向下_任意_任意_0' assert c.signals['日线_倒0笔_长度'] == '5到9根K线_任意_任意_0'
[ "zeng_bin8888@163.com" ]
zeng_bin8888@163.com
4edd2ce054246103a8e3827a9b3be861c706652a
09379c13eea9c6b806c43fca12ebf182684ea11f
/Day12/04装饰器进阶.py
af26a492d965e1d4d3728d61152eea7cccc77e28
[]
no_license
ExplorerX/PythonProjects
0d97348dc9712c622e4f20e9c33b3904e2810e1d
de23e5f7a9415bf983f9c99ba5c0bd12dc7b6a99
refs/heads/master
2020-04-19T10:39:12.788123
2019-02-26T05:38:47
2019-02-26T05:38:47
168,146,465
1
0
null
null
null
null
UTF-8
Python
false
false
1,700
py
# Decorators that take arguments
# A decorator is, at its core, a closure
# The essence of a closure: an inner function uses a variable of the outer function,
# and that variable stays alive in memory
# import time
# FLAG = True
#
#
# def wrapper(flag):
#     def tim(f):
#         def inner(*args, **kwargs):
#             if not flag:
#                 start = time.time()
#                 ret = f(*args, **kwargs)
#                 end = time.time()
#                 print(end - start)
#                 return ret
#             else:
#                 ret = f(*args, **kwargs)
#                 return ret
#         return inner
#     return tim
#
#
# @wrapper(FLAG)
# def func(*args):
#     time.sleep(0.1)
#     sum1 = 0
#     for i in args:
#         sum1 += i
#     return sum1
#
#
# li = [1, 2, 3, 4]
# result = func(1, 2, 3, 4)
# print(result)


# Several decorators stacked on one function nest like Russian dolls
def wrapper1(f):
    def inner1(*args, **kwargs):
        print('This is front string in inner1!')
        ret = f(*args, **kwargs)
        print('This is behind string in inner1!')
        return ret
    return inner1


def wrapper2(f):
    def inner2(*args, **kwargs):
        print('This is front string in inner2!')
        ret = f(*args, **kwargs)
        print('This is behind string in inner2!')
        return ret
    return inner2


@wrapper1
@wrapper2
def func(*args):
    sum1 = 0
    print('This is a sum function!')
    for i in args:
        sum1 += i
    return sum1


result = func(1, 2, 3, 4, 5, 6)
print(result)

# Output:
"""
This is front string in inner1!
This is front string in inner2!
This is a sum function!
This is behind string in inner2!
This is behind string in inner1!
21
"""
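The stacked @wrapper1 / @wrapper2 above is just syntax for manual nesting, with the decorator closest to the function applied first. A small check of that equivalence, assuming wrapper1 and wrapper2 from this file are in scope:

def plain_sum(*args):
    print('This is a sum function!')
    return sum(args)

manually_wrapped = wrapper1(wrapper2(plain_sum))   # same as stacking @wrapper1 @wrapper2
print(manually_wrapped(1, 2, 3) == 6)              # True, with the same inner2-inside-inner1 print order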
[ "384470140@qq.com" ]
384470140@qq.com
40b395494ca5039218e435761d3aac136f825bd5
ebf723d6066e157ad5cedf94099deb62e6191390
/py/yahoo_procon2019_qual_b.py
ec66b16507bbf79dc272e9e4cf3a9ffbaa169713
[]
no_license
PeterGrainfield/AltCoder
a960bd2642887502829bb4078a35712c492a8d54
378cebdf1cf713310a83575b4dc1e39834e76e59
refs/heads/master
2023-05-08T16:32:53.653689
2021-05-31T06:10:55
2021-05-31T06:10:55
368,459,650
0
0
null
null
null
null
UTF-8
Python
false
false
230
py
lab = [list(map(int, input().split())) for _ in range(3)]
road = [0] * 4
for ab in lab:
    road[ab[0]-1] += 1
    road[ab[1]-1] += 1
if min(road) == 0:
    print("NO")
elif max(road) == 3:
    print("NO")
else:
    print("YES")
[ "peter@vollegunord.work" ]
peter@vollegunord.work
05b39c43cf82306a1e921f4a016fbda0bddbc0d2
24e843a90a3b3a37cc4d76a207f41d1fc628c2e7
/python3/solved/P674. Longest Continuous Increasing Subsequence.py
e9b047b175727dd2037cea06f93d22535d18a00e
[]
no_license
erikliu0801/leetcode
c595ea786716f7df86bd352c1e8d691f1870ec70
1de7bfe192324f9de28afa06b9539331c87d1346
refs/heads/master
2023-08-07T14:47:19.074076
2021-09-05T09:46:35
2021-09-05T09:46:35
224,321,259
0
0
null
null
null
null
UTF-8
Python
false
false
1,720
py
# ToDo:
"""
674. Longest Continuous Increasing Subsequence
Easy

Given an unsorted array of integers, find the length of longest continuous
increasing subsequence (subarray).

Note: Length of the array will not exceed 10,000.
"""
from typing import List

# Conditions & Concepts
"""
"""

# Code
#1
"""
Runtime Error -- Last executed input: [1,3,5,7]
Runtime Error -- Last executed input: [1]
Success
Runtime: 72 ms, faster than 93.67% of Python3 online submissions for
Longest Continuous Increasing Subsequence.
Memory Usage: 13.9 MB, less than 95.65% of Python3 online submissions for
Longest Continuous Increasing Subsequence.
"""
def findLengthOfLCIS(nums):
    """
    nums: List[int]
    rtype: int
    """
    if len(nums) < 2:
        return len(nums)
    count = 1
    nums_count = []
    for i in range(1, len(nums)):
        if nums[i] > nums[i-1]:
            count += 1
            if i == len(nums) - 1:
                nums_count.append(count)
        else:
            nums_count.append(count)
            count = 1
    return max(nums_count)


## submit part
class Solution:
    def findLengthOfLCIS(self, nums: List[int]) -> int:
        # delegate to the module-level implementation above
        return findLengthOfLCIS(nums)


# Test
## Functional Test
if __name__ == '__main__':
    input_nums = [[1, 3, 5, 4, 7], [2, 2, 2, 2, 2], [1, 3, 5, 7], [1]]
    expected_output = [3, 1, 4, 1]
    for i in range(len(input_nums)):
        if findLengthOfLCIS(input_nums[i]) != expected_output[i]:
            print("Wrong!!!", ' Output:', findLengthOfLCIS(input_nums[i]),
                  '; Expected Output:', expected_output[i])
        else:
            print("Right")
    # print(findLengthOfLCIS(input_nums[-1]))

    ## Performance Test
    import cProfile
    cProfile.run('findLengthOfLCIS(input_nums[0])')

    ## Unit Test
    import unittest

    class Test(unittest.TestCase):
        def test(self):
            for nums, expected in zip(input_nums, expected_output):
                self.assertEqual(findLengthOfLCIS(nums), expected)

    unittest.main(exit=False)
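For reference, the same scan can be done in a single pass with O(1) extra space by tracking only the current run and the best run seen so far; this is an alternative sketch, not the submitted solution above.

def find_length_of_lcis_onepass(nums):
    best = run = 1 if nums else 0
    for i in range(1, len(nums)):
        run = run + 1 if nums[i] > nums[i - 1] else 1
        best = max(best, run)
    return best

print([find_length_of_lcis_onepass(x) for x in ([1, 3, 5, 4, 7], [2, 2, 2, 2, 2], [], [1])])
# [3, 1, 0, 1]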
[ "erikliu0801@gmail.com" ]
erikliu0801@gmail.com
e24fedc5e7ee860538e5849e1ecaa5874d393e8f
88e3ae7a21301c56f25fb7561d354cb169139932
/likebee/core/migrations/0002_auto_20190113_1657.py
af66823781f4b2dd875b0cc34fbdeefbeaeeada6
[ "MIT" ]
permissive
ow7/likebee
9cf54a3c45d031cc92e9a9cdebe11e7db11acf6a
0a0dd6368ef43b53fb8315eb5eb14663067ef07c
refs/heads/master
2020-04-16T11:13:24.643022
2019-04-23T07:34:20
2019-04-23T07:34:20
165,528,187
1
0
null
null
null
null
UTF-8
Python
false
false
794
py
# Generated by Django 2.1.5 on 2019-01-13 18:57

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='sprint',
            name='finish_on',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Termina em'),
        ),
        migrations.AddField(
            model_name='sprint',
            name='start_on',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Inicia em'),
        ),
        migrations.AlterField(
            model_name='sprint',
            name='name',
            field=models.CharField(default='Sprint', max_length=200, verbose_name='Nome'),
        ),
    ]
[ "kleberr@msn.com" ]
kleberr@msn.com
06ddd7dcca2fe0eb55e52f4f2f0c3f73f995e301
0a0cc1d675d57704e6fd15e60f7ef752fe296f6d
/code/data_utils.py
9a62de4d851bd06e2846755e5a801a3137677d43
[]
no_license
RichardcLee/SRGAN-LYH
83660ff4f92fe6ae61a030cfd7ba71d0b080d2fd
726e5b172fa3790976ff17efb5401f52efd23816
refs/heads/master
2021-07-08T17:26:33.109918
2021-04-20T07:30:12
2021-04-20T07:30:12
239,928,994
3
0
null
null
null
null
UTF-8
Python
false
false
3,598
py
from os import listdir from os.path import join from PIL import Image from torch.utils.data.dataset import Dataset from torchvision.transforms import Compose, RandomCrop, ToTensor, ToPILImage, CenterCrop, Resize def is_image_file(filename): return any(filename.endswith(extension) for extension in ['.png', '.jpg', '.jpeg', '.PNG', '.JPG', '.JPEG']) def calculate_valid_crop_size(crop_size, upscale_factor): return crop_size - (crop_size % upscale_factor) def train_hr_transform(crop_size): return Compose([ RandomCrop(crop_size), ToTensor(), ]) def train_lr_transform(crop_size, upscale_factor): return Compose([ ToPILImage(), Resize(crop_size // upscale_factor, interpolation=Image.BICUBIC), ToTensor() ]) def display_transform(): return Compose([ ToPILImage(), Resize(400), CenterCrop(400), ToTensor() ]) class TrainDatasetFromFolder(Dataset): def __init__(self, dataset_dir, crop_size, upscale_factor): super(TrainDatasetFromFolder, self).__init__() self.image_filenames = [join(dataset_dir, x) for x in listdir(dataset_dir) if is_image_file(x)] crop_size = calculate_valid_crop_size(crop_size, upscale_factor) self.hr_transform = train_hr_transform(crop_size) self.lr_transform = train_lr_transform(crop_size, upscale_factor) def __getitem__(self, index): hr_image = self.hr_transform(Image.open(self.image_filenames[index])) lr_image = self.lr_transform(hr_image) return lr_image, hr_image def __len__(self): return len(self.image_filenames) class ValDatasetFromFolder(Dataset): def __init__(self, dataset_dir, upscale_factor): super(ValDatasetFromFolder, self).__init__() self.upscale_factor = upscale_factor self.image_filenames = [join(dataset_dir, x) for x in listdir(dataset_dir) if is_image_file(x)] def __getitem__(self, index): hr_image = Image.open(self.image_filenames[index]) w, h = hr_image.size crop_size = calculate_valid_crop_size(min(w, h), self.upscale_factor) lr_scale = Resize(crop_size // self.upscale_factor, interpolation=Image.BICUBIC) hr_scale = Resize(crop_size, interpolation=Image.BICUBIC) hr_image = CenterCrop(crop_size)(hr_image) lr_image = lr_scale(hr_image) hr_restore_img = hr_scale(lr_image) return ToTensor()(lr_image), ToTensor()(hr_restore_img), ToTensor()(hr_image) def __len__(self): return len(self.image_filenames) class TestDatasetFromFolder(Dataset): def __init__(self, lr_path, hr_path, upscale_factor): super(TestDatasetFromFolder, self).__init__() self.lr_path = lr_path self.hr_path = hr_path self.upscale_factor = upscale_factor self.lr_filenames = [join(self.lr_path, x) for x in listdir(self.lr_path) if is_image_file(x)] self.hr_filenames = [join(self.hr_path, x) for x in listdir(self.hr_path) if is_image_file(x)] def __getitem__(self, index): image_name = self.lr_filenames[index].split('/')[-1] lr_image = Image.open(self.lr_filenames[index]) w, h = lr_image.size hr_image = Image.open(self.hr_filenames[index]) hr_scale = Resize((self.upscale_factor * h, self.upscale_factor * w), interpolation=Image.BICUBIC) hr_restore_img = hr_scale(lr_image) return image_name, ToTensor()(lr_image), ToTensor()(hr_restore_img), ToTensor()(hr_image) def __len__(self): return len(self.lr_filenames)
[ "0yunhow@gmail.com" ]
0yunhow@gmail.com
ccbd80416743a01517fc733d6407b812ed9d7c4c
93713f46f16f1e29b725f263da164fed24ebf8a8
/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/units/astrophys.py
9a4a5f2f26913ab6e4eb9c7e050137ab68f603e3
[ "BSD-3-Clause" ]
permissive
holzschu/Carnets
b83d15136d25db640cea023abb5c280b26a9620e
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
refs/heads/master
2023-02-20T12:05:14.980685
2023-02-13T15:59:23
2023-02-13T15:59:23
167,671,526
541
36
BSD-3-Clause
2022-11-29T03:08:22
2019-01-26T09:26:46
Python
UTF-8
Python
false
false
7,282
py
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the astrophysics-specific units. They are also available in the `astropy.units` namespace. """ from . import si from astropy.constants import si as _si from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes, set_enabled_units) # To ensure si units of the constants can be interpreted. set_enabled_units([si]) import numpy as _numpy _ns = globals() ########################################################################### # LENGTH def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True, doc="astronomical unit: approximately the mean Earth--Sun " "distance.") def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True, doc="parsec: approximately 3.26 light-years.") def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns, doc="Solar radius", prefixes=False, format={'latex': r'R_{\odot}', 'unicode': 'R⊙'}) def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'], _si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius", # LaTeX jupiter symbol requires wasysym format={'latex': r'R_{\rm J}', 'unicode': 'R♃'}) def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns, prefixes=False, doc="Earth radius", # LaTeX earth symbol requires wasysym format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'}) def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m), namespace=_ns, prefixes=True, doc="Light year") ########################################################################### # AREAS def_unit(['barn', 'barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True, doc="barn: unit of area used in HEP") ########################################################################### # ANGULAR MEASUREMENTS def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad, namespace=_ns, prefixes=False, doc="cycle: angular measurement, a full turn or rotation") ########################################################################### # MASS def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns, prefixes=False, doc="Solar mass", format={'latex': r'M_{\odot}', 'unicode': 'M⊙'}) def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'], _si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass", # LaTeX jupiter symbol requires wasysym format={'latex': r'M_{\rm J}', 'unicode': 'M♃'}) def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns, prefixes=False, doc="Earth mass", # LaTeX earth symbol requires wasysym format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'}) def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass", format={'latex': r'M_{p}', 'unicode': 'Mₚ'}) def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass", format={'latex': r'M_{e}', 'unicode': 'Mₑ'}) # Unified atomic mass unit def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns, prefixes=True, exclude_prefixes=['a', 'da'], doc="Unified atomic mass unit") ########################################################################## # ENERGY # Here, explicitly convert the planck constant to 'eV s' since the constant # can override that to give a more precise value that takes into account # covariances between e and h. Eventually, this may also be replaced with # just `_si.Ryd.to(eV)`. 
def_unit(['Ry', 'rydberg'], (_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV), namespace=_ns, prefixes=True, doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg " "constant", format={'latex': r'R_{\infty}', 'unicode': 'R∞'}) ########################################################################### # ILLUMINATION def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns, prefixes=False, doc="Solar luminance", format={'latex': r'L_{\odot}', 'unicode': 'L⊙'}) ########################################################################### # SPECTRAL DENSITY def_unit((['ph', 'photon'], ['photon']), format={'ogip': 'photon', 'vounit': 'photon'}, namespace=_ns, prefixes=True) def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz, namespace=_ns, prefixes=True, doc="Jansky: spectral flux density") def_unit(['R', 'Rayleigh', 'rayleigh'], (1e10 / (4 * _numpy.pi)) * ph * si.m ** -2 * si.s ** -1 * si.sr ** -1, namespace=_ns, prefixes=True, doc="Rayleigh: photon flux") ########################################################################### # MISCELLANEOUS # Some of these are very FITS-specific and perhaps considered a mistake. # Maybe they should be moved into the FITS format class? # TODO: This is defined by the FITS standard as "relative to the sun". # Is that mass, volume, what? def_unit(['Sun'], namespace=_ns) ########################################################################### # EVENTS def_unit((['ct', 'count'], ['count']), format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'}, namespace=_ns, prefixes=True, exclude_prefixes=['p']) def_unit((['pix', 'pixel'], ['pixel']), format={'ogip': 'pixel', 'vounit': 'pixel'}, namespace=_ns, prefixes=True) ########################################################################### # MISCELLANEOUS def_unit(['chan'], namespace=_ns, prefixes=True) def_unit(['bin'], namespace=_ns, prefixes=True) def_unit((['vox', 'voxel'], ['voxel']), format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'}, namespace=_ns, prefixes=True) def_unit((['bit', 'b'], ['bit']), namespace=_ns, prefixes=si_prefixes + binary_prefixes) def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns, format={'vounit': 'byte'}, prefixes=si_prefixes + binary_prefixes, exclude_prefixes=['d']) def_unit(['adu'], namespace=_ns, prefixes=True) def_unit(['beam'], namespace=_ns, prefixes=True) def_unit(['electron'], doc="Number of electrons", namespace=_ns, format={'latex': r'e^{-}', 'unicode': 'e⁻'}) # This is not formally a unit, but is used in that way in many contexts, and # an appropriate equivalency is only possible if it's treated as a unit (see # https://arxiv.org/pdf/1308.4150.pdf for more) # Also note that h or h100 or h_100 would be a better name, but they either # conflict or have numbers in them, which is apparently disallowed def_unit(['littleh'], namespace=_ns, prefixes=False, doc="Reduced/\"dimensionless\" Hubble constant", format={'latex': r'h_{100}'}) ########################################################################### # CLEANUP del UnitBase del def_unit del si ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
[ "nicolas.holzschuch@inria.fr" ]
nicolas.holzschuch@inria.fr
e61020e2fbd20892008b21a3e1c7a11a32a01765
e714dfd95cb74f4e357af8d085e4dcaf8b7ecdf3
/0x0A-python-inheritance/2-is_same_class.py
aa4b57fcbf256d54c6bd85d4a1fca0b859008f73
[]
no_license
MCavigli/holbertonschool-higher_level_programming_classic
2cea769dc1fd39e90f6ef74cdb3191e2472b0282
870548f964a3deac4a41918e9c3d0bad6cd732b4
refs/heads/master
2022-03-06T09:33:56.839118
2019-09-27T06:04:34
2019-09-27T06:04:34
184,122,977
3
4
null
null
null
null
UTF-8
Python
false
false
399
py
#!/usr/bin/python3
"""This module holds a function that checks if an object is an instance of
a specified class
"""


def is_same_class(obj, a_class):
    """Returns True if the object is exactly an instance of the specified
    class; otherwise False

    Args:
        obj: the object to check
        a_class: The class to check against
    """
    return True if type(obj) is a_class else False
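A short usage example of the exact-type check, contrasted with isinstance (which also accepts subclasses); it assumes is_same_class above is in scope.

print(is_same_class(1, int))      # True
print(is_same_class(True, int))   # False -- type(True) is bool, a subclass of int
print(isinstance(True, int))      # True  -- isinstance accepts subclasses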
[ "mcavigli@gmail.com" ]
mcavigli@gmail.com
19313e15aa82c9fa0f64105caffdbd82c478fe72
09e57dd1374713f06b70d7b37a580130d9bbab0d
/data/p3BR/R1/benchmark/startQiskit_QC239.py
5f958a1bc26ad324bb6b43561bf565c7887f736f
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
5,430
py
# qubit number=3 # total number=45 import numpy as np from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ from qiskit.visualization import plot_histogram from typing import * from pprint import pprint from math import log2 from collections import Counter from qiskit.test.mock import FakeVigo, FakeYorktown kernel = 'circuit/bernstein' def bitwise_xor(s: str, t: str) -> str: length = len(s) res = [] for i in range(length): res.append(str(int(s[i]) ^ int(t[i]))) return ''.join(res[::-1]) def bitwise_dot(s: str, t: str) -> str: length = len(s) res = 0 for i in range(length): res += int(s[i]) * int(t[i]) return str(res % 2) def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit: # implement the oracle O_f # NOTE: use multi_control_toffoli_gate ('noancilla' mode) # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate controls = QuantumRegister(n, "ofc") target = QuantumRegister(1, "oft") oracle = QuantumCircuit(controls, target, name="Of") for i in range(2 ** n): rep = np.binary_repr(i, n) if f(rep) == "1": for j in range(n): if rep[j] == "0": oracle.x(controls[j]) oracle.mct(controls, target[0], None, mode='noancilla') for j in range(n): if rep[j] == "0": oracle.x(controls[j]) # oracle.barrier() # oracle.draw('mpl', filename=(kernel + '-oracle.png')) return oracle def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit: # implement the Bernstein-Vazirani circuit zero = np.binary_repr(0, n) b = f(zero) # initial n + 1 bits input_qubit = QuantumRegister(n+1, "qc") classicals = ClassicalRegister(n, "qm") prog = QuantumCircuit(input_qubit, classicals) # inverse last one (can be omitted if using O_f^\pm) prog.x(input_qubit[n]) # circuit begin prog.h(input_qubit[1]) # number=1 prog.rx(-0.09738937226128368,input_qubit[2]) # number=2 prog.h(input_qubit[1]) # number=33 prog.cz(input_qubit[2],input_qubit[1]) # number=34 prog.h(input_qubit[1]) # number=35 prog.h(input_qubit[1]) # number=3 # apply H to get superposition for i in range(n): prog.h(input_qubit[i]) prog.h(input_qubit[n]) prog.barrier() # apply oracle O_f oracle = build_oracle(n, f) prog.append( oracle.to_gate(), [input_qubit[i] for i in range(n)] + [input_qubit[n]]) # apply H back (QFT on Z_2^n) for i in range(n): prog.h(input_qubit[i]) prog.barrier() # measure return prog def get_statevector(prog: QuantumCircuit) -> Any: state_backend = Aer.get_backend('statevector_simulator') statevec = execute(prog, state_backend).result() quantum_state = statevec.get_statevector() qubits = round(log2(len(quantum_state))) quantum_state = { "|" + np.binary_repr(i, qubits) + ">": quantum_state[i] for i in range(2 ** qubits) } return quantum_state def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any: # Q: which backend should we use? 
# get state vector quantum_state = get_statevector(prog) # get simulate results # provider = IBMQ.load_account() # backend = provider.get_backend(backend_str) # qobj = compile(prog, backend, shots) # job = backend.run(qobj) # job.result() backend = Aer.get_backend(backend_str) # transpile/schedule -> assemble -> backend.run results = execute(prog, backend, shots=shots).result() counts = results.get_counts() a = Counter(counts).most_common(1)[0][0][::-1] return { "measurements": counts, # "state": statevec, "quantum_state": quantum_state, "a": a, "b": b } def bernstein_test_1(rep: str): """011 . x + 1""" a = "011" b = "1" return bitwise_xor(bitwise_dot(a, rep), b) def bernstein_test_2(rep: str): """000 . x + 0""" a = "000" b = "0" return bitwise_xor(bitwise_dot(a, rep), b) def bernstein_test_3(rep: str): """111 . x + 1""" a = "111" b = "1" return bitwise_xor(bitwise_dot(a, rep), b) if __name__ == "__main__": n = 2 a = "11" b = "1" f = lambda rep: \ bitwise_xor(bitwise_dot(a, rep), b) prog = build_circuit(n, f) sample_shot =4000 writefile = open("../data/startQiskit_QC239.csv", "w") # prog.draw('mpl', filename=(kernel + '.png')) IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() backend = provider.get_backend("ibmq_belem") circuit1 = transpile(prog, FakeYorktown()) circuit1.h(qubit=2) circuit1.x(qubit=3) circuit1.measure_all() info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts() print(info, file=writefile) print("results end", file=writefile) print(circuit1.depth(), file=writefile) print(circuit1, file=writefile) writefile.close()
[ "wangjiyuan123@yeah.net" ]
wangjiyuan123@yeah.net
7b1d972987a309a0f4c5c451712b01728c5e99c4
a9fbbfc990ad79f412d8078d27b8937e5ef00bde
/inheritance/exercise/problem_02/reptile.py
b485963519f9b88bc3f05e703e0f4304972141f5
[ "MIT" ]
permissive
BoyanPeychinov/object_oriented_programming
e2d23ec0ff681ca2c6cf1805e581af3d601aafee
a960721c7c17710bd7b151a9025647e953435962
refs/heads/main
2023-03-31T16:19:20.239216
2021-03-30T19:43:42
2021-03-30T19:43:42
342,281,483
0
0
null
null
null
null
UTF-8
Python
false
false
135
py
from problem_02.animal import Animal


class Reptile(Animal):
    # def __init__(self, name):
    #     super().__init__(name)
    pass
[ "BoyanPeychinov@gmail.com" ]
BoyanPeychinov@gmail.com
0e34820d502392826161fb5b3bcd150e9b63c521
80831d77ef6fc3b485be80501b73ccb30ce5e444
/networkapi/plugins/F5/lb.py
c9ab8665c63964bffc6932c9cf03eb11aba63fb8
[ "Apache-2.0", "BSD-3-Clause", "MIT", "LicenseRef-scancode-public-domain", "BSD-2-Clause" ]
permissive
globocom/GloboNetworkAPI
e2fdf5a9e6070359e90801bf3e45c2d499f199c5
eb27e1d977a1c4bb1fee8fb51b8d8050c64696d9
refs/heads/master
2023-06-25T21:34:04.923940
2023-05-29T12:07:20
2023-05-29T12:07:20
22,734,387
86
74
Apache-2.0
2023-05-29T12:07:21
2014-08-07T19:47:43
Python
UTF-8
Python
false
false
2,601
py
# -*- coding: utf-8 -*-
import logging
from time import sleep

import bigsuds

from networkapi.plugins import exceptions as base_exceptions
from networkapi.system.facade import get_value as get_variable

log = logging.getLogger(__name__)


class Lb(object):

    def __init__(self, hostname, username, password, session=True):
        self._hostname = hostname
        self._username = username
        self._password = password
        self._time_reconn = 10

        try:
            self._channel = bigsuds.BIGIP(
                hostname=self._hostname,
                username=self._username,
                password=self._password
            )
        except Exception, e:
            logging.critical('Unable to connect to BIG-IP. Details: %s' % (e))
            raise base_exceptions.CommandErrorException(e)
        else:
            log.info('Connected in hostname:%s' % hostname)
            try:
                self._version = self._channel.System.SystemInfo.get_version()

                if self._version[8:len(self._version)].split('.')[0] <= 10:
                    raise base_exceptions.UnsupportedVersion(
                        'This plugin only supports BIG-IP v11 or above')
                else:
                    if session:
                        log.info('Try get new session')
                        session_cur = self._channel.System.Session.get_session_timeout()
                        log.info('Session Timeout Current: %s' % session_cur)
                        session_timeout = get_variable(
                            'set_session_timeout_plugin_f5', '60')
                        if int(session_cur) > int(session_timeout):
                            self._channel.System.Session.set_session_timeout(
                                session_timeout)
                        self._channel = self.get_session()
            except Exception, e:
                log.error(e)
                raise base_exceptions.CommandErrorException(e)

    def get_session(self):
        try:
            channel = self._channel.with_session_id()
            log.info('Session %s', channel)
        except Exception, e:
            if 'There are too many existing user sessions.'.lower() in str(e).lower():
                self._time_reconn *= 2
                log.warning(
                    'There are too many existing user sessions. '
                    'Trying again in %s seconds' % self._time_reconn)
                sleep(self._time_reconn)
                self.get_session()
            else:
                raise e
        else:
            return channel
[ "ederson.brilhante@corp.globo.com" ]
ederson.brilhante@corp.globo.com
968e3135fb1bfbcd2d8917579b2fb4b0c066cec9
3fc8c5588252ce522d8d0b7fdda398397475abd7
/enigma/rotor/__init__.py
0a341da2b84f2c9f997739721df1b37e879ff132
[ "MIT" ]
permissive
lukeshiner/enigma
242362a37b7c82fabef237af424a5eca7bc6cdc9
51b9dcb9ec8190b780775135dc88b95fbba120fe
refs/heads/master
2023-07-23T12:10:11.397318
2023-07-19T10:34:37
2023-07-19T10:34:37
137,946,565
0
0
MIT
2023-09-11T08:43:50
2018-06-19T21:16:36
Python
UTF-8
Python
false
false
185
py
"""Enigma's rotors.""" from .reflector import Reflector # NOQA from .rotor import Rotor # NOQA from .rotor_mechanism import RotorMechanism # NOQA from .wiring import Wiring # NOQA
[ "luke@lukeshiner.com" ]
luke@lukeshiner.com
47a1b00a4c65511b02ad861524c25a8f5032850e
d9e0016dce735cf1d72b8f922733c1aa2eb0ac69
/tests/test_dicom2nifti.py
d3dc3e3dc00f2f6bb02fa808f0d14a1c55f07931
[ "MIT" ]
permissive
fmljr/dicom2nifti
4228af03d00342fd3ab6b2fbf1846b5af2dc7790
807dbf5d8d16f9cb1fc681344de4f1eeed04b0dd
refs/heads/master
2021-01-12T10:16:17.640273
2016-10-26T11:39:22
2016-10-26T11:39:22
76,402,823
0
0
null
2016-12-13T22:11:56
2016-12-13T22:11:56
null
UTF-8
Python
false
false
4,839
py
# -*- coding: utf-8 -*- """ dicom2nifti @author: abrys """ import unittest import tempfile import shutil import os import dicom2nifti import tests.test_data as test_data from tests.test_tools import compare_nifti, ground_thruth_filenames class TestConversionDicom(unittest.TestCase): def test_main_function(self): tmp_output_dir = tempfile.mkdtemp() try: results = dicom2nifti.dicom_series_to_nifti(test_data.SIEMENS_ANATOMICAL, os.path.join(tmp_output_dir, 'test.nii.gz'), False) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.SIEMENS_ANATOMICAL)[0]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.SIEMENS_ANATOMICAL, os.path.join(tmp_output_dir, 'test.nii.gz'), True) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.SIEMENS_ANATOMICAL)[1]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.SIEMENS_ANATOMICAL_IMPLICIT, os.path.join(tmp_output_dir, 'test.nii.gz'), False) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.SIEMENS_ANATOMICAL_IMPLICIT)[0]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.SIEMENS_ANATOMICAL_IMPLICIT, os.path.join(tmp_output_dir, 'test.nii.gz'), True) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.SIEMENS_ANATOMICAL_IMPLICIT)[1]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.GENERIC_ANATOMICAL, os.path.join(tmp_output_dir, 'test.nii.gz'), False) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.GENERIC_ANATOMICAL)[0]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.GENERIC_ANATOMICAL, os.path.join(tmp_output_dir, 'test.nii.gz'), True) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.GENERIC_ANATOMICAL)[1]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.GENERIC_ANATOMICAL_IMPLICIT, os.path.join(tmp_output_dir, 'test.nii.gz'), False) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.GENERIC_ANATOMICAL_IMPLICIT)[0]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.GENERIC_ANATOMICAL_IMPLICIT, os.path.join(tmp_output_dir, 'test.nii.gz'), True) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.GENERIC_ANATOMICAL_IMPLICIT)[1]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.GENERIC_COMPRESSED, os.path.join(tmp_output_dir, 'test.nii.gz'), False) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.GENERIC_COMPRESSED)[0]) == True results = dicom2nifti.dicom_series_to_nifti(test_data.GENERIC_COMPRESSED, os.path.join(tmp_output_dir, 'test.nii.gz'), True) assert compare_nifti(results['NII_FILE'], ground_thruth_filenames(test_data.GENERIC_COMPRESSED)[1]) == True finally: shutil.rmtree(tmp_output_dir) def test_convert_directory(self): tmp_output_dir = tempfile.mkdtemp() try: dicom2nifti.convert_directory(test_data.GENERIC_ANATOMICAL, tmp_output_dir) finally: shutil.rmtree(tmp_output_dir) if __name__ == '__main__': unittest.main()
[ "arne.brys@icometrix.com" ]
arne.brys@icometrix.com
56c2ff039bc08f1a07ec735e680f16991d49556a
d4f1bd5e52fe8d85d3d0263ede936928d5811bff
/Python/Problem Solving/AlgorithmBook/9-1-1 Future City.py
fe41df785080e0f3bb92f7720429ed5e7b550f36
[]
no_license
ambosing/PlayGround
37f7d071c4402599995a50cac1e7f1a85c6d10dd
0d5262dbb2fa2128ecb3fd969244fa647b104928
refs/heads/master
2023-04-08T04:53:31.747838
2023-03-23T06:32:47
2023-03-23T06:32:47
143,112,370
0
0
null
null
null
null
UTF-8
Python
false
false
588
py
import sys

INF = int(1e9)

n, m = map(int, sys.stdin.readline().split())
graph = [[INF] * (n + 1) for _ in range(n + 1)]

for a in range(1, n + 1):
    graph[a][a] = 0

for _ in range(m):
    a, b = map(int, sys.stdin.readline().split())
    graph[a][b] = 1
    graph[b][a] = 1

x, k = map(int, sys.stdin.readline().split())

# Floyd-Warshall; the pivot loop must not reuse `k`, which already holds the
# transit city read above (reusing it would make the final lookup use the
# loop's last value instead of the input).
for mid in range(1, n + 1):
    for a in range(1, n + 1):
        for b in range(1, n + 1):
            graph[a][b] = min(graph[a][b], graph[a][mid] + graph[mid][b])

if graph[1][k] == INF or graph[k][x] == INF:
    print(-1)
else:
    print(graph[1][k] + graph[k][x])
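A tiny, made-up sanity check of the same all-pairs shortest-path routine (not the judge's sample input): on the cycle 1-2-3-4-1, travelling 1 -> 3 -> 4 costs 2 + 1 = 3.

INF = int(1e9)
n = 4
dist = [[INF] * (n + 1) for _ in range(n + 1)]
for v in range(1, n + 1):
    dist[v][v] = 0
for a, b in [(1, 2), (2, 3), (3, 4), (4, 1)]:
    dist[a][b] = dist[b][a] = 1
for mid in range(1, n + 1):
    for a in range(1, n + 1):
        for b in range(1, n + 1):
            dist[a][b] = min(dist[a][b], dist[a][mid] + dist[mid][b])
print(dist[1][3] + dist[3][4])  # 2 + 1 = 3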
[ "ambosing_@naver.com" ]
ambosing_@naver.com
7748936c73223f57680a54fc92426bd279181b0e
af32685025305eb77125b6a0a544522422be3b2f
/basics/croping.py
772a3a71b2adc2d835a6db1f5293d8fbb5c57577
[]
no_license
Mohammed-abdelawal/py-opencv-practice
5e913f48f7ad875b00ce8a5e7bb692b2aa2efb35
876df49d54a0083650f3f9682c5e3dc226e31b76
refs/heads/master
2022-10-10T04:19:27.300790
2020-06-09T20:05:22
2020-06-09T20:05:22
270,372,834
1
0
null
null
null
null
UTF-8
Python
false
false
334
py
from cv2 import cv2
import numpy as np

img = cv2.imread('data/home.png')

print(img.item(100, 100, 0))
print(img.shape)
print(img.size)
print(img.dtype)

ball = img[280:340, 330:390]
cv2.imshow('old', img)
cv2.imshow('cropped area', ball)
img[273:333, 100:160] = ball
cv2.imshow('new', img)

cv2.waitKey(0)
cv2.destroyAllWindows()
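The ROI copy above relies on NumPy slicing being [y1:y2, x1:x2] and on source and destination slices sharing the same 60x60 shape. The same idea on a synthetic array, which runs without the image file; copy() detaches the patch because plain slices are views.

import numpy as np

canvas = np.zeros((100, 100, 3), dtype=np.uint8)
canvas[10:20, 30:40] = 255             # paint a 10x10 white patch
patch = canvas[10:20, 30:40].copy()    # detach it from the canvas
canvas[50:60, 50:60] = patch           # paste: shapes (10, 10, 3) must match
print(patch.shape, canvas[55, 55, 0])  # (10, 10, 3) 255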
[ "mohammedabdelawaldeveloper@gmail.com" ]
mohammedabdelawaldeveloper@gmail.com
61f52032ebbb4a63f30604d312a1fa77475aa7fe
b4e4399f6d18ee83760604fc67c90d3f5eac52dd
/Python/11.PythonFunctionals/75.ValidatingEmail.py
ecefafd0f2b944edb0a95f31372d5b79104984b9
[]
no_license
angelvv/HackerRankSolution
88415c3ace68ddc10c76ae8df43ab5193aa921d4
8b2c323507f9a1826b4156aeab94815f41b6fc84
refs/heads/master
2021-07-17T20:51:50.758364
2020-05-26T17:25:05
2020-05-26T17:25:05
167,896,187
0
0
null
null
null
null
UTF-8
Python
false
false
686
py
def fun(s):
    # return True if s is a valid email, else return False
    try:
        username, url = s.split("@")
        website, extension = url.split(".")
    except ValueError:
        return False
    if not username.replace("-", "").replace("_", "").isalnum():
        return False
    if not website.isalnum():
        return False
    if not 1 <= len(extension) <= 3:
        return False
    return True


def filter_mail(emails):
    return list(filter(fun, emails))


if __name__ == '__main__':
    n = int(input())
    emails = []
    for _ in range(n):
        emails.append(input())

    filtered_emails = filter_mail(emails)
    filtered_emails.sort()
    print(filtered_emails)
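A non-interactive example of the same filter (the stdin loop above follows the HackerRank harness); the sample addresses below are illustrative.

sample = ["lara@hackerrank.com", "brian-23@hackerrank.com",
          "britts_54@hackerrank.com", "bad@@mail..com"]
print(sorted(filter_mail(sample)))
# ['brian-23@hackerrank.com', 'britts_54@hackerrank.com', 'lara@hackerrank.com']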
[ "angel.huang90@gmail.com" ]
angel.huang90@gmail.com
42fed85bf89726b3b133b5d263eceb2d46e6ef2d
6874015cb6043d1803b61f8978627ddce64963b4
/django/contrib/gis/utils/wkt.py
626eab9d6e36dd5f62cab03c5178868563ee66ad
[ "BSD-3-Clause", "Python-2.0" ]
permissive
yephper/django
25fbfb4147211d08ec87c41e08a695ac016454c6
cdd1689fb354886362487107156978ae84e71453
refs/heads/master
2021-01-21T12:59:14.443153
2016-04-27T09:51:41
2016-04-27T09:51:41
56,134,291
0
1
null
null
null
null
UTF-8
Python
false
false
1,982
py
""" Utilities for manipulating Geometry WKT. """ from django.utils import six def precision_wkt(geom, prec): """ Returns WKT text of the geometry according to the given precision (an integer or a string). If the precision is an integer, then the decimal places of coordinates WKT will be truncated to that number: >>> from django.contrib.gis.geos import Point >>> pnt = Point(5, 23) >>> pnt.wkt 'POINT (5.0000000000000000 23.0000000000000000)' >>> precision_wkt(pnt, 1) 'POINT (5.0 23.0)' If the precision is a string, it must be valid Python format string (e.g., '%20.7f') -- thus, you should know what you're doing. """ if isinstance(prec, int): num_fmt = '%%.%df' % prec elif isinstance(prec, six.string_types): num_fmt = prec else: raise TypeError # TODO: Support 3D geometries. coord_fmt = ' '.join([num_fmt, num_fmt]) def formatted_coords(coords): return ','.join(coord_fmt % c[:2] for c in coords) def formatted_poly(poly): return ','.join('(%s)' % formatted_coords(r) for r in poly) def formatted_geom(g): gtype = str(g.geom_type).upper() yield '%s(' % gtype if gtype == 'POINT': yield formatted_coords((g.coords,)) elif gtype in ('LINESTRING', 'LINEARRING'): yield formatted_coords(g.coords) elif gtype in ('POLYGON', 'MULTILINESTRING'): yield formatted_poly(g) elif gtype == 'MULTIPOINT': yield formatted_coords(g.coords) elif gtype == 'MULTIPOLYGON': yield ','.join('(%s)' % formatted_poly(p) for p in g) elif gtype == 'GEOMETRYCOLLECTION': yield ','.join(''.join(wkt for wkt in formatted_geom(child)) for child in g) else: raise TypeError yield ')' return ''.join(wkt for wkt in formatted_geom(geom))
[ "smileszzh@163.com" ]
smileszzh@163.com
3409b6ddd08874520206f0046806448740c5c21e
9830360802428854384d6b27a172102de0e59c8f
/2776.py
737239add71ab1b31f319104e5d8d171bb4e8a89
[]
no_license
banje/acmicpc
d4009535ec31892f706333d812c92fddead08aa1
69d44a3b60d2a559563b5a1055bcc2290090e35c
refs/heads/master
2022-07-20T20:01:56.623346
2020-05-16T11:30:17
2020-05-16T11:30:17
260,843,463
0
0
null
null
null
null
UTF-8
Python
false
false
245
py
a = int(input())
for i in range(a):
    b = int(input())
    c = set(map(int, input().split()))
    d = int(input())
    e = list(map(int, input().split()))
    for j in range(d):
        if e[j] in c:
            print(1)
        else:
            print(0)
[ "pak2t@naver.com" ]
pak2t@naver.com
8533b4b408672ace1a5fb2af100207eb14c91acc
cb94a4cdd7a9df17f9c6f1a03f8f4ff12c916cf3
/Python_Essential_Training/Exercise Files/Chap06/for.2.py
7a77df333bfaa16c51725a084f1e21a1691e55a8
[]
no_license
sedstan/LinkedIn-Learning-Python-Course
2b936d0f00703a6e66a872220ed47572123dc7fd
b4584218355bf07aa3d2939b950911eae67adb0b
refs/heads/master
2021-10-11T10:19:13.675662
2019-01-24T17:55:20
2019-01-24T17:55:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
188
py
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/

animals = ( 'bear', 'bunny', 'dog', 'cat', 'velociraptor' )

for pet in animals:
    if pet == 'dog': continue
    print(pet)
[ "sed@wearewhy.co.uk" ]
sed@wearewhy.co.uk
c46892506c1b9dcdb921b6d830ec352ff9d8c096
fdf531435b0a4d771083bab78f5a2f91b2ec1b28
/Hashing/Hashing II/2. Hashing + Sliding Window/2. Window String.py
e0b5714f9d7256422a8e49cf6a0f1275ba856489
[]
no_license
srajsonu/100DaysOfCode
d556cf4c8491d2bea2bf6c17cc4410f64ae71829
b25ff694a04a16bd2bdd33cf5bb84f9cbe5f3af6
refs/heads/main
2023-03-22T22:48:11.030576
2021-03-18T18:50:00
2021-03-18T18:50:00
325,747,085
0
0
null
null
null
null
UTF-8
Python
false
false
631
py
class Solution:
    def minWindow(self, A, B):
        # minimum window of A containing all characters of B (with multiplicity),
        # using a frequency hash plus a sliding window
        m, n = len(A), len(B)
        if m < n:
            return ''

        freqB = {}
        for ch in B:
            freqB[ch] = freqB.get(ch, 0) + 1

        freqA = {}
        matched = 0
        best_len, best_start = m + 1, 0
        i = 0
        for j in range(m):
            ch = A[j]
            freqA[ch] = freqA.get(ch, 0) + 1
            if ch in freqB and freqA[ch] <= freqB[ch]:
                matched += 1
            while matched == n:          # window covers B; shrink from the left
                if j - i + 1 < best_len:
                    best_len, best_start = j - i + 1, i
                left = A[i]
                freqA[left] -= 1
                if left in freqB and freqA[left] < freqB[left]:
                    matched -= 1
                i += 1
        return '' if best_len == m + 1 else A[best_start:best_start + best_len]


if __name__ == '__main__':
    A = "ADOBECODEBANC"
    B = "ABC"
    C = Solution()
    print(C.minWindow(A, B))  # BANC
[ "srajsonu02@gmail.com" ]
srajsonu02@gmail.com
26446c43755f56300804abd5f5b9d97113d69cb9
d52413173437ba73ecdf822ca895e659f00a8ce7
/kiwibackend/application/website/messages/http/packageUse_request.py
1e9eebdd11e508f0afbc3bb02fb584f586d612a3
[]
no_license
whiteprism/mywork
2329b3459c967c079d6185c5acabd6df80cab8ea
a8e568e89744ca7acbc59e4744aff2a0756d7252
refs/heads/master
2021-01-21T11:15:49.090408
2017-03-31T03:28:13
2017-03-31T03:28:13
83,540,646
0
0
null
null
null
null
UTF-8
Python
false
false
196
py
# -*- encoding:utf8 -*-
from messages.http import BaseHttp


class PackageUseRequest(BaseHttp):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.packageCode = ""
[ "snoster@163.com" ]
snoster@163.com
6ee1c27a32d25079040da61af38f1c0d53ab849f
8f6cc0e8bd15067f1d9161a4b178383e62377bc7
/__OLD_CODE_STORAGE/reinforcement_learning/a3c/from_internet/playground-ikostrikov-pytorch-a3c/envs.py
84224c825875b4c885430cd14323acae0c470d82
[ "MIT" ]
permissive
humorbeing/python_github
9c4dfc61a3cefbb266fefff335f6b28d05797e5e
e4b4b49bee7e7e3843c6874717779ce8d619bd02
refs/heads/master
2023-01-22T21:51:20.193131
2020-01-26T21:47:23
2020-01-26T21:47:23
163,707,778
0
0
null
2022-12-27T15:37:48
2019-01-01T01:58:18
Python
UTF-8
Python
false
false
2,163
py
import cv2
import gym
import numpy as np
from gym.spaces.box import Box
from matplotlib import pyplot as plt


def si(data):
    plt.imshow(data, interpolation='nearest')
    plt.show()


# Taken from https://github.com/openai/universe-starter-agent
def create_atari_env(env_id):
    # print('2')
    env = gym.make(env_id)
    env = AtariRescale42x42(env)
    env = NormalizedEnv(env)
    return env


def _process_frame42(frame):
    # si(frame)
    frame = frame[34:34 + 160, :160]
    # si(frame)
    # Resize by half, then down to 42x42 (essentially mipmapping). If
    # we resize directly we lose pixels that, when mapped to 42x42,
    # aren't close enough to the pixel boundary.
    frame = cv2.resize(frame, (80, 80))
    # si(frame)
    frame = cv2.resize(frame, (42, 42))
    # si(frame)
    # print(frame.shape)
    frame = frame.mean(2, keepdims=True)
    # si(frame)
    # print(frame.shape)
    frame = frame.astype(np.float32)
    # si(frame)
    frame *= (1.0 / 255.0)
    # si(frame)
    # print(frame.shape)
    frame = np.moveaxis(frame, -1, 0)
    # print(frame.shape)
    # si(frame)
    return frame


class AtariRescale42x42(gym.ObservationWrapper):
    def __init__(self, env=None):
        super(AtariRescale42x42, self).__init__(env)
        self.observation_space = Box(0.0, 1.0, [1, 42, 42])

    def _observation(self, observation):
        return _process_frame42(observation)


class NormalizedEnv(gym.ObservationWrapper):
    def __init__(self, env=None):
        super(NormalizedEnv, self).__init__(env)
        self.state_mean = 0
        self.state_std = 0
        self.alpha = 0.9999
        self.num_steps = 0

    def _observation(self, observation):
        self.num_steps += 1
        self.state_mean = self.state_mean * self.alpha + \
            observation.mean() * (1 - self.alpha)
        self.state_std = self.state_std * self.alpha + \
            observation.std() * (1 - self.alpha)

        unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
        unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))

        return (observation - unbiased_mean) / (unbiased_std + 1e-8)
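NormalizedEnv above keeps exponential moving averages of the observation mean and std and divides by (1 - alpha**t) to undo the zero-initialisation bias, the same correction used in Adam. A stand-alone numeric sketch of that correction (the values are illustrative, not taken from the repository):

import numpy as np

alpha, ema, true_mean = 0.9999, 0.0, 5.0
rng = np.random.default_rng(0)
for t in range(1, 1001):
    x = true_mean + rng.normal()
    ema = ema * alpha + x * (1 - alpha)
    corrected = ema / (1 - alpha ** t)
print(round(ema, 3), round(corrected, 3))  # raw EMA still far below 5; the corrected value is close to 5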
[ "geemguang@gmail.com" ]
geemguang@gmail.com
8d0ba3fffd49ea3eea2c0d620f4f2610304025b1
48c0e32b803b59ed695e9d8f1dcdb877a2124252
/virtual/bin/confusable_homoglyphs
899edb47b6e39425a22faa0ecada590b2de2579c
[ "MIT" ]
permissive
willyowi/my-hood
512604dc189527c7872125daf45b1cf8173448a8
8643b901b6234fc9c5ac50727d1789893cebc3b4
refs/heads/master
2022-12-15T03:52:57.961655
2019-09-17T05:54:01
2019-09-17T05:54:01
208,743,454
0
0
null
2022-11-22T04:15:05
2019-09-16T08:01:29
Python
UTF-8
Python
false
false
262
#!/home/moringa/Django/InstaInsta/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys

from confusable_homoglyphs.cli import cli

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cli())
[ "wilsonowino1@gmail.com" ]
wilsonowino1@gmail.com
caf56dfe7e0c49a264c81da81d9ea914a84447b2
b29acb2e230b3cf2f8be070850c34ed5d62dc80c
/Python/YPS/11/Sample6.py
4b1cb2312995ea62fe3367f82ff96ca26700c936
[]
no_license
MasatakaShibataSS/lesson
be6e3557c52c6157b303be268822cad613a7e0f7
4f3f81ba0161b820410e2a481b63a999d0d4338c
refs/heads/master
2020-06-17T13:42:08.383167
2019-11-11T07:23:14
2019-11-11T07:23:14
195,940,605
0
0
null
null
null
null
UTF-8
Python
false
false
140
py
import urllib.request

page = urllib.request.urlopen("https://www.python.org/")
html = page.read()
str = html.decode()
print(str)
[ "masataka.shibata.ss@gmail.com" ]
masataka.shibata.ss@gmail.com
6bdf85290bd663dbcce0fcc6c25afc13b3ec49b3
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03076/s440054219.py
3147caa674c3d4a08d5b4d7bfac6bdbff79f2610
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
395
py
from itertools import permutations

Order = [int(input()) for _ in range(5)]
ans = []
for orders in permutations(Order):
    # print(orders)
    tmp = 0
    cnt = 1
    for order in orders:
        tmp += order
        if cnt == 5:
            ans.append(tmp)
        for _ in range(11):
            if tmp % 10 == 0:
                break
            tmp += 1
        cnt += 1
print(min(ans))
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
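The brute-force solution above rounds each running total up to the next multiple of 10 before the following order is placed, and only the final order is left unrounded. A self-contained sketch of the same idea, without stdin, is added here purely as a point of reference; the function name and the list of order times are illustrative and not part of the original record.

from itertools import permutations

def min_total_time(times):
    # Try every serving order; every order except the last must wait for the
    # next multiple of 10 before the following one can be placed.
    best = None
    for perm in permutations(times):
        t = 0
        for i, x in enumerate(perm):
            t += x
            if i < len(perm) - 1:
                t = -(-t // 10) * 10  # round up to the next multiple of 10
        best = t if best is None else min(best, t)
    return best

print(min_total_time([29, 20, 7, 35, 120]))  # illustrative input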
f9c248c90a84e28028bd38121a1b2c64c0646d36
277d4ee56616bb5930c57a57c68a202bf5085501
/stubs/pytorch_transformers/modeling_transfo_xl_utilities.pyi
690e9c0e6bf58fb17eaeff7e7fa883231a190cb1
[ "MIT" ]
permissive
miskolc/spacy-pytorch-transformers
fc502523644eb25cb293e0796b46535ba581a169
ab132b674c5a91510eb8cc472cdbdf5877d24145
refs/heads/master
2020-07-22T09:47:17.905850
2019-09-04T15:12:09
2019-09-04T15:12:09
207,156,566
1
0
MIT
2019-09-08T18:37:55
2019-09-08T18:37:55
null
UTF-8
Python
false
false
1,205
pyi
# Stubs for pytorch_transformers.modeling_transfo_xl_utilities (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.

import torch.nn.functional as nn
from typing import Any, Optional

class ProjectedAdaptiveLogSoftmax(nn.Module):
    n_token: Any = ...
    d_embed: Any = ...
    d_proj: Any = ...
    cutoffs: Any = ...
    cutoff_ends: Any = ...
    div_val: Any = ...
    shortlist_size: Any = ...
    n_clusters: Any = ...
    head_size: Any = ...
    cluster_weight: Any = ...
    cluster_bias: Any = ...
    out_layers: Any = ...
    out_projs: Any = ...
    keep_order: Any = ...
    def __init__(self, n_token: Any, d_embed: Any, d_proj: Any, cutoffs: Any, div_val: int = ..., keep_order: bool = ...) -> None: ...
    def forward(self, hidden: Any, labels: Optional[Any] = ..., keep_order: bool = ...): ...
    def log_prob(self, hidden: Any): ...

class LogUniformSampler:
    range_max: Any = ...
    dist: Any = ...
    log_q: Any = ...
    n_sample: Any = ...
    def __init__(self, range_max: Any, n_sample: Any) -> None: ...
    def sample(self, labels: Any): ...

def sample_logits(embedding: Any, bias: Any, labels: Any, inputs: Any, sampler: Any): ...
[ "honnibal+gh@gmail.com" ]
honnibal+gh@gmail.com
26b8d5c8ca06054ba4ceacb635d29f047733406b
cedf275d9d0a9034f4d1227605b2f869098a4322
/guize/migrations/0002_rule.py
c2a52efa781f4067da427bec40d04527405c7710
[]
no_license
thorDemo/WagtailPointsShop
bb019dfef781d843d1d9e78fb24142f67a0d178a
f069bcb66514067197a59ffe25b68b47dea282e3
refs/heads/master
2020-06-02T23:34:47.100993
2019-08-30T10:08:43
2019-08-30T10:08:43
185,065,560
0
0
null
null
null
null
UTF-8
Python
false
false
841
py
# Generated by Django 2.2.1 on 2019-05-15 09:05

from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields


class Migration(migrations.Migration):

    dependencies = [
        ('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
        ('guize', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Rule',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('description', wagtail.core.fields.RichTextField()),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
[ "thortwo@outlook.com" ]
thortwo@outlook.com
0a847c650ed4c43f4378921035fc98866e7c64ca
8441f156e53afcc6c2b5190de2439c68eb40f218
/scripts/test_pdl2resources.py
4a1a86f180bd2d2d6030e297f64ca2bb8c9e882d
[]
no_license
usnistgov/oar-metadata
99436a84d32d623d77310e75eee834c683ea1d5b
2190bfc79d97f81d52dd24df0d4e9dc844065b67
refs/heads/integration
2023-07-08T16:06:23.258608
2023-04-22T21:00:09
2023-04-22T21:00:09
82,972,531
4
7
null
2023-06-30T18:27:38
2017-02-23T21:20:34
Python
UTF-8
Python
false
false
2,595
py
#!/usr/bin/env python
#
import os, pdb, sys, shutil, json
import unittest as test
import ejsonschema as ejs

datadir = os.path.join(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__))), "jq", "tests", "data")
pdlfile = os.path.join(datadir, "nist-pdl-oct2016.json")

tmpname = "_test"
basedir = os.getcwd()
tmpdir = os.path.join(basedir, tmpname)
outdir = os.path.join(tmpdir, "nerdmrecs")
errdir = os.path.join(tmpdir, "errors")

scriptdir = os.path.dirname(__file__)
cvtscript = os.path.join(scriptdir, "pdl2resources.py")
schemadir = os.path.join(os.path.dirname(scriptdir), "model")


class TestConvert(test.TestCase):

    @classmethod
    def setUpClass(cls):
        if not os.path.isdir(outdir):
            if not os.path.isdir(tmpdir):
                os.mkdir(tmpdir)
            os.mkdir(outdir)
        os.makedirs(errdir)

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)

    def setUp(self):
        self.val = ejs.ExtValidator.with_schema_dir(schemadir, ejsprefix='_')

    def test_convert(self):
        script = "python3 {0} -d {1} -T {2}".format(cvtscript, outdir, pdlfile)
        self.assertEqual(os.system(script), 0)

        files = [f for f in os.listdir(outdir) if f.endswith(".json")]

        failed = []
        passed = 0
        for f in files:
            nf = os.path.join(outdir, f)
            errs = self.val.validate_file(nf, raiseex=False)
            if len(errs) > 0:
                failed.append(f)
                with open(os.path.join(errdir, f), 'w') as fd:
                    for err in errs:
                        print(str(err), file=fd)
            else:
                sys.stderr.write(".")
                passed += 1
                with open(nf) as fd:
                    nerd = json.load(fd)
                if 'theme' in nerd:
                    self.assertEqual(len(nerd['topic']), len(nerd['theme']))
                if nerd['ediid'] == 'EBC9DB05EDEE5B0EE043065706812DF85':
                    self.assertIn('theme', nerd)
                    self.assertEqual(nerd['theme'][0], "Physics: Spectroscopy")
                    self.assertEqual(nerd['topic'][0]['tag'], "Physics: Spectroscopy")
                    self.assertTrue(all([':' in t for t in nerd['theme']]))

        sys.stderr.write("\nValidated {0} files".format(str(passed)))
        self.assertEqual(len(failed), 0,
                         "{0} converted file(s) failed validation".format(str(len(failed))))


if __name__ == '__main__':
    test.main()
[ "raymond.plante@nist.gov" ]
raymond.plante@nist.gov
84e6c18f5c63e7aa8929734ce272fa3a09eeb159
15ae6a6ca3a202e50e1905fb8f1bf8461d14e525
/bender_behaviors/src/python_old/entrevista1.py
184866f75c8f0ce575384b10e486eb55729d6c29
[]
no_license
uchile-robotics-graveyard/code_graveyard
5f8fa65ce0dc4698598ee4df00e27172cfd5be36
09feb04d2303456723542b95257e3ef3c86e263e
refs/heads/master
2021-01-21T08:24:24.254179
2016-12-13T03:23:19
2016-12-13T03:23:19
91,625,843
0
0
null
null
null
null
UTF-8
Python
false
false
628
py
import rospy  # required by init_node, Publisher and ServiceProxy below
import time   # required by the sleep calls below

from bender_msgs.msg import *
from bender_srvs.srv import *
# Messages
from std_msgs.msg import *
# Services
from std_srvs.srv import *

rospy.init_node("ceremonia")
face = rospy.Publisher('/head', Emotion)
client_speech_synth = rospy.ServiceProxy('/speech_synthesizer/synthesize', synthesize)

print "Presione Enter"
a = raw_input()  # Wait for an 'enter' to start
client_speech_synth("Pero senior rector que esta diciendo ")
face.publish("changeFace", "sad1", 0)
time.sleep(3)
client_speech_synth("creo que esta equivocado")
face.publish("changeFace", "angry1", 0)
time.sleep(6)
client_speech_synth("Muchas gracias")
[ "matias.pavez.b@gmail.com" ]
matias.pavez.b@gmail.com
5627b2e467cd0e1614f8d47a99d2bf22066b9b4e
fb3caa66ac0b2254b422303d670a70e597067758
/201911_AI_Sec_Baidu/core-attack-codes/a_04.py
9f9cf0803ac57b036a01e238785d504de381f628
[]
no_license
dyngq/Competitions
065ec9f153919950b161aaa9fff6a9de9e29ba32
e9b7ff8fbe038e148bc61b21b077f35cdc5368a9
refs/heads/master
2021-06-13T13:55:11.352531
2021-05-08T09:49:24
2021-05-08T09:49:24
186,392,400
0
0
null
null
null
null
UTF-8
Python
false
false
5,022
py
#coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import functools
import numpy as np
import paddle.fluid as fluid

# load custom modules
import models
from attack.attack_pp import FGSM, PGD
from utils import init_prog, save_adv_image, process_img, tensor2img, calc_mse, add_arguments, print_arguments

#######parse parameters
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('class_dim', int, 121, "Class number.")
add_arg('shape', str, "3,224,224", "output image shape")
add_arg('input', str, "./input2_image/", "Input directory with images")
add_arg('output', str, "./input3_image/", "Output directory with images")

args = parser.parse_args()
print_arguments(args)

######Init args
image_shape = [int(m) for m in args.shape.split(",")]
class_dim = args.class_dim
input_dir = args.input
output_dir = args.output

# Xception41
model_name = "ResNet50"
pretrained_model = "/home/aistudio/work/attack_example/attack_code/models_parameters/ResNet50"
val_list = 'val_list.txt'
use_gpu = True

######Attack graph
adv_program = fluid.Program()
# finish initialization
with fluid.program_guard(adv_program):
    input_layer = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    # enable gradient computation for the input
    input_layer.stop_gradient = False

    # model definition
    model = models.__dict__[model_name]()
    print(model_name)
    out_logits = model.net(input=input_layer, class_dim=class_dim)
    out = fluid.layers.softmax(out_logits)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # load the model parameters
    fluid.io.load_persistables(exe, pretrained_model)

# set the BN-layer state of adv_program
init_prog(adv_program)
# create an evaluation clone for testing
eval_program = adv_program.clone(for_test=True)

# define the gradients
with fluid.program_guard(adv_program):
    label = fluid.layers.data(name="label", shape=[1], dtype='int64')
    loss = fluid.layers.cross_entropy(input=out, label=label)
    gradients = fluid.backward.gradients(targets=loss, inputs=[input_layer])[0]

######Inference
def inference(img):
    fetch_list = [out.name]
    result = exe.run(eval_program,
                     fetch_list=fetch_list,
                     feed={'image': img})
    result = result[0][0]
    pred_label = np.argmax(result)
    pred_score = result[pred_label].copy()
    return pred_label, pred_score

######FGSM attack
#untarget attack
def attack_nontarget_by_FGSM(img, src_label):
    pred_label = src_label
    # step = 4.0/512.0
    step = 8.0/64.0
    eps = 32.0/64.0
    while pred_label == src_label:
        # generate the adversarial example
        adv = FGSM(adv_program=adv_program, eval_program=eval_program, gradients=gradients, o=img,
                   input_layer=input_layer, output_layer=out, step_size=step, epsilon=eps,
                   isTarget=False, target_label=0, use_gpu=use_gpu)
        pred_label, pred_score = inference(adv)
        step *= 2
        if step > eps:
            break

    print("Test-score: {0}, class {1}".format(pred_score, pred_label))

    adv_img = tensor2img(adv)
    return adv_img

def attack_nontarget_by_FGSM_test(img, src_label):
    pred_label = src_label
    print("---------------AAAA-------------------Test-score: {0}, class {1}".format(pred_label, pred_label))
    pred_label, pred_score = inference(img)
    print("---------------BBBB-------------------Test-score: {0}, class {1}".format(pred_score, pred_label))

####### Main #######
def get_original_file(filepath):
    with open(filepath, 'r') as cfile:
        full_lines = [line.strip() for line in cfile]
    cfile.close()
    original_files = []
    for line in full_lines:
        label, file_name = line.split()
        original_files.append([file_name, int(label)])
    return original_files

def gen_adv():
    mse = 0
    original_files = get_original_file(input_dir + val_list)

    for filename, label in original_files:
        img_path = input_dir + filename
        print("Image: {0} ".format(img_path))
        img = process_img(img_path)
        # attack_nontarget_by_FGSM_test(img, label)
        prelabel, xxxx = inference(img)
        if label == prelabel:
            adv_img = attack_nontarget_by_FGSM(img, label)
        else:
            adv_img = tensor2img(img)
        image_name, image_ext = filename.split('.')
        ##Save adversarial image(.png)
        save_adv_image(adv_img, output_dir + image_name + '.jpg')
        # attack_nontarget_by_FGSM_test(img, label)
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        print(score)
        mse += score

    print("ADV {} files, AVG MSE: {} ".format(len(original_files), mse/len(original_files)))

def main():
    gen_adv()

if __name__ == '__main__':
    main()
[ "dyngqk@163.com" ]
dyngqk@163.com
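The script above imports its FGSM helper from attack.attack_pp, which is not included in this record. Purely as a point of reference, a minimal NumPy sketch of the standard untargeted FGSM step is shown below; fgsm_step, img, grad, step_size and epsilon are hypothetical names and this is not the repository's implementation.

import numpy as np

def fgsm_step(img, grad, step_size, epsilon):
    # Move the input along the sign of the loss gradient, then clip the total
    # perturbation to the epsilon ball and keep values in a [0, 1] range.
    adv = img + step_size * np.sign(grad)
    adv = np.clip(adv, img - epsilon, img + epsilon)
    return np.clip(adv, 0.0, 1.0)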
6509d45fac1e61e8e1adb7641a828be45e3c4bd6
f1d2a86b7dd93f4ddafa8961780775a28e7b4508
/LeetCodePractice/657. Judge Route Circle.py
c460223a113be445b4b0e16c29382cf9793142c3
[]
no_license
deepika087/CompetitiveProgramming
76f8c1451fce1a8e3c94656f81a5b04363987dc6
d40c24736a6fee43b56aa1c80150c5f14be4ff22
refs/heads/master
2021-06-12T02:26:22.374506
2021-02-20T19:27:57
2021-02-20T19:27:57
70,208,474
10
6
null
null
null
null
UTF-8
Python
false
false
540
py
__author__ = 'deepika'


class Solution(object):
    def judgeCircle(self, moves):
        """
        :type moves: str
        :rtype: bool
        """
        start = [0, 0]
        for pos in moves:
            if pos == 'U':
                start[1] += 1
            elif pos == 'D':
                start[1] -= 1
            elif pos == 'R':
                start[0] += 1
            else:
                start[0] -= 1
        return start == [0, 0]

s = Solution()
assert s.judgeCircle("UD") == True
assert s.judgeCircle("LL") == False
[ "deepika_087@yahoo.com" ]
deepika_087@yahoo.com
c669a2a88652de7ad32f758264e4aebdb6518c22
461d7bf019b9c7a90d15b3de05891291539933c9
/bip_utils/addr/egld_addr.py
5ce39f3efdc68e319a149a4f7e603112e1d6e4b2
[ "MIT" ]
permissive
renauddahou/bip_utils
5c21503c82644b57ddf56735841a21b6306a95fc
b04f9ef493a5b57983412c0ce460a9ca05ee1f50
refs/heads/master
2023-07-16T05:08:45.042084
2021-08-19T09:33:03
2021-08-19T09:33:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,965
py
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.


# Imports
from typing import Union
from bip_utils.addr.utils import AddrUtils
from bip_utils.bech32 import Bech32Encoder
from bip_utils.conf import Bip44Elrond
from bip_utils.ecc import IPublicKey


class EgldAddr:
    """ Elrond address class. It allows the Elrond address generation. """

    @staticmethod
    def EncodeKey(pub_key: Union[bytes, IPublicKey]) -> str:
        """ Get address in Elrond format.

        Args:
            pub_key (bytes or IPublicKey): Public key bytes or object

        Returns:
            str: Address string

        Raises:
            ValueError: If the public key is not valid
            TypeError: If the public key is not ed25519
        """
        pub_key_obj = AddrUtils.ValidateAndGetEd25519Key(pub_key)

        return Bech32Encoder.Encode(Bip44Elrond.AddrConfKey("hrp"),
                                    pub_key_obj.RawCompressed().ToBytes()[1:])
[ "54482000+ebellocchia@users.noreply.github.com" ]
54482000+ebellocchia@users.noreply.github.com
c38c36d6ffc05d42fe02639a242917e1d5d32199
fd54c0886b81b49a55c31eb8c5254ce83df78785
/Source_Code/madagascar/appussd/utilities/data_ucip/tests.py
67147cdba1ee753b31be73451e555d3f685e8c22
[]
no_license
santsy03/RADIX
7854896651833b1be6e3279be409db59a71c76e4
da8f2535692697b80a6dc543b9eb270fe3d5e4d3
refs/heads/master
2021-01-12T09:48:32.085432
2016-12-13T06:01:41
2016-12-13T06:01:41
76,260,115
0
0
null
2016-12-13T06:01:41
2016-12-12T13:46:31
null
UTF-8
Python
false
false
984
py
def test_bill_subscriber():
    '''tests the bill_subscriber function'''
    from utilities.ucip.core import bill_subscriber
    resources = {}
    parameters = {}
    parameters['msisdn'] = '254735267974'
    parameters['transactionId'] = 1
    parameters['externalData1'] = 'test'
    parameters['externalData2'] = 'test'
    parameters['price'] = '1'
    resources['parameters'] = parameters
    resp = bill_subscriber(resources)
    print resp

def test_get_balance():
    '''tests the get_balance function'''
    from utilities.ucip.core import get_balance
    resources = {}
    parameters = {}
    parameters['msisdn'] = '254735267974'
    parameters['transactionId'] = 1
    parameters['externalData1'] = 'test'
    parameters['externalData2'] = 'test'
    parameters['price'] = '1'
    resources['parameters'] = parameters
    resp = get_balance(resources)
    print resp

if __name__ == '__main__':
    pass
    #test_bill_subscriber()
    #test_get_balance()
[ "root@oc4686551628.ibm.com" ]
root@oc4686551628.ibm.com
2e58446c7b4fbaa9e6612720013bf890545919e6
a6bc66b6c1561fc465d7c321d2584c1c7d6bd792
/sqliteschema/_text_extractor.py
ec137f4f88eedc36c5d605868782f47158484aff
[ "MIT" ]
permissive
dirkakrid/sqliteschema
defffda63d5922ec29d4f04cbe3895dfdca8a3e2
70aae30bd6e1eba0e959476246a2a6907b8f4b2d
refs/heads/master
2021-01-22T21:27:54.759880
2017-02-26T13:24:58
2017-02-26T13:24:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,261
py
#!/usr/bin/env python
# encoding: utf-8

"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""

from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict

import six
import typepy

from ._error import DataNotFoundError
from ._interface import AbstractSqliteSchemaExtractor


class SqliteSchemaTextExtractorV0(AbstractSqliteSchemaExtractor):

    @property
    def verbosity_level(self):
        return 0

    def get_table_schema(self, table_name):
        return []

    def get_table_schema_text(self, table_name):
        self._validate_table_existence(table_name)

        return "{:s}\n".format(table_name)

    def _write_database_schema(self):
        for table_name in self.get_table_name_list():
            if table_name == "sqlite_sequence":
                continue

            self._stream.write(self.get_table_schema_text(table_name))


class SqliteSchemaTextExtractorV1(SqliteSchemaTextExtractorV0):

    @property
    def verbosity_level(self):
        return 1

    def get_table_schema(self, table_name):
        return [
            attr.split()[0]
            for attr in self._get_attr_schema(table_name, "table")
        ]

    def get_table_schema_text(self, table_name):
        return "{:s} ({:s})\n".format(
            table_name, ", ".join(self.get_table_schema(table_name)))


class SqliteSchemaTextExtractorV2(AbstractSqliteSchemaExtractor):

    @property
    def verbosity_level(self):
        return 2

    def get_table_schema(self, table_name):
        return OrderedDict([
            attr.split()[:2]
            for attr in self._get_attr_schema(table_name, "table")
        ])

    def get_table_schema_text(self, table_name):
        attr_list = []

        for key, value in six.iteritems(self.get_table_schema(table_name)):
            attr_list.append("{:s} {:s}".format(key, value))

        return "{:s} ({:s})\n".format(table_name, ", ".join(attr_list))

    def _write_table_schema(self, table_name):
        self._stream.write(self.get_table_schema_text(table_name))

    def _write_database_schema(self):
        for table_name in self.get_table_name_list():
            self._write_table_schema(table_name)


class SqliteSchemaTextExtractorV3(SqliteSchemaTextExtractorV2):

    @property
    def verbosity_level(self):
        return 3

    def get_table_schema(self, table_name):
        attr_list_list = [
            attr.split()
            for attr in self._get_attr_schema(table_name, "table")
        ]

        return OrderedDict([
            [attr_list[0], " ".join(attr_list[1:])]
            for attr_list in attr_list_list
        ])


class SqliteSchemaTextExtractorV4(SqliteSchemaTextExtractorV3):

    @property
    def verbosity_level(self):
        return 4

    def get_table_schema_text(self, table_name):
        attr_list = []
        for key, value in six.iteritems(self.get_table_schema(table_name)):
            attr_list.append("{:s} {:s}".format(key, value))

        return "\n".join([
            "{:s} (".format(table_name),
        ] + [
            ",\n".join([
                " {:s}".format(attr)
                for attr in attr_list
            ])
        ] + [
            ")\n",
        ])

    def _write_table_schema(self, table_name):
        super(SqliteSchemaTextExtractorV4, self)._write_table_schema(
            table_name)
        self._stream.write("\n")


class SqliteSchemaTextExtractorV5(SqliteSchemaTextExtractorV4):

    __ENTRY_TYPE_LIST = ["table", "index"]

    @property
    def verbosity_level(self):
        return 5

    def get_table_schema_text(self, table_name):
        schema_text = super(
            SqliteSchemaTextExtractorV5, self
        ).get_table_schema_text(table_name)

        try:
            index_schema = self._get_index_schema(table_name)
        except DataNotFoundError:
            return schema_text

        index_schema_list = [
            "{}".format(index_entry)
            for index_entry in index_schema
            if typepy.is_not_null_string(index_entry)
        ]

        if typepy.is_empty_sequence(index_schema_list):
            return schema_text

        return "{:s}{:s}\n".format(schema_text, "\n".join(index_schema_list))
[ "gogogo.vm@gmail.com" ]
gogogo.vm@gmail.com
4bd6b7505d8e2a7353534586bcc8c68933891220
c5b4d174ace61dd5914ca99fb0f2c710d0182324
/pypes/fmri/rest.py
3e34ff1e4c8139ca7c77288a6ee1befd637781ba
[ "Apache-2.0" ]
permissive
erramuzpe/pypes
636c6b31023747a571af90390fd85b2dd6806dea
3922d3162dc633b30961c036efdeb5d221ab1bfb
refs/heads/master
2020-12-24T06:43:15.063955
2017-04-05T19:51:05
2017-04-05T19:51:05
73,461,509
0
0
null
2016-11-11T08:54:15
2016-11-11T08:54:14
null
UTF-8
Python
false
false
2,291
py
# -*- coding: utf-8 -*-
"""
Nipype workflows to process resting-state functional MRI.
"""
from .grouptemplate import attach_spm_fmri_grouptemplate_wf
from .clean import attach_fmri_cleanup_wf
from .warp import attach_spm_warp_fmri_wf


def _attach_rest_preprocessing(main_wf, registration_wf_name="spm_warp_fmri", do_group_template=False):
    """ Attach the resting-state MRI pre-processing workflow to the `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    registration_wf_name: str
        Name of the registration workflow.

    do_group_template: bool
        If True will attach the group template creation and pre-processing pipeline.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have an `input_files` and a `datasink` nodes.

    input_files.select.anat: input node

    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    main_wf = attach_fmri_cleanup_wf(main_wf)
    main_wf = attach_spm_warp_fmri_wf(main_wf, registration_wf_name=registration_wf_name, do_group_template=False)

    if do_group_template:
        main_wf = attach_spm_fmri_grouptemplate_wf(main_wf, wf_name="spm_fmri_grptemplate")
        main_wf = attach_spm_warp_fmri_wf(main_wf, registration_wf_name=registration_wf_name, do_group_template=True)

        reg_wf = main_wf.get_node("{}_{}".format(registration_wf_name, 'grptemplate'))
        grp_template = main_wf.get_node("group_template")
        main_wf.connect([(grp_template, reg_wf, [("fmri_template", "wfmri_input.epi_template")]),])

    return main_wf


def attach_rest_preprocessing(main_wf, wf_name="spm_warp_fmri"):
    return _attach_rest_preprocessing(main_wf, registration_wf_name=wf_name, do_group_template=False)


def attach_rest_grptemplate_preprocessing(main_wf, wf_name="spm_warp_fmri"):
    return _attach_rest_preprocessing(main_wf, registration_wf_name=wf_name, do_group_template=True)
[ "alexsavio@gmail.com" ]
alexsavio@gmail.com