blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56bf95e727e241941cb8404140b984bb6ebf81e8 | 2ed63f141ef1805d377f774c56464fc20a94b1f0 | /who_django/news_articles/migrations/0009_auto_20150428_0902.py | 0d75138097aaa3f5a7b7cda72c9e7472be9bb512 | [] | no_license | piafaustino/in-the-news | 115580b051060ee3238170c9657e855b589f1ff6 | 7e7ee49c11bd75e7c211901eed23eb61b1e63a38 | refs/heads/master | 2021-01-18T05:11:25.450509 | 2015-07-09T07:32:59 | 2015-07-09T07:32:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('news_articles', '0008_auto_20150428_0901'),
]
operations = [
migrations.AlterField(
model_name='newsarticle',
name='language',
field=models.CharField(max_length=1000, null=True, blank=True),
preserve_default=True,
),
]
| [
"ortizantoniocarlos@gmail.com"
] | ortizantoniocarlos@gmail.com |
1def0a0d17d4664296f6ac3d207fd6464e528d57 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/탈출문자_20200628134841.py | 52f76ab6276c1e43ead72f41b990b6c04e57c9d5 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # \n : 줄바꿈 문자
print("백문이 불여일견 \n백견이 불여일타")
# \" , \' : 문장 내에서 따옴표 표시
# 저는 "나도코딩"입니다.
print('저는 "나도코딩"입니다.')
print("저는 \"나도코딩\"입니다.")
print("저는 \'나도코딩\'입니다.")
# \\ : 문장 내에서 하나의 \ 인식
print("C:\\user\\Nadocoding\\Desktop\\PythonWorkspace")
# \r : 커서를 맨 앞으로 이동
print("Red Apple\rPine")
# Pine 을 맨 앞으로 이동해서 Red
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
0e410468b8ee972504b310d318e38672571433eb | c8ea0cc6fb77f9345911eeb5a46d6805dc6c0762 | /artifacts/battery_cycles.py | 6a52445d41fbbc89675d88fab7ad662caff16c96 | [
"Apache-2.0"
] | permissive | NoahRJ/unearth | a1f114faa4e5b8fe734d2e1f66c47b3d48fb984a | d4c3225cc5eca3aa7e22cb48b2457a273f42aed3 | refs/heads/master | 2022-02-26T06:13:28.467628 | 2019-07-12T15:24:48 | 2019-07-12T15:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | import plistlib
import subprocess
factoid = "battery_cycles"
def fact():
"""Returns the battery cycle count"""
result = "None"
try:
proc = subprocess.Popen(
["/usr/sbin/ioreg", "-r", "-c", "AppleSmartBattery", "-a"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
if stdout:
result = plistlib.readPlistFromString(stdout)[0]["CycleCount"]
except (IOError, OSError):
pass
return {factoid: result}
if __name__ == "__main__":
print("<result>%s</result>" % fact()[factoid])
| [
"chilcote+github@gmail.com"
] | chilcote+github@gmail.com |
9eb4e00d680f07c58daf4d23bcde8ae439c4fef4 | 278d7f4467a112416d1adfbcd3218033ff0fd9b3 | /configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py | 89b01e35c8d9f9393ef9bed2d0e2317c726ad3f2 | [] | no_license | Young-1217/detection | e3d67938b454e955b5b7a82d5ae222e62f9545fb | 6760288dac92e00ddc3e813ed0e1363c1fa1ce2d | refs/heads/main | 2023-06-01T21:41:37.998947 | 2021-06-21T10:03:01 | 2021-06-21T10:03:01 | 371,868,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
pretrained='open-mmlab://regnetx_6.4gf',
backbone=dict(
type='RegNet',
arch='regnetx_6.4gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[168, 392, 784, 1624],
out_channels=256,
num_outs=5))
| [
"noreply@github.com"
] | Young-1217.noreply@github.com |
63fa0d097761e14ffc8a24f166a52a6602df5e9e | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/1e3a154bb18db45953d521b224dae5134ff1cd38-<_as_tuple>-fix.py | 67d0eec779e232394594d8590bebf64b8c3f4c82 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | def _as_tuple(xs):
if isinstance(xs, tuple):
return xs
elif isinstance(xs, list):
return tuple(xs)
else:
return (xs,) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
18df72e3135f880ec03195e7e3793b6738208e1d | a9063fd669162d4ce0e1d6cd2e35974274851547 | /test/test_tracking_field3.py | f0fce249958c46c2f52b49a16aabfc7a5a132fef | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.tracking_field3 import TrackingField3 # noqa: E501
from swagger_client.rest import ApiException
class TestTrackingField3(unittest.TestCase):
"""TrackingField3 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTrackingField3(self):
"""Test TrackingField3"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.tracking_field3.TrackingField3() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"github@rootalley.com"
] | github@rootalley.com |
64e7d2d6a89a11242dd6ffe870524b83c7fff09b | 6ace7e15e3191d1b8228ad7922a8552ca84f84e7 | /.history/image_detector_20200614201043.py | 09d069f6be035c475b6bbaaec620ec76a5440459 | [] | no_license | mehmetaliarican/Similar-Image-Finder | f72e95be50c51aa03fc64954a03124b199ca64b1 | a9e0015c443b4a73394099cccf60329cfc4c7cef | refs/heads/master | 2022-10-27T00:57:43.173993 | 2020-06-14T18:02:16 | 2020-06-14T18:02:16 | 272,256,295 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | from skimage.metrics import structural_similarity as ssim
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import os
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--threshold", type=float, default=0.9,
help="threshold")
ap.add_argument("-d", "--dataset", required=True,
help="path to input dataset")
args = vars(ap.parse_args())
def mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
searching = False
def compare_images(path1,path2,imageA, imageB):
# compute the mean squared error and structural similarity
# index for the images
m = mse(imageA, imageB)
s = ssim(imageA, imageB)
tres = args['threshold']
if s >= tres:
print("Image '{0}' comparet to '{1}' Simility:'{2}".format([path1,path2,str(m), str(tres), str(s)]))
twin = np.hstack([imageA, imageB])
cv2.imshow('', twin)
cv2.waitKey(0)
searching = False
elif searching searching is not True:
print('Searching...')
searching = True
imagePaths = list(paths.list_images(args['dataset']))
companies = ['dhl', 'paypal', 'wellsfargo']
all_data = []
for path in imagePaths:
company = ''
for c in companies:
if c in path:
company = c
all_data.append({'comp': c, 'path': path})
for image in all_data:
try:
p1 = cv2.imread(image['path'])
p1 = cv2.resize(p1, (300, 300))
p1 = cv2.cvtColor(p1, cv2.COLOR_BGR2GRAY)
for i in all_data:
if i['path']!=image['path']:
p2 = cv2.imread(i['path'])
p2 = cv2.resize(p2, (300, 300))
p2 = cv2.cvtColor(p2, cv2.COLOR_BGR2GRAY)
compare_images(image['path'],i['path'],p1, p2)
except Exception as e:
print(str(e))
| [
"m.ali.arican@gmail.com"
] | m.ali.arican@gmail.com |
a8be80388f4eb71495ae77e532508c761fedde30 | a550aece79bda789826b463280b91abffbf2d372 | /django_projects/drf_store/04_03_test_ListAPIView/demo/store/tests.py | b54ff6cd8839738eed887718ccc38ff5cabf3f70 | [
"MIT"
] | permissive | phiratio/learn_python | 20376470eaa292c157fd01f52b3077e3a983cd5a | a32240d4355fb331805d515f96e1d009914e5c47 | refs/heads/master | 2022-11-27T07:07:45.712373 | 2020-12-03T22:04:31 | 2020-12-03T22:04:31 | 189,397,679 | 1 | 0 | MIT | 2022-11-22T04:40:27 | 2019-05-30T10:56:10 | Python | UTF-8 | Python | false | false | 1,832 | py | from rest_framework.test import APITestCase
from store.models import Product
class ProductCreateTestCase(APITestCase):
def test_create_product(self):
initial_product_count = Product.objects.count()
product_attrs = {
'name': 'New Product',
'description': 'Awesome product',
'price': '123.45',
}
response = self.client.post('/api/v1/products/new', product_attrs)
if response.status_code != 201:
print(response.data)
self.assertEqual(
Product.objects.count(),
initial_product_count + 1,
)
for attr, expected_value in product_attrs.items():
self.assertEqual(response.data[attr], expected_value)
self.assertEqual(response.data['is_on_sale'], False)
self.assertEqual(
response.data['current_price'],
float(product_attrs['price']),
)
class ProductDestroyTestCase(APITestCase):
def test_delete_product(self):
initial_product_count = Product.objects.count()
product_id = Product.objects.first().id
self.client.delete('/api/v1/products/{}/'.format(product_id))
self.assertEqual(
Product.objects.count(),
initial_product_count - 1,
)
self.assertRaises(
Product.DoesNotExist,
Product.objects.get, id=product_id,
)
class ProductListTestCase(APITestCase):
def test_list_products(self):
products_count = Product.objects.count()
response = self.client.get('/api/v1/products/')
self.assertIsNone(response.data['next'])
self.assertIsNone(response.data['previous'])
self.assertEqual(response.data['count'], products_count)
self.assertEqual(len(response.data['results']), products_count)
| [
"phiratio161@gmail.com"
] | phiratio161@gmail.com |
b6436774d1882ab0940596e4a817712a52d8abcd | 5e91bdf03ff63f101fc60ace00463d3350e6c21c | /lib/processors.py | 3b43ed855591e5e9ed89afe9908d01ebdba6ea19 | [
"BSD-3-Clause"
] | permissive | frkay/compression-test | 3adb645643c0a02c7e11ece9413a1f6249f12686 | 49185fe9c302c18734dad0ff4887802152cd95bf | refs/heads/master | 2021-01-18T08:28:24.440836 | 2013-01-25T01:34:35 | 2013-01-25T01:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,598 | py | #!/usr/bin/env python
from collections import defaultdict
from copy import copy
from importlib import import_module
import os
import sys
# pylint: disable=W0311
class Processors(object):
"""
Contains the candidate processors that we want to compare.
"""
module_dir = "compressor"
def __init__(self, options, msg_types, output):
self.options = options
self.msg_types = msg_types
self.output = output
self.warned = {'http1_gzip': True} # procs with no decompress support
self.processors = self.get_processors(options.processor_names)
def get_processors(self, processor_names):
"""
Get a hash of codec names to processors.
"""
procs = defaultdict(list)
for name in processor_names:
if "=" in name:
module_name, param_str = name.split("=", 1)
if param_str[0] == param_str[-1] == '"':
param_str = param_str[1:-1]
params = [param.strip() for param in param_str.split(',')]
else:
module_name = name
params = []
module = import_module("%s.%s" % (self.module_dir, module_name))
procs['req'].append(module.Processor(self.options, True, params))
procs['res'].append(module.Processor(self.options, False, params))
return procs
def process_stream(self, stream):
"""
Process the messages in the stream with all processors, and record
results.
"""
for (hdrs, host) in stream.messages:
results = self.process_message(hdrs, stream.msg_type, host)
for proc_name, resu in results.items():
if proc_name == self.options.baseline:
ratio = 1.0
else:
ratio = 1.0 * resu['size'] / results[self.options.baseline]['size']
stream.record_result(proc_name, resu['size'], ratio, resu['time'])
def process_message(self, hdrs, msg_type, host):
"""
message is a HTTP header dictionary in the format described in
compression.BaseProcessor.
msg_type is 'req' or 'res'.
host is the host header of the associated request.
Returns a dictionary of processor names mapped to their results.
"""
results = {}
for processor in self.processors[msg_type]:
start_time = sum(os.times()[:2])
compressed = processor.compress(copy(hdrs), host)
results[processor.name] = {
'size': len(compressed),
'time': sum(os.times()[:2]) - start_time
}
if self.options.verbose > 3:
txt = unicode(compressed, 'utf-8', 'replace') \
.encode('utf-8', 'replace')
self.output("# %s\n%s" % (processor.name, txt))
if txt[-1] != "\n":
self.output("\n\n")
try:
decompressed = processor.decompress(compressed)
except NotImplementedError:
if processor.name not in self.warned.keys():
sys.stderr.write(
" WARNING: %s decompression not checked.\n" % processor.name
)
self.warned[processor.name] = True
continue
compare_result = self.compare_headers(copy(hdrs), decompressed)
if compare_result:
self.output(' - mismatch in %s' % processor.name)
if self.options.verbose > 1:
self.output(':\n' + compare_result + "\n")
self.output("\n")
return results
@staticmethod
def compare_headers(a_hdr, b_hdr):
"""
Compares two dicts of headers, and returns a message denoting any
differences. It ignores:
- ordering differences in cookies
- connection headers
- HTTP version
- HTTP status phrase
If nothing is different, it returns an empty string. If it is, it
returns a string explaining what is different.
"""
output = []
for d_hdr in [a_hdr, b_hdr]:
if 'cookie' in d_hdr.keys():
splitvals = d_hdr['cookie'].split(';')
d_hdr['cookie'] = \
'; '.join(sorted([x.lstrip(' ') for x in splitvals]))
conn = [v.strip().lower() for v in a_hdr.get("connection", "").split(",")]
for (key, val) in a_hdr.iteritems():
if key in "connection" or conn:
pass
elif key in [':version', ':status-text']:
pass
elif not key in b_hdr:
output.append(' %s present in only one (A)' % key)
continue
elif val.strip() != b_hdr[key].strip():
output.append(' %s has mismatched values' % key)
output.append(' a -> %s' % val)
output.append(' b -> %s' % b_hdr[key])
if b_hdr.has_key(key):
del b_hdr[key]
for key in b_hdr.keys():
output.append(' %s present in only one (B)' % key)
return '\n'.join(output)
| [
"mnot@mnot.net"
] | mnot@mnot.net |
b586fd7a1f383dad2ec0742dffc9914c39b312cf | 23930efd5dc32e0959e916ce5aa5acd9cf5df147 | /ecoc/MonteCarlo/mc2p_1/mc2p_1_11.py | 2fff9b07d0ff67aebc66d2d224d7a9608e884f97 | [] | no_license | LiYan1988/ProbabilisticModel | 4c3778aa93bf71d6e248035a2a67445879e8ad6e | 4d9a11eb1f399323a00949be0796b857feca14fa | refs/heads/master | 2021-01-21T12:11:57.134088 | 2017-06-14T20:52:36 | 2017-06-14T20:52:36 | 81,770,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 09:14:36 2017
@author: li
"""
from solve_MILP import *
import os
os.chdir('/scratch/ly6j/backup/probabilisticModel/mc2p_1')
kwargs = {'mipfocus': 1, 'symmetry': 1, 'threads': 4, 'timelimit': 1200, 'presolve': 2, 'heuristics': 0.55}
Ii_hint = [33, 20, 19, 22, 29, 39, 15, 8, 27, 50, 4, 28, 13]
array_id = 11
mat_id = range(1, 51)
for n in mat_id:
input_file = 'for_python_1_{}_{}.mat'.format(array_id, n)
output_file = 'solution_{}_{}.pkl'.format(array_id, n)
solve(input_file, output_file, Ii_hint, **kwargs) | [
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
cd197ccdf2cf11ee1fbd3d438e92db4d65c6ddda | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/volume/caps/y/_fill.py | ab34861252a95cbf68b0e9d373aecfb7d2b96efb | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 469 | py | import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="fill", parent_name="volume.caps.y", **kwargs):
super(FillValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
**kwargs
)
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
c5fbc2e7dcf954721b39195806c383ace17ee76f | 22fe6ed51715486ebbc09e404504ed4d7a28c37d | /python-katas/EdabReverseLst.py | dccf18e9bdc92a5c02ed637f686670978d16eef2 | [] | no_license | Jethet/Practice-more | 1dd3ff19dcb3342a543ea1553a1a6fb0264b9c38 | 8488a679730e3406329ef30b4f438d41dd3167d6 | refs/heads/master | 2023-01-28T14:51:39.283741 | 2023-01-06T10:14:41 | 2023-01-06T10:14:41 | 160,946,017 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | # Write a function to reverse a list.
def reverse(lst):
return lst[::-1]
print(reverse([1, 2, 3, 4])) #➞ [4, 3, 2, 1]
print(reverse([9, 9, 2, 3, 4])) #➞ [4, 3, 2, 9, 9]
print(reverse([])) #➞ []
| [
"henriette.hettinga@gmail.com"
] | henriette.hettinga@gmail.com |
8fdb48483b20906a7daa113cc024d5973b600401 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_233/ch83_2020_04_22_11_04_53_464001.py | 63f3d097f7722e358eb5ae7ee9fb5173eed03dd6 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | def medias_por_inicial(notas):
medias = {}
alunos_por_inicial = {}
for nome in notas.keys():
inicial = nome[0]
if inicial not in medias.keys():
medias[inicial] = notas[nome]
alunos_por_inicial[inicial] = 1
else:
medias[inicial] += notas[nome]
alunos_por_inicial[inicial] += 1
for inicial in medias.keys():
medias[inicial] /= alunos_por_inicial[inicial]
return medias | [
"you@example.com"
] | you@example.com |
01e98f8c2af803c513e1e2dfc6304b82ed15cc8f | 183e4126b2fdb9c4276a504ff3ace42f4fbcdb16 | /I семестр/Програмування (Python)/Лабораторні/Лисенко 6116/Python/Лабораторна 5/Завдання.py | e0cddb38b73fd682715e0641a1ef8250cc04e7bb | [] | no_license | Computer-engineering-FICT/Computer-engineering-FICT | ab625e2ca421af8bcaff74f0d37ac1f7d363f203 | 80b64b43d2254e15338060aa4a6d946e8bd43424 | refs/heads/master | 2023-08-10T08:02:34.873229 | 2019-06-22T22:06:19 | 2019-06-22T22:06:19 | 193,206,403 | 3 | 0 | null | 2023-07-22T09:01:05 | 2019-06-22T07:41:22 | HTML | UTF-8 | Python | false | false | 1,258 | py | d = {'Київ': 2908088, 'Москва': 12197596, 'Париж': 2243833,
'Вашингтон': 601723, 'Токіо': 13185502, 'Афіни': 664046}
min_num, max_num = input('Введіть мінімальну кількісь жителів: '), input('та максимальну: ')
if min_num != "" != max_num:
if min_num.isdigit() and max_num.isdigit():
min_num, max_num = int(min_num), int(max_num)
if max_num >= min_num:
find_city = ["Місто", "Населення"]
check = False
for i in d.items():
if min_num <= i[1] <= max_num:
find_city.extend(i)
check = True
if check:
l = int(len(find_city)/2)
print(('{:_<10}{:_>10}\n'*l).format(*find_city))
else:
print('У словнику немає міст з населенням у заданому діапазоні')
else:
print("Мінімальне значення не повинне перевищувати максимальне")
else:
print("Треба ввести натуральні числа")
else:
print("Введенний рядок - пустий")
| [
"mazanyan027@gmail.com"
] | mazanyan027@gmail.com |
185701c63ea25a9a3fe9b5208b5cb34979998f5b | 1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2 | /lab/lab06/tests/q1_8.py | 6e0714ed54d379ff3685d75575311282d166d713 | [] | no_license | taylorgibson/ma4110-fa21 | 201af7a044fd7d99140c68c48817306c18479610 | a306e1b6e7516def7de968781f6c8c21deebeaf5 | refs/heads/main | 2023-09-05T21:31:44.259079 | 2021-11-18T17:42:15 | 2021-11-18T17:42:15 | 395,439,687 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | test = { 'name': 'q1_8',
'points': None,
'suites': [ { 'cases': [ {'code': '>>> type(simulation_and_statistic(.5, model_proportions)) == float\nTrue', 'hidden': False, 'locked': False},
{'code': '>>> 0 <= simulation_and_statistic(.5, model_proportions) <= 25\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"taylorgibson@gmail.com"
] | taylorgibson@gmail.com |
f49e7bccdb3a5a086f23b2019d4c3c4db390ba89 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/test/test_longexp.py | ddf0ce819b366bec1421782184cea6747bef3671 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:0c6674ff951062a7567a3b2cf1b5434c443040bb2e75254aab689fa55f16043c
size 243
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
fba232cfc312730f84819a63e4732abb4246bc25 | a9868b17ddc5f7f28911c57870e327238a2432d8 | /python_Pandas_Numpy/Pandas/Pandas05_16_GroupbyChk02year_최윤종.py | 6fa5fca86687e364f586cdbf1a367b6f47e8036a | [] | no_license | ChoiYoonJong/DataScience | 439568a668307ed0cab0cffb688fd832b10047ab | 3cab98eacecd8c1782e6f91b2b7ffa0ecefe4ed1 | refs/heads/main | 2023-06-07T02:56:08.335411 | 2021-07-09T13:23:58 | 2021-07-09T13:23:58 | 378,833,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py |
# coding: utf-8
# In[4]:
import pandas
# In[5]:
df = pandas.read_csv('../data/gapminder.tsv', sep='\t')
# In[6]:
print(df.groupby('year')['lifeExp'].mean())
# In[9]:
df['year'].unique()
# In[10]:
uniList = df['year'].unique()
print(type(uniList))
print(uniList,"\n====>")
# In[11]:
for idx in uniList:
print(idx, "=====> 1 :")
grYear = df[df['year'] == idx]
print(len(grYear), "\n ====> 2 \n:", grYear.head(3), "n =====> 3 :", grYear.shape)
print(grYear["lifeExp"].mean())
| [
"noreply@github.com"
] | ChoiYoonJong.noreply@github.com |
47e8dc654006635a6126a4703d0ce6a961458a72 | ffed7b18b2c06e807f8fa8b2c685ffcb7f3d578a | /rlgraph/components/policies/shared_value_function_policy.py | 73bbe372f5274f3a99d63359c6d7b25a7192d2a6 | [
"Apache-2.0"
] | permissive | theSoenke/rlgraph | 869eecba1bf4b1a4bd4f1b9a8d8a479377125a11 | a5ebf55820bce2d02dff22bb6db6247699fd6740 | refs/heads/master | 2020-05-02T16:31:23.346165 | 2019-04-02T10:13:02 | 2019-04-02T10:13:02 | 178,071,154 | 0 | 0 | Apache-2.0 | 2019-03-27T20:39:15 | 2019-03-27T20:39:14 | null | UTF-8 | Python | false | false | 6,257 | py | # Copyright 2018/2019 The Rlgraph Authors, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph.components.layers.nn.dense_layer import DenseLayer
from rlgraph.components.neural_networks.neural_network import NeuralNetwork
from rlgraph.components.policies.policy import Policy
from rlgraph.utils.decorators import rlgraph_api
class SharedValueFunctionPolicy(Policy):
def __init__(self, network_spec, value_weights_spec=None, value_biases_spec=None, value_activation=None,
value_fold_time_rank=False, value_unfold_time_rank=False,
scope="shared-value-function-policy", **kwargs):
super(SharedValueFunctionPolicy, self).__init__(network_spec, scope=scope, **kwargs)
# Create the extra value dense layer with 1 node.
self.value_unfold_time_rank = value_unfold_time_rank
self.value_network = NeuralNetwork(DenseLayer(
units=1,
activation=value_activation,
weights_spec=value_weights_spec,
biases_spec=value_biases_spec,
), fold_time_rank=value_fold_time_rank, unfold_time_rank=value_unfold_time_rank,
scope="value-function-node")
self.add_components(self.value_network)
@rlgraph_api
def get_state_values(self, nn_input, internal_states=None):
"""
Returns the state value node's output.
Args:
nn_input (any): The input to our neural network.
internal_states (Optional[any]): The initial internal states going into an RNN-based neural network.
Returns:
Dict:
state_values: The single (but batched) value function node output.
"""
nn_output = self.get_nn_output(nn_input, internal_states)
if self.value_unfold_time_rank is True:
state_values = self.value_network.apply(nn_output["output"], nn_input)
else:
state_values = self.value_network.apply(nn_output["output"])
return dict(state_values=state_values["output"], last_internal_states=nn_output.get("last_internal_states"))
@rlgraph_api
def get_state_values_logits_parameters_log_probs(self, nn_input, internal_states=None):
"""
Similar to `get_values_logits_probabilities_log_probs`, but also returns in the return dict under key
`state_value` the output of our state-value function node.
Args:
nn_input (any): The input to our neural network.
internal_states (Optional[any]): The initial internal states going into an RNN-based neural network.
Returns:
Dict:
state_values: The single (but batched) value function node output.
logits: The (reshaped) logits from the ActionAdapter.
parameters: The parameters for the distribution (gained from the softmaxed logits or interpreting
logits as mean and stddev for a normal distribution).
log_probs: The log(probabilities) values.
last_internal_states: The last internal states (if network is RNN-based).
"""
nn_output = self.get_nn_output(nn_input, internal_states)
logits, parameters, log_probs = self._graph_fn_get_action_adapter_logits_parameters_log_probs(
nn_output["output"], nn_input
)
if self.value_unfold_time_rank is True:
state_values = self.value_network.apply(nn_output["output"], nn_input)
else:
state_values = self.value_network.apply(nn_output["output"])
return dict(state_values=state_values["output"], logits=logits, parameters=parameters, log_probs=log_probs,
last_internal_states=nn_output.get("last_internal_states"))
@rlgraph_api
def get_state_values_logits_probabilities_log_probs(self, nn_input, internal_states=None):
"""
Similar to `get_values_logits_probabilities_log_probs`, but also returns in the return dict under key
`state_value` the output of our state-value function node.
Args:
nn_input (any): The input to our neural network.
internal_states (Optional[any]): The initial internal states going into an RNN-based neural network.
Returns:
Dict:
state_values: The single (but batched) value function node output.
logits: The (reshaped) logits from the ActionAdapter.
probabilities: The probabilities gained from the softmaxed logits.
log_probs: The log(probabilities) values.
last_internal_states: The last internal states (if network is RNN-based).
"""
self.logger.warn("Deprecated API method `get_state_values_logits_probabilities_log_probs` used!"
"Use `get_state_values_logits_parameters_log_probs` instead.")
nn_output = self.get_nn_output(nn_input, internal_states)
logits, parameters, log_probs = self._graph_fn_get_action_adapter_logits_parameters_log_probs(
nn_output["output"], nn_input
)
if self.value_unfold_time_rank is True:
state_values = self.value_network.apply(nn_output["output"], nn_input)
else:
state_values = self.value_network.apply(nn_output["output"])
return dict(state_values=state_values["output"], logits=logits, probabilities=parameters,
parameters=parameters, log_probs=log_probs,
last_internal_states=nn_output.get("last_internal_states"))
| [
"svenmika1977@gmail.com"
] | svenmika1977@gmail.com |
bf604da1d82d6bf059b6ed721319632c33211563 | 9036bd26d8fdd28c4fb518ed8eebb8688d85ed09 | /CursoEmVídeo/Mundo 2/Laços/tabuada.py | 899ac4cc3da2bc2c38445d766d4188f2503cc0c4 | [] | no_license | ijockeroficial/Python | efb7fffda4f32b030475023f337236695a854b23 | 918feb42dd592a18f1b29d4565baf43f4fed4efb | refs/heads/master | 2023-08-16T22:38:35.275739 | 2023-08-14T15:35:54 | 2023-08-14T15:35:54 | 273,336,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | numero = int(input("Digite um número: "))
#Dessa forma aqui teria que fazer vários IF's..
'''
for x in range(1, 10):
if numero == 1:
print("{} x {} = {}".format(numero, x, (x * numero)))
elif numero == 2:
print("{} x {} = {}".format(numero, x, (x * numero)))
'''
#Aqui já ficou mais simples e o código menor.
for x in range(1, 10):
if numero == x:
print("{} x {} = {}".format(numero, x, (x * numero)))
else: print("{} x {} = {}".format(numero, x, (x * numero)))
| [
"ijockeroficial@gmail.com"
] | ijockeroficial@gmail.com |
7b8d8b9855a7e10f8a965142905a97a3ff65c5b9 | 287c663c97e7840239794fbe84ce285773b72985 | /app/__init__.py | b73bce305c9b0d621fc9e9c4863d0c029abaf881 | [
"MIT"
] | permissive | mzazakeith/flask-blog | ea8e5b2da9a581eb026564c1b9e500fa0532ee88 | 2833404cc5e96ffdbfb767f35b9caf2bdcce7997 | refs/heads/master | 2020-03-21T21:24:57.296282 | 2018-07-02T20:20:24 | 2018-07-02T20:20:24 | 139,062,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
# Instances of flask extensions
# Instance of LoginManger and using its methods
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
def create_app(config_name):
"""
Function that takes configuration setting key as an argument
Args:
config_name : name of the configuration to be used
"""
# Initialising application
app = Flask(__name__)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initialising flask extensions
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
# Regestering the main blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# Regestering the auth bluprint
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/')
return app
| [
"mzazakeith@gmail.com"
] | mzazakeith@gmail.com |
5a609ad0ab93e8fb87b9beda95e474f16f19c877 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayEcoLogisticsExpressPriceModifyModel.py | 374a7c32772d79071206b49960b2d8a694aaf9ff | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 5,456 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoLogisticsExpressPriceModifyModel(object):
def __init__(self):
self._area_type = None
self._extra_weight_price = None
self._extra_weight_unit = None
self._from_code = None
self._logis_merch_code = None
self._preset_weight = None
self._preset_weight_price = None
self._product_type_code = None
self._to_code = None
@property
def area_type(self):
return self._area_type
@area_type.setter
def area_type(self, value):
self._area_type = value
@property
def extra_weight_price(self):
return self._extra_weight_price
@extra_weight_price.setter
def extra_weight_price(self, value):
self._extra_weight_price = value
@property
def extra_weight_unit(self):
return self._extra_weight_unit
@extra_weight_unit.setter
def extra_weight_unit(self, value):
self._extra_weight_unit = value
@property
def from_code(self):
return self._from_code
@from_code.setter
def from_code(self, value):
self._from_code = value
@property
def logis_merch_code(self):
return self._logis_merch_code
@logis_merch_code.setter
def logis_merch_code(self, value):
self._logis_merch_code = value
@property
def preset_weight(self):
return self._preset_weight
@preset_weight.setter
def preset_weight(self, value):
self._preset_weight = value
@property
def preset_weight_price(self):
return self._preset_weight_price
@preset_weight_price.setter
def preset_weight_price(self, value):
self._preset_weight_price = value
@property
def product_type_code(self):
return self._product_type_code
@product_type_code.setter
def product_type_code(self, value):
self._product_type_code = value
@property
def to_code(self):
return self._to_code
@to_code.setter
def to_code(self, value):
self._to_code = value
def to_alipay_dict(self):
params = dict()
if self.area_type:
if hasattr(self.area_type, 'to_alipay_dict'):
params['area_type'] = self.area_type.to_alipay_dict()
else:
params['area_type'] = self.area_type
if self.extra_weight_price:
if hasattr(self.extra_weight_price, 'to_alipay_dict'):
params['extra_weight_price'] = self.extra_weight_price.to_alipay_dict()
else:
params['extra_weight_price'] = self.extra_weight_price
if self.extra_weight_unit:
if hasattr(self.extra_weight_unit, 'to_alipay_dict'):
params['extra_weight_unit'] = self.extra_weight_unit.to_alipay_dict()
else:
params['extra_weight_unit'] = self.extra_weight_unit
if self.from_code:
if hasattr(self.from_code, 'to_alipay_dict'):
params['from_code'] = self.from_code.to_alipay_dict()
else:
params['from_code'] = self.from_code
if self.logis_merch_code:
if hasattr(self.logis_merch_code, 'to_alipay_dict'):
params['logis_merch_code'] = self.logis_merch_code.to_alipay_dict()
else:
params['logis_merch_code'] = self.logis_merch_code
if self.preset_weight:
if hasattr(self.preset_weight, 'to_alipay_dict'):
params['preset_weight'] = self.preset_weight.to_alipay_dict()
else:
params['preset_weight'] = self.preset_weight
if self.preset_weight_price:
if hasattr(self.preset_weight_price, 'to_alipay_dict'):
params['preset_weight_price'] = self.preset_weight_price.to_alipay_dict()
else:
params['preset_weight_price'] = self.preset_weight_price
if self.product_type_code:
if hasattr(self.product_type_code, 'to_alipay_dict'):
params['product_type_code'] = self.product_type_code.to_alipay_dict()
else:
params['product_type_code'] = self.product_type_code
if self.to_code:
if hasattr(self.to_code, 'to_alipay_dict'):
params['to_code'] = self.to_code.to_alipay_dict()
else:
params['to_code'] = self.to_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoLogisticsExpressPriceModifyModel()
if 'area_type' in d:
o.area_type = d['area_type']
if 'extra_weight_price' in d:
o.extra_weight_price = d['extra_weight_price']
if 'extra_weight_unit' in d:
o.extra_weight_unit = d['extra_weight_unit']
if 'from_code' in d:
o.from_code = d['from_code']
if 'logis_merch_code' in d:
o.logis_merch_code = d['logis_merch_code']
if 'preset_weight' in d:
o.preset_weight = d['preset_weight']
if 'preset_weight_price' in d:
o.preset_weight_price = d['preset_weight_price']
if 'product_type_code' in d:
o.product_type_code = d['product_type_code']
if 'to_code' in d:
o.to_code = d['to_code']
return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
695e788e5edef7d686c40c61f68a223e8d259b4e | b9a2daa725e1548c074fbf7b68a0926ddba3d43d | /venv/Scripts/django-admin.py | b7256d4e49ad4dc9845c803e7d75fc0480f397c6 | [] | no_license | aris-osorio/dolly-api | 87ba03d945bd745b6d396f15af271d21c4180b9a | fb702451393e7d550ea98ac889ba854f9be83146 | refs/heads/main | 2023-03-29T11:10:11.883987 | 2021-04-08T20:48:08 | 2021-04-08T20:48:08 | 356,044,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #!c:\users\dell\documents\academlo\django\dolly_api\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"aris.osorio@alumnos.udg.mx"
] | aris.osorio@alumnos.udg.mx |
e1be58ed2fa525e8eba89fbd2e62bf9e1a1c90c1 | 40be08bbfed4bd6a951c18cc4bc0bf1f00e7e8a6 | /test/old/Old2/modules/Modules_Callextlib.py | 8a87d7d61556f0d8917d83ec68d2d949690068e0 | [
"BSD-3-Clause"
] | permissive | pulsar-chem/Pulsar-Core | 5bf4239c0a0de74d3f12a1c8b9bea2867fd8960c | f8e64e04fdb01947708f098e833600c459c2ff0e | refs/heads/master | 2021-01-18T06:51:05.905464 | 2017-06-04T02:31:44 | 2017-06-04T02:31:44 | 46,251,809 | 0 | 2 | null | 2017-05-25T14:59:51 | 2015-11-16T04:21:59 | C++ | UTF-8 | Python | false | false | 1,093 | py | #!/usr/bin/env python3
import os
import sys
import argparse
import traceback
# Add the pulsar path
thispath = os.path.dirname(os.path.realpath(__file__))
psrpath = os.path.join(os.path.dirname(thispath), "../", "modules")
sys.path.insert(0, psrpath)
import pulsar as psr
def Run(mm):
try:
out = psr.output.get_global_output()
# Load the python modules
# supermodule module name key
mm.load_module("TestModules", "TestExtLib", "TESTEXTLIB")
mm.print(out)
mm.sanity_check()
b1 = mm.get_module("TESTEXTLIB", 0)
b1.run_test()
psr.output.print_global_output("\n")
psr.output.print_global_output("\nDone testing\n")
except Exception as e:
psr.output.print_global_output("Caught exception in main handler\n")
traceback.print_exc()
psr.output.print_global_error("\n")
psr.output.print_global_error(str(e))
psr.output.print_global_error("\n")
psr.initialize(sys.argv, color = True, debug = True)
with psr.ModuleAdministrator() as mm:
Run(mm)
psr.finalize()
| [
"ben@bennyp.org"
] | ben@bennyp.org |
229049a938f96cb2b4dff2a9e4190690a45593e1 | 6c3ef00dfa8a97bb116d6d1601e881b5e1803019 | /PythonLeet/CutOffTreesGolf/cutofftrees.py | bf49968f1231267d0b1fa0a2bf7c8f677e46ce2e | [] | no_license | kedarpujara/LeetCode | 184b39062ff57d1d429baac75c8641f87e7d6795 | 91becd9a31b53026922ae3dfc203132abc148d2c | refs/heads/master | 2020-03-08T16:54:14.292400 | 2018-10-15T00:59:36 | 2018-10-15T00:59:36 | 128,254,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | def cutoff(forest):
visited = [[False for y in range(len(forest))] for x in range(len(forest[0]))]
print visited
canVisit = True
i = 0
j = 0
while canVisit:
if visited[i][j] == False:
visited[i][j] = True
else:
#skip to another one
#for tree in forest:
def get_min(i,j,forest):
val1 = val2 = val3 = val4 = val5 = 0
min_val = 0
if forest[i-1][j]:
if visited[i-1][j] == False and forest[i-1][j] != 0:
val1 = forest[i-1][j]
if forest[i][j-1]:
if visited[i][j-1] == False and forest[i][j-1] != 0:
val2 = forest[i][j-1]
if forest[i][j+1]:
if visited[i][j+1] == False and forest[i][j+1] != 0:
val3 = forest[i][j+1]
if forest[i+1][j]:
if visited[i+1][j] == False and forest[i+1][j] != 0:
val4 = forest[i+1][j]
if forest[i+1][j+1]:
if visited[i+1][j+1] == False and forest[i+1][j+1] != 0:
val5 = forest[i+1][j+1]
forest = [
[1,2,3],
[0,0,4],
[7,6,5]
]
print(cutoff(forest)) | [
"kedarpujara@gmail.com"
] | kedarpujara@gmail.com |
d9243182c5a162fedbd6a91c11d609744da8c83b | 3dd1cccf1a310b6b5f4d046fe680c5ad935833e6 | /tests/color_kd_test.py | 8b2604b1315b9e5ade03ab34f14f43eb1c82735f | [
"MIT"
] | permissive | nacleric/babi | 4660a89ef5d35c53204447330bdd8f770323286d | f1ce6d995bf3859459c5dda1d2188b6992434d30 | refs/heads/master | 2022-05-06T19:02:14.180669 | 2022-04-03T03:21:44 | 2022-04-03T03:21:44 | 218,426,293 | 0 | 0 | MIT | 2019-10-30T02:26:28 | 2019-10-30T02:26:27 | null | UTF-8 | Python | false | false | 1,844 | py | from __future__ import annotations
from babi import color_kd
from babi.color import Color
def test_build_trivial():
assert color_kd._build([]) is None
def test_build_single_node():
kd = color_kd._build([(Color(0, 0, 0), 255)])
assert kd == color_kd._KD(Color(0, 0, 0), 255, left=None, right=None)
def test_build_many_colors():
kd = color_kd._build([
(Color(0, 106, 200), 255),
(Color(1, 105, 201), 254),
(Color(2, 104, 202), 253),
(Color(3, 103, 203), 252),
(Color(4, 102, 204), 251),
(Color(5, 101, 205), 250),
(Color(6, 100, 206), 249),
])
# each level is sorted by the next dimension
assert kd == color_kd._KD(
Color(3, 103, 203),
252,
left=color_kd._KD(
Color(1, 105, 201), 254,
left=color_kd._KD(Color(2, 104, 202), 253, None, None),
right=color_kd._KD(Color(0, 106, 200), 255, None, None),
),
right=color_kd._KD(
Color(5, 101, 205), 250,
left=color_kd._KD(Color(6, 100, 206), 249, None, None),
right=color_kd._KD(Color(4, 102, 204), 251, None, None),
),
)
def test_nearest_trivial():
assert color_kd.nearest(Color(0, 0, 0), None) == 0
def test_nearest_one_node():
kd = color_kd._build([(Color(100, 100, 100), 99)])
assert color_kd.nearest(Color(0, 0, 0), kd) == 99
def test_nearest_on_square_distance():
kd = color_kd._build([
(Color(50, 50, 50), 255),
(Color(50, 51, 50), 254),
])
assert color_kd.nearest(Color(0, 0, 0), kd) == 255
assert color_kd.nearest(Color(52, 52, 52), kd) == 254
def test_smoke_kd_256():
kd_256 = color_kd.make_256()
assert color_kd.nearest(Color(0, 0, 0), kd_256) == 16
assert color_kd.nearest(Color(0x1e, 0x77, 0xd3), kd_256) == 32
| [
"asottile@umich.edu"
] | asottile@umich.edu |
0fcd71de50d72bae25ef638ec74f61bc7b256654 | 7d79d7338b955956e6f576945bc12c0cea66aab4 | /config/settings.py | 2a2532e3632107d1772811574989dd1679428ae9 | [] | no_license | parkhongbeen/Django | 741280e59b00f58968e126b8873d535c3ba4e09e | 8fc55c2decfdaaf5d3acbdb2e3b616a7118292fd | refs/heads/master | 2022-09-19T22:53:25.615097 | 2019-12-17T06:18:59 | 2019-12-17T06:18:59 | 227,061,684 | 0 | 0 | null | 2022-08-23T17:58:29 | 2019-12-10T08:02:38 | Jupyter Notebook | UTF-8 | Python | false | false | 3,751 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print('settings.py의 BASE_DIR:', BASE_DIR)
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
print('settings.py의 TEMPLATES_DIR:', TEMPLATES_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rf+ng-5ya4rop&*$m#ve&r@z(2)04437api0us6tm)43d4fkx+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# PYPI패키지
# shell_plus를 쓰기 위한 library
'django_extensions',
# 이 패키지는 application취급
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
# Template관련 설정
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 기본설정 외에 Template을 찾을 경로 목록
'DIRS': [
# djangogirls/templates/ 폴더를 추가
TEMPLATES_DIR,
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# 정적파일을 찾는 경로를 추가
STATICFILES_DIRS = [
# djangogirls/static 폴더 path가 여기에 추가되도록 한다
os.path.join(BASE_DIR, 'static'),
] | [
"pack122@naver.com"
] | pack122@naver.com |
c6e8a6480cbc2210c7ce1590286e24b9c832cf53 | f88f900c0384f6da82eeb749371ad44115527700 | /course-book/04-basic-comp/0407-select-roi.py | bd8ce06f7c5ac2be728ee7d908531913e7d19749 | [] | no_license | aaron-kr/learning-opencv | eff382e8f0c822400f765451d57b192a63cd1b74 | 158239f0140569aec519fc1fbf255c54ef2567d2 | refs/heads/main | 2023-08-21T11:02:49.775425 | 2021-10-27T00:04:01 | 2021-10-27T00:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # 0407.py
import cv2
import numpy as np
src = cv2.imread('../../img/spirit-week.jpg', cv2.IMREAD_GRAYSCALE)
roi = cv2.selectROI(src)
print('roi = ', roi)
img = src[roi[1]:roi[1] + roi[3],
roi[0]:roi[0] + roi[2]]
cv2.imshow('img', img)
cv2.waitKey()
cv2.destroyAllWindows() | [
"jekkilekki@gmail.com"
] | jekkilekki@gmail.com |
5a3237ceffbadd3c1ef8e2d5eecef4318d240e48 | 4771d5aa867ed64a08440b1be21936e893edcf77 | /basics/graph.py | 5ade622bf1205022c7ca9cb9bff8ed433e0fe264 | [] | no_license | dipu-bd/tensorflow-practice | 58aca6cae6bbeca588b7af52bf944ff8d6d7c99a | 53e2e08dc93691c71ffb003251781ae541a4e236 | refs/heads/master | 2021-01-24T08:47:28.904901 | 2017-06-17T00:38:22 | 2017-06-17T00:38:22 | 93,395,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | import tensorflow as tf
a = tf.placeholder(tf.float32, name='a')
b = tf.placeholder(tf.float32, name='b')
x = tf.add(a, b, name='add_a_b')
y = tf.multiply(a, b, name='mul_a_b')
z = tf.add(x, y, name='add_x_y')
sess = tf.Session()
writer = tf.summary.FileWriter('.bin', sess.graph)
res = sess.run(z, {a: 3, b: 5})
print('\nResult:', res)
writer.close()
sess.close()
| [
"dipu.sudipta@gmail.com"
] | dipu.sudipta@gmail.com |
a33c48a536e05e1895c3add7bef566c6bc2632f0 | e174e13114fe96ad2a4eeb596a3d1c564ae212a8 | /Python for Finance Analyze Big Financial Data by Y. Hilpisch/Code of Python For Finance/4375OS_05_Code/4375OS_05_05_del_math_module.py | 9056dae909499864ac1f688e45a4de3b926fa082 | [] | no_license | Kevinqian0501/python_books | c1a7632d66dceb46db439f7cbed86d85370aab42 | 0691e4685af03a296aafb02447e3585db55ce461 | refs/heads/master | 2021-08-30T19:27:03.985464 | 2017-12-19T05:56:31 | 2017-12-19T05:56:31 | 104,145,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | """
Name : 4375OS_05_05_del_math_module.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import math
print math.sqrt(3)
del math
print math.sqrt(3) # you will see an errer message
| [
"kevin@Qians-MacBook-Pro.local"
] | kevin@Qians-MacBook-Pro.local |
d6cf92587b738328eb4b7ee0bc50090f9f79f85a | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/management/commands/xlint.py | 655a2af01ee7b30c7a3401aaead12c4eecde2125 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 995 | py | """
Verify the structure of courseware as to it's suitability for import
"""
from argparse import REMAINDER
from django.core.management.base import BaseCommand
from xmodule.modulestore.xml_importer import perform_xlint
class Command(BaseCommand):
"""Verify the structure of courseware as to its suitability for import"""
help = """
Verify the structure of courseware as to its suitability for import.
To run: manage.py cms <data directory> [<course dir>...]
"""
def add_arguments(self, parser):
parser.add_argument('data_dir')
parser.add_argument('source_dirs', nargs=REMAINDER)
def handle(self, *args, **options):
"""Execute the command"""
data_dir = options['data_dir']
source_dirs = options['source_dirs']
print("Importing. Data_dir={data}, source_dirs={courses}".format(
data=data_dir,
courses=source_dirs))
perform_xlint(data_dir, source_dirs, load_error_modules=False)
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
b7122f570a1c4af04759b91bb9965d0f21c201c5 | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/common/Lib/distutils/spawn.py | 28afad9b9a8bce41e1d3090f898b6a61b07fa537 | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 5,826 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/distutils/spawn.py
__revision__ = '$Id$'
import sys
import os
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils.debug import DEBUG
from distutils import log
def spawn(cmd, search_path=1, verbose=0, dry_run=0):
cmd = list(cmd)
if os.name == 'posix':
_spawn_posix(cmd, search_path, dry_run=dry_run)
elif os.name == 'nt':
_spawn_nt(cmd, search_path, dry_run=dry_run)
elif os.name == 'os2':
_spawn_os2(cmd, search_path, dry_run=dry_run)
else:
raise DistutilsPlatformError, "don't know how to spawn programs on platform '%s'" % os.name
def _nt_quote_args(args):
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args
def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
executable = cmd[0]
cmd = _nt_quote_args(cmd)
if search_path:
executable = find_executable(executable) or executable
log.info(' '.join([executable] + cmd[1:]))
if not dry_run:
try:
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError as exc:
if not DEBUG:
cmd = executable
raise DistutilsExecError, 'command %r failed: %s' % (cmd, exc[-1])
if rc != 0:
if not DEBUG:
cmd = executable
raise DistutilsExecError, 'command %r failed with exit status %d' % (cmd, rc)
def _spawn_os2(cmd, search_path=1, verbose=0, dry_run=0):
executable = cmd[0]
if search_path:
executable = find_executable(executable) or executable
log.info(' '.join([executable] + cmd[1:]))
if not dry_run:
try:
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError as exc:
if not DEBUG:
cmd = executable
raise DistutilsExecError, 'command %r failed: %s' % (cmd, exc[-1])
if rc != 0:
if not DEBUG:
cmd = executable
log.debug('command %r failed with exit status %d' % (cmd, rc))
raise DistutilsExecError, 'command %r failed with exit status %d' % (cmd, rc)
if sys.platform == 'darwin':
from distutils import sysconfig
_cfg_target = None
_cfg_target_split = None
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
global _cfg_target
global _cfg_target_split
log.info(' '.join(cmd))
if dry_run:
return
else:
executable = cmd[0]
exec_fn = search_path and os.execvp or os.execv
env = None
if sys.platform == 'darwin':
if _cfg_target is None:
_cfg_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or ''
if _cfg_target:
_cfg_target_split = [ int(x) for x in _cfg_target.split('.') ]
if _cfg_target:
cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
if _cfg_target_split > [ int(x) for x in cur_target.split('.') ]:
my_msg = '$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure' % (cur_target, _cfg_target)
raise DistutilsPlatformError(my_msg)
env = dict(os.environ, MACOSX_DEPLOYMENT_TARGET=cur_target)
exec_fn = search_path and os.execvpe or os.execve
pid = os.fork()
if pid == 0:
try:
if env is None:
exec_fn(executable, cmd)
else:
exec_fn(executable, cmd, env)
except OSError as e:
if not DEBUG:
cmd = executable
sys.stderr.write('unable to execute %r: %s\n' % (cmd, e.strerror))
os._exit(1)
if not DEBUG:
cmd = executable
sys.stderr.write('unable to execute %r for unknown reasons' % cmd)
os._exit(1)
else:
while 1:
try:
pid, status = os.waitpid(pid, 0)
except OSError as exc:
import errno
if exc.errno == errno.EINTR:
continue
if not DEBUG:
cmd = executable
raise DistutilsExecError, 'command %r failed: %s' % (cmd, exc[-1])
if os.WIFSIGNALED(status):
if not DEBUG:
cmd = executable
raise DistutilsExecError, 'command %r terminated by signal %d' % (cmd, os.WTERMSIG(status))
if os.WIFEXITED(status):
exit_status = os.WEXITSTATUS(status)
if exit_status == 0:
return
if not DEBUG:
cmd = executable
raise DistutilsExecError, 'command %r failed with exit status %d' % (cmd, exit_status)
if os.WIFSTOPPED(status):
continue
if not DEBUG:
cmd = executable
raise DistutilsExecError, 'unknown error executing %r: termination status %d' % (cmd, status)
return
def find_executable(executable, path=None):
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32' or os.name == 'os2') and ext != '.exe':
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
return f
return
else:
return executable
return
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
b5df1bb110f24dcfa227db63c8e25ff08fa4b21d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/231/58969/submittedfiles/testes.py | d8dd148abe96655ed59b2a5c1cca1a842280b264 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # -*- coding: utf-8 -*-
def vampiro(a):
for i in range(0,len(a),1):
if a[i]>(a[i]+1):
return True
else:
return False
b=[]
n=2
for i in range(0,n,1):
valor=int(input('vv:'))
b.append(valor)
if vampiro(b):
print('s')
else:
print('n')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
cdb32dcef51d89421546886b3875f86dfdae967a | 96cfaaa771c2d83fc0729d8c65c4d4707235531a | /SUSYBSMAnalysis/CSA07Skims/python/lepSUSY_1Muon_0Elec_1Jets_MET_Path_cff.py | 52a2fd2481201402ef2a4b3a3e331993d9219edb | [] | no_license | khotilov/cmssw | a22a160023c7ce0e4d59d15ef1f1532d7227a586 | 7636f72278ee0796d0203ac113b492b39da33528 | refs/heads/master | 2021-01-15T18:51:30.061124 | 2013-04-20T17:18:07 | 2013-04-20T17:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import FWCore.ParameterSet.Config as cms
from SUSYBSMAnalysis.CSA07Skims.lepSUSY_1Muon_0Elec_1Jets_MET_Skim_cfi import *
lepSUSY_1Muon_0Elec_1Jets_MET = cms.Path(lepSUSY_1Muon_0Elec_1Jets_MET_Seq)
| [
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] | sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch |
4a677b33daf3f7b8fd76b3bdd4aa9e33dfc202a6 | f707303e4dfe383cf82c23a6bb42ccfdc4cfdb67 | /pandas-ml-utils-tf/pandas_ml_utils_tf/tf_nn.py | cd941f8f97c2c6ecfc635ef7ee7074c3ac42cd11 | [
"MIT"
] | permissive | jcoffi/pandas-ml-quant | 1830ec256f8c09c04f1aa77e2eecfba07d34fe68 | 650a8e8f77bc4d71136518d1c7ee65c194a99cf0 | refs/heads/master | 2023-08-31T06:45:38.060737 | 2021-09-09T04:44:35 | 2021-09-09T04:44:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | from abc import abstractmethod
from typing import List, Union, Callable
import tensorflow as tf
from keras import Model
from tensorflow.python.ops.variables import VariableMetaclass
from tensorflow.python.training.tracking import base as trackable
class TensorflowNN(object):
# we will later explicitly wrap this function into `@tf.function`
# The Function stores the tf.Graph corresponding to that signature in a ConcreteFunction.
# A ConcreteFunction is a wrapper around a tf.Graph.
def forward_training(self, *input) -> tf.Tensor:
return self.forward_predict(*input)
# we will later explicitly wrap this function into `@tf.function`
# The Function stores the tf.Graph corresponding to that signature in a ConcreteFunction.
# A ConcreteFunction is a wrapper around a tf.Graph.
@abstractmethod
def forward_predict(self, *input) -> tf.Tensor:
raise NotImplementedError
@abstractmethod
def trainable_variables(self) -> List[Union[VariableMetaclass, trackable.Trackable]]:
raise NotImplementedError
class TensorflowNNFactory(TensorflowNN):
@staticmethod
def create(
net: Model,
predictor: Callable[[Model, tf.Tensor], tf.Tensor],
trainer: Callable[[Model, tf.Tensor], tf.Tensor] = None):
def factory(**kwargs):
return TensorflowNNFactory(net, predictor, predictor if trainer is None else trainer, **kwargs)
return factory
def __init__(self, net, predictor, trainer, *args, **kwargs):
super().__init__(*args, **kwargs)
self.net = net
self.predictor = predictor
self.trainer = trainer
def forward_training(self, *input) -> tf.Tensor:
return self.trainer(self.net, *input)
def forward_predict(self, *input) -> tf.Tensor:
return self.predictor(self.net, *input)
def trainable_variables(self) -> List[Union[VariableMetaclass, trackable.Trackable]]:
return self.net.trainable_variables
| [
"kic@kic.kic"
] | kic@kic.kic |
2b189526a687e705a9f01645fe58d4eb4c20c3dd | 2701fe5c6a1c612f08b39119bdd833388eba248e | /examples/00-load/create-unstructured-surface.py | 42cbba74b93c892dda8d32535176211827864d03 | [
"MIT"
] | permissive | pinshuai/pyvista | eab3a43a7ff0e07c82d1d03f35be1524f83355fc | 131d780a6eadcbe051e2af2e1637433424319792 | refs/heads/master | 2020-06-24T19:49:53.093264 | 2019-07-30T22:50:48 | 2019-07-30T22:50:48 | 199,065,857 | 0 | 0 | MIT | 2019-07-26T18:51:44 | 2019-07-26T18:51:44 | null | UTF-8 | Python | false | false | 1,650 | py | """
.. _ref_create_unstructured:
Creating an Unstructured Surface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create an irregular, unstructured grid from NumPy arrays
"""
import pyvista as pv
import vtk
import numpy as np
###############################################################################
# An unstructured grid can be created directly from numpy arrays.
# This is useful when creating a grid from scratch or copying it from another
# format. See `vtkUnstructuredGrid <https://www.vtk.org/doc/nightly/html/classvtkUnstructuredGrid.html>`_
# for available cell types and their descriptions.
# offset array. Identifies the start of each cell in the cells array
offset = np.array([0, 9])
# Contains information on the points composing each cell.
# Each cell begins with the number of points in the cell and then the points
# composing the cell
cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
# cell type array. Contains the cell type of each cell
cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON])
cell1 = np.array(
[
[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1],
]
)
cell2 = np.array(
[
[0, 0, 2],
[1, 0, 2],
[1, 1, 2],
[0, 1, 2],
[0, 0, 3],
[1, 0, 3],
[1, 1, 3],
[0, 1, 3],
]
)
# points of the cell array
points = np.vstack((cell1, cell2))
# create the unstructured grid directly from the numpy arrays
grid = pv.UnstructuredGrid(offset, cells, cell_type, points)
# plot the grid
grid.plot(show_edges=True)
| [
"banesullivan@gmail.com"
] | banesullivan@gmail.com |
d5e3c66f74c67a4bacb931a51ca8a34d93dee778 | 3700bab3cd5042401d7e1994beaeeb2e754418c8 | /tests/test.py | dd8f97f1087f7b7702d92f9e631c320a33407c62 | [] | no_license | marcwebbie/gitutoring | c536f4c712c5071674cb34a9902889c69210c369 | e19822a1be707bf0df39d4984d5d5385561c8346 | refs/heads/master | 2020-04-09T16:51:12.420916 | 2015-02-13T15:05:03 | 2015-02-13T15:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | import unittest
class Tests(unittest.TestCase):
def test_equals(self):
self.assertEqual(1, 1)
if __name__ == "__main__":
unittest.main()
| [
"marcwebbie@gmail.com"
] | marcwebbie@gmail.com |
2ecc2e7c4ea151e9610f4e014c3d0c5552911b11 | 12a42054b156383ebbe3ccc5de4150633c66da5d | /problems/reverse-string/solution.py | babfb9c48e6d23aec4d29acd74c407db33e2306d | [] | no_license | cfoust/leetcode-problems | 93c33029f74f32c64caf8294292226d199d6e272 | f5ad7866906d0a2cf2250e5972ce910bf35ce526 | refs/heads/master | 2020-03-16T23:05:45.123781 | 2018-05-11T16:41:09 | 2018-05-11T16:41:09 | 133,064,772 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | class Solution:
def reverseString(self, s):
"""
:type s: str
:rtype: str
"""
"""
| [
"cfoust@sqweebloid.com"
] | cfoust@sqweebloid.com |
ed47648682d8b2f8a6e38f5e7be0c10073d25ad8 | 1df4920c911b18e8d445375442c3c4023dffcc5e | /securityvaluator/dcf_calculator/migrations/0007_auto_20200713_1226.py | e788f881102416cee0102502378eab4a42a9e57d | [] | no_license | sebkeil/SecurityValuator | 0dcfc6af3acc88f47f0c06116de6232909949148 | 332e1e42ebd79b6892d9706c6a8739276308bcb6 | refs/heads/master | 2022-12-06T03:58:04.344769 | 2020-08-30T12:09:54 | 2020-08-30T12:09:54 | 276,162,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # Generated by Django 3.0.7 on 2020-07-13 10:26
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `enterprise` model.
    dependencies = [
        ('dcf_calculator', '0006_auto_20200713_1222'),
    ]
    operations = [
        migrations.AlterField(
            model_name='enterprise',
            name='date_added',
            # NOTE(review): the default is the timestamp captured when the
            # migration was generated, not "now" at insert time — the usual
            # artefact of a model default written as datetime.now() (called)
            # instead of passed as a callable.
            field=models.DateTimeField(default=datetime.datetime(2020, 7, 13, 12, 26, 42, 629625)),
        ),
        migrations.AlterField(
            model_name='enterprise',
            name='terminal_value',
            field=models.FloatField(null=True),
        ),
        migrations.AlterField(
            model_name='enterprise',
            name='todays_value',
            field=models.FloatField(null=True),
        ),
    ]
| [
"basti.keil@hotmail.de"
] | basti.keil@hotmail.de |
be675416e62e50643e048386160971d3cea63073 | a533010ba7e74422c5c7c0193ea2d880e427cb9d | /Python_auto_operation/project/fabric/demo3.py | 226da2675215a0a715682dd96c5b0dc473fe8786 | [] | no_license | gateray/learning_python | 727b3effe4875f27c86c3e5e66655905f3d5d681 | bc08a58f3a5c1f1db884398efa9d27834514199f | refs/heads/master | 2021-01-19T06:31:01.616421 | 2016-06-30T07:39:23 | 2016-06-30T07:39:23 | 62,290,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/usr/bin/env python
# coding: utf-8
from fabric.api import *
# Fabric connection settings: a single Vagrant box reached over SSH with
# the default vagrant/vagrant credentials.
env.hosts = ["192.168.10.31",]
env.user = "vagrant"
env.password = "vagrant"
@task
def hello():
    # warn_only: a non-zero exit status is reported instead of aborting the run.
    with settings(warn_only=True):
        # `|| /bin/true` masks grep's exit code 1 (no matching line), so the
        # command always "succeeds" and only the count is of interest.
        rs = local("grep -c '^fuck' /etc/group || /bin/true")
        print(rs.stdout)
| [
"gateray.example.com"
] | gateray.example.com |
8e1625c024a45144ae62553096d9c7519a159d0c | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/common/arena_achievements.py | cd349250c4f78ed36aafcfc5857179d805c953f1 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 7,052 | py | # 2017.08.29 21:52:23 Střední Evropa (letní čas)
# Embedded file name: scripts/common/arena_achievements.py
from dossiers2.custom.records import RECORD_DB_IDS
from arena_bonus_type_caps import ARENA_BONUS_TYPE_CAPS as BONUS_CAPS
# Names of every battle achievement handled by this module.
ACHIEVEMENTS = ('warrior', 'invader', 'sniper', 'sniper2', 'mainGun', 'defender', 'steelwall', 'supporter', 'scout', 'evileye', 'medalWittmann', 'medalOrlik', 'medalOskin', 'medalHalonen', 'medalBurda', 'medalBillotte', 'medalKolobanov', 'medalFadin', 'medalRadleyWalters', 'medalLafayettePool', 'medalLehvaslaiho', 'medalNikolas', 'medalPascucci', 'medalDumitru', 'medalBrunoPietro', 'medalTarczay', 'heroesOfRassenay', 'medalDeLanglade', 'medalTamadaYoshio', 'raider', 'kamikaze', 'huntsman', 'bombardier', 'luckyDevil', 'ironMan', 'sturdy', 'alaric', 'lumberjack', 'medalBrothersInArms', 'medalCrucialContribution', 'armoredFist', 'kingOfTheHill', 'willToWinSpirit', 'shoulderToShoulder', 'aloneInTheField', 'fallenFlags', 'effectiveSupport', 'falloutDieHard', 'stormLord', 'winnerLaurels', 'predator', 'unreachable', 'champion', 'bannerman', 'markIProtector', 'markIBaseProtector', 'markIBomberman', 'markIRepairer')
# Dossier record ids of the achievements that carry a reward: regular
# 'achievements' records plus the fallout-specific ones.
ACHIEVEMENTS_WITH_REWARD = set([ RECORD_DB_IDS['achievements', name] for name in ('warrior', 'invader', 'sniper', 'sniper2', 'mainGun', 'defender', 'steelwall', 'supporter', 'scout', 'evileye', 'heroesOfRassenay', 'medalFadin', 'medalNikolas', 'medalPascucci', 'medalLehvaslaiho', 'medalRadleyWalters', 'medalHalonen', 'medalDumitru', 'medalDeLanglade', 'medalOrlik', 'medalOskin', 'medalLafayettePool', 'medalBurda', 'medalTamadaYoshio', 'medalBrothersInArms', 'medalCrucialContribution', 'huntsman', 'medalStark', 'medalGore') ] + [ RECORD_DB_IDS['falloutAchievements', name] for name in ('shoulderToShoulder', 'falloutDieHard', 'champion', 'bannerman') ])
# In-battle progression series and a name -> index lookup built from it.
INBATTLE_SERIES = ('sniper', 'killing', 'piercing')
INBATTLE_SERIES_INDICES = dict(((x[1], x[0]) for x in enumerate(INBATTLE_SERIES)))
# Conditions shared by the Billotte family of medals (see the table below).
_BILLOTTE_CMN_CNDS = {'hpPercentage': 20,
 'minCrits': 5}
# Per-medal award thresholds; getAchievementCondition() selects between this
# table and the ACHIEVEMENT_CONDITIONS_EXT overrides further below.
# A 'maxKills' of 255 effectively means "no upper bound".
ACHIEVEMENT_CONDITIONS = {'warrior': {'minFrags': 6},
 'invader': {'minCapturePts': 80},
 'sniper': {'minAccuracy': 0.85,
 'minShots': 10,
 'minDamage': 1000},
 'sniper2': {'minAccuracy': 0.85,
 'minDamage': 1000,
 'minHitsWithDamagePercent': 0.8,
 'sniperDistance': 300.0,
 'minShots': 8},
 'mainGun': {'minDamage': 1000,
 'minDamageToTotalHealthRatio': 0.2},
 'defender': {'minPoints': 70},
 'steelwall': {'minDamage': 1000,
 'minHits': 11},
 'supporter': {'minAssists': 6},
 'scout': {'minDetections': 9},
 'evileye': {'minAssists': 6},
 'medalRadleyWalters': {'minLevel': 5,
 'minKills': 8,
 'maxKills': 9},
 'medalLafayettePool': {'minLevel': 5,
 'minKills': 10,
 'maxKills': 13},
 'heroesOfRassenay': {'minKills': 14,
 'maxKills': 255},
 'medalOrlik': {'minVictimLevelDelta': 1,
 'minKills': 2},
 'medalLehvaslaiho': {'minVictimLevelDelta': 1,
 'minKills': 2,
 'maxKills': 2},
 'medalOskin': {'minVictimLevelDelta': 1,
 'minKills': 3,
 'maxKills': 3},
 'medalNikolas': {'minVictimLevelDelta': 1,
 'minKills': 4,
 'maxKills': 255},
 'medalHalonen': {'minVictimLevelDelta': 2,
 'minKills': 2},
 'medalPascucci': {'minKills': 2,
 'maxKills': 2},
 'medalDumitru': {'minKills': 3,
 'maxKills': 255},
 'medalBurda': {'minVictimLevelDelta': 1,
 'minKills': 3,
 'maxKills': 255},
 'medalBillotte': {'cmn_cnds': _BILLOTTE_CMN_CNDS,
 'minKills': 2,
 'maxKills': 2},
 'medalBrunoPietro': {'cmn_cnds': _BILLOTTE_CMN_CNDS,
 'minKills': 3,
 'maxKills': 4},
 'medalTarczay': {'cmn_cnds': _BILLOTTE_CMN_CNDS,
 'minKills': 5,
 'maxKills': 255},
 'medalKolobanov': {'teamDiff': 5},
 'medalBrothersInArms': {'minKills': 3},
 'medalCrucialContribution': {'minKills': 12},
 'medalDeLanglade': {'minKills': 4},
 'medalTamadaYoshio': {'minKills': 2,
 'maxKills': 255,
 'minVictimLevelDelta': 1},
 'kamikaze': {'levelDelta': 1},
 'huntsman': {'minKills': 3},
 'bombardier': {'minKills': 2},
 'luckyDevil': {'radius': 10.99},
 'ironMan': {'minHits': 10},
 'sturdy': {'minHealth': 10.0},
 'alaric': {'minKills': 2,
 'minMonuments': 1},
 'lumberjack': {'minKills': 3,
 'minTrees': 30},
 'wolfAmongSheep': {'minDamage': 1},
 'geniusForWar': {'minXP': 1},
 'willToWinSpirit': {'enemyCount': 3},
 'fightingReconnaissance': {'maxPosInTopDamager': 3,
 'minSpottedCount': 2},
 'monolith': {'maxSpeed_ms': 11 / 3.6},
 'medalAntiSpgFire': {'minKills': 2},
 'medalStark': {'minKills': 2,
 'hits': 2},
 'medalGore': {'minDamageRate': 8,
 'minDamage': 2000},
 'medalCoolBlood': {'maxDistance': 100,
 'minKills': 2},
 'promisingFighter': {'maxPosInTopXPGainer': 3},
 'heavyFire': {'maxPosInTopDamager': 3},
 'fighter': {'minKills': 4,
 'maxKills': 5},
 'duelist': {'minKills': 2},
 'bonecrusher': {'minCrits': 5},
 'charmed': {'minVehs': 4},
 'tacticalAdvantage': {'maxLevel': 7},
 'secretOperations': {'minGroupLen': 2},
 'shoulderToShoulder': {'minKills': 12,
 'minDamageDealt': 30000},
 'aloneInTheField': {'minDamageDealt': 10000},
 'fallenFlags': {'minFlags': 4},
 'effectiveSupport': {'minDamageDealt': 2000},
 'falloutDieHard': {'minKills': 5,
 'minDamageDealt': 10000},
 'predator': {'minKills': 5},
 'champion': {'minKills': 5,
 'minDamageDealt': 10000,
 'minFlagsCapture': 3},
 'bannerman': {'minFlagsCapture': 4}}
# Overrides used on arenas whose bonus type has the
# ACHIEVEMENT_CONDITIONS_EXT cap (see getAchievementCondition below);
# medals missing here fall back to ACHIEVEMENT_CONDITIONS.
ACHIEVEMENT_CONDITIONS_EXT = {'warrior': {'minFrags': 8},
 'heroesOfRassenay': {'minKills': 21,
 'maxKills': 255},
 'medalLafayettePool': {'minLevel': 5,
 'minKills': 13,
 'maxKills': 20},
 'medalRadleyWalters': {'minLevel': 5,
 'minKills': 10,
 'maxKills': 12}}
def getAchievementCondition(arenaBonusType, medal):
    """
    Returns condition for medal depending on arena bonus type.
    :param arenaBonusType: arena bonus type.
    :param medal: medal name.
    :return: dict with medal conditions. See ACHIEVEMENT_CONDITIONS and other conditions.
    """
    useExtended = BONUS_CAPS.checkAny(arenaBonusType, BONUS_CAPS.ACHIEVEMENT_CONDITIONS_EXT)
    baseConditions = ACHIEVEMENT_CONDITIONS[medal]
    if not useExtended:
        return baseConditions
    # Extended arenas may override a medal's thresholds; fall back otherwise.
    return ACHIEVEMENT_CONDITIONS_EXT.get(medal, baseConditions)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\arena_achievements.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:23 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
f6d07372b5ed3cb9a938c15924c6216a4cb21aaf | 0ad2458c85ce545b1d3a4b75dabe2c94ed8c2518 | /supervised_learning/0x06-keras/3-one_hot.py | dc94f663cdf66ed4f5bb778f3bb186f090a3c69c | [] | no_license | felipeserna/holbertonschool-machine_learning | fc82eda9ee4cb8765ad0ffb5fa923407b200480d | 161e33b23d398d7d01ad0d7740b78dda3f27e787 | refs/heads/master | 2023-07-06T20:26:12.892875 | 2021-08-17T17:03:30 | 2021-08-17T17:03:30 | 317,288,341 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | #!/usr/bin/env python3
"""
Converts a label vector into a one-hot matrix
"""
import tensorflow.keras as K
def one_hot(labels, classes=None):
    """Convert a vector of integer labels into its one-hot matrix encoding.

    The last dimension of the returned matrix equals the number of classes
    (inferred from `labels` when `classes` is None).
    Returns the one-hot matrix.
    """
    return K.utils.to_categorical(labels, classes)
| [
"feserna86@gmail.com"
] | feserna86@gmail.com |
e4bc81179549efc87d977cdf68ac60e73d92cfc3 | 00c7bd96f1afab807746f1f7f013d4aadc5f6a6e | /sakura/client/apiobject/dataflows.py | 8546cc0d35f83779ab969d2b3a8a89d4d9453447 | [] | no_license | sakura-team/sakura | 350ae27bdf5c3e7c338c04ec33fb50f4cdc7bfb4 | 306bfe82ffd6b204b0b574bb7f75b35712a3202f | refs/heads/master | 2021-06-02T01:30:14.294572 | 2021-03-04T10:16:44 | 2021-03-04T10:16:44 | 61,307,818 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,014 | py | from sakura.common.errors import APIObjectDeniedError
from sakura.client.apiobject.operators import APIOperator
from sakura.client.apiobject.links import APILink
from sakura.client.apiobject.base import APIObjectBase, APIObjectRegistryClass
from sakura.client.apiobject.grants import APIGrants
from sakura.common.streams import LOCAL_STREAMS
from sakura.common.tools import create_names_dict, snakecase
class APIDataflowOperatorsDict:
    # Factory: __new__ returns an instance of a registry class built at call
    # time, closing over `remote_api` and `dataflow_id`.
    def __new__(cls, remote_api, dataflow_id, d):
        class APIDataflowOperatorsDictImpl(APIObjectRegistryClass(d)):
            """Sakura operators registry for this dataflow"""
            def create(self, op_class):
                """Create a new operator of specified class"""
                # Ask the hub to instantiate the operator, then wrap the
                # resulting remote object in a client-side API object.
                op_id = remote_api.operators.create(dataflow_id, op_class.id, local_streams=LOCAL_STREAMS)
                op_info = remote_api.operators[op_id].info()
                return APIOperator(remote_api, op_info)
        return APIDataflowOperatorsDictImpl()
class APIDataflowLinksDict:
    # Factory: builds a plain (read-only) registry of this dataflow's links.
    def __new__(cls, remote_api, d):
        class APIDataflowLinksDictImpl(APIObjectRegistryClass(d)):
            """Sakura links registry for this dataflow"""
            pass
        return APIDataflowLinksDictImpl()
class APIDataflow:
    # IDs of dataflows deleted through this client; wrappers for them become
    # invalid and raise ReferenceError on any further remote access.
    _deleted = set()
    def __new__(cls, remote_api, info):
        dataflow_id = info['dataflow_id']
        remote_obj = remote_api.dataflows[dataflow_id]
        def get_remote_obj():
            # Guard every remote access against use-after-delete.
            if dataflow_id in APIDataflow._deleted:
                raise ReferenceError('This dataflow is no longer valid! (was deleted)')
            else:
                return remote_obj
        class APIDataflowImpl(APIObjectBase):
            __doc__ = "Sakura dataflow: " + info['name']
            @property
            def operators(self):
                # Local `info` shadows the construction-time `info` closure:
                # property reads always use fresh (buffered) remote info.
                info = self.__buffered_get_info__()
                if 'operators' not in info:
                    raise APIObjectDeniedError('access denied')
                d = create_names_dict(
                    ((op_info['cls_name'], APIOperator(remote_api, op_info)) \
                        for op_info in info['operators']),
                    name_format = snakecase
                )
                return APIDataflowOperatorsDict(remote_api, self.dataflow_id, d)
            @property
            def links(self):
                info = self.__buffered_get_info__()
                if 'links' not in info:
                    raise APIObjectDeniedError('access denied')
                d = { link_info['link_id']: APILink(remote_api, link_info) \
                        for link_info in info['links'] }
                return APIDataflowLinksDict(remote_api, d)
            @property
            def grants(self):
                return APIGrants(get_remote_obj())
            def delete(self):
                """Delete this dataflow"""
                get_remote_obj().delete()
                APIDataflow._deleted.add(dataflow_id)
            def __get_remote_info__(self):
                # Normalise the hub's 'op_instances' key to 'operators'.
                info = get_remote_obj().info()
                if 'op_instances' in info:
                    info['operators'] = info['op_instances']
                    del info['op_instances']
                return info
        return APIDataflowImpl()
class APIDataflowDict:
    # Factory: registry of all dataflows plus a create() helper.
    def __new__(cls, remote_api, d):
        class APIDataflowDictImpl(APIObjectRegistryClass(d)):
            """Sakura dataflows registry"""
            def create(self, name):
                """Create a new dataflow"""
                # Create on the hub, then wrap the fresh dataflow's info.
                dataflow_id = remote_api.dataflows.create(name = name)
                info = remote_api.dataflows[dataflow_id].info()
                return APIDataflow(remote_api, info)
        return APIDataflowDictImpl()
def get_dataflows(remote_api):
    # Build a snake_case-named registry of every dataflow listed by the hub.
    d = create_names_dict(
        ((remote_dataflow_info['name'], APIDataflow(remote_api, remote_dataflow_info)) \
            for remote_dataflow_info in remote_api.dataflows.list()),
        name_format = snakecase
    )
    return APIDataflowDict(remote_api, d)
| [
"etienne.duble@imag.fr"
] | etienne.duble@imag.fr |
33323f29d8530c52afed35824ef41977df17722d | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Generators/MadGraphModels/python/models/lrsm_1_3_2_UFO/lorentz.py | ae8a6198eaa90e43a5839f0ef5711ff412d031ad | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,567 | py | # This file was automatically created by FeynRules 2.1
# Mathematica version: 8.0 for Mac OS X x86 (64-bit) (November 6, 2010)
# Date: Tue 2 Dec 2014 07:52:51
from object_library import all_lorentz, Lorentz
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# Optional model-specific form factors; absent in models that define none.
try:
    import form_factors as ForFac
except ImportError:
    pass
# Lorentz structures of the model's vertices (FeynRules/UFO output).  Per the
# UFO convention the `spins` lists are 2S+1 values (1 = scalar, 2 = fermion,
# 3 = vector) and `structure` is the analytic tensor in UFO index notation.
FFS1 = Lorentz(name = 'FFS1',
               spins = [ 2, 2, 1 ],
               structure = 'ProjM(2,1)')
FFS2 = Lorentz(name = 'FFS2',
               spins = [ 2, 2, 1 ],
               structure = 'ProjM(2,1) - ProjP(2,1)')
FFS3 = Lorentz(name = 'FFS3',
               spins = [ 2, 2, 1 ],
               structure = 'ProjP(2,1)')
FFS4 = Lorentz(name = 'FFS4',
               spins = [ 2, 2, 1 ],
               structure = 'ProjM(2,1) + ProjP(2,1)')
FFV1 = Lorentz(name = 'FFV1',
               spins = [ 2, 2, 3 ],
               structure = 'Gamma(3,2,1)')
FFV2 = Lorentz(name = 'FFV2',
               spins = [ 2, 2, 3 ],
               structure = 'Gamma(3,2,-1)*ProjM(-1,1)')
FFV3 = Lorentz(name = 'FFV3',
               spins = [ 2, 2, 3 ],
               structure = 'Gamma(3,2,-1)*ProjP(-1,1)')
VSS1 = Lorentz(name = 'VSS1',
               spins = [ 3, 1, 1 ],
               structure = 'P(1,2) - P(1,3)')
VVS1 = Lorentz(name = 'VVS1',
               spins = [ 3, 3, 1 ],
               structure = 'Metric(1,2)')
VVV1 = Lorentz(name = 'VVV1',
               spins = [ 3, 3, 3 ],
               structure = 'P(3,1)*Metric(1,2) - P(3,2)*Metric(1,2) - P(2,1)*Metric(1,3) + P(2,3)*Metric(1,3) + P(1,2)*Metric(2,3) - P(1,3)*Metric(2,3)')
VVSS1 = Lorentz(name = 'VVSS1',
                spins = [ 3, 3, 1, 1 ],
                structure = 'Metric(1,2)')
VVVV1 = Lorentz(name = 'VVVV1',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,4)*Metric(2,3) - Metric(1,3)*Metric(2,4)')
VVVV2 = Lorentz(name = 'VVVV2',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,4)*Metric(2,3) + Metric(1,3)*Metric(2,4) - 2*Metric(1,2)*Metric(3,4)')
VVVV3 = Lorentz(name = 'VVVV3',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,4)*Metric(2,3) - Metric(1,2)*Metric(3,4)')
VVVV4 = Lorentz(name = 'VVVV4',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,3)*Metric(2,4) - Metric(1,2)*Metric(3,4)')
VVVV5 = Lorentz(name = 'VVVV5',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,4)*Metric(2,3) - (Metric(1,3)*Metric(2,4))/2. - (Metric(1,2)*Metric(3,4))/2.')
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
c263944a2b99249aa586642cfa89802c5834cf23 | 8d5fa94541b9672366da110fef6e0fa582e61928 | /setup.py | 3aa8feec7f6ee8b708cf0edc85f4f65606b94979 | [] | no_license | CxAalto/databinner | 99bcd1a57134a65ae060801e9febd9469a4c3f89 | f83e14cfb2e70d6543b7d5282abf4cf4dd43a6d7 | refs/heads/master | 2020-07-04T14:55:31.399244 | 2019-08-14T11:34:59 | 2019-08-14T11:37:31 | 202,317,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import setuptools
# The long description shown on PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="databinner",
    version="0.0.1",
    # NOTE(review): author fields still carry the setuptools template
    # placeholders — fill in before publishing.
    author="Example Author",
    author_email="author@example.com",
    description="Powerful correct binner for data",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/CxAalto/databinner",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 2",
        "Operating System :: OS Independent",
    ],
)
| [
"rkd@zgib.net"
] | rkd@zgib.net |
ebfc125778f8db33c3cfbbe6c0f8df5490125943 | 34b09bc83e5726fccb524a93cf2742f5aeadedef | /Baekjoon/Greedy Algorithm/1931_test.py | 3d033d82ed322486edb699f104ca47352ab38d4d | [] | no_license | mjson1954/WIC | 57eb20ffe7aaf8695d679c893efacdeede573e72 | 670112209aacd274d09f6e9a89d948120486bfc8 | refs/heads/master | 2023-03-20T00:57:19.740025 | 2021-03-05T10:52:51 | 2021-03-05T10:52:51 | 289,925,829 | 0 | 0 | null | 2021-02-21T02:16:11 | 2020-08-24T12:46:58 | Python | UTF-8 | Python | false | false | 719 | py | N=int(input())
# Baekjoon 1931 (meeting-room scheduling) — experimental attempt; the debug
# prints suggest work in progress.  `N` is read on the preceding line.
meetingInfo=[]
# NOTE(review): `time` is never used after this point.
time=[]
for _ in range(N):
    start, end=map(int, input().split())
    meetingInfo.append((start, end))
# Sort meetings by their end time (the classic greedy ordering).
meetingInfo.sort(key=lambda element:element[1])
print(meetingInfo)
meetingPlan=[]
i=len(meetingInfo)-1
for s in range(len(meetingInfo)):
    # `x` holds the start of the previously chosen meeting; on the first
    # pass (s == 0) the short-circuit keeps the not-yet-assigned `x` unread.
    if(s>0 and meetingInfo[i][1]>=x):
        i-=1
        continue
    start=meetingInfo[i][0]
    end=meetingInfo[i][1]
    print(start, end)
    j=i-1
    # Among meetings sharing this end time, keep the latest start (the
    # shortest meeting), scanning backwards from position i.
    # NOTE(review): `j` can go negative and wrap to the list tail via
    # Python's negative indexing — likely unintended.
    for _ in range(len(meetingInfo)-1):
        if(meetingInfo[j][1]==end and meetingInfo[j][0]>start):
            start=meetingInfo[j][0]
            end=meetingInfo[j][1]
        else:
            j-=1
    meetingPlan.append((start,end))
    x=start
    i-=1
print(meetingPlan)
| [
"mjson1954@gmail.com"
] | mjson1954@gmail.com |
c5b021c3e05aa3f6c32ae4d29352136f2aaf7939 | c27862cc24513c1d1c221e07c261b9fe65a37f54 | /class_test/Class__super.py | 350312fc71bd840689e647c45a49521d235eaecd | [] | no_license | MannixZ/Mannix | ac62ef29c1dcbb513b121ad9d42db851103884fc | 7b86d0a619e0d6e3eecb94331ee60d89542b99f2 | refs/heads/master | 2020-05-26T13:27:24.979334 | 2019-05-23T14:10:47 | 2019-05-23T14:10:47 | 164,109,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | class FooParent(object):
    def __init__(self):
        # Attribute read later by FooChild.bar, proving the parent
        # initialiser actually ran.
        self.parent = 'i\'m the parent.'
        print('Parent')
    def bar(self, message):
        # Echo the message, tagged with the defining (parent) class.
        print("%s from Parent" % message)
class FooChild(FooParent):
    def __init__(self):
        # super(FooChild, self) would first locate FooChild's parent class
        # (FooParent) and bind this instance for the parent's method; here
        # the parent initialiser is invoked explicitly instead.
        FooParent.__init__(self)
        print('Child')
    def bar(self, message):
        # Equivalent alternative: super(FooChild, self).bar(message)
        FooParent.bar(self, message)
        print('Child bar function')
        print(self.parent)
if __name__ == '__main__':
    # Demo: constructing FooChild prints 'Parent' then 'Child'.
    fooChild = FooChild()
fooChild.bar('helloworld') | [
"noreply@github.com"
] | MannixZ.noreply@github.com |
1f98df99da4c02307deb4632c05ad228c271d1d1 | 62c4aad9dbdf19f8aae548bc10b3549f627200a7 | /code/tracks/tracks.py | dd7adb006b8d1c394142b47b5f6e9b9a83dc6b74 | [] | no_license | RedVis55/Railnl | 71052b452f19031c707bbb56eaa7bc7b6402a541 | 146800d6f51af06e1a1ae433a6c6e7242b9345f4 | refs/heads/master | 2021-08-30T01:10:54.142390 | 2017-12-15T13:47:00 | 2017-12-15T13:47:00 | 109,139,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | def ndict(graph,all_nodes,dict,step):
    # Frontier being collected for depth `step + 1`.
    # NOTE(review): the local `list` and the parameter `dict` shadow
    # builtins — renaming them would be a behaviour-neutral cleanup.
    list=[]
    for x in dict[step]:
        for y in graph.graph[x]:
            # Only enqueue nodes never reached at a shallower depth.
            if y not in all_nodes:
                list.append(y)
                all_nodes.append(y)
    dict[step+1]=list
    step += 1
    return all_nodes,dict,step
def pathcount(graph, node, nrange):
    """Breadth-first layering: map depth -> nodes first reached at that depth.

    Expands at most nrange - 1 frontiers starting from `node`, stopping
    early once a frontier comes up empty.  Returns the layer dict (or None
    when nrange <= 0, matching the original behaviour).
    """
    layers = {0: [node]}
    visited = [node]
    for depth in range(nrange):
        # Stop at the depth budget or once the frontier is exhausted.
        if depth == nrange - 1 or layers[depth] == []:
            return layers
        # ndict mutates `visited` and `layers` in place; its return is unused.
        ndict(graph, visited, layers, depth)
def pathmaker(graph, bfs):
    """Return [parent, child] edge pairs linking consecutive BFS layers.

    `bfs` maps depth -> list of nodes (as produced by pathcount); a pair is
    emitted whenever a node of layer i+1 is a neighbour (per `graph[parent]`)
    of a node of layer i.
    """
    pairs = []
    for depth in range(len(bfs) - 1):
        for parent in bfs[depth]:
            for child in bfs[depth + 1]:
                if child in graph[parent]:
                    pairs.append([parent, child])
    # Cleanup: the original also filled a string-concatenated accumulator
    # (parent+child) that was never returned; that dead list and the
    # misspelled local `lenght` have been removed.
    return pairs
def all_shortest_routes(graph,station):
    # BFS layering up to depth 20 from the starting station.
    bfs=pathcount(graph,station,20)
    list1=[x for x in pathmaker(graph,bfs)]
    list2=[x for x in pathmaker(graph,bfs)]
    list3=[]
    # Chain-building pass: appending to list1 while iterating it makes the
    # loop also visit (and further extend) the freshly created longer paths.
    # NOTE(review): nothing filters repeated nodes here, so on graphs with
    # cycles among the layer pairs this loop can grow without bound.
    for x in list1:
        l_index=len(x)-1
        first=x[:l_index]
        second=x[l_index:]
        for y in list2:
            # Extend with any edge pair whose head matches this path's tail.
            if y[0] == second[0]:
                new_c=first+y
                list1.append(new_c)
                #print(new_c)#x,y,first,second,
    # Keep only the chains that actually begin at `station`.
    for x in list1:
        if x[0]==station:
            list3.append(x)
    return list3
def combine_all(graph, station):
    """Concatenate shortest routes from `station` with onward routes.

    Extends every route by any route departing from its last stop, as long
    as the extension never returns to `station` and shares no stop with the
    original route; deduplicates against the accumulated result.
    """
    finallist = all_shortest_routes(graph, station)
    # Bug fix: the original called copy.copy() but `copy` is never imported
    # in this module, raising NameError at runtime; list() performs the same
    # shallow copy.
    templist = list(finallist)
    for x in finallist:
        last = x[len(x) - 1:]
        for y in all_shortest_routes(graph, last[0]):
            # Reject extensions that pass through `station` again or overlap
            # the existing route.
            if y[0] != station and y[1] != station and len([l for l in y[1:] if l in x]) == 0:
                new = x + y[1:]
                if new not in templist:
                    templist.append(new)
    return templist
# def random_track(start,connectionlist):
# tracks= self.all_shortest_routes(start)
# track_index = len(tracks)
# track_random = tracks[randint(0,track_index)]
# track= track_random
# tracklist=[]
# for i in range(len(track)):
# if i+1<len(track):
# tracklist.append([track[i],track[i+1]])
# track_w=[[[x[0],x[1]],float(x[2])] for x in connectionlist if [x[0],x[1]] in tracklist or [x[1],x[0]] in tracklist]
# nontracklist=[]
# for x in connectionlist:
# if [x[0],x[1]] not in track and [x[1],x[0]] not in tracklist:
# nontracklist.append([[x[0],x[1]],x[2]])
# time= sum([float(x[1]) for x in track_w])
# final_track=track_w,time
# return tracklist,nontracklist,track_w,time,final_track
# def unique(dict):
# apct=[] #all possible critical tracks
# uct=[] #unique critical tracks
# for x in dict:
# if dict[x]['importance']=='critical':
# for y in dict[x]['neighbours']:
# n_input=[[x,y[0]],float(y[1])]
# i_input =[[y[0],x],float(y[1])]
# apct.append(i_input)
# apct.append(n_input)
# if i_input not in uct:
# uct.append(n_input)
# return apct,uct
# def score(tracks,apct,uct):
# bkv=[] #bereden kritieke verbindingen
# min=0
# minlist=[]
# t= len(tracks)
# for i in range(t):
# min += tracks[i][1]
# minlist.append(tracks[i][1])
# track= tracks[i][0]
# clist=[x for x in track if x in apct]
# for x in clist:
# inverse_x = [[x[0][1],x[0][0]],x[1]]
# if x not in bkv:
# if inverse_x not in bkv:
# bkv.append(x)
# p= len(bkv)/len(uct)
# S= p*10000 - (t*20 + min/100000)
# # print(bkv)
# return S,len(bkv),min
# ( [[['Assen', 'Zwolle'], 40.0], [['Groningen', 'Assen'], 17.0],
# [['Utrecht Centraal', 'Amersfoort'], 14.0], [['Utrecht Centraal', 'Schiphol Airport'], 33.0],
# [['Zwolle', 'Amersfoort'], 35.0]]
# , 139.0)
| [
"you@example.com"
] | you@example.com |
ba4d7fbdc7bba2a8bf4297b5dea97f3c05c0c774 | a1ec2007ec1ef60a70e16a61398d987983f27b36 | /ci/appveyor-download.py | 8fe6b9f1b2123b5276cef313124468c196184391 | [] | no_license | dohlee/prism | a982404f78b7469a6942fbff6f22295692c0bbb4 | 4aaf9aca7f59855112d72220166850681ddd745f | refs/heads/master | 2021-06-30T10:01:21.444977 | 2021-06-22T14:26:22 | 2021-06-22T14:26:22 | 167,760,100 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,816 | py | #!/usr/bin/env python
"""
Use the AppVeyor API to download Windows artifacts.
Taken from: https://bitbucket.org/ned/coveragepy/src/tip/ci/download_appveyor.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""
from __future__ import unicode_literals
import argparse
import os
import zipfile
import requests
def make_auth_headers():
    """Build the Authorization header for the AppVeyor REST API.

    The bearer token is read from ~/.appveyor.token; a RuntimeError with
    setup instructions is raised when that file does not exist.
    """
    token_path = os.path.expanduser("~/.appveyor.token")
    if not os.path.exists(token_path):
        raise RuntimeError(
            "Please create a file named `.appveyor.token` in your home directory. "
            "You can get the token from https://ci.appveyor.com/api-token"
        )
    with open(token_path) as token_file:
        token = token_file.read().strip()
    return {
        'Authorization': 'Bearer {}'.format(token),
    }
def download_latest_artifacts(account_project, build_id):
    """Download all the artifacts from the latest build."""
    # Without an explicit build id the API returns the project's latest build.
    if build_id is None:
        url = "https://ci.appveyor.com/api/projects/{}".format(account_project)
    else:
        url = "https://ci.appveyor.com/api/projects/{}/build/{}".format(account_project, build_id)
    build = requests.get(url, headers=make_auth_headers()).json()
    jobs = build['build']['jobs']
    print(u"Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs)))
    for job in jobs:
        name = job['name']
        print(u"  {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job))
        # Each job exposes its own artifact listing endpoint.
        url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts".format(job['jobId'])
        response = requests.get(url, headers=make_auth_headers())
        artifacts = response.json()
        for artifact in artifacts:
            is_zip = artifact['type'] == "Zip"
            filename = artifact['fileName']
            print(u"    {0}, {1} bytes".format(filename, artifact['size']))
            url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts/{}".format(job['jobId'], filename)
            download_url(url, filename, make_auth_headers())
            # Zip artifacts are expanded in place and the archive discarded.
            if is_zip:
                unpack_zipfile(filename)
                os.remove(filename)
def ensure_dirs(filename):
    """Create the parent directory chain for `filename` when missing.

    A bare filename (no directory component) is a no-op.
    """
    parent = os.path.dirname(filename)
    if not parent:
        return
    if not os.path.exists(parent):
        os.makedirs(parent)
def download_url(url, filename, headers):
    """Download a file from `url` to `filename`."""
    ensure_dirs(filename)
    # stream=True fetches the body in chunks instead of buffering it whole.
    response = requests.get(url, headers=headers, stream=True)
    if response.status_code == 200:
        with open(filename, 'wb') as f:
            for chunk in response.iter_content(16 * 1024):
                f.write(chunk)
    else:
        # Non-200 responses are reported but deliberately not raised.
        print(u"  Error downloading {}: {}".format(url, response))
def unpack_zipfile(filename):
    """Extract every entry of the zip archive `filename` into the
    current working directory, using the names stored in the zip."""
    with open(filename, 'rb') as archive:
        bundle = zipfile.ZipFile(archive)
        for member in bundle.namelist():
            print(u"  extracting {}".format(member))
            ensure_dirs(member)
            bundle.extract(member)
# Command-line interface: the project id defaults to dohlee/python-prism and
# the build id is optional (the latest build is used when omitted).
parser = argparse.ArgumentParser(description='Download artifacts from AppVeyor.')
parser.add_argument('--id',
                    metavar='PROJECT_ID',
                    default='dohlee/python-prism',
                    help='Project ID in AppVeyor.')
parser.add_argument('build',
                    nargs='?',
                    metavar='BUILD_ID',
                    help='Build ID in AppVeyor. Eg: master-123')
if __name__ == "__main__":
    # import logging
    # logging.basicConfig(level="DEBUG")
    args = parser.parse_args()
    download_latest_artifacts(args.id, args.build)
| [
"apap950419@gmail.com"
] | apap950419@gmail.com |
41229ed84b5892f2488ac610d6c66c31bf9f8354 | 47ea3cdb8e9b52fe465dbe16692a4672f48d305e | /IndexTags.py | dcc5320481a3f9db963fadb6071e2bab35b8f0f6 | [] | no_license | venkatram64/mypython | 273a3cf604164756a6e8822b9c7558390edc3b3c | 88d1da892a753b02e4accb77d5abf890acb45b42 | refs/heads/master | 2020-06-04T18:58:31.551952 | 2019-08-19T17:00:18 | 2019-08-19T17:00:18 | 192,154,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import csv
from collections import deque
import elasticsearch
from elasticsearch import helpers
def readMovies():
    """Build a movieId -> title lookup from movies.csv.

    Fix: the file handle is now closed deterministically via a context
    manager (the original left it open until garbage collection).
    """
    titleLookup = {}
    with open('movies.csv', 'r', encoding='utf8', errors='ignore') as csvfile:
        reader = csv.DictReader(csvfile)
        for movie in reader:
            titleLookup[movie['movieId']] = movie['title']
    return titleLookup
def readTags():
    """Yield one Elasticsearch 'tag' document per row of tags.csv.

    Each document carries the user id, movie id, the movie title (resolved
    through readMovies()), the free-text tag and its timestamp.  Fix: the
    csv handle is now closed deterministically when the generator finishes
    (the original left it to garbage collection).
    """
    with open('tags.csv', 'r', encoding='utf8', errors='ignore') as csvfile:
        titleLookup = readMovies()
        reader = csv.DictReader(csvfile)
        for line in reader:
            tag = {}
            tag['user_id'] = int(line['userId'])
            tag['movie_id'] = int(line['movieId'])
            tag['title'] = titleLookup[line['movieId']]
            tag['tag'] = line['tag']
            tag['timestamp'] = int(line['timestamp'])
            yield tag
# Rebuild the "tags" index from scratch: delete it (ignoring a 404 when it
# does not exist yet), then bulk-index every tag document.  Draining the
# parallel_bulk generator through a zero-length deque runs it for its side
# effects without accumulating results in memory.
es = elasticsearch.Elasticsearch()
es.indices.delete(index="tags",ignore=404)
deque(helpers.parallel_bulk(es,readTags(),index="tags",doc_type="tag"), maxlen=0)
es.indices.refresh() | [
"venkat.veerareddy@hotmail.com"
] | venkat.veerareddy@hotmail.com |
ab657bf9b3b2b1481da33fed2db02f6a05cf38a6 | f8826a479f2b9d2f28993ceea7a7d0e3847aaf3d | /apps/backups/__init__.py | 09ab7da2bd3ddc68a6279132c0dfadfc27c14858 | [] | no_license | icomms/wqmanager | bec6792ada11af0ff55dc54fd9b9ba49242313b7 | f683b363443e1c0be150656fd165e07a75693f55 | refs/heads/master | 2021-01-20T11:59:42.299351 | 2012-02-20T15:28:40 | 2012-02-20T15:28:40 | 2,154,449 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | import logging
from datetime import datetime, timedelta
from django.http import HttpResponse
from receiver.submitresponse import SubmitResponse
SUCCESSFUL_BACKUP = "Successful backup"
def backup_response(way_handled, additional_params):
    '''Return a custom http response associated with the handling
    of the xform, in this case as a valid backup file.

    Implicitly returns None (after logging, where applicable) when the
    submission does not carry exactly one attachment or when anything
    goes wrong while building the reply.
    '''
    try:
        from backups.models import Backup
        # Backups should only ever be posting as a single file
        # We don't know what it means if they're not
        if way_handled.submission.attachments.count() == 1:
            attachment = way_handled.submission.attachments.all()[0]
            backup = Backup.objects.get(attachment=attachment)
            response = SubmitResponse(status_code=200,
                                      submit_id=way_handled.submission.id,
                                      or_status_code=2000,
                                      or_status=SUCCESSFUL_BACKUP,
                                      **{"BackupId": backup.id })
            return response.to_response()
    # Python 2 except syntax — this module predates Python 3.
    except Exception, e:
        logging.error("Problem in properly responding to backup handling of %s: %s" % \
                      (way_handled, e.message))
| [
"michael.champanis@gmail.com"
] | michael.champanis@gmail.com |
0679ab4a0de0ba29595338edde182a939c1b9a86 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part009666.py | 0f5125e70f78994d56b011c7629e1331512dae9e | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher95617(CommutativeMatcher):
    # Auto-generated many-to-one matcher (note the numeric suffix and the
    # "State 95616" marker below) — not meant to be edited by hand.
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.1.4.1.0', 1, 1, None), Mul),
            (VariableWithCount('i3.1.4.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily created singleton instance.
        if CommutativeMatcher95617._instance is None:
            CommutativeMatcher95617._instance = CommutativeMatcher95617()
        return CommutativeMatcher95617._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 95616
        # Generator that yields nothing: `return` exits before the
        # unreachable `yield`, which only marks the function as a generator.
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
0dcec8b7600039efd135080ff9be3d134d240dda | f5dbf8b9fc7a67167a966ad842999c5ec41d2363 | /app/migrations/0113_auto_20160414_1046.py | 7e29fee06a8521ce7f18781ed1dafdd24b045658 | [] | no_license | super0605/cogofly-v1 | 324ead9a50eaeea370bf40e6f37ef1372b8990fe | dee0f5db693eb079718b23099992fba3acf3e2dd | refs/heads/master | 2022-11-27T12:16:30.312089 | 2019-10-11T20:35:09 | 2019-10-11T20:35:09 | 214,522,983 | 0 | 0 | null | 2022-11-22T00:57:28 | 2019-10-11T20:25:01 | JavaScript | UTF-8 | Python | false | false | 910 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0112_taggooglemapstraduit_tag_google_maps'),
]
operations = [
migrations.RemoveField(
model_name='taggooglemaps',
name='tagbase_ptr',
),
migrations.RemoveField(
model_name='taggooglemapstraduit',
name='langue',
),
migrations.RemoveField(
model_name='taggooglemapstraduit',
name='tag_google_maps',
),
migrations.RemoveField(
model_name='taggooglemapstraduit',
name='taggooglemaps_ptr',
),
migrations.DeleteModel(
name='TagGoogleMaps',
),
migrations.DeleteModel(
name='TagGoogleMapsTraduit',
),
]
| [
"dream.dev1025@gmail.com"
] | dream.dev1025@gmail.com |
dcae28ce5c5e009a79ff5e917a4afd7e9a86252c | ccc3aae73d0af165ff46ad746346266f3d3b28bc | /jupyter-u07157121/Quiz_Python_20190309.git/Quiz_9.py | c13d98b1245b00ac31acd1ea2065211cd02f0e12 | [] | no_license | oilmcut2019/final-assign | bc016cf1d88d15772b47a80e9e9b65e15e3de599 | c93cd80463deff2419e7b2e3fd049b318d88ef50 | refs/heads/master | 2020-07-09T06:24:32.932035 | 2019-08-23T12:52:08 | 2019-08-23T12:52:08 | 203,905,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
a=int(input())
d=1
s=0
p=0
x=100
while x>0:
a-=15
if a>0:
s=1
p=a
a-=30
if a>0:
s=2
p=a
a-=25
if a>0:
p=a
d+=1
s=3
else:
print(d,"_",s,"_",p)
x=0
else:
print(d,"_",s,"_",p)
x=0
else:
print(d,"_",s,"_",p)
x=0
# In[ ]:
| [
"07158031@o365.mcut.edu.tw"
] | 07158031@o365.mcut.edu.tw |
549fd71e29a07dae483b78501ac9f1c97cd43e1e | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Hash Table/SortCharactersByFrequency.py | a86e42278e33144cbdaddda483d3d324a2596d51 | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 1,133 | py | """
LeetCode Problem: 451. Sort Characters By Frequency
Link: https://leetcode.com/problems/sort-characters-by-frequency/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(n log n)
Space Complexity: O(h)
"""
class Solution:
def frequencySort(self, s: str) -> str:
if not s: return s # if the string is empty
hashmap = {} # initializing the hashmap data structure.
# Populating the HashMap data structure to keep track of the frequency of each character.
for i in list(s):
if i not in hashmap:
hashmap[i] = 1
else:
hashmap[i] += 1
# sort the HashMap in descending order (The most frequent character takes first position)
hashmap = sorted(hashmap.items(), key = operator.itemgetter(1), reverse=True)
# initializing an empty string
string = ""
# multiplying the character with the number of occurances.
for i in hashmap:
string += i[0] * i[1]
return string | [
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
80e2db2324d39078df59bfa6fd7f1ae3d6b280b4 | 66f34eb965ce69670a8b4bcdfcd0fb700b324a02 | /r_utils.py | 0679c1aefa973abcd448c41c8f4f039c8ba22c6c | [] | no_license | IanEisenberg/SR_dietary_decisions | 2b25458712f0e717b6d86746575fab3ca2168d45 | e4aa6a134c039e29fd486198846f229c45a4d5a5 | refs/heads/master | 2020-04-05T06:05:35.067171 | 2018-11-08T00:11:42 | 2018-11-08T00:11:42 | 156,625,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | import numpy as np
from rpy2.robjects import pandas2ri, Formula
from rpy2.robjects.packages import importr
pandas2ri.activate()
def convert_to_dataframe(r_matrix):
base = importr('base')
df = pandas2ri.ri2py_dataframe(r_matrix)
df.columns = base.colnames(r_matrix)
df.index = base.rownames(r_matrix)
return df
def polr(data, formula):
"Python wrapper around the MASS function polr in R"
# reference: https://www.analyticsvidhya.com/blog/2016/02/multinomial-ordinal-logistic-regression/
base = importr('base')
stats = importr('stats')
MASS = importr('MASS')
out = MASS.polr(Formula(formula), data)
summary = base.summary(out)
coefs = convert_to_dataframe(summary[0])
# confidence intervals
confint = convert_to_dataframe(stats.confint(out))
# get pvalue
pvals = np.array(stats.pnorm(abs(coefs['t value']), **{"lower.tail": False}))**2
coefs.loc[:, 'p value'] = pvals
coefs = coefs.join(confint)
return {'polr_out': out,
'summary': summary,
'coefs': coefs}
| [
"ianeisenberg90@gmail.com"
] | ianeisenberg90@gmail.com |
aeece47e30fc0dc5ed2675b4d1559187b51e5f33 | a7f3524fe53bf7549683ccd7378074ba103a09f4 | /python/SRMs/SRM 145/VendingMachine.py | 787965d2bc09b6202e5ea9960ffd6a438d8778e4 | [] | no_license | kazuhayase/study | 7a16377d7c689a6e73fa7604a98ed002272ffa81 | c233b30d6390f6cb8d77cf0f1c1b6a724259cdae | refs/heads/master | 2023-08-21T18:11:35.508639 | 2023-08-20T08:54:33 | 2023-08-20T08:54:33 | 8,031,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,761 | py | # -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class VendingMachine:
def move(self, s,t,c):
dist = abs(t-s)
return min(dist, c-dist)
def motorUse(self, prices, purchases):
M = len(prices)
shelf = [ list(map(int, prices[i].split(' '))) for i in range (M) ]
N = len(shelf[0])
column = [ [s[j] for s in shelf] for j in range (N) ]
co=[]
heapq.heapify(co)
for j in range (N):
heapq.heappush(co,[-sum(column[j]),j,column[j]])
pat = re.compile('(\d+),(\d+):(\d+)')
col=0
tim=0
Sum=0
[tmps, tmpcolnum, tmpco] = heapq.heappop(co)
Sum += self.move(0,tmpcolnum,N)
col = tmpcolnum
heapq.heappush(co,[tmps,tmpcolnum,tmpco])
flag = True
for pur in purchases:
ma = pat.match(pur)
(sh,cc,ti) = map(int, ma.groups())
if ti - tim > 4:
[tmps, tmpcolnum, tmpco] = heapq.heappop(co)
Sum += self.move(col,tmpcolnum,N)
col = tmpcolnum
heapq.heappush(co,[tmps,tmpcolnum,tmpco])
Sum += self.move(col,cc,N)
col=cc
tim=ti
for CO in co:
[tmps, tmpcolnum, tmpco] = CO
if tmpcolnum == cc:
if tmpco[sh] == 0:
flag = False
CO[0] += tmpco[sh]
tmpco[sh] = 0
heapq.heapify(co)
[tmps, tmpcolnum, tmpco] = heapq.heappop(co)
Sum += self.move(col,tmpcolnum,N)
if flag:
return Sum
else:
return -1
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
try:
_t = type(expected)
received = _t(received)
if _t == list or _t == tuple:
if len(expected) != len(received): return False
return all(tc_equal(e, r) for (e, r) in zip(expected, received))
elif _t == float:
eps = 1e-9
d = abs(received - expected)
return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
else:
return expected == received
except:
return False
def pretty_str(x):
if type(x) == str:
return '"%s"' % x
elif type(x) == tuple:
return '(%s)' % (','.join( (pretty_str(y) for y in x) ) )
else:
return str(x)
def do_test(prices, purchases, __expected):
startTime = time.time()
instance = VendingMachine()
exception = None
try:
__result = instance.motorUse(prices, purchases);
except:
import traceback
exception = traceback.format_exc()
elapsed = time.time() - startTime # in sec
if exception is not None:
sys.stdout.write("RUNTIME ERROR: \n")
sys.stdout.write(exception + "\n")
return 0
if tc_equal(__expected, __result):
sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
return 1
else:
sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
sys.stdout.write(" Expected: " + pretty_str(__expected) + "\n")
sys.stdout.write(" Received: " + pretty_str(__result) + "\n")
return 0
def run_tests():
sys.stdout.write("VendingMachine (600 Points)\n\n")
passed = cases = 0
case_set = set()
for arg in sys.argv[1:]:
case_set.add(int(arg))
with open("VendingMachine.sample", "r") as f:
while True:
label = f.readline()
if not label.startswith("--"): break
prices = []
for i in range(0, int(f.readline())):
prices.append(f.readline().rstrip())
prices = tuple(prices)
purchases = []
for i in range(0, int(f.readline())):
purchases.append(f.readline().rstrip())
purchases = tuple(purchases)
f.readline()
__answer = int(f.readline().rstrip())
cases += 1
if len(case_set) > 0 and (cases - 1) in case_set: continue
sys.stdout.write(" Testcase #%d ... " % (cases - 1))
passed += do_test(prices, purchases, __answer)
sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
T = time.time() - 1474720366
PT, TT = (T / 60.0, 75.0)
points = 600 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
sys.stdout.write("Score : %.2f points\n" % points)
if __name__ == '__main__':
run_tests()
# }}}
# CUT end
| [
"kazuyoshi.hayase@jcom.home.ne.jp"
] | kazuyoshi.hayase@jcom.home.ne.jp |
0a3f847105c7529e20fec0764f3863145ac5fed7 | 49b9d13946767efbb68d5c86a3f11ed2eb7665cf | /pyOCD/target/target_k64f.py | 5a67a3abf40b17544e6304de923f928027f43c65 | [
"Apache-2.0"
] | permissive | sg-/pyOCD | a8f1ea7dbdad769841286108212ca657b912b448 | 02d4ed1e8ff550d00071ed182e09b9a89cfbe9fa | refs/heads/master | 2021-01-21T23:29:14.218140 | 2014-07-29T15:00:25 | 2014-07-29T15:00:25 | 19,388,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from target_kinetis import Kinetis
import logging
class K64F(Kinetis):
memoryMapXML = """<?xml version="1.0"?>
<!DOCTYPE memory-map PUBLIC "+//IDN gnu.org//DTD GDB Memory Map V1.0//EN" "http://sourceware.org/gdb/gdb-memory-map.dtd">
<memory-map>
<memory type="flash" start="0x0" length="0x100000"> <property name="blocksize">0x1000</property></memory>
<memory type="ram" start="0x1ffe0000" length="0x40000"> </memory>
</memory-map>
"""
def __init__(self, transport):
super(K64F, self).__init__(transport)
self.auto_increment_page_size = 0x400
self.mdm_idr = 0x001c0000
| [
"flit@me.com"
] | flit@me.com |
a515fae962d71bf48c4c511b94d45ea3b46f0681 | 98effa51c2a50080d5fb59434f35e865446268ad | /andalous/models.py | 5eea9df2420ee1ba08ab6be8ba222c66e2a5261f | [] | no_license | faresTerminal/andalous | a61dcf9cc04e96ed4ce0d2c6eb0b7046ba54ae17 | 86fc9efaadae3b85d361311f2abfb906b1682e64 | refs/heads/master | 2022-11-17T14:17:14.554094 | 2020-07-14T16:21:01 | 2020-07-14T16:21:01 | 279,620,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,754 | py | from django.db import models
from django.shortcuts import reverse, Http404
from django.db import models
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
import hashlib
from django.utils import timezone
from django.template.defaultfilters import slugify
from tinymce.models import HTMLField
from sorl.thumbnail import ImageField
from django.conf import settings
from phonenumber_field.modelfields import PhoneNumberField
from datetime import date
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField(max_length=250, allow_unicode=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(Category, self).save(*args, **kwargs)
class author(models.Model):
name = models.ForeignKey(User, on_delete=models.CASCADE)
profile_picture = models.ImageField(blank = True, upload_to = 'Avatar', default= 'Avatar/deafult-profile-image.png')
def __str__(self):
return self.name.username
class articles(models.Model):
article_author = models.ForeignKey(User, on_delete=models.CASCADE)
avatar = models.ForeignKey(author, on_delete = models.CASCADE)
title = models.CharField('العنوان', max_length=9500)
slug = models.SlugField(max_length=9500, unique_for_date='publish', allow_unicode=True)
image = models.ImageField('صورة مناسبة', upload_to = 'Images')
body = HTMLField()
posted_on = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True, auto_now_add=False)
publish = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
if not self.slug:
self.slug = arabic_slugify(self.title)
super(articles, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('andalous:show_article', kwargs={'id':self.id, 'slug': self.slug})
class Plat_a_manger(models.Model):
user_plat = models.ForeignKey(User, on_delete=models.CASCADE)
category = models.ForeignKey(Category, max_length=200, on_delete=models.CASCADE)
title_plat = models.CharField('العنوان', max_length=9500)
price = models.DecimalField(max_digits=200, decimal_places=2, default=0.00)
image_plat = models.ImageField('صورة مناسبة', upload_to = 'Images_Plat')
available = models.BooleanField(default=True)
def calculate_price(self):
self.price = price
# print(f"Price for {self.subtype} version of Sub is {self.price}")
def __str__(self):
return self.title_plat
def get_absolute_url(self):
return reverse('andalous:product_detail',
args=[self.id])
class booking(models.Model):
user_booking = models.ForeignKey(User, on_delete=models.CASCADE)
nom = models.CharField(max_length = 500, blank = True, null = True)
email = models.EmailField(max_length = 500, blank = True, null = True)
phone = PhoneNumberField(null=False, blank=False, unique=True)
date = models.DateField(("Date"), default=date.today)
hour = models.IntegerField()
person = models.IntegerField()
def __str__(self):
return self.nom
class contact(models.Model):
name = models.CharField(max_length = 500, blank = True, null = True)
email = models.EmailField(max_length = 500, blank = True, null = True)
subject = models.CharField(max_length = 500, blank = True, null = True)
message = models.TextField()
def __str__(self):
return self.subject
class comment_put(models.Model):
user_comment = models.ForeignKey(User, default = None, on_delete = models.CASCADE)
user_put = models.ForeignKey(articles, on_delete = models.CASCADE)
avatar_commenter = models.ForeignKey(author, on_delete = models.CASCADE)
comment = models.TextField(max_length = 500)
date = models.DateTimeField(auto_now=False, auto_now_add=True)
def __str__(self):
return self.comment
class happy_costumer(models.Model):
coctumer = models.ForeignKey(User, default = None, on_delete = models.CASCADE)
costumer_pic = models.ForeignKey(author, on_delete = models.CASCADE)
body = HTMLField()
date = models.DateTimeField(auto_now=False, auto_now_add=True)
def __str__(self):
return self.body
def get_absolute_url(self):
return reverse('andalous:index')
| [
"you@example.com"
] | you@example.com |
3be85e0c39c42cfb57a3c3c2bd7e89f88dff7c2d | e38660ee26709b229b5a60adbb7c7f5cb7d690ac | /tests/acceptance/test_pushover.py | 0aa5bfc95b6640d77be90c9c83b2898945b568d9 | [
"Apache-2.0"
] | permissive | Mattlk13/sentry | 7de81bc70a4731f081ec9414f049e89c7c4a9e92 | 0d94c56cfb7d8ffb6ce946fd090bfa7bde362eb2 | refs/heads/master | 2022-12-18T07:48:53.791499 | 2017-02-01T22:44:04 | 2017-02-01T22:44:04 | 81,067,165 | 0 | 0 | Apache-2.0 | 2020-05-06T03:35:40 | 2017-02-06T08:54:02 | Python | UTF-8 | Python | false | false | 1,103 | py | from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class PushoverTest(AcceptanceTestCase):
def setUp(self):
super(PushoverTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.org = self.create_organization(
name='Rowdy Tiger',
owner=None,
)
self.team = self.create_team(
organization=self.org,
name='Mariachi Band'
)
self.project = self.create_project(
organization=self.org,
team=self.team,
name='Bengal',
)
self.create_member(
user=self.user,
organization=self.org,
role='owner',
teams=[self.team],
)
self.login_as(self.user)
self.path = '/{}/{}/settings/plugins/pushover/'.format(
self.org.slug, self.project.slug
)
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading-indicator')
self.browser.snapshot('pushover settings')
| [
"noreply@github.com"
] | Mattlk13.noreply@github.com |
707f33465f2b92aaa6f29218672df3ea88735bd0 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/fbs_0208+390/sdB_FBS_0208+390_lc.py | 9595acfa7c35cecfdb852eaa64c041fda1a6d611 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[32.881292,39.287139], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_FBS_0208+390 /sdB_FBS_0208+390_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
0b3a86fa58cba842cfdaa08d02edb18078fe0737 | c2a4a2277844062390ffdf0d8908f4c98989541d | /kisti_condor/templates/create_ajob.py~ | 04bf3d7a66e9d67ad05d2c8313a1898af4a916f2 | [] | no_license | soarnsoar/submission_scripts | 0ca147a3b18b0e398e62ba182c35dcf23d9537ef | fc6bd53da075affbaf9ab1d9c559827a6afb0e2d | refs/heads/master | 2020-04-20T23:53:44.558018 | 2019-05-13T14:22:21 | 2019-05-13T14:22:21 | 169,180,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | #!/usr/bin/python
#runshell=$1
#njob=$2
#inputtar=$3
###To emulate "source"
import argparse
parser = argparse.ArgumentParser()
####Set options###
parser.add_argument("--runshell", help="shell file to run")
parser.add_argument("--inputtar", help="input file to pass")
parser.add_argument("--njob", help=" number of jobs")
parser.add_argument("--jobname", help=" job name")
parser.add_argument("--nosubmit", help=" No submit. Only create the jobdir")
args = parser.parse_args()
if args.jobname:
jobname = args.jobname
else:
print "need --jobname argument"
quit()
if args.nosubmit:
submit = False
else:
submit = True
if args.runshell:
runshell = args.runshell
else:
print "need --runshell argument"
quit()
if args.njob:
njob = args.njob
else:
print "no --njob argument"
print "SET njob =1 !!"
njob = 1
if args.inputtar:
inputtar = args.inputtar
else:
print "No input tar"
inputtar = ""
import os
currentPath = os.getcwd()
os.system("mkdir -p JOBDIR_"+jobname)
os.system("make_submit_jds.sh "+runshell+" "+str(njob)+" "+inputtar)
os.system("mv submit.jds JOBDIR_"+jobname)
os.system("cp "+runshell+" JOBDIR_"+jobname)
os.chdir(currentPath+"/JOBDIR_"+jobname)
script="submit_tmp.sh"
f_new = open(script,'w')
f_new.write('condor_submit submit.jds')
f_new.close()
os.chmod(script, 0755)
if submit == True:
#os.system("condor_submit submit.jds")
#os.system("source "+script)
#shell_source(script)
#os.system("cd "+os.getcwd()+";ls; source "+script)
#os.system("source "+os.getcwd()+"/"+script)
#shell_source(os.getcwd()+"/"+script)
import subprocess
submit=subprocess.Popen(["/bin/bash","-i","-c","source "+script])
submit.communicate()
os.chdir(currentPath)
| [
"soarnsoar@gmail.com"
] | soarnsoar@gmail.com | |
ee4e0bda2007cbc69978b80a7b4e2616ffec426c | d2e69d4d3d1e11a87f5a377e4a423422fe0a7058 | /numpy_code/07/three_d.py | ae81fcc09dc8ad45479bd4b60ae87b8fafb193a7 | [] | no_license | oJacker/_python | 6f30dd4a60c1593d27c00ac485163fc0ba77dd8c | 8086d0cd78e156abfff9819a56384149dd431c56 | refs/heads/master | 2021-05-06T03:13:29.167281 | 2018-02-01T09:41:42 | 2018-02-01T09:41:42 | 114,827,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
fig = plt.figure()
# (1) 我们需要使用3d关键字来指定图像的三维投影。
ax = fig.add_subplot(111, projection='3d')
# (2) 我们将使用meshgrid函数创建一个二维的坐标网格。这将用于变量x和y的赋值。
u = np.linspace(-1, 1, 100)
x, y = np.meshgrid(u, u)
z = x ** 2 + y ** 2
# (3) 我们将指定行和列的步幅,以及绘制曲面所用的色彩表(color map)。步幅决定曲面上“瓦
# 片”的大小,而色彩表的选择取决于个人喜好
ax.plot_surface(x, y, z, rstride=4, cstride=4, cmap=cm.YlGnBu_r)
plt.show() | [
"623657285@qq.com"
] | 623657285@qq.com |
a5566de5e6863b335b301322eff0ad3b0daf4834 | fac98eb457e313f67d804945728ca2a5eef9d7d6 | /devel/json/xoppy_calc_template.py | f961ce9158a3ffcec41e48aef1709291e63988ba | [
"BSD-2-Clause"
] | permissive | PaNOSC-ViNYL/XOPPY | 4cc9490a59e547cc1a277f0ba44a4a47861651f7 | 97362c576af7ee90006de99e440e6ee9636977db | refs/heads/master | 2020-05-05T02:29:54.475644 | 2019-04-07T21:27:56 | 2019-04-07T21:27:56 | 179,640,109 | 0 | 1 | BSD-2-Clause | 2019-04-05T12:42:29 | 2019-04-05T07:52:06 | Python | UTF-8 | Python | false | false | 7,142 | py |
def xoppy_calc_black_body(TITLE="Thermal source: Planck distribution",TEMPERATURE=1200000.0,E_MIN=10.0,E_MAX=1000.0,NPOINTS=500):
print("Inside xoppy_calc_black_body. ")
return(None)
def xoppy_calc_mlayer(MODE=0,SCAN=0,F12_FLAG=0,SUBSTRATE="Si",ODD_MATERIAL="Si",EVEN_MATERIAL="W",ENERGY=8050.0,THETA=0.0,SCAN_STEP=0.009999999776483,NPOINTS=600,ODD_THICKNESS=25.0,EVEN_THICKNESS=25.0,NLAYERS=50,FILE="layers.dat"):
print("Inside xoppy_calc_mlayer. ")
return(None)
def xoppy_calc_nsources(TEMPERATURE=300.0,ZONE=0,MAXFLUX_F=200000000000000.0,MAXFLUX_EPI=20000000000000.0,MAXFLUX_TH=200000000000000.0,NPOINTS=500):
print("Inside xoppy_calc_nsources. ")
return(None)
def xoppy_calc_ws(TITLE="Wiggler A at APS",ENERGY=7.0,CUR=100.0,PERIOD=8.5,N=28.0,KX=0.0,KY=8.739999771118164,EMIN=1000.0,EMAX=100000.0,NEE=2000,D=30.0,XPC=0.0,YPC=0.0,XPS=2.0,YPS=2.0,NXP=10,NYP=10):
print("Inside xoppy_calc_ws. ")
return(None)
def xoppy_calc_xtubes(ITUBE=0,VOLTAGE=30.0):
print("Inside xoppy_calc_xtubes. ")
return(None)
def xoppy_calc_xtube_w(VOLTAGE=100.0,RIPPLE=0.0,AL_FILTER=0.0):
print("Inside xoppy_calc_xtube_w. ")
return(None)
def xoppy_calc_xinpro(CRYSTAL_MATERIAL=0,MODE=0,ENERGY=8000.0,MILLER_INDEX_H=1,MILLER_INDEX_K=1,MILLER_INDEX_L=1,ASYMMETRY_ANGLE=0.0,THICKNESS=500.0,TEMPERATURE=300.0,NPOINTS=100,SCALE=0,XFROM=-50.0,XTO=50.0):
print("Inside xoppy_calc_xinpro. ")
return(None)
def xoppy_calc_xcrystal(FILEF0=0,FILEF1F2=0,FILECROSSSEC=0,CRYSTAL_MATERIAL=0,MILLER_INDEX_H=1,MILLER_INDEX_K=1,MILLER_INDEX_L=1,I_ABSORP=2,TEMPER="1.0",MOSAIC=0,GEOMETRY=0,SCAN=2,UNIT=1,SCANFROM=-100.0,SCANTO=100.0,SCANPOINTS=200,ENERGY=8000.0,ASYMMETRY_ANGLE=0.0,THICKNESS=0.7,MOSAIC_FWHM=0.1,RSAG=125.0,RMER=1290.0,ANISOTROPY=0,POISSON=0.22,CUT="2 -1 -1 ; 1 1 1 ; 0 0 0",FILECOMPLIANCE="mycompliance.dat"):
print("Inside xoppy_calc_xcrystal. ")
return(None)
def xoppy_calc_xwiggler(FIELD=0,NPERIODS=12,ULAMBDA=0.125,K=14.0,ENERGY=6.04,PHOT_ENERGY_MIN=100.0,PHOT_ENERGY_MAX=100100.0,NPOINTS=100,LOGPLOT=1,NTRAJPOINTS=101,CURRENT=200.0,FILE="?"):
print("Inside xoppy_calc_xwiggler. ")
return(None)
def xoppy_calc_xxcom(NAME="Pyrex Glass",SUBSTANCE=3,DESCRIPTION="SiO2:B2O3:Na2O:Al2O3:K2O",FRACTION="0.807:0.129:0.038:0.022:0.004",GRID=1,GRIDINPUT=0,GRIDDATA="0.0804:0.2790:0.6616:1.3685:2.7541",ELEMENTOUTPUT=0):
print("Inside xoppy_calc_xxcom. ")
return(None)
def xoppy_calc_xpower(F1F2=0,MU=0,SOURCE=1,DUMMY1="",DUMMY2="",DUMMY3="",ENER_MIN=1000.0,ENER_MAX=50000.0,ENER_N=100,SOURCE_FILE="?",NELEMENTS=1,EL1_FOR="Be",EL1_FLAG=0,EL1_THI=0.5,EL1_ANG=3.0,EL1_ROU=0.0,EL1_DEN="?",EL2_FOR="Rh",EL2_FLAG=1,EL2_THI=0.5,EL2_ANG=3.0,EL2_ROU=0.0,EL2_DEN="?",EL3_FOR="Al",EL3_FLAG=0,EL3_THI=0.5,EL3_ANG=3.0,EL3_ROU=0.0,EL3_DEN="?",EL4_FOR="B",EL4_FLAG=0,EL4_THI=0.5,EL4_ANG=3.0,EL4_ROU=0.0,EL4_DEN="?",EL5_FOR="Pt",EL5_FLAG=1,EL5_THI=0.5,EL5_ANG=3.0,EL5_ROU=0.0,EL5_DEN="?"):
print("Inside xoppy_calc_xpower. ")
return(None)
def xoppy_calc_xbfield(PERIOD=4.0,NPER=42,NPTS=40,IMAGNET=0,ITYPE=0,K=1.379999995231628,GAP=2.0,GAPTAP=10.0,FILE="undul.bf"):
print("Inside xoppy_calc_xbfield. ")
return(None)
def xoppy_calc_xfilter(EMPTY1=" ",EMPTY2=" ",NELEMENTS=1,SOURCE=0,ENER_MIN=1000.0,ENER_MAX=50000.0,ENER_N=100,SOURCE_FILE="SRCOMPW",EL1_SYM="Be",EL1_THI=500.0,EL2_SYM="Al",EL2_THI=50.0,EL3_SYM="Pt",EL3_THI=10.0,EL4_SYM="Au",EL4_THI=10.0,EL5_SYM="Cu",EL5_THI=10.0):
print("Inside xoppy_calc_xfilter. ")
return(None)
def xoppy_calc_xtc(TITLE="APS Undulator A, Beam Parameters for regular lattice nux36nuy39.twi, 1.5% cpl.",ENERGY=7.0,CUR=100.0,SIGE=0.000959999975748,TEXT_MACHINE="",SIGX=0.273999989032745,SIGY=0.010999999940395,SIGX1=0.011300000362098,SIGY1=0.00359999993816,TEXT_BEAM="",PERIOD=3.299999952316284,NP=70,TEXT_UNDULATOR="",EMIN=2950.0,EMAX=13500.0,N=40,TEXT_ENERGY="",IHMIN=1,IHMAX=15,IHSTEP=2,TEXT_HARM="",IHEL=0,METHOD=1,IK=1,NEKS=100,TEXT_PARM="",RUN_MODE_NAME="foreground"):
print("Inside xoppy_calc_xtc. ")
return(None)
def xoppy_calc_xus(TITLE="APS Undulator A, Beam Parameters for regular lattice nux36nuy39.twi, 1.5% cpl.",ENERGY=7.0,CUR=100.0,SIGE=0.000959999975748,TEXT_MACHINE="",SIGX=0.273999989032745,SIGY=0.010999999940395,SIGX1=0.011300000362098,SIGY1=0.00359999993816,TEXT_BEAM="",PERIOD=3.299999952316284,NP=70,KX=0.0,KY=2.75,TEXT_UNDULATOR="",EMIN=1000.0,EMAX=50000.0,N=5000,TEXT_ENERGY="",D=30.0,XPC=0.0,YPC=0.0,XPS=2.5,YPS=1.0,NXP=25,NYP=10,TEXT_PINHOLE="",MODE=2,METHOD=4,IHARM=0,TEXT_MODE="",NPHI=0,NALPHA=0,CALPHA2=0.0,NOMEGA=64,COMEGA=8.0,NSIGMA=0,TEXT_CALC="",RUN_MODE_NAME="foreground"):
print("Inside xoppy_calc_xus. ")
return(None)
def xoppy_calc_xurgent(TITLE="ESRF HIGH BETA UNDULATOR",ENERGY=6.039999961853027,CUR=0.100000001490116,SIGX=0.400000005960464,SIGY=0.079999998211861,SIGX1=0.016000000759959,SIGY1=0.00899999961257,ITYPE=1,PERIOD=0.046000000089407,N=32,KX=0.0,KY=1.700000047683716,PHASE=0.0,EMIN=10000.0,EMAX=50000.0,NENERGY=100,D=27.0,XPC=0.0,YPC=0.0,XPS=3.0,YPS=3.0,NXP=25,NYP=25,MODE=4,ICALC=2,IHARM=-1,NPHI=0,NSIG=0,NALPHA=0,DALPHA=0.0,NOMEGA=0,DOMEGA=0.0):
print("Inside xoppy_calc_xurgent. ")
return(None)
def xoppy_calc_xyaup(TITLE="YAUP EXAMPLE (ESRF BL-8)",PERIOD=4.0,NPER=42,NPTS=40,EMIN=3000.0,EMAX=30000.0,NENERGY=100,ENERGY=6.039999961853027,CUR=0.100000001490116,SIGX=0.425999999046326,SIGY=0.08500000089407,SIGX1=0.017000000923872,SIGY1=0.008500000461936,D=30.0,XPC=0.0,YPC=0.0,XPS=2.0,YPS=2.0,NXP=0,NYP=0,MODE=4,NSIG=2,TRAJECTORY="new+keep",XSYM="yes",HANNING=0,BFILE="undul.bf",TFILE="undul.traj"):
print("Inside xoppy_calc_xyaup. ")
return(None)
def xoppy_calc_xf0(DATASETS=0,MAT_FLAG=0,MAT_LIST=0,DESCRIPTOR="Si",GRID=0,GRIDSTART=0.0,GRIDEND=4.0,GRIDN=100):
print("Inside xoppy_calc_xf0. ")
return(None)
def xoppy_calc_xcrosssec(DATASETS=1,MAT_FLAG=0,MAT_LIST=0,DESCRIPTOR="Si",DENSITY=1.0,CALCULATE="all",GRID=0,GRIDSTART=100.0,GRIDEND=10000.0,GRIDN=200,UNIT=0):
print("Inside xoppy_calc_xcrosssec. ")
return(None)
def xoppy_calc_xf1f2(DATASETS=1,MAT_FLAG=0,MAT_LIST=0,DESCRIPTOR="Si",DENSITY=1.0,CALCULATE=1,GRID=0,GRIDSTART=5000.0,GRIDEND=25000.0,GRIDN=100,THETAGRID=0,ROUGH=0.0,THETA1=2.0,THETA2=5.0,THETAN=50):
print("Inside xoppy_calc_xf1f2. ")
return(None)
def xoppy_calc_xfh(FILEF0=0,FILEF1F2=0,FILECROSSSEC=0,ILATTICE=0,HMILLER=1,KMILLER=1,LMILLER=1,I_ABSORP=2,TEMPER="1.0",ENERGY=8000.0,ENERGY_END=18000.0,NPOINTS=20):
print("Inside xoppy_calc_xfh. ")
return(None)
def xoppy_calc_mare(CRYSTAL=2,H=2,K=2,L=2,HMAX=3,KMAX=3,LMAX=3,FHEDGE=1e-08,DISPLAY=0,LAMBDA=1.54,DELTALAMBDA=0.009999999776483,PHI=-20.0,DELTAPHI=0.1):
print("Inside xoppy_calc_mare. ")
return(None)
def xoppy_calc_bm(TYPE_CALC=0,MACHINE_NAME="ESRF bending magnet",RB_CHOICE=0,MACHINE_R_M=25.0,BFIELD_T=0.8,BEAM_ENERGY_GEV=6.0,CURRENT_A=0.1,HOR_DIV_MRAD=1.0,VER_DIV=0,PHOT_ENERGY_MIN=100.0,PHOT_ENERGY_MAX=100000.0,NPOINTS=500,LOG_CHOICE=1,PSI_MRAD_PLOT=1.0,PSI_MIN=-1.0,PSI_MAX=1.0,PSI_NPOINTS=500):
print("Inside xoppy_calc_bm. ")
return(None)
| [
"srio@esrf.eu"
] | srio@esrf.eu |
23cb75a768fa6e48650636a825adea015dddf154 | 9a40856164cbbf6c889f391c1a9edb4b9137e54b | /data_structures/python/dp/fib_hashtable.py | 35cd57d3203cf1fe5dfd8e54fd79e36408724ff4 | [
"MIT"
] | permissive | minicloudsky/leetcode_solutions | b77f08369c859a9d85c8ffa9f7125710b195c7b2 | c9c1a87a61c2867bd1f7015f0ebc4acedde3a469 | refs/heads/master | 2023-03-30T15:27:57.031692 | 2021-04-06T13:42:29 | 2021-04-06T13:42:29 | 341,922,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | #!/usr/bin/env python
# coding=utf-8
hash_table = {}
def fibonacci(n):
if n==1:
return 1
if n==2:
return 1
if hash_table.get(n):
return hash_table.get(n)
res = fibonacci(n-1) + fibonacci(n-2)
hash_table[n] = res
return res
if __name__ == '__main__':
n = 5
print(fibonacci(n))
| [
"1397991131@qq.com"
] | 1397991131@qq.com |
0425144dbb0b99fc85b79d0208592fcf934bc6c4 | ff67008e53abced81b3a167b289103863223a61c | /python_cook_book/class_and_object/8_定义一个接口或抽象类.py | bc0a9aa8a198c2cef1417e8067c499059bc13fe0 | [] | no_license | w1131680660/tets | 48695ea7a9dbb0bea783b5e44066ea039bfbe84f | 9f409e325c86a897ad3209e6420a2c6f6287a0ab | refs/heads/master | 2023-05-03T17:21:12.663817 | 2021-05-30T16:26:55 | 2021-05-30T16:26:55 | 369,422,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py |
# 定义一个接口或抽象类,并且通过执行类型检查来确保子类实现了某些特定的方法
from abc import ABCMeta, abstractmethod
class IStream(metaclass=ABCMeta):
@abstractmethod
def read(self,maxbytes =-1):
pass
@abstractmethod
def write(self,data):
pass
# 抽象类的一个特点不能被直接实例化
# a = IStream() # 报错
# 抽象类的目的1 .就是让别的类继承它并实现特定的抽象方法:
class SocketStream(IStream):
def read(self, maxbytes=-1):
print(213)
return 222
def write(self, data):
pass
P =SocketStream()
# 2. 抽象基类的一个主要用途是在代码中检查某些类是否为特定类型,实现了特定接口:x
def serialize(obj, stream):
if not isinstance(stream, IStream):
raise TypeError('Expected an IStream')
pass | [
"1131680660@qq.com"
] | 1131680660@qq.com |
b64d594379d1a6beb80b92f95720d76742b49597 | 8a432b7074e71264326bba2885c140097f9ad041 | /itng/doc/source/conf1.py | 838c7c38940df3c3b932a2e58508d906b4486690 | [
"MIT"
] | permissive | ITNG/itng_toolbox | b11ce89845b7370cab18781216afdaa0de3555dc | 8b2edd87d9011139ddd3baf0c9eada49388e3744 | refs/heads/master | 2022-12-29T19:39:51.113180 | 2020-09-30T09:17:00 | 2020-09-30T09:17:00 | 241,953,875 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | # -*- coding: utf-8 -*-
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Sphinx-Themes template'
copyright = '2018, sphinx-themes.org'
author = 'sphinx-themes.org'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_theme = 'cloud'
# pip install cloud_sptheme | [
"a.ziaeemehr@gmail.com"
] | a.ziaeemehr@gmail.com |
d73a7cb7fd041b19efd8bca0db4a3d138c2e16c7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03222/s568418770.py | 4e51bc7fea54d52d1817f2592c5631893a937a81 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | # -*- coding: utf-8 -*-
import sys
def input(): return sys.stdin.readline().strip()
def list2d(a, b, c): return [[c] * b for i in range(a)]
def list3d(a, b, c, d): return [[[d] * c for j in range(b)] for i in range(a)]
def list4d(a, b, c, d, e): return [[[[e] * d for j in range(c)] for j in range(b)] for i in range(a)]
def ceil(x, y=1): return int(-(-x // y))
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(N=None): return list(MAP()) if N is None else [INT() for i in range(N)]
def Yes(): print('Yes')
def No(): print('No')
def YES(): print('YES')
def NO(): print('NO')
sys.setrecursionlimit(10 ** 9)
INF = 10 ** 18
MOD = 10 ** 9 + 7
H, W, K = MAP()
# あみだくじとして妥当かの判定
def check(S):
for i in range(W-1):
# 1つでも隣り合う場所に横棒があればNG
if S & 1<<i and S & 1<<(i+1):
return False
return True
# dp[i][S][j] := i段目まで見て、今の段の横棒の状態がSで、j本目の縦棒に行く通り数
dp = list3d(H+1, 1<<W, W+1, 0)
dp[0][0][1] = 1
for i in range(H):
for S1 in range(1<<W):
# 左端はただの番兵なので、立っていたらスキップ
if S1 & 1:
continue
# あみだくじとして妥当でない集合は全てスキップ
if not check(S1):
continue
for S2 in range(1<<W):
if S2 & 1:
continue
if not check(S2):
continue
for j in range(1, W+1):
# 左に棒があれば左に遷移
if S2 & 1<<(j-1):
dp[i+1][S2][j-1] += dp[i][S1][j]
dp[i+1][S2][j-1] %= MOD
# 右に棒があれば右に遷移
elif S2 & 1<<j:
dp[i+1][S2][j+1] += dp[i][S1][j]
dp[i+1][S2][j+1] %= MOD
# 棒がなければ真下に遷移
else:
dp[i+1][S2][j] += dp[i][S1][j]
dp[i+1][S2][j] %= MOD
ans = 0
for S in range(1<<W):
ans += dp[H][S][K]
ans %= MOD
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3ef5e2b0d0a6b9a4e486ee864e44c88183a8767b | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/bin/jupyter-trust | 6c459f7485a2fbb679a408f1e3cdef503301c8ba | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 477 | #!/var/mobile/Containers/Data/Application/EB768CE0-A764-4058-914E-C213876012C3/Library/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'nbformat==4.4.0','console_scripts','jupyter-trust'
__requires__ = 'nbformat==4.4.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('nbformat==4.4.0', 'console_scripts', 'jupyter-trust')()
)
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr | |
6026ac13de1b8b84925cfc151f1b2550d6abde5d | cd4a912cae6b00f3ef32c1d497cfba2cd36879fe | /PracticePython/Arrays/plus_one.py | b78f1e820b81655f824c3f2446d8b6ecf08be92b | [] | no_license | natepill/problem-solving | d8927251b79022265731300b2164cf939862f07c | cf49168db0206bfe68364fdd7190099b3593a31d | refs/heads/master | 2020-03-29T13:19:19.588509 | 2020-03-01T19:53:58 | 2020-03-01T19:53:58 | 149,952,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
# if num > 9:
# subtract 1 from num
# if at start then
#insert 1 at start of array
# add 1 to next element
# else
# return nums
# add 1 to last element
digits[-1] += 1
print(digits[-1])
# iterate over list backwards
for i in range(len(digits)-1,-1, -1):
# Account for carry over
print(i)
print(digits[i])
if digits[i] > 9:
digits[i] = 0
if i == 0:
digits.insert(0, 1)
print(digits)
else:
digits[i-1] += 1
else:
# all digits are less than 10. 1 has been added to end
return digits
return digits
| [
"natepill@gmail.com"
] | natepill@gmail.com |
3f739c7832d610b85adf72c22a9f137827bd4a42 | ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a | /CodeForces/Round E34/D.py | 9181e636afdc42ef3bd5732ce12654159f6230eb | [] | no_license | cormackikkert/competitive-programming | f3fa287fcb74248ba218ecd763f8f6df31d57424 | 3a1200b8ff9b6941c422371961a127d7be8f2e00 | refs/heads/master | 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | n = int(input())
arr = list(map(int, input().split()))
total = 0
for i, ai in enumerate(arr):
for aj in arr[i+1:]:
if -1 <= ai - aj <= 1:
continue
total += aj - ai
print(total)
| [
"u6427001@anu.edu.au"
] | u6427001@anu.edu.au |
6069767777f01ae48681840cf317a4b6d6cf1c8e | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow/source/keras/backend/common.py | 97646067c9b2fa3ffee64857bc6523c3d44302c5 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 4,194 | py | import numpy as np
# the type of float to use throughout the session.
_FLOATX = 'float32'
_EPSILON = 10e-8
_IMAGE_DATA_FORMAT = 'channels_last'
def epsilon():
"""Returns the value of the fuzz
factor used in numeric expressions.
# Returns
A float.
# Example
```python
>>> keras.backend.epsilon()
1e-08
```
"""
return _EPSILON
def set_epsilon(e):
"""Sets the value of the fuzz
factor used in numeric expressions.
# Arguments
e: float. New value of epsilon.
# Example
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-08
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
"""
global _EPSILON
_EPSILON = e
def floatx():
"""Returns the default float type, as a string.
(e.g. 'float16', 'float32', 'float64').
# Returns
String, the current default float type.
# Example
```python
>>> keras.backend.floatx()
'float32'
```
"""
return _FLOATX
def set_floatx(floatx):
"""Sets the default float type.
# Arguments
floatx: String, 'float16', 'float32', or 'float64'.
# Example
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
"""
global _FLOATX
if floatx not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(floatx))
_FLOATX = str(floatx)
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
# Arguments
x: Numpy array.
# Returns
The same Numpy array, cast to its new type.
# Example
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=_FLOATX)
def image_data_format():
"""Returns the default image data format convention ('channels_first' or 'channels_last').
# Returns
A string, either `'channels_first'` or `'channels_last'`
# Example
```python
>>> keras.backend.image_data_format()
'channels_first'
```
"""
return _IMAGE_DATA_FORMAT
def set_image_data_format(data_format):
"""Sets the value of the data format convention.
# Arguments
data_format: string. `'channels_first'` or `'channels_last'`.
# Example
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
"""
global _IMAGE_DATA_FORMAT
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('Unknown data_format:', data_format)
_IMAGE_DATA_FORMAT = str(data_format)
# Legacy methods
def set_image_dim_ordering(dim_ordering):
"""Legacy setter for `image_data_format`.
# Arguments
dim_ordering: string. `tf` or `th`.
# Example
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
# Raises
ValueError: if `dim_ordering` is invalid.
"""
global _IMAGE_DATA_FORMAT
if dim_ordering not in {'tf', 'th'}:
raise ValueError('Unknown dim_ordering:', dim_ordering)
if dim_ordering == 'th':
data_format = 'channels_first'
else:
data_format = 'channels_last'
_IMAGE_DATA_FORMAT = data_format
def image_dim_ordering():
"""Legacy getter for `image_data_format`.
# Returns
string, one of `'th'`, `'tf'`
"""
if _IMAGE_DATA_FORMAT == 'channels_first':
return 'th'
else:
return 'tf'
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
c0fc3cdd7fc2eb48e915ff2d9c58482668d45f43 | 923a14dd594191d77e30465027ece8371f28a7a6 | /web-serpng/code/serpng/articles/scripts/import_articles_csv.py | 5ce7acd20ea67b344c389d7cf0232c5fa3308941 | [] | no_license | alyago/django-web | 3af7b3389df59104eaf5e50ed9cc2c3e730fed7f | da3073eec6d676dfe0164502b80d2a1c75e89575 | refs/heads/master | 2021-01-10T19:33:45.425520 | 2013-11-21T09:43:37 | 2013-11-21T09:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,095 | py | #!/usr/bin/env python
# encoding: utf-8
"""
import_articles_csv.py
Various importers for bringing data into the articles database from CSV, etc.
This script requires django-extensions in order to run. To install, activate
your employers virtual environment and run:
pip install django-extensions
Then, add the following to your settings.py:
'django_extensions'
Afterwards, you can run the script via:
python manage.py runscript import_articles_csv
Copyright (c) 2013 Simply Hired, Inc. All rights reserved.
"""
import os
import unicodecsv
from datetime import date, timedelta
from django.template.defaultfilters import slugify
from articles.models import Category, Article
def get_path(filename):
folder = os.path.dirname(__file__)
return os.path.join(folder, filename)
class ArticleImporter(object):
"""Imports articles from CSV into the articles application."""
def __init__(self, path_to_csv):
"""Initialize a new `ArticleImporter` instance."""
today = date.today()
days_since_last_monday = 7 if today.weekday() < 1 else today.weekday()
self.post_date = date.today() - timedelta(days=days_since_last_monday)
self.path_to_csv = path_to_csv
def _category(self, category_title):
"""Get the `Category` instance with the given title."""
category, created = Category.objects.get_or_create(
slug=slugify(category_title),
defaults={'title': category_title})
if created:
print "Created '%s' category" % category.title
return category
def _post_article(self, title, body_text, category, row_idx):
"""Post an article."""
article, created = Article.objects.get_or_create(
slug=slugify(title),
defaults={'title': title,
'body': body_text,
'posted': self.post_date,
'category': category})
if created:
print "Created '%s' article" % title
else:
print "Article '%s' already exists" % title
return
if row_idx % 2 == 0:
# Go a week back
self.post_date = self.post_date = timedelta(days=7)
def post_csv(self):
"""Post the rows in the CSV file and return the number of rows
processed."""
csv_row_idx = 2 # The Excel row number for debugging purposes.
with open(self.path_to_csv, 'rU') as file_in:
csv_file = unicodecsv.DictReader(file_in)
for row in csv_file:
category = self._category(row['category'])
self._post_article(row['title'],
row['body'],
category,
csv_row_idx)
csv_row_idx += 1
return csv_row_idx - 2
def run(): # pragma: no cover
"""Import the CSV file specified by `filename`"""
filename = get_path('article_import_1.csv')
ai = ArticleImporter(filename)
rows = ai.post_csv()
print '%d rows processed' % rows
| [
"oleg@simplyhired.com"
] | oleg@simplyhired.com |
4e2d94bb5d84f3d8f50aa86e6208f4d10b36161d | 2eae961147a9627a2b9c8449fa61cb7292ad4f6a | /openapi_client/models/post_addresses.py | 7b56305b4ed654a80626a2a6d77d4faee0e9b3e2 | [] | no_license | kgr-eureka/SageOneSDK | 5a57cc6f62ffc571620ec67c79757dcd4e6feca7 | 798e240eb8f4a5718013ab74ec9a0f9f9054399a | refs/heads/master | 2021-02-10T04:04:19.202332 | 2020-03-02T11:11:04 | 2020-03-02T11:11:04 | 244,350,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,356 | py | # coding: utf-8
"""
Sage Business Cloud Accounting - Accounts
Documentation of the Sage Business Cloud Accounting API. # noqa: E501
The version of the OpenAPI document: 3.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class PostAddresses(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'address': 'PostAddressesAddress'
}
attribute_map = {
'address': 'address'
}
def __init__(self, address=None, local_vars_configuration=None): # noqa: E501
"""PostAddresses - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._address = None
self.discriminator = None
if address is not None:
self.address = address
@property
def address(self):
"""Gets the address of this PostAddresses. # noqa: E501
:return: The address of this PostAddresses. # noqa: E501
:rtype: PostAddressesAddress
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this PostAddresses.
:param address: The address of this PostAddresses. # noqa: E501
:type: PostAddressesAddress
"""
self._address = address
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PostAddresses):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PostAddresses):
return True
return self.to_dict() != other.to_dict()
| [
"kevin.gray@eurekasolutions.co.uk"
] | kevin.gray@eurekasolutions.co.uk |
e82d90d33521853f74d0cbc78f9b0cbd8864eddc | 2127cabeeda296f7a6b692982872d91e8bdd3016 | /tests/test_optional.py | 16610ee71c8d8995010ca1ba491fdbb729738abd | [
"Apache-2.0"
] | permissive | nomilkinmyhome/dataclass_factory | 26059993af95509e386793c42fd743d6f08e1079 | 7bcbd395acd5c61806ae36042067a7f9882cec28 | refs/heads/master | 2022-11-18T21:51:40.308764 | 2020-03-26T08:51:08 | 2020-03-26T08:51:08 | 279,984,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from typing import Dict, Optional
from unittest import TestCase
from dataclasses import dataclass
from dataclass_factory import Factory
@dataclass
class Data:
x: Optional[Dict[str, None]]
class TestOptional(TestCase):
def test_optional(self):
factory = Factory()
y = factory.load({"x": None}, Data)
self.assertEqual(y, Data(None))
| [
"tishka17@mail.ru"
] | tishka17@mail.ru |
744789b1e04f1a94cb4c59e13391f7b2f4a92127 | 920ab19b73a7cba21d340a49d9d24e2d1eeabf3d | /idpsreact/bin/bandit | 4313ce9452e852ccaaafb40663998c38d0c5e984 | [
"MIT"
] | permissive | DTrafford/IDPS | 5fa2b73f2c47cbf50b90a1a786c10f7d69c995b4 | 1eaccfc218adcb7231e64271731c765f8362b891 | refs/heads/master | 2022-12-16T16:28:34.801962 | 2020-03-30T18:08:09 | 2020-03-30T18:08:09 | 234,163,829 | 0 | 0 | MIT | 2020-09-10T06:26:02 | 2020-01-15T20:10:09 | Python | UTF-8 | Python | false | false | 278 | #!/Users/sangit/Downloads/django-react-boilerplate-master/idpsreact/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from bandit.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"d.trafford@outlook.com"
] | d.trafford@outlook.com | |
0e81bf594e1ecc283e6e06fd99e065c54c7070e6 | 35b2ab54ac96f08b8b48cb8a8a1d596724b70f04 | /nutrientes/management/commands/piramid.py | 95a094d38b67343ed3998fe6f124d8795fbef0d5 | [] | no_license | elaeon/foods | 108be8a7d415b81b40e60f1fb7a0a13296ff147f | 2ba3cf2dacbc91f2b2c197926779b095f4457f01 | refs/heads/master | 2021-01-10T18:45:56.780763 | 2016-03-02T17:56:15 | 2016-03-02T17:56:15 | 35,621,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,741 | py | from django.core.management.base import BaseCommand
from nutrientes.utils import PiramidFood, Food
from nutrientes.weights import WEIGHT_NUTRS as weight_nutrs
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--meat',
default=False,
help='Select a meat in the piramid: beef, pork, luncheon, hunt')
parser.add_argument('--categories',
default='all',
help='Select a category: all, meats, no-meats')
parser.add_argument('--dataset',
default=None,
help='Select a dataset: all or foodimg')
parser.add_argument('--number-category',
default=None,
help='Select a number food category: 1500, ...')
parser.add_argument('--radio-omega',
action='store_true',
help='Weights with radio omega')
parser.add_argument('--piramid',
action='store_true',
help='Build a food piramid')
def handle(self, *args, **options):
meat = options.get('meat', "chicken")
dataset = options.get('dataset', "foodimg")
categories = options["categories"]
number_category = options["number_category"]
radio_omega = options["radio_omega"]
piramid_print = options["piramid"]
if number_category is not None:
dataset = list(self.datasets(number_category, dataset))
piramid = PiramidFood(meat=meat, dataset=dataset, categories=categories,
weight_nutrs=weight_nutrs, radio_omega=radio_omega)
total_value = 0
for category, value, _ in piramid.process(reverse=False):
print(category, Food(category, avg=False).name, value)
total_value += value
print("Total: ", total_value)
elif dataset == 'test':
dataset = ["19903", "14545", "09079", "20051", "35193", "25000", "12006", "12220"]
piramid = PiramidFood(meat=meat, dataset=dataset, categories=categories,
weight_nutrs={"omega3":.1, "omega9": .1},
radio_omega=radio_omega)
#{"omega3":.1, "omega9": .1, "322": 1, "263": 2, "418": 3})
total_value = 0
for category, value, _ in piramid.process(reverse=False):
print(category, Food(category, avg=False).name, value)
total_value += value
print("Total: ", total_value)
else:
piramid = PiramidFood(meat=meat, dataset=dataset, categories=categories,
weight_nutrs=weight_nutrs, radio_omega=radio_omega, energy=True)
if piramid_print:
piramid.build_piramid()
else:
total_value = 0
total_energy = 0
total_weight = 800
for category, value, energy in piramid.process(reverse=False):
weight = total_weight * (value / 100)
energy_weight = ((weight * energy) / 100)
print(piramid.categories_name.get(category, category), "{}%".format(value), "{}g".format(int(round(weight, 0))), "{}kcal".format(round(energy_weight, 2)))
total_value += value
total_energy += energy_weight
print("Total: ", "{}%".format(total_value), "{}kcal".format(int(round(total_energy, 0))))
def datasets(self, category, dataset):
from nutrientes.utils import get_fooddescimg, alimentos_category
if dataset == "foodimg":
return (ndb_no[0] for ndb_no in get_fooddescimg(category=category))
else:
return (ndb_no for ndb_no, _ in alimentos_category(category, limit='limit 9000'))
| [
"mara80@gmail.com"
] | mara80@gmail.com |
7584d501340c304a1054dff77e8439addda74d78 | 53e9fc57ac1a1d1b8621150c2f3a091d0ff86476 | /blog/migrations/0005_remove_post_image.py | 81a7a69c6fe17ea4fac936f9a7314532a99529ff | [] | no_license | rdahal35/DjangoBlog | 4babfec4d6589c61647127482487c91275e99381 | 2ca753d81ffcdc07e4c43d7f429c14a7bb2cc4a5 | refs/heads/master | 2020-03-19T17:08:30.485947 | 2018-06-09T17:56:35 | 2018-06-09T17:56:35 | 136,745,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | # Generated by Django 2.0.6 on 2018-06-09 11:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20180609_1704'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='image',
),
]
| [
"rdahal35@gmail.com"
] | rdahal35@gmail.com |
1de6cc075cd647956bdf5d8f3d817ae1730128c2 | 45da19631c7c88e559a02d976d8227d31ace5f04 | /env_event_service/bin/pip3.7 2 | fb81b812a9660301be35a5f8c8e85589c2785d53 | [] | no_license | tebbythomas/Event_Service_API_Django_Rest_Framework | 104adec9c92eeae7609038b55396cef9029dc99b | 5efe1aafc292e9a0a47b273955e84461c89debe7 | refs/heads/master | 2021-09-28T06:08:25.648630 | 2020-03-25T00:15:25 | 2020-03-25T00:15:25 | 249,147,830 | 1 | 1 | null | 2021-09-22T18:47:21 | 2020-03-22T09:08:14 | Python | UTF-8 | Python | false | false | 291 | #!/Users/tebbythomas/Documents/Practice/Event-Service/Second_Try/env_event_service/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"tebby.thomas@gmail.com"
] | tebby.thomas@gmail.com | |
ea427416629c0dbb96e7d348707ed07f931f09a3 | c273bf08d42c5c038a355a721972848a4847682c | /openpyxl/styles/tests/test_named_style.py | 395172576232ff654448c5ef3f00164488c52fa3 | [
"MIT",
"BSD-2-Clause",
"PSF-2.0"
] | permissive | BlueGreenMagick/sync-excel-with-anki | 31e39ae07c7360d5abab1357cb26bc629b4adb87 | b78322899fec66b4a0b9b835f173057a844fe3ce | refs/heads/master | 2021-09-26T04:37:45.538223 | 2021-09-20T05:11:03 | 2021-09-20T05:11:03 | 196,643,205 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,467 | py | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
import pytest
from array import array
from ..fonts import Font
from ..borders import Border
from ..fills import PatternFill
from ..alignment import Alignment
from ..protection import Protection
from ..cell_style import CellStyle, StyleArray
from openpyxl import Workbook
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def NamedStyle():
from ..named_styles import NamedStyle
return NamedStyle
class TestNamedStyle:
def test_ctor(self, NamedStyle):
style = NamedStyle()
assert style.font == Font()
assert style.border == Border()
assert style.fill == PatternFill()
assert style.protection == Protection()
assert style.alignment == Alignment()
assert style.number_format == "General"
assert style._wb is None
def test_dict(self, NamedStyle):
style = NamedStyle()
assert dict(style) == {'name':'Normal', 'hidden':'0', 'xfId':'0'}
def test_bind(self, NamedStyle):
style = NamedStyle(xfId=0)
wb = Workbook()
style.bind(wb)
assert style._wb is wb
def test_as_tuple(self, NamedStyle):
style = NamedStyle()
assert style.as_tuple() == array('i', (0, 0, 0, 0, 0, 0, 0, 0, 0))
def test_as_xf(self, NamedStyle):
style = NamedStyle(xfId=0)
style.alignment = Alignment(horizontal="left")
xf = style.as_xf()
assert xf == CellStyle(numFmtId=0, fontId=0, fillId=0, borderId=0,
applyNumberFormat=None,
applyFont=None,
applyFill=None,
applyBorder=None,
applyAlignment=True,
applyProtection=None,
alignment=Alignment(horizontal="left"),
protection=None,
)
def test_as_name(self, NamedStyle, _NamedCellStyle):
style = NamedStyle(xfId=0)
name = style.as_name()
assert name == _NamedCellStyle(name='Normal', xfId=0, hidden=False)
@pytest.mark.parametrize("attr, key, collection, expected",
[
('font', 'fontId', '_fonts', 0),
('fill', 'fillId', '_fills', 0),
('border', 'borderId', '_borders', 0),
('alignment', 'alignmentId', '_alignments', 0),
('protection', 'protectionId', '_protections', 0),
('number_format', 'numFmtId', '_number_formats', 164),
]
)
def test_recalculate(self, NamedStyle, attr, key, collection, expected):
style = NamedStyle(xfId=0)
wb = Workbook()
wb._number_formats.append("###")
style.bind(wb)
style._style = StyleArray([1, 1, 1, 1, 1, 1, 1, 1, 1])
obj = getattr(wb, collection)[0]
setattr(style, attr, obj)
assert getattr(style._style, key) == expected
@pytest.fixture
def _NamedCellStyle():
from ..named_styles import _NamedCellStyle
return _NamedCellStyle
class TestNamedCellStyle:
def test_ctor(self, _NamedCellStyle):
named_style = _NamedCellStyle(xfId=0, name="Normal", builtinId=0)
xml = tostring(named_style.to_tree())
expected = """
<cellStyle name="Normal" xfId="0" builtinId="0"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, _NamedCellStyle):
src = """
<cellStyle name="Followed Hyperlink" xfId="10" builtinId="9" hidden="1"/>
"""
node = fromstring(src)
named_style = _NamedCellStyle.from_tree(node)
assert named_style == _NamedCellStyle(
name="Followed Hyperlink",
xfId=10,
builtinId=9,
hidden=True
)
@pytest.fixture
def _NamedCellStyleList():
from ..named_styles import _NamedCellStyleList
return _NamedCellStyleList
class TestNamedCellStyleList:
def test_ctor(self, _NamedCellStyleList):
styles = _NamedCellStyleList()
xml = tostring(styles.to_tree())
expected = """
<cellStyles count ="0" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, _NamedCellStyleList):
src = """
<cellStyles />
"""
node = fromstring(src)
styles = _NamedCellStyleList.from_tree(node)
assert styles == _NamedCellStyleList()
def test_styles(self, _NamedCellStyleList):
src = """
<cellStyles count="11">
<cellStyle name="Followed Hyperlink" xfId="2" builtinId="9" hidden="1"/>
<cellStyle name="Followed Hyperlink" xfId="4" builtinId="9" hidden="1"/>
<cellStyle name="Followed Hyperlink" xfId="6" builtinId="9" hidden="1"/>
<cellStyle name="Followed Hyperlink" xfId="8" builtinId="9" hidden="1"/>
<cellStyle name="Followed Hyperlink" xfId="10" builtinId="9" hidden="1"/>
<cellStyle name="Hyperlink" xfId="1" builtinId="8" hidden="1"/>
<cellStyle name="Hyperlink" xfId="3" builtinId="8" hidden="1"/>
<cellStyle name="Hyperlink" xfId="5" builtinId="8" hidden="1"/>
<cellStyle name="Hyperlink" xfId="7" builtinId="8" hidden="1"/>
<cellStyle name="Hyperlink" xfId="9" builtinId="8" hidden="1"/>
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
"""
node = fromstring(src)
styles = _NamedCellStyleList.from_tree(node)
assert [s.name for s in styles.names] == ['Normal', 'Hyperlink', 'Followed Hyperlink']
@pytest.fixture
def NamedStyleList():
from ..named_styles import NamedStyleList
return NamedStyleList
class TestNamedStyleList:
def test_append_valid(self, NamedStyle, NamedStyleList):
styles = NamedStyleList()
style = NamedStyle(name="special")
styles.append(style)
assert style in styles
def test_append_invalid(self, NamedStyleList):
styles = NamedStyleList()
with pytest.raises(TypeError):
styles.append(1)
def test_duplicate(self, NamedStyleList, NamedStyle):
styles = NamedStyleList()
style = NamedStyle(name="special")
styles.append(style)
with pytest.raises(ValueError):
styles.append(style)
def test_names(self, NamedStyleList, NamedStyle):
styles = NamedStyleList()
style = NamedStyle(name="special")
styles.append(style)
assert styles.names == ['special']
def test_idx(self, NamedStyleList, NamedStyle):
styles = NamedStyleList()
style = NamedStyle(name="special")
styles.append(style)
assert styles[0] == style
def test_key(self, NamedStyleList, NamedStyle):
styles = NamedStyleList()
style = NamedStyle(name="special")
styles.append(style)
assert styles['special'] == style
def test_key_error(self, NamedStyleList):
styles = NamedStyleList()
with pytest.raises(KeyError):
styles['special']
| [
"bluegreenmagick@gmail.com"
] | bluegreenmagick@gmail.com |
d72cacf7c7f70e32cd90cc03959c3278bfe13363 | 060967fa3e6e390ac0504172e6dea8421ffb9d98 | /2022/python2022/tests/test_day07.py | a8b087aa8e6560dbbbb77a0086a176a458ef3593 | [] | no_license | mreishus/aoc | 677afd18521b62c9fd141a45fec4b7bc844be259 | e89db235837d2d05848210a18c9c2a4456085570 | refs/heads/master | 2023-02-22T12:00:52.508701 | 2023-02-09T04:37:50 | 2023-02-09T04:39:44 | 159,991,022 | 16 | 3 | null | 2023-01-05T10:00:46 | 2018-12-01T22:00:22 | Python | UTF-8 | Python | false | false | 605 | py | #!/usr/bin/env python3
"""
Test Day07.
"""
import unittest
from aoc.day07 import Day07
class TestDay07(unittest.TestCase):
"""Test Day07."""
def test_part1(self):
"""Test part1"""
self.assertEqual(Day07.part1("../inputs/07/input_small.txt"), 95437)
self.assertEqual(Day07.part1("../inputs/07/input.txt"), 1428881)
def test_part2(self):
"""Test part2"""
self.assertEqual(Day07.part2("../inputs/07/input_small.txt"), 24933642)
self.assertEqual(Day07.part2("../inputs/07/input.txt"), 10475598)
if __name__ == "__main__":
unittest.main()
| [
"mreishus@users.noreply.github.com"
] | mreishus@users.noreply.github.com |
acc118a4715ec4af727822d1921170fc2f6b26e0 | de7cae42ec760bae3b532259b399a8c6df0394c9 | /wxPython/samples/wxPIA_book/Chapter-07/text_ctrl.py | 9cec352fbd8a629d850e9cc384c67349a3e3e0a5 | [] | no_license | nvaccess/wxPython | b70f91bd682b86e20434d0c69ac497dfbf875838 | 13cceab2a1891ab443e62078be729dc1e1e2e283 | refs/heads/master | 2020-05-31T00:11:36.079007 | 2016-07-12T09:25:04 | 2016-07-12T09:25:04 | 63,135,776 | 1 | 2 | null | 2016-07-18T02:43:08 | 2016-07-12T07:18:57 | C++ | UTF-8 | Python | false | false | 829 | py | import wx
class TextFrame(wx.Frame):
    """A small frame demonstrating a plain text control and a password control."""

    def __init__(self):
        wx.Frame.__init__(self, None, -1, 'Text Entry Example',
                          size=(300, 100))
        panel = wx.Panel(self, -1)

        # Plain single-line entry, pre-filled, caret parked at the start.
        plain_label = wx.StaticText(panel, -1, "Basic Control:")
        plain_text = wx.TextCtrl(panel, -1, "I've entered some text!",
                                 size=(175, -1))
        plain_text.SetInsertionPoint(0)

        # Password-style entry: characters are masked on screen.
        pwd_label = wx.StaticText(panel, -1, "Password:")
        pwd_text = wx.TextCtrl(panel, -1, "password", size=(175, -1),
                               style=wx.TE_PASSWORD)

        # Two-column grid: label on the left, control on the right.
        grid = wx.FlexGridSizer(cols=2, hgap=6, vgap=6)
        grid.AddMany([plain_label, plain_text, pwd_label, pwd_text])
        panel.SetSizer(grid)
if __name__ == '__main__':
    # Standard wxPython bootstrap: create the app, show the frame, run the loop.
    app = wx.App()
    frame = TextFrame()
    frame.Show()
    app.MainLoop()
| [
"robin@alldunn.com"
] | robin@alldunn.com |
7d73db95afd3e239759b48db5abfeb07099f16d8 | e5d5fa28999bcc6c642bb42dda93afd38e272b81 | /LeetCode/896. Monotonic Array/solve1.py | 97d3ce6c80618ac850aa2113559e14802a96df71 | [] | no_license | chiahsun/problem_solving | cd3105969983d16d3d5d416d4a0d5797d4b58e91 | 559fafa92dd5516058bdcea82a438eadf5aa1ede | refs/heads/master | 2023-02-05T06:11:27.536617 | 2023-01-26T10:51:23 | 2023-01-26T10:51:23 | 30,732,382 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | class Solution:
def isMonotonic(self, nums: List[int]) -> bool:
    """Return True when nums is entirely non-decreasing or non-increasing."""
    trend = 0  # 0 = undecided, 1 = increasing, -1 = decreasing
    for prev, cur in zip(nums, nums[1:]):
        if prev == cur:
            continue
        step = 1 if prev < cur else -1
        if trend == 0:
            trend = step
        elif trend != step:
            return False
    return True
| [
"chiahsun0814@gmail.com"
] | chiahsun0814@gmail.com |
bedba96a63dac694fb9424f406a9605be76814f2 | dbf76237e39087bf1a73243bbb019710182be0e4 | /Capitulo 2/34 - registro.py | f349a77b4ee48ef35e2ed8497c7377f556ee9e56 | [] | no_license | sandromelobrazil/Python_Para_Pentest | 52edd86fa5929e0303e60e9872c027aae564becd | 1837b523ad55e1c8ca066341459714e2fc88f037 | refs/heads/master | 2020-04-05T16:56:22.342925 | 2018-11-11T00:52:23 | 2018-11-11T00:52:23 | 157,035,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | #Adaptacao: https://stackoverflow.com/questions/3050262/change-browser-proxy-settings-from-python
import _winreg
# Open the current user's Internet Settings registry key with full access.
INTERNET_SETTINGS = _winreg.OpenKey(
    _winreg.HKEY_CURRENT_USER,
    r'Software\Microsoft\Windows\CurrentVersion\Internet Settings',
    0, _winreg.KEY_ALL_ACCESS)  # 1


def set_key(name, value, type):  # 2
    """Write one value under the Internet Settings key."""
    _winreg.SetValueEx(INTERNET_SETTINGS, name, 0, type, value)


# Enable the proxy, bypass it for local addresses, and point it at :8080.
set_key('ProxyEnable', 1, _winreg.REG_DWORD)  # 3
set_key('ProxyOverride', u'localhost; 192.168.0.*', _winreg.REG_SZ)  # 4
set_key('ProxyServer', u'localhost:8080', _winreg.REG_SZ)  # 5

# Notify WinInet that the proxy settings changed so running apps reload them.
# NOTE(review): 37/39 are presumably INTERNET_OPTION_REFRESH /
# INTERNET_OPTION_SETTINGS_CHANGED -- confirm against the WinInet docs.
import ctypes  # 6
internet_set_option = ctypes.windll.Wininet.InternetSetOptionW
internet_set_option(0, 37, 0, 0)
internet_set_option(0, 39, 0, 0)
"sandromelo.brazil@gmail.com"
] | sandromelo.brazil@gmail.com |
e18a9825f3647a229985ab9f79946fc76955d988 | 73d9b5664d6949140b13e92d8b91a01e8502752a | /good_spot/images/migrations/0003_placeimage_thumbnail.py | 15cd5861e6fed6085e8dbed866568f7800fccea3 | [
"MIT"
] | permissive | jasmine92122/NightClubBackend | 3ed46cce0f6b534b4b49829f53fe7cb6a42ae42e | 7f59129b78baaba0e0c25de2b493033b858f1b00 | refs/heads/master | 2022-11-23T00:42:25.606762 | 2019-10-02T01:56:29 | 2019-10-02T01:56:29 | 212,234,882 | 0 | 0 | MIT | 2022-11-22T02:10:16 | 2019-10-02T01:47:52 | JavaScript | UTF-8 | Python | false | false | 515 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-07 13:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds an optional `thumbnail` ImageField to the `placeimage` model."""

    dependencies = [
        ('images', '0002_auto_20180306_1923'),
    ]

    operations = [
        migrations.AddField(
            model_name='placeimage',
            name='thumbnail',
            # blank/null so existing rows need no backfill when this applies.
            field=models.ImageField(blank=True, null=True, upload_to='thumbnails/', verbose_name='Thumbnail'),
        ),
    ]
| [
"jasminegarcia111@outlook.com"
] | jasminegarcia111@outlook.com |
253eedac306b02cb121f2fe1a4189604cc420975 | c5e50dfd9bf1ce7ae7f5a56aed3abe95cd9184d2 | /classroom/models.py | fb5cfecb8aa5fc14152a1b507056c9bd1cff2b59 | [] | no_license | RicePad/COVID19-NurseHelper | d93fd1f3c0b8e0018a2039bd27b4f0ff32a5c115 | 87d7785027648b2515ca47edb219e3a3f2c97b9b | refs/heads/master | 2021-04-18T04:43:06.808455 | 2020-06-03T03:16:02 | 2020-06-03T03:16:02 | 249,505,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.html import escape, mark_safe
# Choices stored in the `payment_type` field of Nurse/Helper (the string
# key '1'/'2'/'3' is what is persisted).
# NOTE(review): the name suggests a payment method, but the options are food
# categories -- confirm the intended semantics with the sign-up flow.
PAYMENT_CHOICES = (
    ('1', 'Fruits'),
    ('2', 'Vegetables'),
    ('3', 'Dairy')
)
class User(AbstractUser):
    """Custom auth user carrying role flags for the two account types."""

    # Role flags; both default to False. Nothing at the model level prevents
    # both being set -- presumably the registration views keep them exclusive.
    is_helper = models.BooleanField(default=False)
    is_nurse = models.BooleanField(default=False)
class Nurse(models.Model):
    """Nurse profile; shares its primary key with the linked User row."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    # NOTE(review): "telepone" is a typo for "telephone"; renaming it requires
    # a schema migration, so the field name is left untouched here.
    telepone = models.IntegerField()
    city = models.CharField(max_length=100)
    address = models.CharField(max_length=100)
    # Stored as the choice key ('1'/'2'/'3') from PAYMENT_CHOICES.
    payment_type = models.CharField(choices=PAYMENT_CHOICES, max_length=50)
    description = models.TextField()

    def __str__(self):
        return self.user.username
class Helper(models.Model):
    """Helper profile; like Nurse but without a `city` field."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    # NOTE(review): "telepone" is a typo for "telephone" (see Nurse).
    telepone = models.IntegerField()
    address = models.CharField(max_length=100)
    # Stored as the choice key ('1'/'2'/'3') from PAYMENT_CHOICES.
    payment_type = models.CharField(choices=PAYMENT_CHOICES, max_length=50)
    description = models.TextField()

    def __str__(self):
        return self.user.username
| [
"jonn.thinkrtc@gmail.com"
] | jonn.thinkrtc@gmail.com |
0ece2a49283a025468b379bb013bc17a19f4bacf | 7b9813e6c805edfeca538b69bd79119db439f284 | /exp_mmdet/exp032/fold3_resume.py | eb3297cdd944e98bec4ea62f216be10eb9bb6b1d | [] | no_license | Ino-Ichan/SIIM-RSNA-Covid19-2021 | 61bfd0b2baef58f6b1673e02f45acaa998916a89 | caba038bbb403cb55753ecc68d5fb92ef93b1f8e | refs/heads/main | 2023-07-08T13:19:46.226739 | 2021-08-13T18:05:37 | 2021-08-13T18:05:37 | 373,910,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,460 | py | # exp = "exp006"
# Experiment bookkeeping: these names drive checkpoint paths and W&B run names.
exp = "exp032"
model_name = "retinanet_r50_fpn_1x_coco"
cv = "3"  # cross-validation fold id (kept as a string for f-string paths below)
# https://www.kaggle.com/sreevishnudamodaran/siim-mmdetection-cascadercnn-weight-bias
_base_ = "/workspace/customized_mmdetection/configs/retinanet/retinanet_r50_fpn_1x_coco.py"
# =============================================================
# schedule_1x
# =============================================================
# optimizer
optimizer = dict(type='SGD', lr=0.0005, momentum=0.9, weight_decay=0.0001)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1,
    warmup_ratio=0.001,
    step=[10, 15])
# ## Learning rate scheduler config used to register LrUpdater hook
# lr_config = dict(
#     policy='CosineAnnealing', # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9.
#     by_epoch=False,
#     warmup='linear', # The warmup policy, also support `exp` and `constant`.
#     warmup_iters=500, # The number of iterations for warmup
#     warmup_ratio=0.001, # The ratio of the starting learning rate used for warmup
#     min_lr=1e-07)
runner = dict(type='EpochBasedRunner', max_epochs=20)
# # fp16 settings
# fp16 = dict(loss_scale=512.)
# =============================================================
# default_runtime
# =============================================================
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='WandbLoggerHook',
             init_kwargs=dict(project='siim-rsna-covid19-2021-mmdet',
                              tags=[exp, f"cv{cv}", model_name],
                              name=f"{exp}_cv{cv}_{model_name}",
                              entity='inoichan'))
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
# Warm-start weights from the best fold-3 checkpoint of exp006; resume_from
# stays None so optimizer/epoch state is NOT restored, only the weights.
load_from = '/workspace/output/mmdet_exp006_cv3/best_bbox_mAP_50_epoch_19.pth'
resume_from = None
workflow = [('train', 1), ('val', 1)]
# =============================================================
# Model
# =============================================================
# model settings
model = dict(
    type='RetinaNet',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RetinaHead',
        num_classes=1,  # single foreground class: 'opacity'
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        # score_thr=0.05,
        # NOTE(review): threshold lowered from the default 0.05 -- presumably
        # to keep many low-confidence boxes for downstream ensembling; confirm.
        score_thr=0.001,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
# =============================================================
# coco_detection
# =============================================================
# dataset settings
dataset_type = 'CocoDataset'
classes = ('opacity',)
image_root = '/workspace/data/train_640_2/'
# img_norm_cfg = dict(
#     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Normalization maps pixel values to [0, 1] instead of ImageNet statistics.
img_norm_cfg = dict(
    mean=[0, 0, 0], std=[255, 255, 255], to_rgb=True)
albu_train_transforms = [
    dict(
        type='ShiftScaleRotate',
        shift_limit=0.,
        scale_limit=0.2,
        rotate_limit=15,
        interpolation=1,
        p=0.5),
    dict(
        type='RandomBrightnessContrast',
        brightness_limit=[0.0, 0.2],
        contrast_limit=[0.0, 0.2],
        p=0.5),
    dict(
        type='HueSaturationValue',
        hue_shift_limit=20,
        sat_shift_limit=20,
        val_shift_limit=0,
        p=0.1),
    dict(
        type='OneOf',
        transforms=[
            dict(type='Blur', blur_limit=3, p=1.0),
            dict(type='MedianBlur', blur_limit=3, p=1.0)
        ],
        p=0.1),
]
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    # dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='Resize', img_scale=(640, 640), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_labels'],
            min_visibility=0.0,
            filter_lost_elements=True),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        },
        update_pad_shape=False,
        skip_img_without_anno=True),
    dict(type='CutOut', n_holes=2, cutout_ratio=(0.2, 0.2)),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# val and test both point at the fold's validation annotations.
data = dict(
    samples_per_gpu=32,
    workers_per_gpu=12,
    train=dict(
        type=dataset_type,
        classes=classes,
        ann_file=f"/workspace/exp_mmdet/config/train_cv{cv}.json",
        img_prefix=image_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        classes=classes,
        ann_file=f"/workspace/exp_mmdet/config/val_cv{cv}.json",
        img_prefix=image_root,
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        classes=classes,
        ann_file=f"/workspace/exp_mmdet/config/val_cv{cv}.json",
        img_prefix=image_root,
        pipeline=test_pipeline))
# evaluation = dict(interval=1, metric='bbox', iou_thrs=[0.5], save_best='bbox_mAP')
evaluation = dict(interval=1, metric='bbox', save_best='bbox_mAP_50')
| [
"sys.b11noway@gmail.com"
] | sys.b11noway@gmail.com |
38ab17175b731a93f2859eac59fcbf8a7bce4c89 | 66124f7876fb3f26d5c6c2617be023af03c2d135 | /leet_code/distribute_coins_in_binary_tree.py | 46e7fecd3858d28d9ad2f2a9739b7ddc062023c7 | [] | no_license | s-surineni/atice | 494568c923a2a11531b3d12d694928d466b37d32 | 779c14fbefe1066d17cbe0f4cf1eeb8572a6d910 | refs/heads/master | 2021-12-11T03:22:50.177452 | 2021-12-05T06:32:43 | 2021-12-05T06:32:43 | 13,205,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | def distribute_coins_ib_binary_tree(root):
def distribute_coins_ib_binary_tree(root):
    """Return (moves, excess) for "Distribute Coins in Binary Tree" (LC 979).

    moves  -- minimum number of single-coin moves needed within this subtree
    excess -- coins this subtree must push up to (positive) or pull down from
              (negative) its parent: total coins minus number of nodes

    BUG FIX: the original computed moves = l_moves + r_moves only, so the
    move count was always 0.  Each unit of a child's excess costs exactly one
    move across the edge to that child, i.e. abs(l_excess) + abs(r_excess).
    """
    if not root:
        return 0, 0
    l_moves, l_excess = distribute_coins_ib_binary_tree(root.left)
    r_moves, r_excess = distribute_coins_ib_binary_tree(root.right)
    moves = l_moves + r_moves + abs(l_excess) + abs(r_excess)
    return moves, (root.val - 1) + l_excess + r_excess
| [
"sampath.skt@gmail.com"
] | sampath.skt@gmail.com |
66a3f36e1fc31f8b33ce3e271f64b600964edf54 | ce55c319f5a78b69fefc63595d433864a2e531b5 | /爬虫知识/爬虫/10day/03-selenium登录豆瓣.py | d72e0d70592aa91e0fa0f1d18881b4fd491e13e7 | [] | no_license | Suijng/1809_data | a072c875e8746190e3b715e53f1afe3323f4666b | 45f8a57089f5c30ccc1a3cddb03b76dc95355417 | refs/heads/master | 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 | HTML | UTF-8 | Python | false | false | 807 | py | from selenium import webdriver
import requests
# Log in to douban.com, pausing for a hand-typed captcha when one is shown.
driver = webdriver.Chrome(executable_path='/home/sj/桌面/爬虫/chromedriver')
driver.get('https://www.douban.com/')

# NOTE(review): credentials are hard-coded -- move them out of the source.
driver.find_element_by_id('form_email').send_keys('1627765913@qq.com')
driver.find_element_by_id('form_password').send_keys('sj7845464@')

# The captcha field only appears in the page source when a captcha is required.
if driver.page_source.find('captcha_field') == -1:
    driver.find_element_by_class_name('bn-submit').click()
else:
    # Download the captcha image, save it locally, and ask the user to read it.
    url = driver.find_element_by_xpath('//img[@class="captcha_image"]').get_attribute('src')
    with open('yzm.png', 'wb') as f:
        f.write(requests.get(url).content)
    code = input('请输入验证码:')
    driver.find_element_by_id('captcha_field').send_keys(code)
    driver.find_element_by_class_name('bn-submit').click()
| [
"1627765913@qq.com"
] | 1627765913@qq.com |
24242919d0b3cf89d9afcd93f173f8419091a74c | 6c8c2c3f2798ac060173c571d965b388c6ed269b | /bin/generateschema.py | a9f22a4380a46a4f55f58f367764074c3850898a | [] | no_license | hsolbrig/ShEx | 441cbb649e1e7cbad9ea1fbf1a9bddb814ed7510 | c47aa340ebd76c2c53b921acd6c43f7a1c91381e | refs/heads/master | 2021-01-19T09:44:26.538573 | 2014-05-23T19:55:43 | 2014-05-23T19:55:43 | 19,274,443 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/local/bin/python
# Copied from pyxbgen 1.2.3
# Stdlib imports first. BUG FIX: `sys` and `traceback` were originally
# imported *after* the `sys.exit(1)` call below, so hitting the empty
# namespace branch raised NameError instead of exiting cleanly.
import logging
import sys
import traceback

import pyxb.xmlschema
import pyxb.binding.generate
import pyxb.utils.utility
import pyxb.utils.domutils

# Canned pyxbgen command line: read ShEx.xsd from the schema root and write
# the generated `ShEx` binding module under ../ShEx/schema.
schemaroot = "../ShEx/static/xsd/"
cmdline = '--schema-root %s -u ShEx.xsd -m ShEx --binding-root=../ShEx/schema' % schemaroot

logging.basicConfig()
log_ = logging.getLogger(__name__)

# Configure the PyXB generator from the command line and resolve the schema.
generator = pyxb.binding.generate.Generator()
parser = generator.optionParser()
(options, args) = parser.parse_args(cmdline.split())
generator.applyOptionValues(options, args)
generator.resolveExternalSchema()

if 0 == len(generator.namespaces()):
    parser.print_help()
    sys.exit(1)

# Save binding source first, so name-in-binding is stored in the
# parsed schema file
try:
    tns = generator.namespaces().pop()
    modules = generator.bindingModules()
    print ('Python for %s requires %d modules' % (tns, len(modules)))
    top_module = None
    path_dirs = set()
    for m in modules:
        m.writeToModuleFile()
    generator.writeNamespaceArchive()
except Exception as e:
    print ('Exception generating bindings: %s' % (e,))
    traceback.print_exception(*sys.exc_info())
    sys.exit(3)
# LocalVariables:
# mode:python
# End:
| [
"solbrig.harold@mayo.edu"
] | solbrig.harold@mayo.edu |
12f1aae11a411a52b37520abd60dc0a20b61ee55 | 89213af925471c5954a12d0fe5bb47dfd988c351 | /bst/0109_convert_sorted_ll_to_bst.py | b3a3b00a3f55335e986d2a62f0869a006e7fdd65 | [] | no_license | seanchen513/leetcode | be554dd668221b6d03c598090d6684165bc512c5 | 4723a64b00502c824bb9b848a1737478096aa3e1 | refs/heads/master | 2021-11-10T11:50:16.674255 | 2021-11-10T02:57:02 | 2021-11-10T02:57:02 | 237,393,266 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,082 | py | """
109. Convert Sorted List to Binary Search Tree
Medium
Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted linked list: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5
"""
import sys
sys.path.insert(1, '../tree/')
sys.path.insert(1, '../linked_list/')
from binary_tree import TreeNode, print_tree #, array_to_bt
from linked_list import ListNode, build_ll
###############################################################################
"""
Solution #1: recursion by passing head and count of current sublist.
Don't convert linked list to array.
n/2 time to find first mid
2*(n/4) = n/2 time to find the 2 mids at 2nd level of recursion
4*(n/8) = n/2 time to find the 4 mids at 3rd level of recursion
etc.
O(log n) levels of recursion since tree is height-balanced
Total time = (log n)*(n/2) = O(n log n)
O(n log n) time
O(n) extra space for tree that is built and returned
O(log n) for recursion stack since tree is height-balanced
"""
def build_bst(head):
    """Build a height-balanced BST from a sorted singly linked list.

    Recurses on (start node, sublist length) pairs: each call walks to the
    middle of its sublist, makes that node the root, and recurses on both
    halves.  The tree is left-biased.  O(n log n) time, O(log n) stack.
    """
    def build(start, length):
        if length == 0:
            return None
        half = length // 2  # left-biased split
        mid = start
        for _ in range(half):
            mid = mid.next
        node = TreeNode(mid.val)
        node.left = build(start, half)
        node.right = build(mid.next, length - half - 1)
        return node

    length = 0
    cur = head
    while cur:
        length += 1
        cur = cur.next
    return build(head, length)
"""
count = 1
mid_count = 0
root = mid = head
root.left = build(head, 0) # returns None
root.right = build(mid.next, 1 - 0 - 1 = 0) # returns None
count = 2
mid_count = 1
root = mid = head.next # 2nd of 2 elts
root.left = build(head, 1) # returns 1st of 2 elts
root.right = build(None, 2 - 1 - 1 = 0) # returns None
"""
###############################################################################
"""
Solution #2: inorder construction of BST while traversing linked list.
Traversal of the sorted linked list visits the values in the same order as an
inorder traversal of any BST corresponding to the sorted values.
So we can do an inorder construction of the BST while travering the linked
list at the same time.
Recursively:
The first element of the list is the leftmost descendant of the root.
When the left half is built, the "head" in the linked list is pointing at
the middle node of the list, which is used to create the root of the tree.
The remaining nodes of the list are used to build the right subtree.
O(n) time
O() extra space
"""
def build_bst2(head):
    """Build a height-balanced BST from a sorted linked list in O(n).

    Inorder construction: the list is consumed left-to-right while the tree
    is assembled bottom-up, so each list node is visited exactly once.  The
    tree is right-biased.
    """
    length = 0
    cur = head
    while cur:
        length += 1
        cur = cur.next

    def build(lo, hi):
        nonlocal head
        if lo > hi:
            return None
        mid = (lo + hi) // 2  # right-biased split
        left_subtree = build(lo, mid - 1)
        node = TreeNode(head.val)   # head now points at the mid element
        node.left = left_subtree
        head = head.next
        node.right = build(mid + 1, hi)
        return node

    return build(0, length - 1)
###############################################################################
if __name__ == "__main__":
    def test(arr):
        """Build a list from arr, convert it to a BST, and pretty-print it."""
        head, _ = build_ll(arr)
        #root = build_bst(head)
        root = build_bst2(head)
        print("#" * 80)
        print(arr)
        print()
        print_tree(root)

    cases = [
        [],
        [0],
        [1],
        [1, 2],
        [1, 2, 3],
        [1, 2, 3, 4],
        [1, 2, 3, 4, 5],
        [1, 2, 3, 4, 5, 6, 7, 8, 9],
        [-10, -3, 0, 5, 9],  # LC
    ]
    for arr in cases:
        test(arr)
| [
"seanchen513@gmail.com"
] | seanchen513@gmail.com |
aa7871fad8d1e98c8a8ff9178264d583801e6d97 | 6ab3d02c6b5426cd122b3d3c7b31faee7ea917d4 | /tree_countLeaves.py | e7aa9681de9fd78b4b69fc53662920cf06e792d1 | [] | no_license | AishwaryalakshmiSureshKumar/DS-Algo | e54967ed24c641059fe15b286359f1b71141eeff | a624b29182c92b5fa8017aae597eb4ad2475deae | refs/heads/main | 2023-04-21T17:17:10.342833 | 2021-04-18T18:03:57 | 2021-04-18T18:03:57 | 356,888,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | def countLeaves(root):
def countLeaves(root):
    """Return the number of leaf nodes (no children) in the tree at root."""
    if root is None:
        return 0
    if root.left is None and root.right is None:
        return 1
    return countLeaves(root.left) + countLeaves(root.right)
class Tree:
    """Minimal binary-tree node: a value plus two (initially empty) children."""

    def __init__(self, val):
        self.data = val
        self.left = None
        self.right = None
# Build a small sample tree and print its leaf count.
# Leaves are the nodes 3, 5 and 1, so this prints 3.
root = Tree(4)
root.left = Tree(8)
root.right = Tree(10)
root.left.left = Tree(7)
root.left.left.left = Tree(3)
root.right.left = Tree(5)
root.right.right = Tree(1)
print(countLeaves(root))
| [
"noreply@github.com"
] | AishwaryalakshmiSureshKumar.noreply@github.com |
719ae4275c713c5afced1364bf4100d1c33bd849 | f6fd5e6ed1ba59f2cefefd4859bea35212ede00e | /p35.py | ee8f0edf72221853da900398415432726cc507e9 | [] | no_license | comorina/Ducat_class_code | f1d03ab3e470977899c9f2c860470dc3f843eae9 | e105847f00d9599afea9132902da3d393b006b58 | refs/heads/main | 2023-04-14T01:47:02.588049 | 2021-04-29T13:58:39 | 2021-04-29T13:58:39 | 345,077,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | cp=int(input("enter cp:"))
sp = int(input("enter sp:"))
# Compare selling price against the cost price read above.
if sp > cp:
    print("profit", sp - cp)
elif cp > sp:
    print("loss", cp - sp)
else:
    print("No profit no loss")
| [
"noreply@github.com"
] | comorina.noreply@github.com |
bd9cdda5700e2d847320c7028287cddbac28518f | 165ff8f98e6a49d43bda300255790f8c91ef0aad | /payments_service/settings.py | 16ff5853f24be3cc4080c8b56139b65859e830dd | [
"MIT"
] | permissive | gbozee/now-payments | ec624b04e9d8327160c647b39a9a14214c9e7a48 | a21767027d9ee5303c2d0220c7983196c4674b81 | refs/heads/master | 2023-08-04T08:33:17.979516 | 2021-05-27T08:31:39 | 2021-05-27T08:31:39 | 263,529,637 | 1 | 1 | MIT | 2023-02-14T21:59:07 | 2020-05-13T04:59:27 | Python | UTF-8 | Python | false | false | 320 | py | from starlette.config import Config
from starlette.datastructures import Secret, URL
# Service settings, resolved from the environment with ".env" as fallback.
config = Config(".env")
# Debug defaults to True, so production deployments must set DEBUG=false.
DEBUG = config("DEBUG", cast=bool, default=True)
# Required (no default): identifier of the payments sheet -- presumably a
# spreadsheet id consumed by the sheet service; confirm with callers.
PAYMENT_SHEET = config("PAYMENT_SHEET")
# Required (no default): location of the companion "now sheet" service.
NOW_SHEET_SERVICE = config("NOW_SHEET_SERVICE")
# Public base URL of this service.
HOST_URL = config("HOST_URL", default="http://localhost:8000")
| [
"gbozee@gmail.com"
] | gbozee@gmail.com |
02950a804aa1cdc12500489489dc16580ef4173d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/418/usersdata/306/83627/submittedfiles/av1_programa2.py | 81393241d4148641b4a44292751241643f24b419 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # -*- coding: utf-8 -*-
# Reads a student's registration number, three exam grades and the exercise
# average, then prints the letter grade of the weighted mean (weights 1,2,3,1).
mat = int(input("---------------------------------\nDigite a matricula do aluno: "))
nota1 = float(input("\n---------------------------------\nDigite a primeira nota do aluno: "))
nota2 = float(input("\n---------------------------------\nDigite a segunda nota do aluno: "))
nota3 = float(input("\n---------------------------------\nDigite a terceira nota do aluno: "))
me = float(input("\n---------------------------------\nDigite a dos exercícios do aluno: "))
ma = (nota1 + nota2 * 2 + nota3 * 3 + me) / 7
# BUG FIX: the original called print(A) ... print(E) on undefined names,
# raising NameError; the letter grades must be string literals.  The elif
# chain also makes the redundant range checks (ma<9 and ..., etc.) unneeded.
if ma >= 9:
    print('A')
elif ma >= 7.5:
    print('B')
elif ma >= 6:
    print('C')
elif ma >= 4:
    print('D')
else:
    print('E')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
123d65c4f450b8119cbcce835a6c24f506dc77f7 | 54ee247b1053c122c7c6390319f915236e589545 | /ContinuedFractions.py | 0b036dd2ae1031f20c5e24ebde2c7d222f5327f3 | [] | no_license | kelvinblaser/EulerProject | b92fc9b16830148a10b7330cc3fb8dfb90bfe433 | 023ec34da77000197b09ba407f81651ff2afda6a | refs/heads/master | 2023-08-16T21:39:02.031873 | 2023-08-15T04:40:37 | 2023-08-15T04:40:37 | 169,328,005 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | """Continued Fraction
Functions useful for working with continued fractions
"""
import math
from fractions import Fraction

import scipy as sp
def sqrtContFrac(n):
    """Compute the periodic continued fraction of sqrt(n).

    For non-square n returns ``[a0, a1, ..., (p0, p1, ...)]`` where the
    trailing tuple is the repeating period, e.g. sqrtContFrac(23) gives
    [4, (1, 3, 1, 8)].

    Improvement over the original: uses exact integer arithmetic
    (math.isqrt and the standard b/c recurrence) instead of floating-point
    sqrt/floor, so the result stays correct for large n where a float
    sqrt loses precision.  Still undefined for perfect squares -- don't
    call it with one.
    """
    a0 = math.isqrt(n)
    a = [a0]
    b, c = 0, 1
    seen = []  # states (a_k, b_k, c_k) in order of first appearance
    while (a[-1], b, c) not in seen:
        seen.append((a[-1], b, c))
        b = c * a[-1] - b
        c = (n - b * b) // c
        # floor((sqrt(n) + b) / c) == (a0 + b) // c when sqrt(n) is irrational
        a.append((a0 + b) // c)
    repeat_index = seen.index((a[-1], b, c))
    a.pop()
    result = a[:repeat_index]
    result.append(tuple(a[repeat_index:]))
    return result
"""Given a list of numbers continued fraction coefficients, returns the
convergent as a fraction
"""
pass
| [
"kelvinblaser@gmail.com"
] | kelvinblaser@gmail.com |
2dc0933af950155994691b585261e540da161bf9 | 2bcf18252fa9144ece3e824834ac0e117ad0bdf3 | /Kraken/tags/0.2/Kraken.py | 35c10a384501504ba4eeaad8b3301c1a8d6acda3 | [
"Beerware"
] | permissive | chadwhitacre/public | 32f65ba8e35d38c69ed4d0edd333283a239c5e1d | 0c67fd7ec8bce1d8c56c7ff3506f31a99362b502 | refs/heads/master | 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,752 | py | # ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE":
# <chad@zetaweb.com> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. --Chad Whitacre
# ----------------------------------------------------------------------------
import os, re, imaplib, smtplib, email
from os.path import join
from ConfigParser import SafeConfigParser as ConfigParser
class Kraken:
"""
This guy polls an IMAP account. If a new message is from a list member, then
it forwards the message along to the rest of the list. Otherwise it moves
the message to the trash. Usage:
>>> FROM = 'From: Chad Whitacre <douglas.wicker@pncyeah.com>'
>>> pattern = r'From:.* <?(.*@.*\..*)>?$'
>>> from_addr = re.search(pattern, FROM).group(1)
>>> print from_addr
#>>> from Kraken import Kraken
#>>> k = Kraken()
#>>> k.release()
"""
def __init__(self, root='.'):
""" read in config info """
conf_path = join(root,'conf/kraken.conf')
send_path = join(root,'conf/send_to.conf')
from_path = join(root,'conf/also_accept_from.conf')
cp = ConfigParser()
cp.read(conf_path)
self.imap = dict(cp.items('imap'))
self.smtp = dict(cp.items('smtp'))
self.list_addr = cp.get('default', 'list_addr')
self.send_to = self.addrs(send_path)
self.accept_from = self.send_to + \
self.addrs(from_path)
def addrs(self, fn):
""" given a filename, return a list of email addresses """
raw = file(fn).read()
lines = [l.strip() for l in raw.split(os.linesep)]
return [l for l in lines
if not l.startswith('#') and
not l == '']
# maybe eventually validate email addresses
def release(self):
""" get all mail from our inbox and process """
imap = self.imap
smtp = self.smtp
# open the IMAP connection and get everything in the INBOX
if imap['secure'] == 'True':
raise 'NotImplemented', 'secure IMAP is not implemented yet'
else:
M = imaplib.IMAP4(imap['server'], int(imap['port']))
M.login(imap['username'], imap['password'])
M.select()
typ, raw = M.search(None, 'ALL')
msg_nums = raw[0].split()
if len(msg_nums) == 0:
# print '/me twiddles its thumbs'
# only do something -- and only tell us -- if you actually
# have something to do
return
else:
i_good = i_bad = 0
for num in msg_nums:
# get the From header
typ, raw = M.fetch(num, '(BODY[HEADER.FIELDS (FROM)])')
FROM = raw[0][1]
pattern = r'From:.* <?(.*@.*\.[A-Za-z]*)>?'
from_addr = re.search(pattern, FROM).group(1)
# and compare it to our membership lists
if from_addr in self.accept_from:
# get the raw email
typ, raw = M.fetch(num, '(RFC822)')
raw = raw[0][1]
msg = email.message_from_string(raw)
# tweak the headers
try:
msg.replace_header('Reply-To', self.list_addr)
except KeyError:
msg.__setitem__('Reply-To', self.list_addr)
msg.add_header('X-Released-By','THE KRAKEN!!!!!!!!1')
# and pass it on!
if smtp['secure'] == 'True':
raise 'NotImplemented', 'secure SMTP is not implemented yet'
else:
server = smtplib.SMTP(smtp['server'],smtp['port'])
server.login(smtp['username'],smtp['password'])
server.sendmail(self.list_addr,self.send_to,msg.__str__())
server.quit()
# and move to archive
M.copy(num, 'Archive')
M.store(num, 'FLAGS.SILENT', '(\Deleted)')
i_good += 1
else:
# move it to trash!
M.copy(num, 'Trash')
M.store(num, 'FLAGS.SILENT', '(\Deleted)')
i_bad += 1
M.close()
M.logout()
print 'approved %s; rejected %s' % (i_good, i_bad)
def _test():
import doctest, Kraken
return doctest.testmod(Kraken)
if __name__ == "__main__":
_test()
| [
"chad@zetaweb.com"
] | chad@zetaweb.com |
5de272d4702a3247e8cdfd7515c75e6893e5651c | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/batch_delete_instance_topic_resp_topics.py | 63877b1c5f0f410420d96874594e4fbea55b5cf0 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,502 | py | # coding: utf-8
import pprint
import re
import six
class BatchDeleteInstanceTopicRespTopics:
    """Per-topic result entry in a batch topic-deletion response.

    Attributes:
        openapi_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in the API payload.
    """

    # Attributes masked as "****" in to_dict(); none for this model.
    sensitive_list = []

    openapi_types = {
        'id': 'str',
        'success': 'bool'
    }

    attribute_map = {
        'id': 'id',
        'success': 'success'
    }

    def __init__(self, id=None, success=None):
        """BatchDeleteInstanceTopicRespTopics - a model defined in huaweicloud sdk"""
        self._id = None
        self._success = None
        self.discriminator = None

        # Only route through the setters when a value was actually supplied.
        if id is not None:
            self.id = id
        if success is not None:
            self.success = success

    @property
    def id(self):
        """Gets the id of this BatchDeleteInstanceTopicRespTopics.

        Topic name.

        :return: The id of this BatchDeleteInstanceTopicRespTopics.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this BatchDeleteInstanceTopicRespTopics.

        Topic name.

        :param id: The id of this BatchDeleteInstanceTopicRespTopics.
        :type: str
        """
        self._id = id

    @property
    def success(self):
        """Gets the success of this BatchDeleteInstanceTopicRespTopics.

        Whether the deletion succeeded.

        :return: The success of this BatchDeleteInstanceTopicRespTopics.
        :rtype: bool
        """
        return self._success

    @success.setter
    def success(self, success):
        """Sets the success of this BatchDeleteInstanceTopicRespTopics.

        Whether the deletion succeeded.

        :param success: The success of this BatchDeleteInstanceTopicRespTopics.
        :type: bool
        """
        self._success = success

    def to_dict(self):
        """Return the model serialized as a plain dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, inside lists and dict values as well; attributes
        listed in ``sensitive_list`` are masked.
        """
        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {
                    key: item.to_dict() if hasattr(item, "to_dict") else item
                    for key, item in value.items()
                }
            elif name in self.sensitive_list:
                serialized[name] = "****"
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the serialized model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when the other object is the same model type with equal state."""
        return (isinstance(other, BatchDeleteInstanceTopicRespTopics)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
72a306642711ce2e9c61849608f07d7ebf71ec0a | 41586d36dd07c06860b9808c760e2b0212ed846b | /multimedia/misc/liblqr/actions.py | 7fd115840873e6d278dd9c345847ef56e0bc0414 | [] | no_license | SulinOS/SulinRepository | 4d5551861f57bc1f4bec6879dfe28ce68c7c125d | 9686811a1e06080f63199233561a922fe1f78d67 | refs/heads/master | 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 | Python | UTF-8 | Python | false | false | 584 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
# The upstream tarball unpacks to "<srcname>-1-<version>" instead of the
# default "<srcname>-<version>" layout.
WorkDir = "%s-1-%s" % (get.srcNAME(), get.srcVERSION())
def setup():
    """Configure the source tree; static libraries are disabled."""
    autotools.configure("--disable-static")
def build():
    """Compile using the default make targets."""
    autotools.make()
def install():
    """Install into the package root and register documentation files."""
    autotools.install()
    # Remove installed man pages. NOTE(review): presumably they are shipped
    # elsewhere or unwanted in this package — confirm against packaging policy.
    inarytools.removeDir("/usr/share/man")
    inarytools.dodoc("AUTHORS", "COPYING*", "ChangeLog", "README", "NEWS")
| [
"zaryob.dev@gmail.com"
] | zaryob.dev@gmail.com |
06a80e72bb1d7f32115610a482e88c74e1878a2b | 49bbe24c3686fc99bce6bd97c412b9c82b952292 | /groups/urls.py | b1a84d72d53dddb0953f9a4469e3203614e2ac6d | [] | no_license | sokolovdp/social | 5e446176334118b41690771c6037e43c72e205f4 | 103bd82320939e91fa3afee31a5fb4548d937880 | refs/heads/master | 2021-06-27T01:42:54.272941 | 2017-09-14T15:22:05 | 2017-09-14T15:22:05 | 103,045,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # GROUP URLS.PY
from django.conf.urls import url
from . import views
# URL namespace: reverse these routes as 'groups:<name>'.
app_name = 'groups'

urlpatterns = [
    # List all groups.
    url(r'^$', views.ListGroups.as_view(), name='all'),
    # Create a new group.
    url(r'^new/$', views.CreateGroup.as_view(), name='create'),
    # Detail page for one group, addressed by its slug.
    url(r'^posts/in/(?P<slug>[-\w]+)/$', views.SingleGroup.as_view(), name='single'),
    # Membership actions: join or leave a group by slug.
    url(r'^join/(?P<slug>[-\w]+)/$', views.JoinGroup.as_view(), name='join'),
    url(r'^leave/(?P<slug>[-\w]+)/$', views.LeaveGroup.as_view(), name='leave'),
]
| [
"sokolovdp@gmail.com"
] | sokolovdp@gmail.com |
cc4f7bf99ed535d09dabc2addf4fd18aa9234f32 | d0404c2410e95b03c14887e7c292b6ad46e74fed | /frontend_backend_servers/backend/modules/api/routes/change_routes.py | 8082b363135894b56e54df669a8835d29b246070 | [] | no_license | Vadbeg/networks-and-info-security | c947524e58f6bfddfe5d8d35d9cab0a4d30eaebc | 6409677d9f68cb9809124d2997d2f2e4bd7a0313 | refs/heads/master | 2023-04-20T05:57:22.965738 | 2021-05-07T19:44:43 | 2021-05-07T19:44:43 | 336,023,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,814 | py | """Module with routes for Flask application"""
import os
from datetime import datetime
from flask import (Blueprint, Response,
make_response, jsonify,
request, abort)
try:
# Used for server setup using command line
from frontend_backend_servers.backend.modules.api.database_connection import connection, cursor
from frontend_backend_servers.backend.modules.api.status_codes import StatusCodes
from frontend_backend_servers.backend.modules.database.database_interactions import (close_connection,
connect_to_database)
from frontend_backend_servers.backend.modules.database.document import Document
from frontend_backend_servers.backend.modules.database.user import User
from frontend_backend_servers.backend.modules.database.task import Task
from frontend_backend_servers.backend.modules.database.factory import Factory
from frontend_backend_servers.backend.modules.api.schemas import (AddNewUser, AddNewDocument,
AddNewTask, UpdateTableSchema,
AddNewFactory)
except ModuleNotFoundError as err:
# Used for server setup using Docker
from modules.api.database_connection import connection, cursor
from modules.api.status_codes import StatusCodes
from modules.database.database_interactions import close_connection, connect_to_database
from modules.database.document import Document
from modules.database.user import User
from modules.database.task import Task
from modules.database.factory import Factory
from modules.api.schemas import (AddNewUser, AddNewDocument,
AddNewTask, UpdateTableSchema,
AddNewFactory)
# Blueprint grouping the "change" endpoints; mounted under the API prefix from
# the environment (import fails fast if API_PREFIX is unset).
change_blue_print = Blueprint('change_documentation', __name__, url_prefix=os.environ['API_PREFIX'])
@change_blue_print.route('/change_document/<int:document_idx>', methods=("GET", "POST"))
def change_document(document_idx: int):
    """Update an existing document.

    POST query parameters are validated against ``AddNewDocument``.  The
    multi-valued ``creators_ids``/``controllers_ids`` keys are pulled out
    with ``getlist`` because ``dict(request.args)`` keeps only the first
    value of each key; the dates are parsed from ``YYYY-MM-DD`` strings.

    Responds 201 with the changed document's id on success, 400 for a
    non-POST request or a validation failure.
    """
    if request.method == 'POST':
        document = Document(connection=connection, cursor=cursor)
        document_to_change = document.get_document_by_id(document_id=document_idx)

        # getlist returns [] when the key is absent from the query string.
        creators_ids = request.args.getlist('creators_ids')
        controllers_ids = request.args.getlist('controllers_ids')

        request_args = dict(request.args)
        # Drop the single-valued copies before re-inserting the full lists.
        # Bug fix: supply a default so a request that omitted these keys no
        # longer raises KeyError (which surfaced as HTTP 500).
        request_args.pop('creators_ids', None)
        request_args.pop('controllers_ids', None)

        request_args['creators_ids'] = creators_ids
        request_args['controllers_ids'] = controllers_ids
        # NOTE(review): a missing or malformed date still raises here before
        # schema validation runs — confirm clients always send both fields.
        request_args['date_of_creation'] = datetime.strptime(request_args['date_of_creation'],
                                                             '%Y-%m-%d')
        request_args['date_of_registration'] = datetime.strptime(request_args['date_of_registration'],
                                                                 '%Y-%m-%d')

        add_new_document_schema = AddNewDocument()
        errors = add_new_document_schema.validate(data=request_args)
        if errors:
            abort(StatusCodes.BadRequest, str(errors))
        args = add_new_document_schema.dump(request_args)

        # Reuse the Document created above (the original constructed a
        # second, identical instance here).
        document.change_document(
            document_id=document_idx,
            document_name=args['document_name'],
            document_type=args['document_type'],
            date_of_creation=args['date_of_creation'],
            date_of_registration=args['date_of_registration'],
            controllers_ids=args['controllers_ids'],
            creators_ids=args['creators_ids'],
        )

        context = {'idx': document_to_change['id']}
        return make_response(jsonify(context), StatusCodes.Created)

    return Response(status=StatusCodes.BadRequest)
@change_blue_print.route('/change_factory/<int:factory_idx>', methods=("GET", "POST"))
def change_factory(factory_idx: int):
    """Update an existing factory.

    POST query parameters are validated against ``AddNewFactory``.
    Responds 201 with the changed factory's id on success, 400 for a
    non-POST request or a validation failure.
    """
    if request.method == 'POST':
        factory = Factory(connection=connection, cursor=cursor)
        factory_to_change = factory.get_factory_by_id(factory_id=factory_idx)

        add_new_factory_schema = AddNewFactory()
        errors = add_new_factory_schema.validate(data=request.args)
        if errors:
            abort(StatusCodes.BadRequest, str(errors))
        args = add_new_factory_schema.dump(request.args)

        # Reuse the Factory created above (the original constructed a
        # second, identical instance here).
        factory.change_factory(
            factory_id=factory_idx,
            factory_name=args['factory_name'],
            size=args['size'],
            city=args['city'],
        )

        context = {'idx': factory_to_change['id']}
        return make_response(jsonify(context), StatusCodes.Created)

    return Response(status=StatusCodes.BadRequest)
@change_blue_print.route('/change_task/<int:task_idx>', methods=("GET", "POST"))
def change_task(task_idx: int):
    """Validate the query string and update task ``task_idx``.

    Responds 201 on success; 400 for a non-POST request or invalid input.
    """
    # Only POST performs the update; everything else is rejected up front.
    if request.method != 'POST':
        return Response(status=StatusCodes.BadRequest)

    schema = AddNewTask()
    validation_errors = schema.validate(data=request.args)
    if validation_errors:
        abort(StatusCodes.BadRequest, str(validation_errors))
    payload = schema.dump(request.args)

    Task(connection=connection, cursor=cursor).change_task(
        task_id=task_idx,
        task_name=payload['task_name'],
        executor_id=payload['executor_id'],
        document_id=payload['document_id'],
        factory_id=payload['factory_id'],
    )
    return Response(status=StatusCodes.Created)
@change_blue_print.route('/change_user/<int:user_idx>', methods=("GET", "POST"))
def change_user(user_idx: int):
    """Validate the query string and update user ``user_idx``.

    Responds 201 on success; 400 for a non-POST request or invalid input.
    """
    # Only POST performs the update; everything else is rejected up front.
    if request.method != 'POST':
        return Response(status=StatusCodes.BadRequest)

    schema = AddNewUser()
    validation_errors = schema.validate(data=request.args)
    if validation_errors:
        abort(StatusCodes.BadRequest, str(validation_errors))
    payload = schema.dump(request.args)

    User(connection=connection, cursor=cursor).change_user(
        user_id=user_idx,
        first_name=payload['first_name'],
        second_name=payload['second_name'],
        is_internal=payload['is_internal'],
        position=payload['position'],
        email=payload['email'],
        phone_number=payload['phone_number'],
    )
    return Response(status=StatusCodes.Created)
| [
"vadbeg@tut.by"
] | vadbeg@tut.by |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.