blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15f8cd0d04070fe3d412a6539a5235a6a8ef6c98
|
da19363deecd93a73246aaea877ee6607daa6897
|
/xlsxwriter/test/worksheet/test_write_filter.py
|
bfe6d60fc98f85a51ac98245e67242f2736abce1
|
[] |
no_license
|
UNPSJB/FarmaciaCrisol
|
119d2d22417c503d906409a47b9d5abfca1fc119
|
b2b1223c067a8f8f19019237cbf0e36a27a118a6
|
refs/heads/master
| 2021-01-15T22:29:11.943996
| 2016-02-05T14:30:28
| 2016-02-05T14:30:28
| 22,967,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWriteFilter(unittest.TestCase):
    """Unit test for the Worksheet ``_write_filter()`` XML writer."""

    def setUp(self):
        # Route the worksheet's XML output into an in-memory buffer so the
        # generated markup can be inspected without touching the filesystem.
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)

    def test_write_filter(self):
        """_write_filter() should emit a single <filter> element."""
        self.worksheet._write_filter('East')

        expected = """<filter val="East"/>"""
        actual = self.fh.getvalue()

        self.assertEqual(actual, expected)
|
[
"lealuque.tw@gmail.com"
] |
lealuque.tw@gmail.com
|
9db96c6dbb62656d25dbde9c5f8ba667c8585fa8
|
3817a595bdf246996c1f3846860cea9a9154f1cc
|
/AltviaDogs/apps.py
|
6e7c68cfaf6935515b6abbaa873e7c0dd3d098b0
|
[] |
no_license
|
shedwyn/AltviaDogs
|
99e83f217d891c5060d0b9a8f0997ea4ea2c5dc2
|
150078897d38c84ac3c492635ea777476e84b0d9
|
refs/heads/master
| 2021-01-24T00:32:32.176410
| 2018-08-24T20:01:47
| 2018-08-24T20:01:47
| 122,768,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
"""
Designed and built by Erin 'Ernie' L. Fough.
June 2018 (start)
contact: shedwyn@gmail.com
want to know when this was last updated? See README.md.
"""
from django.apps import AppConfig
class AltviaDogsConfig(AppConfig):
    """App configuration that hooks the Dog Manager (AltviaDogs) app into Django."""

    # Dotted module path Django uses to locate this application.
    name = 'AltviaDogs'
|
[
"shedwyn@gmail.com"
] |
shedwyn@gmail.com
|
23bcdebc6312f35fb67216dff2380ccbe74890ab
|
68215f224dd76f8bf299c66c80cb4a0de1a05a20
|
/PythonTutorial/Advance/MetaClass/meta_class_example2.py
|
2e013f02ffac5c75fa0acaf6a410b099a0bd7425
|
[] |
no_license
|
jhappye/Python-Study
|
3935e1d9ffead3afb1b39823a4114006094a6542
|
40a421980e9f56a97f6f7b9eb00bbf983ee12c5b
|
refs/heads/master
| 2023-08-30T17:36:26.313896
| 2021-11-16T09:53:39
| 2021-11-16T09:53:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
# Conventional class definition.
class Foo:
    pass


# Conventional class definition: subclass of Foo with one class attribute.
class Bar(Foo):
    attr = 100


x = Foo()
print(x)

# Dynamic creation via the three-argument type() call; this is exactly
# equivalent to the Bar class statement above (name, bases, namespace).
Bar = type('Bar', (Foo,), dict(attr=100))
x = Bar()
print(x)
print(type(x))
|
[
"516495459@qq.com"
] |
516495459@qq.com
|
76a14a2500ada13704b7d69ed40d39c206497011
|
680db028bdfd570688e66009dd41a424b2395a6e
|
/Camelcase.py
|
51a76dd0a42b6afcae6e169004a153a3dac0257b
|
[] |
no_license
|
vitcmaestro/player
|
46fcf3138a7ba98a3aadae60aebdd68efdd16c55
|
8b9b132ec5f3dfe22521daf6122431451db8d880
|
refs/heads/master
| 2020-04-15T16:43:53.944347
| 2019-01-20T05:00:31
| 2019-01-20T05:00:31
| 164,848,284
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
# Read one line and upper-case the first character of every
# whitespace-separated word; every other character is kept unchanged
# (note: unlike str.title(), mid-word letters are NOT lower-cased).
text = input("")
result = ""
for index, ch in enumerate(text):
    if index == 0 or text[index - 1].isspace():
        result += ch.upper()
    else:
        result += ch
print(result)
|
[
"noreply@github.com"
] |
vitcmaestro.noreply@github.com
|
149488d35576cf1a5937c6f95ae55ab3c88828d9
|
837d683ccf6aa08b0ad6d95f6af47fcc12b72da3
|
/pedersen_vss.py
|
586d0dff4ce73808e4596e669d77272e83f15f10
|
[] |
no_license
|
froyobin/secretsharing
|
deef2cdb9be72ff51be339bfaae4822e351b8cf1
|
737f2aa074e0cb6f082916bd11afc658732777e1
|
refs/heads/master
| 2021-01-11T21:32:12.720629
| 2017-01-20T00:46:31
| 2017-01-20T00:46:31
| 78,798,569
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,479
|
py
|
import argparse
import shamir_secret_sharing
from Crypto.Util import number
# To make it simple, we give the f(X)=5+3x+8x^2 as params. It is easy to do
# the test and write the code.

# Public group parameters for the Pedersen commitments below.
# p appears to be the 2048-bit MODP prime from RFC 3526 (a safe prime) --
# NOTE(review): assumption from the hex pattern; confirm against RFC 3526.
p = int(
    "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF",
    16)
# Generator used for the secret term of each commitment.
g = 2
# Second generator used for the blinding term.  For the scheme to be
# binding, log_g(h) must be unknown -- presumably chosen that way; this
# file does not show how h was derived.
h = int(
    "3d941d6d9cd4c77719840d6b391a63ca6ded5b5cf6aafeefb9ea530f523039e9c372736a79b7eb022e50029f7f2cb4fb16fd1def75657288eca90d2c880f306be76fe0341b3c8961ae6e61aabbb60e416069d97eeada2f1408f2017449dddcd5ac927f164b1a379727941bd7f2170d02ef12ef3ec801fae585ac7b9d4079f50feced64687128208d46e3e10c5d78eb05832f5322c07a3b4e14c6f595206fde99115e8eea19b5fb13dd434332ec3eccb41a4baa54a14183c3416313678697db8507abdcfc6a97c86099fa5172316d784c6997fc2e74e8e59c7c1bc90426164682f5bfbf6373b13ea90d7e13fbffd65e10c4ad96c38ccbf8e8def28d76746729dc",
    16)
def create_verifies(params1, params2, p, g, h):
    """Build the public Pedersen commitment for each coefficient pair.

    Element i of the result is (g**params1[i] * h**params2[i]) mod p,
    binding the i-th coefficients of the two sharing polynomials.
    """
    return [
        (pow(g, params1[i], p) * pow(h, params2[i], p)) % p
        for i in range(len(params1))
    ]
def verify_each(i, verifies, len_pamras):
    # NOTE(review): this helper accumulates the left-hand side of the VSS
    # share check (prod_j verifies[j] ** (i**j mod p) mod-ish) but never
    # returns or compares the value, and nothing in this file calls it --
    # it looks like an abandoned draft of calculate_left() below.  It also
    # reads the module-level prime ``p`` instead of taking it as a
    # parameter, and the parameter name ``len_pamras`` is a typo kept for
    # compatibility.
    left_value = 1
    for j in range(0, len_pamras):
        upper = pow(i, j, p)
        left_value *= pow(verifies[j], upper, p)
def calculate_left(verfies, i, t, p):
    """Evaluate the commitment product for share index ``i``.

    Returns prod_j verfies[j] ** (i ** j) mod p, with exponents i**j
    computed for j = 0 .. t-1 (full-size integers, not reduced).
    """
    # Precompute the exponents 1, i, i**2, ..., i**(t-1).
    exponents = [1]
    for degree in range(1, t):
        exponents.append(pow(i, degree))

    product = 1
    for j in range(len(verfies)):
        product = (product * pow(verfies[j], exponents[j], p)) % p
    return product % p
def verifies_shares(secrets1, secrets2, verifies, params,p,g):
    # Verify every share against the public commitments (Python 2 prints).
    # Share i (1-based index i+1) passes when the product of commitments
    # raised to powers of the index equals g**share1 * h**share2 (mod p).
    # Reads the module-level generator ``h``; ``params`` is only used for
    # its length (the polynomial degree / threshold).
    for i in range(0, len(secrets1)):
        left_value = calculate_left(verifies, i+1, len(params), p)
        right_value = (pow(g, (secrets1[i])%p, p)*pow(h, (secrets2[i])%p, p))%p
        if left_value == right_value:
            print "checking %d Successfully!!" % i
        else:
            print "secret %d has been modified!!" % i
def test_prime():
    # Exploratory helper (Python 2): probes the factor structure of p-1.
    # It checks whether (p-1)/2 is prime (i.e. whether p is a safe prime)
    # and scans i = 2..9999 for a divisor of p-1 such that (p-1)/i is
    # prime.  Not called anywhere in this module; kept as a scratch test.
    n_length = 2048
    q= (p-1)%2                       # parity of p-1 (0 since p is odd)
    print q
    print number.isPrime((p-1)/2)
    primeNum1 = number.getPrime(n_length)
    # primeNum2 = number.getStrongPrime(n_length, primeNum1)
    i=2
    while True:
        pl = p-1
        print i
        if pl%i == 0:
            if number.isPrime(pl / i):
                print "############"
                print i
                print "############"
                break
        i += 1
        if i==10000:                 # give up after 10k candidates
            break
    print "found!!"
    print "############################"
    print primeNum1
    print "############################"
def main(args):
    # Demonstration driver for Pedersen verifiable secret sharing
    # (Python 2):
    #   1. split the secret with two independent Shamir polynomials
    #      (params1 carries the secret, params2 the blinding values),
    #   2. publish commitments binding both coefficient sets,
    #   3. verify every share and reconstruct the secret,
    #   4. tamper with one share to show verification catches it.
    s = shamir_secret_sharing.StringtoInt(args.secret)
    parties = int(args.party)
    min_party = int(args.min_party)
    params1 = shamir_secret_sharing.create_params(s, min_party, p)
    params2 = shamir_secret_sharing.create_params(s, min_party, p)
    secrets1 = shamir_secret_sharing.create_secret(parties, params1, p)
    secrets2 = shamir_secret_sharing.create_secret(parties, params2, p)
    secret = shamir_secret_sharing.construct_secret(secrets1, min_party, p)
    verifies = create_verifies(params1, params2, p, g, h)
    verifies_shares(secrets1, secrets2, verifies, params2, p, g)
    print "The secret you give is " + str(params1[0]) + "\n"
    print "The rejoin code is " + str(secret)
    if params1[0] == secret:
        print "rejoin Successfully!!"
    else:
        print "we cannot rejoin the secret"
    #We change secret2 3's secret and see whether we can check it out
    secrets2[3] -= 1
    verifies_shares(secrets1, secrets2, verifies, params2, p, g)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create secret shares')
parser.add_argument('secret', metavar='\"secret\"', type=str,
help='the secret to share')
parser.add_argument('party', metavar='\"secret\"', type=str,
help='the secret to share')
parser.add_argument('min_party', metavar='\"secret\"', type=str,
help='the secret to share')
args = parser.parse_args()
main(args)
|
[
"froyo.bin@gmail.com"
] |
froyo.bin@gmail.com
|
32e1da53f66f709ec826c5e6e98b69f964055011
|
644bcdabf35261e07c2abed75986d70f736cb414
|
/python-project/Defis/Euler_44_Test.py
|
059d736f3c4546ec361ff33f1fc9b32183c5bd48
|
[] |
no_license
|
matcianfa/playground-X1rXTswJ
|
f967ab2c2cf3905becafb6d77e89a31414d014de
|
67859b496e407200afb2b1d2b32bba5ed0fcc3f0
|
refs/heads/master
| 2023-04-03T11:56:15.878757
| 2023-03-24T15:52:37
| 2023-03-24T15:52:37
| 122,226,979
| 5
| 20
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
# Remember to change the module to import.
module="Defis/Euler_44"
import sys
import io
# Capture the learner's output: while Euler_44 runs at import time, its
# print() output goes to an in-memory buffer instead of the real stdout.
sauvegarde_stdout=sys.stdout
sys.stdout=io.StringIO()
from Euler_44 import *
# Captured output, without the trailing newline.
count1 = sys.stdout.getvalue()[:-1]
sys.stdout=sauvegarde_stdout
from ma_bao import *
# The expected answer.
reponse=5482660
# Hint shown to the learner when the check fails.
# NOTE(review): this shadows the builtin help().
help="N'oublie pas d'utiliser print pour afficher le resultat"
def send_msg(channel, msg):
    """Emit one message on the TECHIO runner protocol channel."""
    line = "TECHIO> message --channel \"{}\" \"{}\"".format(channel, msg)
    print(line)
def success():
    # Report success to the TECHIO runner and display the reference
    # solution for the module under test (afficher_correction comes from
    # the ma_bao helper module imported above).
    send_msg("Tests validés","Bravo !")
    afficher_correction(module)
    print("TECHIO> success true")
def fail():
    """Signal failure to the TECHIO runner."""
    print("TECHIO> success false")
def test():
    # Compare the captured output (count1) against the expected Euler 44
    # answer (reponse) and report the outcome through the TECHIO protocol;
    # on failure, also send the hint stored in the module-level ``help``.
    try:
        assert str(count1) == str(reponse), "Le résultat obtenu est {} mais ce n'est pas le bon.".format(str(count1))
        send_msg("Tests validés","Le résultat cherché est bien {}".format(str(count1)))
        success()
    except AssertionError as e:
        fail()
        send_msg("Oops! ", e)
        if help:
            send_msg("Aide 💡", help)

if __name__ == "__main__": test()
|
[
"noreply@github.com"
] |
matcianfa.noreply@github.com
|
07e8af6fb34efbf28fd13da8b7998358fa68674c
|
f9c2e53c48839fe5583f6da48e800eba793f83c8
|
/custom_components/hacs/repositories/integration.py
|
8470873279f13b52e25cd0fb77a631d30b168ab2
|
[] |
no_license
|
Bobo2012/homeassistant-config
|
d6a5ebdd8de978d14a5d060ad77975280efdf95e
|
ee2567182e3ef250ee2a2e1a403fbc6f27057861
|
refs/heads/master
| 2020-08-21T20:04:53.531610
| 2019-10-18T22:59:54
| 2019-10-18T22:59:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,501
|
py
|
"""Class for integrations in HACS."""
import json
from aiogithubapi import AIOGitHubException
from homeassistant.loader import async_get_custom_components
from .repository import HacsRepository, register_repository_class
@register_repository_class
class HacsIntegration(HacsRepository):
    """Integrations in HACS.

    Repository handler for the "integration" category: validates the
    repository layout, locates the integration's content directory and
    manifest.json, and keeps the local install path in sync.
    """

    # Category key HACS uses to group this repository type.
    category = "integration"

    def __init__(self, full_name):
        """Initialize."""
        super().__init__()
        self.information.full_name = full_name
        self.information.category = self.category
        self.manifest = None  # parsed manifest.json dict, set by get_manifest()
        self.domain = None  # integration domain, read from the manifest
        # Default remote content location; narrowed to a subdirectory later.
        self.content.path.remote = "custom_components"
        self.content.path.local = self.localpath

    @property
    def localpath(self):
        """Return localpath."""
        # Install target: <HA config>/custom_components/<domain>.
        return f"{self.system.config_path}/custom_components/{self.domain}"

    @property
    def config_flow(self):
        """Return bool if integration has config_flow."""
        if self.manifest is not None:
            # HACS itself is special-cased: never report a config flow.
            if self.information.full_name == "custom-components/hacs":
                return False
            return self.manifest.get("config_flow", False)
        return False

    async def validate_repository(self):
        """Validate."""
        # Shared validation steps common to all repository categories.
        await self.common_validate()

        # Attach repository
        if self.repository_object is None:
            self.repository_object = await self.github.get_repo(
                self.information.full_name
            )

        # Custom step 1: Validate content.
        if self.repository_manifest:
            if self.repository_manifest.content_in_root:
                # Integration files live at the repository root.
                self.content.path.remote = ""

        if self.content.path.remote == "custom_components":
            ccdir = await self.repository_object.get_contents(
                self.content.path.remote, self.ref
            )
            if not isinstance(ccdir, list):
                self.validate.errors.append("Repostitory structure not compliant")
            # Use the first subdirectory under custom_components/ as the
            # integration's content path.
            for item in ccdir or []:
                if item.type == "dir":
                    self.content.path.remote = item.path
                    break

        if self.repository_manifest.zip_release:
            # Content is shipped as release assets instead of repo files.
            self.content.objects = self.releases.last_release_object.assets
        else:
            self.content.objects = await self.repository_object.get_contents(
                self.content.path.remote, self.ref
            )

        self.content.files = []
        for filename in self.content.objects or []:
            self.content.files.append(filename.name)

        if not await self.get_manifest():
            self.validate.errors.append("Missing manifest file.")

        # Handle potential errors
        if self.validate.errors:
            for error in self.validate.errors:
                if not self.system.status.startup:
                    self.logger.error(error)
        return self.validate.success

    async def registration(self):
        """Registration."""
        if not await self.validate_repository():
            return False

        # Run common registration steps.
        await self.common_registration()

        # Get the content of the manifest file.
        await self.get_manifest()

        # Set local path
        self.content.path.local = self.localpath

    async def update_repository(self):
        """Update."""
        # Shared update steps (refs, releases, repository manifest).
        await self.common_update()

        # Get integration objects.
        if self.repository_manifest:
            if self.repository_manifest.content_in_root:
                self.content.path.remote = ""

        if self.content.path.remote == "custom_components":
            ccdir = await self.repository_object.get_contents(
                self.content.path.remote, self.ref
            )
            if not isinstance(ccdir, list):
                self.validate.errors.append("Repostitory structure not compliant")
            # NOTE(review): unlike validate_repository(), this takes the
            # first entry unconditionally rather than searching for a
            # directory entry -- confirm this asymmetry is intended.
            self.content.path.remote = ccdir[0].path

        try:
            self.content.objects = await self.repository_object.get_contents(
                self.content.path.remote, self.ref
            )
        except AIOGitHubException:
            # Content listing failed; keep previously known content.
            return

        self.content.files = []
        for filename in self.content.objects or []:
            self.content.files.append(filename.name)

        await self.get_manifest()

        # Set local path
        self.content.path.local = self.localpath

    async def reload_custom_components(self):
        """Reload custom_components (and config flows)in HA."""
        self.logger.info("Reloading custom_component cache")
        # Drop Home Assistant's cached integration list so it is rebuilt.
        del self.hass.data["custom_components"]
        await async_get_custom_components(self.hass)

    async def get_manifest(self):
        """Get info from the manifest file."""
        # Returns True when a usable manifest was loaded, False otherwise.
        manifest_path = f"{self.content.path.remote}/manifest.json"
        try:
            manifest = await self.repository_object.get_contents(
                manifest_path, self.ref
            )
            manifest = json.loads(manifest.content)
        except Exception:  # pylint: disable=broad-except
            # Missing or unparsable manifest -> report failure to caller.
            return False

        if manifest:
            self.manifest = manifest
            self.information.authors = manifest["codeowners"]
            self.domain = manifest["domain"]
            self.information.name = manifest["name"]
            self.information.homeassistant_version = manifest.get("homeassistant")

            # Set local path
            self.content.path.local = self.localpath
            return True
        return False
|
[
"arsaboo@gmx.com"
] |
arsaboo@gmx.com
|
d95810328bbed890c7daf776a9675beedc3d8cd2
|
473568bf080e3637ee118b374f77e9f561286c6c
|
/SudoPlacementCourse/LeadersInAnArray.py
|
b2b997ace46a797ad0dd241ff7cd5667a35adba3
|
[] |
no_license
|
VineetPrasadVerma/GeeksForGeeks
|
c2f7fc94b0a07ba146025ca8a786581dbf7154c8
|
fdb4e4a7e742c4d67015977e3fbd5d35b213534f
|
refs/heads/master
| 2020-06-02T11:23:11.421399
| 2020-01-07T16:51:18
| 2020-01-07T16:51:18
| 191,138,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
# n = int(input())
#
# for k in range(n):
# final_ans_list = []
# size_of_array = int(input())
# list_of_elements = input()
#
# int_list_of_elements = [int(i) for i in list_of_elements.split()]
#
# for i in range(len(int_list_of_elements)):
# if i == len(int_list_of_elements) - 1:
# final_ans_list.append(int_list_of_elements[i])
# break
#
# max_element = int_list_of_elements[i]
# temp_max_element = max(int_list_of_elements[i+1:])
#
# if max_element >= temp_max_element:
# final_ans_list.append(int_list_of_elements[i])
#
# string_list_of_elements = [str(i) for i in final_ans_list]
# print(" ".join(string_list_of_elements))
# For each test case, print the "leaders" of the array: elements that are
# greater than or equal to everything to their right.  Scanning right to
# left keeps a running maximum, collecting leaders in reverse order.
n = int(input())

for _ in range(n):
    leaders = []
    size_of_array = int(input())  # consumed but the split below sizes itself
    raw_line = input()

    values = [int(token) for token in raw_line.split()]

    best = -1
    for value in reversed(values):
        if value >= best:
            best = value
            leaders.append(best)

    # Leaders were gathered right-to-left; emit them in original order.
    for leader in reversed(leaders):
        print(leader, end=" ")
    print()
|
[
"vineetpd1996@gmail.com"
] |
vineetpd1996@gmail.com
|
8294fce530b848259f949592831c9de1c760dbad
|
14a913fce4b538b22f28409645cd6abe3455808f
|
/bigquery/cloud-client/natality_tutorial.py
|
5bfa8f1d27a9736075f271b77657e9342df6d688
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
iamLoi/Python-Random-Number-Generator
|
8da7dbd37cb13a01232c8ed49b9df35a99c63d73
|
7579e8b15130802aaf519979e475c6c75c403eda
|
refs/heads/master
| 2022-08-29T19:05:32.649931
| 2019-09-14T14:48:58
| 2019-09-14T14:48:58
| 208,454,877
| 2
| 1
|
Apache-2.0
| 2022-08-05T21:57:49
| 2019-09-14T14:51:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
#!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def run_natality_tutorial():
    # [START bigquery_query_natality_tutorial]
    """Build the input table for the natality linear-regression tutorial.

    Creates a "natality_regression" dataset in the caller's default
    project, then runs a Standard SQL query over the public
    bigquery-public-data.samples.natality table and stores the columns of
    interest in a new "regression_input" destination table.
    """
    from google.cloud import bigquery

    # The client picks up project and credentials from environment defaults.
    bq_client = bigquery.Client()

    # Create the dataset that will hold the query results.
    regression_dataset = bigquery.Dataset(bq_client.dataset('natality_regression'))
    regression_dataset = bq_client.create_dataset(regression_dataset)

    # Reference to the destination table inside the new dataset.
    destination_table = regression_dataset.table('regression_input')

    # Route the query output into the destination table.
    query_config = bigquery.QueryJobConfig()
    query_config.destination = destination_table

    # Standard SQL (the Python client default): keep only the columns the
    # regression needs, dropping rows with missing values.
    query = """
    SELECT
        weight_pounds, mother_age, father_age, gestation_weeks,
        weight_gain_pounds, apgar_5min
    FROM
        `bigquery-public-data.samples.natality`
    WHERE
        weight_pounds IS NOT NULL
        AND mother_age IS NOT NULL
        AND father_age IS NOT NULL
        AND gestation_weeks IS NOT NULL
        AND weight_gain_pounds IS NOT NULL
        AND apgar_5min IS NOT NULL
    """

    # Run the query and block until it finishes.
    job = bq_client.query(query, job_config=query_config)
    job.result()
    # [END bigquery_query_natality_tutorial]


if __name__ == '__main__':
    run_natality_tutorial()
|
[
"noreply@github.com"
] |
iamLoi.noreply@github.com
|
0618850a5db7746378ebd7c6d18fc84b9305a5e3
|
05324b134108e0e0fde392e0ae0fc22bfa1fb75f
|
/df_user/islogin.py
|
d2149c4633a4871adc7f90b232f8ccc8e25afcec
|
[] |
no_license
|
2339379789/zhang_ttshop
|
2c8d546b9ed3710fd1f48d6075ea01955247d34f
|
44f9eb998182f4aa027d5d313b4957410b54a39d
|
refs/heads/master
| 2020-03-06T15:40:54.261769
| 2018-03-27T09:23:52
| 2018-03-27T09:23:52
| 126,960,140
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from django.http import HttpResponseRedirect
def islogin(func):
    """Decorator requiring a logged-in session for a Django view.

    If ``request.session`` holds a ``user_id``, the wrapped view runs
    normally.  Otherwise the user is redirected to ``/user/login`` with
    the originally requested URL stored in a ``url`` cookie so the login
    view can bounce back after authentication.
    """
    def login_fun(request, *args, **kwargs):
        if request.session.get('user_id'):
            return func(request, *args, **kwargs)
        # Not logged in: redirect to the login page.
        red = HttpResponseRedirect('/user/login')
        # BUG FIX: get_full_path is a method; the original passed the bound
        # method object itself, so the cookie held a method repr instead of
        # the requested URL.
        red.set_cookie('url', request.get_full_path())
        return red
    return login_fun
|
[
"2339379789@qq.com.com"
] |
2339379789@qq.com.com
|
fcdd844f805ea4a7bd79824935397e4ae355b4f3
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4052/354004052.py
|
c318eb1028c5e6c2d9a7772e6ec4c9f7cf7069da
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from bots.botsconfig import *
from records004052 import recorddefs
# Bots EDI grammar for X12 transaction set 354, release 004052.
# ID/MIN/MAX/LEVEL come from the bots.botsconfig star import above.
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'AY',
}

# Segment hierarchy: one ST header, one M10, up to 20 P4 loops (each with
# one X01 and up to 9999 optional X02 segments), then the SE trailer.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'M10', MIN: 1, MAX: 1},
        {ID: 'P4', MIN: 1, MAX: 20, LEVEL: [
            {ID: 'X01', MIN: 1, MAX: 1},
            {ID: 'X02', MIN: 0, MAX: 9999},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
c3b06571705e3d42ea421dbcc6cb94538267f247
|
078533ce919371451564646e3c311c8dd6fca7ea
|
/app/settings/prod.py
|
1425b25dbaa062e89c3c39a5947da1224923cd03
|
[] |
no_license
|
cr8ivecodesmith/djdraft
|
cf5415a967dc7fc6f4f8d191def9c2b687c0d744
|
364ded3ea43acc874de367cd679c4bddfb64d837
|
refs/heads/master
| 2020-04-05T23:07:20.690873
| 2016-10-06T05:11:29
| 2016-10-06T05:11:29
| 23,010,461
| 2
| 1
| null | 2016-10-03T13:42:04
| 2014-08-16T04:17:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
from .base import *
# Production settings overlay on top of the .base defaults.

# Debugging must stay off in production.
DEBUG = False

ALLOWED_HOSTS = [
    '{{ project_name }}.dev',
]

###### APP CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
# Production-only apps go here (currently none).
INSTALLED_APPS += [
]

###### DATABASE CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# NOTE(review): 'changeme' is a placeholder password -- must be overridden
# per deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'caffeinedb',
        'USER': 'caffeine',
        'PASSWORD': 'changeme',
        'HOST': 'postgres',
        'PORT': 5432,
    }
}

########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/topics/email/#smtp-backend
EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
# Credentials are pulled from the key store via get_key() (from .base).
EMAIL_HOST_USER = get_key('EMAIL_HOST_USER')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = get_key('EMAIL_HOST_PASSWORD')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'smtp.{{ project_name }}.dev'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
# 465 = implicit-SSL SMTP, matching the SSL backend above.
EMAIL_PORT = 465

SERVER_EMAIL = 'errors@{{ project_name }}.dev'
DEFAULT_FROM_EMAIL = 'noreply@{{ project_name }}.dev'

###### MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS += [
    ('caffeine prod', 'errors@{{ project_name }}.dev'),
]

###### CELERY CONFIGURATION
# CELERY_ROUTES = {
#     'app.tasks.example_queue': {
#         'queue': 'express_queue'
#     },
# }
|
[
"matt@lebrun.org"
] |
matt@lebrun.org
|
0af0e6aeb4bf93bb5c2c00acba6daf1feb4788c2
|
0566cf76b456518875edecece15e763a36a4795f
|
/scrapers/tv_showsonline_com.py
|
6208f8a041fec633df9bb9e2989a3db26e001ce2
|
[] |
no_license
|
theclonedude/Scraping_BeautifulSoup_phantomjs
|
684b1f7a993e0d2555daa7a5455cf19bd29b0b1b
|
faf653feae46c21a72d13b2123cdebdb2f7c05d8
|
refs/heads/master
| 2023-03-16T19:36:14.867361
| 2018-06-14T14:21:02
| 2018-06-14T14:21:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,148
|
py
|
from sandcrawler.scraper import ScraperBase
from sandcrawler.scraper import SimpleScraperBase
class TvShowsOnline(SimpleScraperBase):
    """Scraper for 7stream.pro / tv-showsonline.com (currently disabled)."""

    BASE_URL = 'http://www.7stream.pro/'
    OTHER_URLS = ['http://tv-showsonline.com', ]

    def setup(self):
        self.register_scraper_type(ScraperBase.SCRAPER_TYPE_OSP)
        self.search_term_language = 'eng'
        # Deliberately disables this scraper: the site is down.  Everything
        # below the raise is intentionally unreachable until it is removed.
        raise NotImplementedError('the website returns the bad gateway error')
        self.register_media(ScraperBase.MEDIA_TYPE_FILM)
        self.register_media(ScraperBase.MEDIA_TYPE_TV)
        for url in [self.BASE_URL, ] + self.OTHER_URLS:
            self.register_url(ScraperBase.URL_TYPE_SEARCH, url)
            self.register_url(ScraperBase.URL_TYPE_LISTING, url)

    def _fetch_search_url(self, search_term, media_type):
        # Build the site search URL with the term percent-encoded.
        return self.BASE_URL + '/?s='+ '%s' % self.util.quote(search_term)

    def _fetch_no_results_text(self):
        # Marker text the site shows when a search returns nothing.
        return 'No Articles Found'

    def _fetch_next_button(self, soup):
        # Pagination: return the "next page" URL, or None on the last page.
        link = soup.select_one('a[class="next page-numbers"]')
        self.log.debug(link)
        self.log.debug('------------------------')
        return link['href'] if link else None

    def _parse_search_result_page(self, soup):
        # Each search hit is an anchor inside an item content block.
        results = soup.select('div.item_content h4 a')
        if not results or len(results) == 0:
            return self.submit_search_no_results()
        for result in results:
            self.submit_search_result(
                link_url = result['href'],
                link_title = result.text
            )

    def _parse_parse_page(self, soup):
        # The page title carries the show name plus season/episode markers.
        title = soup.select_one('h1[class="entry_title entry-title"]').text.strip()
        season, episode = self.util.extract_season_episode(title)
        titles = soup.select('span.ser-name')
        # NOTE(review): assumes 'a.wo-btn' links and 'span.ser-name' titles
        # line up one-to-one; an extra button would raise IndexError.
        index = 0
        for link in soup.select('a.wo-btn'):
            self.submit_parse_result(
                index_page_title = self.util.get_page_title(soup),
                link_url=link['href'],
                link_title=titles[index].text,
                series_season=season,
                series_episode=episode
            )
            index += 1
|
[
"stryokka@gmail.com"
] |
stryokka@gmail.com
|
585c1f38e68fa896e9f1b91523d7cde15f833d05
|
544fe02a27cc4d987724b1bf45c2ba2994676521
|
/Q6.3_brain_teasers.py
|
c42fb93923fdbb832381c85a4fbe252f28b128c3
|
[
"Unlicense"
] |
permissive
|
latika18/learning
|
1e7a6dbdea399b845970317dc62089911a13df1c
|
a57c9aacc0157bf7c318f46c1e7c4971d1d55aea
|
refs/heads/master
| 2021-06-16T19:20:28.146547
| 2019-09-03T06:43:28
| 2019-09-03T06:43:28
| 115,537,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
#You have a five quart jug and a three quart jug, and an unlimited supply of water (but no measuring cups).
How would you come up with exactly four quarts of water?
#NOTE: The jugs are oddly shaped, such that filling up exactly ‘half’ of the jug would be impossible.
_
________________________________________________________________
5 Quart Contents 3 Quart Contents Note
5 0 Filled 5 quart jug
2 3 Filled 3Q with 5Q’s contents
0 2 Dumped 3Q
5 2 Filled 5Q
4 3 Fill remainder of 3Q with 5Q
4
Done! We have four quarts
|
[
"noreply@github.com"
] |
latika18.noreply@github.com
|
e2e7d5b9b92f7a0b1b6801b43db9c95bca9229f0
|
1d159ff6d4d72b1a2399916ec1e28ef885b59323
|
/solutions/module_3/01_guess_my_number.py
|
4b9c9b119b0c1c64362318c3850d074b0c21f484
|
[] |
no_license
|
Zerl1990/python_essentials
|
4e329b6e36b36ff340d505b26608d2b244ad2d09
|
ce257c25072debed9717960591e39c586edd7110
|
refs/heads/master
| 2023-02-28T17:42:17.465769
| 2021-02-02T23:39:41
| 2021-02-02T23:39:41
| 297,385,479
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
import random
# Number-guessing game: the player has 10 attempts to find a random
# integer in [0, 100], with higher/lower hints after each guess.
# FIX: the banner said "select a numbers" -- corrected to "a number".
print((
    "+=========================================+\n"
    "| Guess my number!                        |\n"
    "| Please, select a number between [0-100] |\n"
    "| Let's start you have 10 opportunities   |\n"
    "+=========================================+\n"
))

my_number = random.randint(0, 100)
opportunities = 0
max_opportunities = 10
guess = -1
win = False

while not win and opportunities < max_opportunities:
    guess = int(input('What is the number?: '))
    win = (my_number == guess)
    opportunities += 1
    if my_number < guess:
        print(f'[---] My number is less than {guess}')
    elif my_number > guess:
        print(f'[+++] My number is greater than {guess}')
    else:
        print('You found my number!')

if win:
    print(f'It only took {opportunities} opportunities, you have won!!!')
else:
    print("Better luck next time")
|
[
"luis.m.rivas@oracle.com"
] |
luis.m.rivas@oracle.com
|
3972d6d7840538ecae880e9e1fda6b25c54b8bb0
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oas_list.py
|
3e7ec87a09dad726385a034be07e5ed147db3d49
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 727
|
py
|
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
def h2oas_list():
    """
    Python API test: h2o.as_list(data, use_pandas=True, header=True)
    Copied from pyunit_frame_as_list.py
    """
    # Import the iris data set, convert the frame to a plain Python list
    # (rows) without pandas, transpose it into columns with zip(*...), and
    # spot-check the entries at row index 9 against known values.
    iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
    res1 = h2o.as_list(iris, use_pandas=False)
    assert_is_type(res1, list)
    res1 = list(zip(*res1))
    assert abs(float(res1[0][9]) - 4.4) < 1e-10 and abs(float(res1[1][9]) - 2.9) < 1e-10 and \
        abs(float(res1[2][9]) - 1.4) < 1e-10, "incorrect values"


# Run under the standalone test harness (connects to an H2O cluster) when
# executed directly; run immediately when imported by the suite.
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2oas_list)
else:
    h2oas_list()
|
[
"noreply@github.com"
] |
h2oai.noreply@github.com
|
c79d336c33025ee87041b0924dfe4af9287a02f3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03087/s875530520.py
|
c6f926255e87de91fe2fee869e4c531b1ffd87e1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
# Count occurrences of the substring "AC" inside S on query ranges [l, r],
# answering each of the Q queries in O(1) via a prefix-count array.
N, Q = map(int, input().split())
S = str(input())

# prefix[i] = number of "AC" pairs whose 'A' is at an index <= i.
prefix = []
pairs = 0
for i in range(N - 1):
    if S[i] == 'A' and S[i + 1] == 'C':
        pairs += 1
    prefix.append(pairs)

for _ in range(Q):
    l, r = map(int, input().split())
    if l > 1:
        print(prefix[r - 2] - prefix[l - 2])
    else:
        print(prefix[r - 2])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
16d806ed7bb7721136f1534dbe98b1aaa13ec985
|
b15c47a45207e854fb002d69f7e33f8943a5e2b3
|
/cluster/preprocess/pre_node_merge_text2seq.py
|
c25a02debf845bcb7cf7835b41e46d424efd847a
|
[
"Apache-2.0"
] |
permissive
|
yurimkoo/tensormsa
|
e1af71c00a6b2ec3b3ed35d5adad7bafc34c6fbe
|
6ad2fbc7384e4dbe7e3e63bdb44c8ce0387f4b7f
|
refs/heads/master
| 2021-07-22T13:41:45.110348
| 2017-11-02T07:13:31
| 2017-11-02T07:13:31
| 109,469,204
| 1
| 0
| null | 2017-11-04T05:19:51
| 2017-11-04T05:19:50
| null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
from cluster.preprocess.pre_node import PreProcessNode
from master.workflow.preprocess.workflow_pre_merge import WorkFlowPreMerge as WFPreMerge
class PreNodeMergeText2Seq(PreProcessNode):
    """Preprocess node that merges several upstream data nodes into one
    [encoder_data, decoder_data] pair for sequence-to-sequence training.
    """

    def run(self, conf_data):
        # No work at run time: merging happens lazily via load_data().
        return True

    def _init_node_parm(self, key):
        """Populate this node's merge configuration from the workflow store.

        :param key: workflow node id
        :return: None (sets instance attributes)
        """
        wf_conf = WFPreMerge(key)
        self.batch_size = wf_conf.get_batchsize()
        self.merge_rule = wf_conf.get_merge_rule()  # dict with 'encode_node' / 'decode_node' lists
        self.merge_type = wf_conf.get_type()  # only 'seq2seq' is handled below
        self.state_code = wf_conf.get_state_code()

    def _set_progress_state(self):
        pass

    def load_data(self, node_id, parm = 'all'):
        """
        load train data
        :param node_id: workflow node id whose configuration drives the merge
        :param parm: passed through to the upstream nodes' load_data
        :return: merged data (see _merge_seq2seq_type)
        """
        self._init_node_parm(node_id)
        if(self.merge_type == 'seq2seq') :
            return self._merge_seq2seq_type()
        else :
            raise Exception ("merge node error: not defined type {0}".format(self.merge_type))

    def _merge_seq2seq_type(self):
        """
        merge two data node into one for seq2seq anal
        :return: [encode_data, decode_data] -- concatenated data from the
                 configured encoder nodes and decoder nodes respectively
        """
        file_lists = []
        encode_data = []
        encode_node_list = self.merge_rule['encode_node']
        if (len(encode_node_list) > 0):
            for node_name in encode_node_list:
                # Resolve and instantiate the upstream node class, then pull
                # its data; node ids are "<state_code>_<node_name>".
                cls_path, cls_name = self.get_cluster_exec_class(str(self.state_code) + "_" + node_name)
                dyna_cls = self.load_class(cls_path, cls_name)
                encode_data = encode_data + dyna_cls.load_data(self.state_code + "_" + node_name, parm='all')
            file_lists.append(encode_data)

        decode_data = []
        decode_node_list = self.merge_rule['decode_node']
        if (len(decode_node_list) > 0):
            for node_name in decode_node_list:
                cls_path, cls_name = self.get_cluster_exec_class(self.state_code + "_" + node_name)
                dyna_cls = self.load_class(cls_path, cls_name)
                decode_data = decode_data + dyna_cls.load_data(self.state_code + "_" + node_name, parm='all')
            file_lists.append(decode_data)
        return file_lists
|
[
"tmddno1@naver.com"
] |
tmddno1@naver.com
|
ae3450610d3f4a3c4469aa2562e125c8a4c0108f
|
475e2fe71fecddfdc9e4610603b2d94005038e94
|
/Facebook/moveZeroes.py
|
f41fef3843fc872dc295c4699bbbe39783028f77
|
[] |
no_license
|
sidhumeher/PyPractice
|
770473c699aab9e25ad1f8b7b7cd8ad05991d254
|
2938c14c2e285af8f02e2cfc7b400ee4f8d4bfe0
|
refs/heads/master
| 2021-06-28T20:44:50.328453
| 2020-12-15T00:51:39
| 2020-12-15T00:51:39
| 204,987,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
'''
Created on Oct 8, 2020

@author: sidteegela
'''

'''
Input: [0,1,0,3,12]
Output: [1,3,12,0,0]

Input: [0,0]
Output: [0,0]
'''

'''
Two pointers: one scans the array, the other tracks the slot where the
next non-zero element should be written.
'''


def moveZeroes(nums):
    """Move every zero in *nums* to the end, in place, preserving the
    relative order of the non-zero elements, then print the list."""
    write_idx = 0
    for read_idx in range(len(nums)):
        if nums[read_idx] != 0:
            nums[write_idx], nums[read_idx] = nums[read_idx], nums[write_idx]
            write_idx += 1
    print(nums)


if __name__ == '__main__':
    pass
|
[
"sidhumeher@yahoo.co.in"
] |
sidhumeher@yahoo.co.in
|
2ff02576828483feb35dafe118dc773c7fb9f7a0
|
da19363deecd93a73246aaea877ee6607daa6897
|
/xlsxwriter/test/comparison/test_page_breaks05.py
|
fb2c64036f0be6ae53d7ae0229ca42202a630f17
|
[] |
no_license
|
UNPSJB/FarmaciaCrisol
|
119d2d22417c503d906409a47b9d5abfca1fc119
|
b2b1223c067a8f8f19019237cbf0e36a27a118a6
|
refs/heads/master
| 2021-01-15T22:29:11.943996
| 2016-02-05T14:30:28
| 2016-02-05T14:30:28
| 22,967,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare a file produced by XlsxWriter against one created by Excel.
    """
    def setUp(self):
        self.maxDiff = None
        base_name = 'page_breaks05.xlsx'
        comparison_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = comparison_dir + '_test_' + base_name
        self.exp_filename = comparison_dir + 'xlsx_files/' + base_name
        # Binary printer settings (and their rels) are writer-specific noise.
        self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
                             'xl/worksheets/_rels/sheet1.xml.rels']
        self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
                                'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
    def test_create_file(self):
        """Create a workbook with vertical page breaks and compare to Excel."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        worksheet.set_v_pagebreaks([8, 3, 1, 0])
        worksheet.write('A1', 'Foo')
        workbook.close()
        self.assertExcelEqual()
|
[
"lealuque.tw@gmail.com"
] |
lealuque.tw@gmail.com
|
2ae73fa529d6a5d8123828c55ea1d50f63660ebd
|
4a43cded9d76bd05ca87f037de19ff921a60e151
|
/13day/07-线程多任务.py
|
4669c5331f43b6623bc152dc6442a8ad57e8eec5
|
[] |
no_license
|
lishuang1994/-1807
|
f7d54c3c93d41539747fc77529d56e73a931676f
|
23ee6d351a2797d3f5ba7907954d1a93d2745c10
|
refs/heads/master
| 2020-03-25T10:09:39.333264
| 2018-08-22T10:53:49
| 2018-08-22T10:53:49
| 143,685,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
import time
from threading import Thread


def saysorry():
    """Print an apology message, then pause for one second."""
    print("亲爱的,跪安了")
    time.sleep(1)


# Threaded variant kept disabled; runs sequentially instead.
# t = Thread(target=saysorry)
# t.start()
for _ in range(5):
    saysorry()
|
[
"840948928@qq.com"
] |
840948928@qq.com
|
e34d413a8c1c98d6d21b693a46353b5a9c8d1190
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2819/60731/291725.py
|
8bfd0a4aca43e95e2c7b094a9d0e61a6a52acd4a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Reads n group sizes (each 1-4) and computes how many containers of
# capacity 4 are needed, packing fours alone, ones four-at-a-time and
# twos two-at-a-time first.
n=int(input())
data=list(map(int,input().split()))
# Frequency of each group size.
numof1=data.count(1)
numof2=data.count(2)
numof3=data.count(3)
numof4=data.count(4)
ans=0
ans+=numof4
# Pack leftover 1s four to a container.
num1=int(numof1/4)
ans+=num1
numof1-=4*num1
# Pack 2s two to a container.
num2=int(numof2/2)
ans+=num2
numof2-=2*num2
# NOTE(review): the condition mixes `num2` (pairs of 2s already packed) with
# `numof2` (leftover 2s) — this looks like a typo; verify against the
# intended packing rule before relying on this branch.
if (num2+numof3)>=numof1:
    ans+=(numof2+numof3)
else:
    ans+=numof1
print(ans)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
55782bca4fc53bfb30a3d26d1336c6538a21a16a
|
6b8c3974d3ce5f7841e51dcb406666c0c5d92155
|
/heat/heat_integrationtests/common/clients.py
|
c7a7f60e6cf45d26c6119542ab40388bb7cfed8a
|
[
"Apache-2.0"
] |
permissive
|
swjang/cloudexchange
|
bbbf78a2e7444c1070a55378092c17e8ecb27059
|
c06ed54f38daeff23166fb0940b27df74c70fc3e
|
refs/heads/master
| 2020-12-29T03:18:43.076887
| 2015-09-21T07:13:22
| 2015-09-21T07:13:22
| 42,845,532
| 1
| 1
| null | 2015-09-21T07:13:22
| 2015-09-21T05:19:35
|
C++
|
UTF-8
|
Python
| false
| false
| 5,888
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ceilometerclient.client
import cinderclient.client
import heatclient.client
import keystoneclient.exceptions
import keystoneclient.v2_0.client
import neutronclient.v2_0.client
import novaclient.client
import swiftclient
class ClientManager(object):
    """
    Manager that provides access to the official python clients for
    calling various OpenStack APIs.
    """
    # Client API versions pinned for these integration tests.
    CINDERCLIENT_VERSION = '1'
    HEATCLIENT_VERSION = '1'
    NOVACLIENT_VERSION = '2'
    CEILOMETER_VERSION = '2'
    def __init__(self, conf):
        """Build one client per OpenStack service from test config *conf*."""
        self.conf = conf
        self.identity_client = self._get_identity_client()
        self.orchestration_client = self._get_orchestration_client()
        self.compute_client = self._get_compute_client()
        self.network_client = self._get_network_client()
        self.volume_client = self._get_volume_client()
        self.object_client = self._get_object_client()
        self.metering_client = self._get_metering_client()
    def _get_orchestration_client(self):
        """Return a heat client, or None if no orchestration endpoint exists.

        The endpoint may be forced via the HEAT_URL environment variable and
        authentication skipped with OS_NO_CLIENT_AUTH=True.
        NOTE(review): if OS_NO_CLIENT_AUTH is 'True' and HEAT_URL is unset,
        the catalog lookup below references `keystone` before assignment —
        confirm those two settings are always used together.
        """
        region = self.conf.region
        endpoint = os.environ.get('HEAT_URL')
        if os.environ.get('OS_NO_CLIENT_AUTH') == 'True':
            token = None
        else:
            keystone = self._get_identity_client()
            token = keystone.auth_token
        try:
            if endpoint is None:
                endpoint = keystone.service_catalog.url_for(
                    attr='region',
                    filter_value=region,
                    service_type='orchestration',
                    endpoint_type='publicURL')
        except keystoneclient.exceptions.EndpointNotFound:
            return None
        else:
            return heatclient.client.Client(
                self.HEATCLIENT_VERSION,
                endpoint,
                token=token,
                username=self.conf.username,
                password=self.conf.password)
    def _get_identity_client(self):
        """Return a fresh keystone (v2.0) client."""
        return keystoneclient.v2_0.client.Client(
            username=self.conf.username,
            password=self.conf.password,
            tenant_name=self.conf.tenant_name,
            auth_url=self.conf.auth_url,
            insecure=self.conf.disable_ssl_certificate_validation)
    def _get_compute_client(self):
        """Return a nova client for the configured region."""
        dscv = self.conf.disable_ssl_certificate_validation
        region = self.conf.region
        client_args = (
            self.conf.username,
            self.conf.password,
            self.conf.tenant_name,
            self.conf.auth_url
        )
        # Create our default Nova client to use in testing
        return novaclient.client.Client(
            self.NOVACLIENT_VERSION,
            *client_args,
            service_type='compute',
            endpoint_type='publicURL',
            region_name=region,
            no_cache=True,
            insecure=dscv,
            http_log_debug=True)
    def _get_network_client(self):
        """Return a neutron (v2.0) client."""
        auth_url = self.conf.auth_url
        dscv = self.conf.disable_ssl_certificate_validation
        return neutronclient.v2_0.client.Client(
            username=self.conf.username,
            password=self.conf.password,
            tenant_name=self.conf.tenant_name,
            endpoint_type='publicURL',
            auth_url=auth_url,
            insecure=dscv)
    def _get_volume_client(self):
        """Return a cinder client for the configured region."""
        auth_url = self.conf.auth_url
        region = self.conf.region
        endpoint_type = 'publicURL'
        dscv = self.conf.disable_ssl_certificate_validation
        return cinderclient.client.Client(
            self.CINDERCLIENT_VERSION,
            self.conf.username,
            self.conf.password,
            self.conf.tenant_name,
            auth_url,
            region_name=region,
            endpoint_type=endpoint_type,
            insecure=dscv,
            http_log_debug=True)
    def _get_object_client(self):
        """Return a swift connection authenticated via keystone v2.0."""
        dscv = self.conf.disable_ssl_certificate_validation
        args = {
            'auth_version': '2.0',
            'tenant_name': self.conf.tenant_name,
            'user': self.conf.username,
            'key': self.conf.password,
            'authurl': self.conf.auth_url,
            'os_options': {'endpoint_type': 'publicURL'},
            'insecure': dscv,
        }
        return swiftclient.client.Connection(**args)
    def _get_metering_client(self):
        """Return a ceilometer client, or None if no metering endpoint exists."""
        dscv = self.conf.disable_ssl_certificate_validation
        keystone = self._get_identity_client()
        try:
            endpoint = keystone.service_catalog.url_for(
                attr='region',
                filter_value=self.conf.region,
                service_type='metering',
                endpoint_type='publicURL')
        except keystoneclient.exceptions.EndpointNotFound:
            return None
        else:
            args = {
                'username': self.conf.username,
                'password': self.conf.password,
                'tenant_name': self.conf.tenant_name,
                'auth_url': self.conf.auth_url,
                'insecure': dscv,
                'region_name': self.conf.region,
                'endpoint_type': 'publicURL',
                'service_type': 'metering',
            }
            return ceilometerclient.client.Client(self.CEILOMETER_VERSION,
                                                  endpoint, **args)
|
[
"kiku4@kinx.net"
] |
kiku4@kinx.net
|
7b56fc22aa32b03fb0cfa39a1d0b49bca82553d5
|
59d5a801dd8361fe2b68f0cdfc1a0c06bbe9d275
|
/Competition/fast-flux域名检测/backup/v1/feature_engineering.py
|
70c7ab417fa009bb50ad40233fda391a9e85cdb7
|
[] |
no_license
|
HanKin2015/Machine_to_DeepingLearning
|
2ff377aa68655ca246eb19bea20fec232cec5d77
|
58fa8d06ef8a8eb0762e7cbd32a09552882c5412
|
refs/heads/master
| 2023-01-25T01:16:41.440064
| 2023-01-18T08:23:49
| 2023-01-18T08:23:49
| 134,238,811
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,016
|
py
|
# -*- coding: utf-8 -*-
"""
文 件 名: feature_engineering.py
文件描述: 特征工程
作 者: HanKin
创建日期: 2022.10.18
修改日期:2022.10.18
Copyright (c) 2022 HanKin. All rights reserved.
"""
from common import *
def exception_value_processing_by_delete(dataset, feature, lower_threshold, upper_threshold):
    """Drop rows whose *feature* value lies outside [lower_threshold, upper_threshold]."""
    in_range = dataset[feature].between(lower_threshold, upper_threshold)
    return dataset[in_range]
def exception_value_processing_by_median(dataset, feature, lower_threshold, upper_threshold):
    """Clamp out-of-range values of *feature* to the median of the in-range rows."""
    in_range = dataset[(lower_threshold <= dataset[feature]) & (dataset[feature] <= upper_threshold)]
    logger.debug('{}<{},{}>: {}/{}.'.format(feature, lower_threshold, upper_threshold, in_range.shape[0], dataset.shape[0]))
    # The filtered frame is a copy, so the replacement value is stable.
    replacement = in_range[feature].median()
    dataset.loc[dataset[feature] < lower_threshold, feature] = replacement
    dataset.loc[dataset[feature] > upper_threshold, feature] = replacement
    return dataset
def exception_value_processing_by_mean(dataset, feature, lower_threshold, upper_threshold):
    """Clamp out-of-range values of *feature* to the (truncated) mean of the in-range rows."""
    in_range = dataset[(lower_threshold <= dataset[feature]) & (dataset[feature] <= upper_threshold)]
    logger.debug('{}<{},{}>: {}/{}.'.format(feature, lower_threshold, upper_threshold, in_range.shape[0], dataset.shape[0]))
    # The filtered frame is a copy, so the replacement value is stable.
    replacement = int(in_range[feature].mean())
    dataset.loc[dataset[feature] < lower_threshold, feature] = replacement
    dataset.loc[dataset[feature] > upper_threshold, feature] = replacement
    return dataset
def missing_value_processing(dataset):
    """Fill missing values by propagating each column's previous value forward."""
    return dataset.ffill()
def exception_value_processing(dataset):
    """Clamp known outlier features to their in-range mean; drop extreme
    SizeOfImage rows on the labelled (training) set only."""
    # (feature, lower bound, upper bound) triples for mean-clamping.
    exception_values = [
        ['SizeOfStackReserve', 0, 2e7],
        ['ExportRVA', 0, 2e7],
        ['DebugRVA', 0, 1e7],
        ['IATRVA', 0, 1e7],
    ]
    for feature, lower_threshold, upper_threshold in exception_values:
        dataset = exception_value_processing_by_mean(dataset, feature, lower_threshold, upper_threshold)
    # Only the training set (which carries 'label') can afford to lose rows.
    if 'label' in dataset.columns:
        dataset = exception_value_processing_by_delete(dataset, 'SizeOfImage', 1e4, 5e9)
    return dataset
def delete_uncorrelated_features(dataset):
    """Drop, in place, the features judged uncorrelated with the target; return the frame."""
    dataset.drop(columns=['time_first', 'time_last', 'rrtype', 'rdata', 'bailiwick'],
                 inplace=True)
    return dataset
def datetime_processing(dataset):
    """Expand the TimeDateStamp epoch column into calendar/time-of-day features.

    NOTE(review): time.localtime makes every derived field depend on the host
    timezone — confirm that is intended.
    """
    dataset['TimeDateStamp'] = dataset['TimeDateStamp'].apply(lambda x: time.strftime("%Y-%m-%d %X", time.localtime(x)))
    ts_objs = np.array([pd.Timestamp(item) for item in np.array(dataset['TimeDateStamp'])])
    dataset['TS_obj'] = ts_objs
    # Date components (DayName is categorical; it is mapped to a number below).
    dataset['Year'] = dataset['TS_obj'].apply(lambda x: x.year)
    dataset['Month'] = dataset['TS_obj'].apply(lambda x: x.month)
    dataset['Day'] = dataset['TS_obj'].apply(lambda x: x.day)
    dataset['DayOfWeek'] = dataset['TS_obj'].apply(lambda x: x.dayofweek)
    dataset['DayName'] = dataset['TS_obj'].apply(lambda x: x.day_name())
    dataset['DayOfYear'] = dataset['TS_obj'].apply(lambda x: x.dayofyear)
    dataset['WeekOfYear'] = dataset['TS_obj'].apply(lambda x: x.weekofyear)
    dataset['Quarter'] = dataset['TS_obj'].apply(lambda x: x.quarter)
    # Numeric encoding of the weekday name (Monday=1 .. Sunday=7).
    day_name_map = {'Monday': 1, 'Tuesday': 2, 'Wednesday': 3, 'Thursday': 4, 'Friday': 5,
                    'Saturday': 6, 'Sunday': 7}
    dataset['DayNameBinMap'] = dataset['DayName'].map(day_name_map)
    # Time-of-day components.
    dataset['Hour'] = dataset['TS_obj'].apply(lambda x: x.hour)
    dataset['Minute'] = dataset['TS_obj'].apply(lambda x: x.minute)
    dataset['Second'] = dataset['TS_obj'].apply(lambda x: x.second)
    #dataset['MUsecond'] = dataset['TS_obj'].apply(lambda x: x.microsecond)
    #dataset['UTC_offset'] = dataset['TS_obj'].apply(lambda x: x.utcoffset())
    ## Bucket the hour into coarse day periods and one-hot encode the buckets.
    hour_bins = [-1, 5, 11, 16, 21, 23]
    bin_names = ['LateNight', 'Morning', 'Afternoon', 'Evening', 'Night']
    dataset['HourBin'] = pd.cut(dataset['Hour'], bins=hour_bins, labels=bin_names)
    hour_bin_dummy_features = pd.get_dummies(dataset['HourBin'])
    dataset = pd.concat([dataset, hour_bin_dummy_features], axis=1)
    return dataset
def discrete_value_processing(dataset):
    """Label-encode the categorical columns and count comma-separated rdata entries."""
    encoder = LabelEncoder()
    dataset['rrname_label'] = encoder.fit_transform(dataset['rrname'])
    dataset['bailiwick_label'] = encoder.fit_transform(dataset['bailiwick'])
    # Number of records packed into the comma-separated rdata field.
    dataset['rdata_count'] = dataset['rdata'].apply(lambda x: len(x.split(',')))
    return dataset
def time_processing(dataset):
    """Derive the observation window length (whole days) from the time columns."""
    window_seconds = dataset['time_last'] - dataset['time_first']
    dataset['time_interval'] = window_seconds.apply(lambda seconds: int(seconds / 86400))
    return dataset
def features_processing(dataset):
    """Run the active feature-engineering steps over *dataset* and return it."""
    # Missing-value handling (disabled).
    #dataset = missing_value_processing(dataset)

    # Outlier handling (disabled).
    #dataset = exception_value_processing(dataset)

    # Date/time expansion (disabled); window-length feature is active.
    #datetime_processing(dataset)
    time_processing(dataset)

    # Label-encode categorical columns.
    dataset = discrete_value_processing(dataset)

    # Drop the weakly correlated features.
    dataset = delete_uncorrelated_features(dataset)

    # Feature-specific special cases (none at present).
    return dataset
def extended_custom_features(dataset, extended_features_path):
    """Placeholder for custom string-derived features; currently returns *dataset* unchanged."""
    return dataset
def extended_features(dataset, sample_path, extended_features_path):
    """Placeholder for sample-derived extended features; currently returns *dataset* unchanged."""
    return dataset
def main():
    """Load the raw datasets, attach labels, run feature engineering and save."""
    # Load raw train/test data and the training labels.
    train_dataset = pd.read_csv(TRAIN_RAW_DATASET_PATH)
    train_label = pd.read_csv(TRAIN_LABEL_PATH)
    test_dataset = pd.read_csv(TEST_RAW_DATASET_PATH)
    logger.info([train_dataset.shape, train_label.shape, test_dataset.shape])
    # Attach labels: the label file's 'domain' column corresponds to 'rrname'.
    train_label.rename(columns = {"domain": "rrname"}, inplace=True)
    train_dataset = train_dataset.merge(train_label, on='rrname', how='left')
    logger.info([train_dataset.shape])
    # Dirty-data removal (currently nothing to do).
    # Feature engineering for both splits.
    train_dataset = features_processing(train_dataset)
    test_dataset = features_processing(test_dataset)
    logger.info('train_dataset: ({}, {}), test_dataset: ({}, {}).'.format(
        train_dataset.shape[0], train_dataset.shape[1],
        test_dataset.shape[0], test_dataset.shape[1],))
    # Move the label column to the last position.
    label = train_dataset['label']
    train_dataset.drop(['label'], axis=1, inplace=True)
    train_dataset = pd.concat([train_dataset, label], axis=1)
    # Persist the engineered datasets.
    logger.info([train_dataset.shape, test_dataset.shape])
    logger.info(train_dataset.columns)
    train_dataset.to_csv(TRAIN_DATASET_PATH, sep=',', encoding='utf-8', index=False)
    test_dataset.to_csv(TEST_DATASET_PATH, sep=',', encoding='utf-8', index=False)
if __name__ == '__main__':
    #os.system('chcp 936 & cls')
    logger.info('******** starting ********')
    # Time the whole run and log the elapsed seconds at the end.
    start_time = time.time()
    main()
    end_time = time.time()
    logger.info('process spend {} s.\n'.format(round(end_time - start_time, 3)))
|
[
"1058198502@qq.com"
] |
1058198502@qq.com
|
e3c587cf4a2d905939a4c7e8750bc6d0ff07f00c
|
4c718d78039ca97e1a9b18897642e8bc5cd394d9
|
/Python code of quizzes/Lesson 6/TagTypes.py
|
e4bdc42307b8303b26cd8b79cf4aa17d2960ee3f
|
[] |
no_license
|
aish27/data-wrangle-openstreetmaps-data
|
81a3fa4a1a06ea8972f6e98af77e9e71350a65bc
|
9e5eedf49ca67b480bb969ebe4db77df62bf238d
|
refs/heads/master
| 2020-12-02T19:26:04.726496
| 2015-06-12T17:38:37
| 2015-06-12T17:38:37
| 37,049,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
#Determines the problematic tag types that exist in a dataset and their numbers.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
lower = re.compile(r'^([a-z]|_)*$')  # keys made only of lowercase letters/underscores
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')  # namespaced keys like "addr:street"
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')  # characters treated as problematic in keys
#Uses regular expressions to check each element and find if it contains a problematic tag.
def key_type(element, keys):
    """Classify a 'tag' element's 'k' attribute into one bucket of *keys*.

    Buckets: 'lower' (all lowercase), 'lower_colon' (lowercase with a colon),
    'problemchars' (contains a problematic character), 'other'.
    Returns the updated *keys* dict.
    """
    if element.tag == "tag":
        temp = element.attrib['k']
        # Fix: removed a leftover Python 2 debug statement ("print temp") that
        # spammed stdout and is a syntax error under Python 3.
        a = re.search(lower, temp)
        b = re.search(lower_colon, temp)
        c = re.search(problemchars, temp)
        if a is not None:
            keys["lower"] = keys["lower"] + 1
        elif b is not None:
            keys["lower_colon"] = keys["lower_colon"] + 1
        elif c is not None:
            keys["problemchars"] = keys["problemchars"] + 1
        else:
            keys["other"] = keys["other"] + 1
    return keys
#Processes the map and tallies problematic tag keys.
def process_map(filename):
    """Stream-parse the OSM file and tally tag-key categories via key_type()."""
    tallies = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
    for _, element in ET.iterparse(filename):
        tallies = key_type(element, tallies)
    return tallies
#Uses other methods to find tag types and tests the output.
def test():
    """Run process_map on the bundled example file and check the tallies."""
    keys = process_map('example.osm')
    pprint.pprint(keys)
    assert keys == {'lower': 5, 'lower_colon': 0, 'other': 1, 'problemchars': 1}
if __name__ == "__main__":
    test()
|
[
"root@ip-10-47-174-141.ec2.internal"
] |
root@ip-10-47-174-141.ec2.internal
|
a0c7ffbc2b8b323aaf0df3752796182fa2ad9aa5
|
0d0cf0165ca108e8d94056c2bae5ad07fe9f9377
|
/12_Dimensionality_Reduction_in_Python/2_Feature_selection_I_selecting_for_feature_information/visualizingTheCorrelationMatrix.py
|
219bc39cbbc113f94d69e038232a34aa880c624e
|
[] |
no_license
|
MACHEIKH/Datacamp_Machine_Learning_For_Everyone
|
550ec4038ebdb69993e16fe22d5136f00101b692
|
9fe8947f490da221430e6dccce6e2165a42470f3
|
refs/heads/main
| 2023-01-22T06:26:15.996504
| 2020-11-24T11:21:53
| 2020-11-24T11:21:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# Visualizing the correlation matrix
# Reading the correlation matrix of ansur_df in its raw, numeric format doesn't allow us to get a quick overview. Let's improve this by removing redundant values and visualizing the matrix using seaborn.
# Seaborn has been pre-loaded as sns, matplotlib.pyplot as plt, NumPy as np and pandas as pd.
# Instructions 1/4
# 100 XP
# Create the correlation matrix.
# Visualize it using Seaborn's heatmap function.
# Instructions 2/4
# 0 XP
# Create a boolean mask for the upper triangle of the plot.
# Instructions 3/4
# 0 XP
# Add the mask to the heatmap.
# # Create the correlation matrix (Instruction 1)
# corr = ansur_df.corr()
# # Draw the heatmap
# sns.heatmap(corr, cmap=cmap, center=0, linewidths=1, annot=True, fmt=".2f")
# plt.show()
# # Create the correlation matrix (Instruction 2)
# corr = ansur_df.corr()
# # Generate a mask for the upper triangle
# mask = np.triu(np.ones_like(corr, dtype=bool))
# NOTE(review): ansur_df, cmap, sns, np and plt are supplied by the exercise
# environment — this snippet is not runnable standalone.
# Create the correlation matrix (Instruction 3)
corr = ansur_df.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Add the mask to the heatmap
sns.heatmap(corr, mask=mask, cmap=cmap, center=0, linewidths=1, annot=True, fmt=".2f")
plt.show()
|
[
"noreply@github.com"
] |
MACHEIKH.noreply@github.com
|
f765a53567eb4d535fe4f1d1fd0c6899ff8f27de
|
4edbeb3e2d3263897810a358d8c95854a468c3ca
|
/python3/psutil/count1.py
|
2930af68edad4e73b64927a1ebaa37e2c82cd7d6
|
[
"MIT"
] |
permissive
|
jtraver/dev
|
f505d15d45b67a59d11306cc7252114c265f388b
|
2197e3443c7619b856470558b737d85fe1f77a5a
|
refs/heads/master
| 2023-08-06T02:17:58.601861
| 2023-08-01T16:58:44
| 2023-08-01T16:58:44
| 14,509,952
| 0
| 1
|
MIT
| 2020-10-14T18:32:48
| 2013-11-19T00:51:19
|
Python
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
#!/usr/bin/env python3
#!/usr/bin/python
import time
import sys


def main():
    """Print the integers 0-9, one per second, flushing stdout after each."""
    # for count in xrange(10000000):
    for number in range(10):
        print(str(number))
        sys.stdout.flush()
        time.sleep(1)


main()
|
[
"john@aeropsike.com"
] |
john@aeropsike.com
|
22333f24e36f7de94f303b98576d2fb4166f585b
|
e91fe9c77c39ab855383839820867d5dda27cfdd
|
/posts/api/pagination.py
|
feb17a52e7d48c1ab4073025106caa309455469e
|
[] |
no_license
|
chutchais/Blog-API
|
6a69580c1f97ecce01e500de56e730864f1e519a
|
8402d638832703aa4ede7e7e05ac47a902320cd9
|
refs/heads/master
| 2021-01-12T03:19:36.874801
| 2017-01-12T14:48:42
| 2017-01-12T14:48:42
| 78,195,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
from rest_framework.pagination import (
LimitOffsetPagination,
PageNumberPagination,
)
class PostLimitOffsetPagination(LimitOffsetPagination):
    """Limit/offset pagination for posts."""
    # Default number of posts per request; clients may raise it up to max_limit.
    default_limit =2
    max_limit=10
class PostPageNumberPagination(PageNumberPagination):
    """Page-number pagination for posts, two posts per page."""
    page_size=2
|
[
"chutchai.s@gmail.com"
] |
chutchai.s@gmail.com
|
b878c8557764a724f1d3aeb71971de4a8a664095
|
2fac796fa58c67fb5a4a95a6e7f28cbef169318b
|
/python/connecting-graph-ii.py
|
8dd9c83c7a70a19f748db2b7a41f9c71f45adecf
|
[] |
no_license
|
jwyx3/practices
|
f3fe087432e79c8e34f3af3a78dd10278b66dd38
|
6fec95b9b4d735727160905e754a698513bfb7d8
|
refs/heads/master
| 2021-03-12T20:41:59.816448
| 2019-04-14T06:47:30
| 2019-04-14T06:47:30
| 18,814,777
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
class ConnectingGraph2:
    """Union-Find over nodes 1..n with path compression and component sizes."""
    # @param {int} n
    def __init__(self, n):
        # father[x] == 0 marks x as a root; counter[root] is its component size.
        # Fix: xrange is Python 2 only (NameError on Python 3); range is
        # equivalent here and works on both.
        self.father = [0 for i in range(n + 1)]
        self.counter = [1 for i in range(n + 1)]
    def find(self, a):
        """Return the root of a's component, compressing the path as we go."""
        if self.father[a] == 0:
            return a
        self.father[a] = self.find(self.father[a])
        return self.father[a]
    # @param {int} a, b
    # return nothing
    def connect(self, a, b):
        """Merge the components containing a and b (no-op if already joined)."""
        ra = self.find(a)
        rb = self.find(b)
        if ra != rb:
            self.father[ra] = rb
            self.counter[rb] += self.counter[ra]
    # @param {int} a
    # return {int} the number of nodes connected component
    # which include a node.
    def query(self, a):
        """Return the size of the component containing a."""
        ra = self.find(a)
        return self.counter[ra]
|
[
"jwyx88003@gmail.com"
] |
jwyx88003@gmail.com
|
10f6230475b7cd896a058d46d101b08a3ccd9aa9
|
1885e952aa4a89f8b417b4c2e70b91bf1df887ff
|
/ABC163/D.py
|
618f915abc6ca69fb1b9175433ddb970de9c633a
|
[] |
no_license
|
takumiw/AtCoder
|
01ed45b4d537a42e1120b1769fe4eff86a8e4406
|
23b9c89f07db8dd5b5345d7b40a4bae6762b2119
|
refs/heads/master
| 2021-07-10T12:01:32.401438
| 2020-06-27T14:07:17
| 2020-06-27T14:07:17
| 158,206,535
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
import sys
readline = sys.stdin.buffer.readline  # fast binary stdin reader for bulk input
MOD = 10 ** 9 + 7  # answers are reported modulo this prime
def main():
    """Sum, over every selection size from K to N+1, the count of distinct
    sums achievable by picking that many distinct values from 0..N, and
    print the total modulo MOD."""
    N, K = map(int, readline().rstrip().split())
    total = (N * (N + 1)) // 2
    answer = 0
    for size in range(K, N + 2):
        # Smallest / largest possible sum of `size` distinct values in 0..N.
        smallest = ((size - 1) * size) // 2
        largest = total - ((N - size) * (N - size + 1)) // 2
        answer += largest - smallest + 1
    print(answer % MOD)


if __name__ == '__main__':
    main()
|
[
"w.tak.1229@gmail.com"
] |
w.tak.1229@gmail.com
|
5ae57a50894ee06d799ee88acb9132651d570cb8
|
78f54f911d47019da0deeeb6579c7e9e65bb8d21
|
/src/scheduler/models/dao/common/Log.py
|
1171ab5340bfce2bfeec5a4abb90b01fc98e85fa
|
[
"MIT"
] |
permissive
|
jedicontributors/pythondataintegrator
|
02f8ae1a50cf5ddd85341da738c24aa6a320c442
|
3e877b367ab9b20185476128ec053db41087879f
|
refs/heads/main
| 2023-06-15T07:37:13.313988
| 2021-07-03T15:46:43
| 2021-07-03T15:46:43
| 354,021,102
| 0
| 0
|
MIT
| 2021-07-03T15:46:44
| 2021-04-02T13:03:12
|
Python
|
UTF-8
|
Python
| false
| false
| 913
|
py
|
from sqlalchemy import Column, String, Integer, DateTime
from IocManager import IocManager
from models.dao.Entity import Entity
class Log(Entity, IocManager.Base):
    """SQLAlchemy model for rows of the Common.Log table."""
    __tablename__ = "Log"
    __table_args__ = {"schema": "Common"}
    # Integer code identifying the log entry type.
    TypeId = Column(Integer, index=True, unique=False, nullable=False)
    # Free-form log message, capped at 4000 characters.
    Content = Column(String(4000), index=False, unique=False, nullable=True)
    # Timestamp of the log entry.
    LogDatetime = Column(DateTime, index=False, unique=False, nullable=True)
    # Optional id of the job that produced this entry.
    JobId = Column(Integer, index=True, unique=False, nullable=True)
    def __init__(self,
                 TypeId: int = None,
                 Content: str = None,
                 LogDatetime: DateTime = None,
                 JobId: int = None,
                 *args, **kwargs):
        """Create a Log row; extra args are forwarded to the Entity base."""
        super().__init__(*args, **kwargs)
        self.TypeId = TypeId
        self.Content = Content
        self.LogDatetime = LogDatetime
        self.JobId = JobId
|
[
"ahmetcagriakca@gmail.com"
] |
ahmetcagriakca@gmail.com
|
084706420a235cb05902a3d6090d79a5ce7dd498
|
3ce592352627591346ea33ea0c2665ad879414e2
|
/References/opengl/computer.graphics.through.opengl/02_basic_drawing/moveSphere.py
|
eda2a48c42e3589ed85713e89d5a27c85fe955ba
|
[
"MIT"
] |
permissive
|
royqh1979/python_libs_usage
|
113df732ef106f4a5faae1343493756fd703c8c0
|
57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511
|
refs/heads/master
| 2021-04-16T18:14:43.835482
| 2021-01-11T03:55:25
| 2021-01-11T03:55:25
| 249,374,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,672
|
py
|
"""
// moveSphere.py
//
// This program allows the user to move a sphere to demonstrate
// distortion at the edges of the viewing frustum.
//
// Interaction:
// Press the arrow keys to move the sphere.
// Press the space bar to rotate the sphere..
// Press r to reset.
//
"""
from PyQt5 import QtWidgets, QtCore, QtGui, QtOpenGL
from OpenGL.GL import *
import math as m
# Co-ordinates of the sphere center (moved with the arrow keys).
Xvalue = 0.0
Yvalue = 0.0
Angle = 0.0; # Angle to rotate the sphere (incremented by the space bar).
def myWireSphere(r:float, nParal:int, nMerid:int):
    """Draw a wireframe sphere of radius *r* with raw GL line loops.

    :param r: sphere radius
    :param nParal: number of latitude circles (parallels)
    :param nMerid: number of longitude circles (meridians)
    """
    # Parallels: for each polar angle j, a circle at constant height y.
    j=0
    while j<m.pi:
        glBegin(GL_LINE_LOOP)
        y=r*m.cos(j)
        i=0
        while i < 2*m.pi:
            x=r*m.cos(i)*m.sin(j)
            z=r*m.sin(i)*m.sin(j)
            glVertex3f(x,y,z)
            i += m.pi / 60   # 120 segments per circle
        glEnd();
        j += m.pi / (nParal + 1)
    # Meridians: for each longitude j, a full great circle through the poles,
    # so j only needs to sweep half a turn (0..pi).
    j=0
    while j<m.pi:
        glBegin(GL_LINE_LOOP)
        i=0
        while i < 2*m.pi:
            x=r*m.sin(i)*m.cos(j)
            y=r*m.cos(i)
            z=r*m.sin(j)*m.sin(i)
            glVertex3f(x,y,z)
            i += m.pi / 60
        glEnd();
        j += m.pi / nMerid
class MyWidget(QtWidgets.QOpenGLWidget):
    """Fixed-size GL viewport that draws the movable sphere and handles keys."""
    def __init__(self):
        super().__init__()
        self.setFixedSize(500,500)
    def initializeGL(self) -> None:
        """One-time GL setup: white clear colour."""
        glClearColor(1.0, 1.0, 1.0, 0.0)
    def paintGL(self) -> None:
        """Redraw: translate/rotate the sphere using the module globals."""
        glClear(GL_COLOR_BUFFER_BIT)
        glLoadIdentity()
        #Set the position of the sphere.
        glTranslatef(Xvalue, Yvalue, -5.0)
        glRotatef(Angle, 1.0, 1.0, 1.0)
        glColor3f(0.0, 0.0, 0.0)
        myWireSphere(0.5, 16, 10)
        glFlush()
    def resizeGL(self, w: int, h: int) -> None:
        """Keep a perspective frustum projection matched to the viewport."""
        glViewport(0, 0, w, h)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glFrustum(-1.0, 1.0, -1.0, 1.0, 1.5, 20.0)
        glMatrixMode(GL_MODELVIEW)
    def keyPressEvent(self, event: QtGui.QKeyEvent) -> None:
        """Arrow keys move the sphere, space rotates it, R resets everything."""
        global Xvalue,Yvalue,Angle
        if event.key() == QtCore.Qt.Key_R:
            Xvalue = Yvalue = Angle = 0.0
            self.update()
        elif event.key() == QtCore.Qt.Key_Space:
            Angle += 10.0
            self.update()
        elif event.key() == QtCore.Qt.Key_Up:
            Yvalue += 0.1
            self.update()
        elif event.key() == QtCore.Qt.Key_Down:
            Yvalue -= 0.1
            self.update()
        elif event.key() == QtCore.Qt.Key_Left:
            Xvalue -= 0.1
            self.update()
        elif event.key() == QtCore.Qt.Key_Right:
            Xvalue += 0.1
            self.update()
# Create the Qt application and show the GL widget.
if __name__ == '__main__':
    app = QtWidgets.QApplication([])
    win = MyWidget()
    win.show()
    app.exec()
|
[
"royqh1979@gmail.com"
] |
royqh1979@gmail.com
|
e0287539bd801f859c461d1ae1d70b8055288938
|
5759c0ed3219c06437ce5b39ef9ad92b5e191fed
|
/py/0428_serialize_and_deserialize_n_ary_tree.py
|
1df779c0ca32628d1c3067e97cc5b065abbbda52
|
[] |
no_license
|
mengnan1994/Surrender-to-Reality
|
ba69df7c36112ad19f19157a9f368eae6340630f
|
66232728ce49149188f863271ec2c57e426abb43
|
refs/heads/master
| 2022-02-25T01:34:49.526517
| 2019-09-22T17:21:28
| 2019-09-22T17:21:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,773
|
py
|
"""
Serialization is the process of converting a data structure or object into a sequence of bits so that it can be stored in a file or memory buffer,
or transmitted across a network connection link to be reconstructed later in the same or another computer environment.
Design an algorithm to serialize and deserialize an N-ary tree. An N-ary tree is a rooted tree in which each node has no more than N children.
There is no restriction on how your serialization/deserialization algorithm should work.
You just need to ensure that an N-ary tree can be serialized to a string and this string can be deserialized to the original tree structure.
For example, you may serialize the following 3-ary tree as [1 [3[5 6] 2 4]].
You do not necessarily need to follow this format, so please be creative and come up with different approaches yourself.
Note:
1. N is in the range of [1, 1000]
2. Do not use class member/global/static variables to store states. Your serialize and deserialize algorithms should be stateless.
"""
class Node(object):
    """N-ary tree node: an integer value plus a list of child Nodes."""
    def __init__(self, val, children):
        self.val = val
        self.children = children


class Codec(object):
    def serialize(self, root):
        """Serialize the tree rooted at *root* into a bracket string.

        A node with children is encoded as [val child1 child2 ...]; a leaf
        carries an explicit empty-children marker, e.g. Node(5) -> "[5[]]".
        Only non-negative integer values are supported by deserialize().
        Fix: removed a leftover debug print of the intermediate string.
        """
        serialization = str(self.inorder_helper(root))
        # Strip the spaces and commas that str() inserts into the nested-list
        # representation, leaving only digits and brackets.
        res = ""
        for symbol in serialization:
            if symbol != " " and symbol != ",":
                res += symbol
        return res

    def inorder_helper(self, node):
        """Recursively convert *node* into a nested [val, children...] list."""
        if not node.children:
            return [node.val, []]
        return [node.val] + [self.inorder_helper(child) for child in node.children]

    def deserialize(self, data):
        """Rebuild a tree from a string produced by serialize(); None if empty."""
        if not data:
            return None
        scanner = 0
        dummy_head = Node("dummy head", [])
        stack = [dummy_head]   # path from the dummy root down to the open node
        val_stack = ["#"]      # parallel stack of values (debugging aid only)
        root = None
        while scanner < len(data):
            if data[scanner] == '[':
                scanner += 1
                if data[scanner] == ']':
                    # "[]" is the empty-children marker of a leaf.
                    scanner += 1
                elif data[scanner] in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    # Read a (possibly multi-digit) value and open a new node.
                    val = ""
                    while data[scanner] in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                        val += data[scanner]
                        scanner += 1
                    val = int(val)
                    node = Node(val, [])
                    stack[-1].children.append(node)
                    stack.append(node)
                    val_stack.append(node.val)
            else:
                # Closing bracket: the node on top of the stack is complete.
                root = stack.pop()
                val_stack.pop()
                scanner += 1
        return root
|
[
"hurricane.cui@gmail.com"
] |
hurricane.cui@gmail.com
|
18f43854543ea9c9c8c46129f56e665c7042d7d6
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/468/usersdata/304/111685/submittedfiles/Av2_Parte3.py
|
8f2fd9356f208463f08f81498b9a718876b02d30
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# -*- coding: utf-8 -*-
# Reads m lists of n integers each and prints every list's mean and
# sample standard deviation with two decimal places.
m = int(input('Quantas listas: '))
n = int(input('Qntd elemento listas: '))
for i in range(0, m, 1):
    lista = []
    for j in range(0, n, 1):
        lista.append(int(input('Elemento: ')))
    media = sum(lista) / len(lista)
    # Sample standard deviation: sum the squared deviations of the
    # *values*. The original summed loop indices minus the mean over only
    # n-1 elements and squared the total afterwards, which is wrong.
    soma = 0
    for x in lista:
        soma += (x - media) ** 2
    dp = (soma / (n - 1)) ** (1 / 2)
    print('%.2f' % media)
    print('%.2f' % dp)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
8e858adedb2bd3195fa4dfb7bb2b4c6af2189603
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03910/s110121278.py
|
4c9ecfd6bfbe6621fcbe2a07ea7568326440d590
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# Find the smallest j whose triangular sum 1+2+...+j reaches N, then
# print 1..j while omitting the single value k = sum - N, so that the
# printed numbers add up to exactly N.
N = int(input())
running = 0
j = 1
for candidate in range(1, N + 1):
    running += candidate
    if running >= N:
        j = candidate
        break
k = running - N
for value in range(1, j + 1):
    if value != k:
        print(value)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fa1fa3378a017fe7890883215c6f4ed40f43828a
|
a2b696ba8edd5e6b8aa1a4c4aea19cc56e6beb66
|
/api/serializers.py
|
d4db895abe2ac6146ca319be0a8d40433d07655a
|
[] |
no_license
|
kyrios213/drf-django
|
7163eeaba2f32cd0f1cfa5871718c1892a7dc83a
|
d61438bbb919e7ab845da14bf04cfd85d108de73
|
refs/heads/main
| 2023-06-05T03:19:33.440410
| 2021-06-21T03:23:46
| 2021-06-21T03:23:46
| 378,791,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
from rest_framework import serializers, viewsets
from django.contrib.auth import get_user_model
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the active user model; the password is write-only."""
    class Meta:
        model = get_user_model()
        fields = ['id', 'email', 'password']
        # Never echo the password back; require at least 8 characters.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 8}}
    def create(self, validated_data):
        # create_user() hashes the password instead of storing it raw.
        return get_user_model().objects.create_user(**validated_data)
|
[
"kyrios213@gmail.com"
] |
kyrios213@gmail.com
|
9132e1ad54fca3fe47ba425f3bbb3e1377737f65
|
6b6bf72694e5aa6425f11c956e4a5371b2c73e09
|
/populate/main.py
|
bcccf70630692e0c3d23db7eb0615a5c25787907
|
[] |
no_license
|
danielspeixoto/elasticsearch-workshop
|
5d9dccebb80bf58b786f167ff8eb6de8a0282a8c
|
9872831361ea349453b8e202eb02880c43cbc238
|
refs/heads/master
| 2020-04-02T01:18:28.858625
| 2018-10-25T22:24:28
| 2018-10-25T22:24:28
| 153,848,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from helpers.Index import Index, connect
from helpers.XMLRepository import XMLRepository
# Connect to a local Elasticsearch node and bulk-load the bitcoin XML
# dataset into the "meu-index" index.
connection = connect("localhost", "9200")
index = Index(connection, "meu-index")
dados = XMLRepository("./datasets/bitcoin/")
index.bulk_insert(dados.data())
|
[
"danielspeixoto@outlook.com"
] |
danielspeixoto@outlook.com
|
15aa14bd147b72e801f55c46ff70efe32d7ef80e
|
d6074aac6e9e5f2fa5355c3c9ddaebe892c4151d
|
/setup.py
|
8813b411347ea2ca52a5f6274befae3a458b8877
|
[
"BSD-2-Clause"
] |
permissive
|
pombredanne/pydeps
|
f5aae8b825cf86db8308adad808b4bbf8cee52a2
|
95833f42f86f9c5c23ef14b675828d0ddc4df73e
|
refs/heads/master
| 2021-01-18T07:52:41.522028
| 2016-05-19T13:53:14
| 2016-05-19T13:53:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
import setuptools
from distutils.core import setup
# Packaging metadata for pydeps, a module-dependency display tool.
setup(
    name='pydeps',
    version='1.2.7',
    packages=['pydeps'],
    # enum34 backports the enum module to pre-3.4 interpreters.
    install_requires=[
        'enum34'
    ],
    long_description=open('README.rst').read(),
    entry_points={
        'console_scripts': [
            'pydeps = pydeps.pydeps:pydeps',
        ]
    },
    url='https://github.com/thebjorn/pydeps',
    license='BSD',
    author='bjorn',
    author_email='bp@datakortet.no',
    description='Display module dependencies',
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
[
"bp@datakortet.no"
] |
bp@datakortet.no
|
7cb57f1aee5711236c63768e429265d71ba59e80
|
21b461b71b4c63f7aac341bd12ba35d211c7956e
|
/codes/03_func/Graph_Permutation_isomorphic/01_test_permutation.py
|
587c4d9c282ed70920f61d81d9c4546575e05260
|
[] |
no_license
|
Ziaeemehr/workshop_scripting
|
cebdcb552720f31fd6524fd43f257ca46baf70e2
|
ed5f232f6737bc9f750d704455442f239d4f0561
|
refs/heads/main
| 2023-08-22T23:00:36.121267
| 2023-07-19T10:53:41
| 2023-07-19T10:53:41
| 153,342,386
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
"""
A_G = P A_H P.T
P is permutation matrix
P.T is the transpose matrix of T
"""
import numpy as np
import networkx as nx
def permutation(alpha, n):
    """Return the n x n permutation matrix P with P[i][alpha[i]] == 1.

    :param alpha: sequence of length n giving the target column of row i
    :param n: matrix dimension
    """
    perm_matrix = np.zeros((n, n), dtype=int)
    # One fancy-indexed assignment instead of a Python-level loop.
    perm_matrix[np.arange(n), np.asarray(alpha)] = 1
    return perm_matrix
# Adjacency matrix of graph G (5 nodes, symmetric / undirected).
A_G = np.array([[0, 1, 0, 0, 0],
                [1, 0, 1, 0, 1],
                [0, 1, 0, 1, 1],
                [0, 0, 1, 0, 1],
                [0, 1, 1, 1, 0]])
# Adjacency matrix of graph H (expected to be isomorphic to G).
A_H = np.array([[0, 1, 1, 1, 0],
                [1, 0, 1, 0, 0],
                [1, 1, 0, 1, 0],
                [1, 0, 1, 0, 1],
                [0, 0, 0, 1, 0]])
# arbitrary permutation of nodes.
alpha = [4, 3, 0, 1, 2]
P = permutation(alpha, 5)
# Conjugate A_H by P: A_G should equal P * A_H * P^T when alpha is an
# isomorphism between the two graphs.
A_Gp = np.matmul(np.matmul(P, A_H), P.T)
# check if it works?
print(np.array_equal(A_Gp, A_G))
|
[
"a.ziaeemehr@gmail.com"
] |
a.ziaeemehr@gmail.com
|
38bbf0422041425be9f4f1aac557fcfbfe23a739
|
694d57c3e512ce916269411b51adef23532420cd
|
/leetcode_review2/74search_a_2D_matrix.py
|
866fe5944bf1f8a5a7c20dee6738ef99ba24a70b
|
[] |
no_license
|
clovery410/mycode
|
5541c3a99962d7949832a0859f18819f118edfba
|
e12025e754547d18d5bb50a9dbe5e725fd03fd9c
|
refs/heads/master
| 2021-05-16T02:46:47.996748
| 2017-05-10T23:43:50
| 2017-05-10T23:43:50
| 39,235,141
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
class Solution(object):
    """LeetCode 74-style search in a row- and column-sorted matrix."""

    def searchMatrix(self, matrix, target):
        """Return True iff *target* occurs in *matrix*.

        Starts at the top-right corner and discards one row or one column
        per step, giving O(rows + cols) time and O(1) space.
        """
        rows = len(matrix)
        cols = len(matrix[0]) if rows > 0 else 0
        row, col = 0, cols - 1
        while row < rows and col >= 0:
            value = matrix[row][col]
            if value == target:
                return True
            if value > target:
                col -= 1          # everything below in this column is larger
            else:
                row += 1          # everything left in this row is smaller
        return False
|
[
"seasoul410@gmail.com"
] |
seasoul410@gmail.com
|
b76b50f294cf1d7560a3699d3704b41173c7ea8c
|
c956401119e44e41f3873b4734c857eda957e2cd
|
/metrics/lr_smape.py
|
8a71c4a237fb069f751070fdc252cea4f1b598f7
|
[] |
no_license
|
zhekunz2/c4pp
|
6f689bce42215507d749b8b2be96c7f68ed8c49c
|
9f8054addb48d9440662d8c8f494359846423ffd
|
refs/heads/master
| 2020-09-24T06:17:29.866975
| 2019-12-03T17:41:25
| 2019-12-03T17:41:25
| 225,678,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
#!/usr/bin/env python
import os
import json
import sys
import numpy as np
# CLI: argv = smape_thres weight bias data_dir.  Prints "True: <smape>"
# when the linear fit y = weight*x + bias stays within the SMAPE
# threshold on data.json, "False: <smape>" otherwise, "Error" on failure.
try:
    smape_thres = float(sys.argv[1])
    weight = float(sys.argv[2])
    bias = float(sys.argv[3])
    data_dir = sys.argv[4]
    with open(os.path.join(data_dir, 'data.json')) as json_data:
        d = json.load(json_data)
    datas = [x for x in d.keys() if x == 'x' or x == 'y']
    data_x = sorted(datas)[0]
    data_y = sorted(datas)[1]
    # weight/bias are already floats; no need to re-convert per element.
    predict = [x * weight + bias for x in d[data_x]]
    observe = d[data_y]
    ape = np.zeros(len(observe))
    for i, p in enumerate(predict):
        o = observe[i]
        # NOTE(review): conventional SMAPE divides by (|o|+|p|)/2; this
        # variant omits the /2 — kept as-is to preserve the metric.
        ape[i] = abs(o - p) / (abs(o) + abs(p))
    smape = np.mean(ape)
    if abs(smape) <= smape_thres:
        print('True: ' + str(smape))
    else:
        print('False: ' + str(smape))
except Exception:
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the "print Error" contract
    # without trapping interpreter-control exceptions.
    print('Error')
exit(0)
|
[
"zhekunz2@Zhekuns-MacBook-Pro.local"
] |
zhekunz2@Zhekuns-MacBook-Pro.local
|
7a144841bd9fe8ec5e8663356c33fac86b2dbf31
|
acb082b215e6d214a5065f76c0454dcf1fb2a533
|
/src/cobra/core/auth/access.py
|
e3881977e6cf7cd7420cdef66f2da916e6890e6d
|
[
"Apache-2.0"
] |
permissive
|
lyoniionly/django-cobra
|
130b25cd897cc94b6a8da722e9a83ecea3b00c49
|
2427e5cf74b7739115b1224da3306986b3ee345c
|
refs/heads/master
| 2016-09-06T10:23:10.607025
| 2016-02-22T14:16:49
| 2016-02-22T14:16:49
| 29,646,851
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,147
|
py
|
from __future__ import absolute_import
from cobra.core.loading import get_model
__all__ = ['from_user', 'from_member', 'DEFAULT', 'SCOPES']
OrganizationMember = get_model('organization', 'OrganizationMember')
SCOPES = set([
'org:read',
'org:write',
'org:delete',
'team:read',
'team:write',
'team:delete',
'project:read',
'project:write',
'project:delete',
'event:read',
'event:write',
'event:delete',
])
class BaseAccess(object):
    """Deny-by-default access description; subclasses override the flags."""

    is_global = False
    is_active = False
    sso_is_valid = False
    teams = ()
    scopes = frozenset()

    def has_scope(self, scope):
        """Inactive access grants nothing; otherwise check the scope set."""
        return self.is_active and scope in self.scopes

    def has_team(self, team):
        """Global access implies every team; otherwise test membership."""
        if not self.is_active:
            return False
        return self.is_global or team in self.teams
class Access(BaseAccess):
    # TODO(dcramer): backend access is still a little gross; ideally it
    # would be based on the same scopes as API access so the semantics
    # are identical in both places
    def __init__(self, scopes, is_global, is_active, teams, sso_is_valid):
        """Concrete access snapshot for one user within an organization."""
        self.sso_is_valid = sso_is_valid
        self.is_active = is_active
        self.is_global = is_global
        self.scopes = scopes
        self.teams = teams
def from_user(user, organization):
    """Resolve the Access object for *user* within *organization*.

    Superusers receive global access with every scope; a missing
    organization or membership resolves to the deny-all DEFAULT.
    """
    if user.is_superuser:
        return Access(
            scopes=SCOPES,
            is_global=True,
            is_active=True,
            teams=(),
            sso_is_valid=True,
        )
    if not organization:
        return DEFAULT
    try:
        om = OrganizationMember.objects.get(
            user=user,
            organization=organization,
        )
    except OrganizationMember.DoesNotExist:
        return DEFAULT
    return from_member(om)
def from_member(member):
    """Build an Access object from an OrganizationMember row."""
    # TODO(dcramer): we want to optimize this access pattern as its several
    # network hops and needed in a lot of places
    if member.has_global_access:
        teams = ()
    else:
        teams = member.teams.all()
    # try:
    #     auth_provider = AuthProvider.objects.get(
    #         organization=member.organization_id,
    #     )
    # except AuthProvider.DoesNotExist:
    #     sso_is_valid = True
    # else:
    #     try:
    #         auth_identity = AuthIdentity.objects.get(
    #             auth_provider=auth_provider,
    #         )
    #     except AuthIdentity.DoesNotExist:
    #         sso_is_valid = False
    #     else:
    #         sso_is_valid = auth_identity.is_valid(member)
    # The SSO validation above is disabled; every member counts as valid.
    sso_is_valid = True
    return Access(
        is_global=member.has_global_access,
        is_active=True,
        sso_is_valid=sso_is_valid,
        scopes=member.scopes,
        teams=teams,
    )
class NoAccess(BaseAccess):
    """Deny-all access: every query answers as if no membership exists.

    ``sso_is_valid`` is True because no SSO requirement can apply when
    there is no access to begin with.
    """
    @property
    def sso_is_valid(self):
        return True
    @property
    def is_global(self):
        return False
    @property
    def is_active(self):
        return False
    @property
    def teams(self):
        return ()
    @property
    def scopes(self):
        return frozenset()
# Shared deny-all singleton returned whenever access cannot be resolved.
DEFAULT = NoAccess()
|
[
"beihua_445566@126.com"
] |
beihua_445566@126.com
|
59620cdbfa9167aeb555cca708e1f7ede5412081
|
7d31324f874130bc5059314048193f474f2a820a
|
/gui.py
|
6a41ea6e86f10145d7f19d02e451b0a0f571e17f
|
[] |
no_license
|
deshudiosh/PyTSF
|
2ae13c9724b6e290016aad6329db5c175bd319fa
|
6c3a8b6b35a4fc602f10aa10aff72d9bc887ccdb
|
refs/heads/master
| 2021-01-20T01:36:11.138441
| 2017-10-27T18:57:31
| 2017-10-27T18:57:31
| 89,306,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
import remi.gui as gui
from remi import start, App
class PyTsfGui(App):
    """remi web-GUI front end: a title label above a row of controls."""
    def __init__(self, *args):
        super(PyTsfGui, self).__init__(*args)
    def main(self):
        """Build and return the root widget (called by remi on page load)."""
        # margin 0px auto allows to center the app to the screen
        container = gui.VBox(width=400, margin='0px auto')
        container.style['background'] = '#808080'
        logo = gui.Label('PyTSF', width='80%', height=60, margin='0px auto')
        logo.style['margin'] = 'auto'
        # Control row: a dropdown followed by three buttons (R / O / Go!).
        panel = gui.HBox(width=400, height=100, margin='0px auto')
        dropdown = gui.DropDown()
        refresh = gui.Button('R')
        options = gui.Button("O")
        go = gui.Button("Go!")
        panel.append(dropdown)
        panel.append(refresh)
        panel.append(options)
        panel.append(go)
        container.append(logo)
        container.append(panel)
        # returning the root widget
        return container
def startApp():
    """Serve the GUI on 127.0.0.20 (debug, no file cache, multi-instance)."""
    start(PyTsfGui, address="127.0.0.20", debug=True, enable_file_cache=False, multiple_instance=True)
|
[
"deshudiosh@gmail.com"
] |
deshudiosh@gmail.com
|
2bc3743b880efe5c8a8739a55179572019820af5
|
c31e46965ea47cdb0c61a6b525aecea45dbf4d0b
|
/gram/views.py
|
b442f39e96c2ad10dff14a8042bb03981ca1e2de
|
[] |
no_license
|
marysinaida/-Instagram
|
687563c7b9a44adcdd09217ed02ff5c2c24623d2
|
cb0768ad24b7650db6b7fbfd9d445232b154cece
|
refs/heads/master
| 2022-12-06T07:02:55.786066
| 2020-01-08T06:57:47
| 2020-01-08T06:57:47
| 228,771,439
| 0
| 0
| null | 2022-11-22T05:13:53
| 2019-12-18T06:16:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,960
|
py
|
from django.shortcuts import render,get_object_or_404
from django.utils import timezone
from .forms import PostForm
from .models import Post,Profile
from django.contrib.auth.models import User
from django.views.generic import (ListView,CreateView,DetailView)
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
# Create your views here.
class PostListView(ListView):
    """List already-published posts, newest first."""
    template_name = "post_list.html"
    queryset = Post.objects.all().filter(created_date__lte=timezone.now()).order_by('-created_date')
    context_object_name = "posts"
    success_url = '/'
class PostCreateView(CreateView):
    """Create a Post; the logged-in user is stamped as the author."""
    template_name = "post_create.html"
    form_class = PostForm
    queryset = Post.objects.all()
    success_url = '/'
    def form_valid(self, form):
        # Removed a leftover debug print of form.cleaned_data, which
        # echoed user-submitted content to stdout on every valid post.
        form.instance.author = self.request.user
        return super().form_valid(form)
class PostDetailView(DetailView):
    """Detail page for one published post, looked up by the ``id`` kwarg."""
    template_name ="post_details.html"
    queryset = Post.objects.all().filter(created_date__lte=timezone.now())
    def get_object(self):
        # 404 when no Post with that id exists.
        id_ = self.kwargs.get('id')
        return get_object_or_404(Post,id=id_)
def signUp(request):
    """Render the account registration page."""
    return render(request,'registration/registration_form.html')
@login_required(login_url='/accounts/login/')
def login(request):
    """Render the login template.

    NOTE(review): requiring login to view the login page looks inverted,
    and the name shadows django.contrib.auth.login — confirm intent.
    """
    return render(request,'registration/login.html')
@login_required(login_url='/profile')
def search_results(request):
    """Exact-username search; renders search.html with any matches."""
    # NOTE(review): login_url='/profile' sends anonymous users to the
    # profile page rather than a login form — confirm this is intended.
    if 'username' in request.GET and request.GET["username"]:
        search_term = request.GET.get("username")
        searched_users = User.objects.filter(username = search_term)
        message = f"{search_term}"
        # The full user queryset is also handed to the template as
        # 'profile_pic'.
        profile_pic = User.objects.all()
        return render(request, 'search.html', {'message':message, 'results':searched_users, 'profile_pic':profile_pic})
    else:
        message = "You haven't searched for any term"
        return render(request, 'search.html', {'message':message})
def profile(request):
    """Show the profile page; on POST, attempt to update user + profile."""
    # image = request.user.profile.posts.all()
    if request.method == 'POST':
        # NOTE(review): UpdateUserForm, UpdateUserProfileForm and
        # HttpResponseRedirect are never imported in this module, so any
        # POST raises NameError — confirm the intended forms and imports.
        user_form = UpdateUserForm(request.POST, instance=request.user)
        prof_form = UpdateUserProfileForm(request.POST, request.FILES, instance=request.user.profile)
        if user_form.is_valid() and prof_form.is_valid():
            user_form.save()
            prof_form.save()
            return HttpResponseRedirect(request.path_info)
        return render(request, 'profile.html', {})
    return render(request, 'profile.html', {})
def timeline(request):
    """Show all published posts, newest first; accept a new post via POST."""
    posts = Post.objects.all().filter(created_date__lte=timezone.now()).order_by('-created_date')
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES)
        # Fixed: the original called ``edit_form.is_valid()`` but no
        # ``edit_form`` exists in this scope — every POST raised a
        # NameError before the form could be saved.
        if form.is_valid():
            form.save()
        return render(request, 'post_list.html', {'form': form, 'posts': posts})
    return render(request, 'post_list.html', {'posts': posts})
|
[
"marydorcassinaida54@gmail.com"
] |
marydorcassinaida54@gmail.com
|
e0d376cdb3b6cbbbbeea5b2ea6c2a81b88fbf34c
|
422c9cc1c5ef7eba24610e66d6a74ec2e16bf39e
|
/install_isolated/lib/python2.7/dist-packages/turtle_actionlib/msg/_ShapeFeedback.py
|
332cdc621b2cf527210a07a88ad56ccb86dca474
|
[] |
no_license
|
twighk/ROS-Pi3
|
222c735d3252d6fce43b427cdea3132f93025002
|
9f2912c44ae996040f143c1e77e6c714162fc7d2
|
refs/heads/master
| 2021-01-01T05:16:20.278770
| 2016-05-08T19:24:15
| 2016-05-08T19:24:15
| 58,306,257
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from turtle_actionlib/ShapeFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ShapeFeedback(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "turtle_actionlib/ShapeFeedback"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#feedback
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ShapeFeedback, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
|
[
"twighk@outlook.com"
] |
twighk@outlook.com
|
36da4a1a8ee737c91a2e4cfd313a97f8c36a836a
|
56482e0b2ce6517fff41d0f78e0c0ed000d977a1
|
/fmcapi/api_objects/helper_functions.py
|
73723b685d3181277032d2a1b3b56c4c54e1fb90
|
[
"BSD-3-Clause"
] |
permissive
|
banzigaga/fmcapi
|
ab4d7aaaf4be4f2b0686d07b6272f8b9531577da
|
fd924de96e200ca8e0d5088b27a5abaf6f915bc6
|
refs/heads/master
| 2020-12-11T14:45:07.896571
| 2019-12-12T20:02:07
| 2019-12-12T20:02:07
| 233,876,405
| 1
| 0
|
BSD-3-Clause
| 2020-01-14T15:46:26
| 2020-01-14T15:46:26
| null |
UTF-8
|
Python
| false
| false
| 4,316
|
py
|
"""Misc methods/functions that are used by the fmcapi package's modules."""
import re
import ipaddress
import json
import logging
logging.debug(f"In the {__name__} module.")
def syntax_correcter(value, permitted_syntax="""[.\w\d_\-]""", replacer="_"):
    """
    Replace every character of *value* that does not match *permitted_syntax*.

    :param value: (str) String to be checked.
    :param permitted_syntax: (str) regex of allowed characters.
    :param replacer: (str) character used to replace invalid characters.
    :return: (str) Modified string with "updated" characters.
    """
    logging.debug("In syntax_correcter() helper_function.")
    # One pass over the characters, joined at the end.
    return "".join(
        ch if re.match(permitted_syntax, ch) else replacer for ch in value
    )
def get_networkaddress_type(value):
    """
    Check to see whether 'value' is a host, range, or network.

    :param value: (str) x.x.x.x, x.x.x.x/xx, or x.x.x.x-x.x.x.x
    :return: (str) 'host', 'network', or 'range'
    """
    logging.debug("In get_networkaddress_type() helper_function.")
    if "/" in value:
        ip, bitmask = value.split("/")
        # Fixed: the original tested ``ip == "32"`` (the address part),
        # so IPv4 /32 host addresses were misclassified as networks.
        # A /32 (IPv4) or /128 (IPv6) prefix denotes a single host.
        if bitmask == "32" or bitmask == "128":
            return "host"
        else:
            return "network"
    else:
        if "-" in value:
            return "range"
        else:
            return "host"
def is_ip(ip):
    """
    Check whether the provided string is an IP address.

    :param ip: (str) x.x.x.x
    :return: (boolean)
    """
    logging.debug("In is_ip() helper_function.")
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError as err:
        logging.error(err)
        return False
def is_ip_network(ip):
    """
    Check whether the provided string is a valid network address.

    See if the provided IP/SM is the "network address" of the subnet
    (ipaddress.ip_network is strict: host bits set -> ValueError).

    :param ip: (str) x.x.x.x/xx
    :return: (boolean)
    """
    logging.debug("In is_ip_network() helper_function.")
    try:
        ipaddress.ip_network(ip)
        return True
    except ValueError as err:
        logging.error(err)
        return False
def validate_ip_bitmask_range(value="", value_type=""):
    """
    Check the provided IP address (or range of addresses) for validity.

    :param value: (str) x.x.x.x, x.x.x.x/xx, or x.x.x.x-x.x.x.x
    :param value_type: (str) 'host', 'network', or 'range'
    :return: (boolean) True when the value parses for its type
    """
    logging.debug("In validate_ip_bitmask_range() helper_function.")
    valid = False
    if value_type == "range":
        # Accept the range if either endpoint parses as an IP (matches
        # the original's behaviour of or-ing the results together).
        for endpoint in value.split("-"):
            if is_ip(endpoint):
                valid = True
    elif value_type in ("host", "network"):
        valid = is_ip_network(value)
    return valid
def mocked_requests_get(**kwargs):
    """
    Build a stand-in for a "requests" response so tests avoid real HTTP.

    :param kwargs: must include ``text`` (JSON-serializable) and
        ``status_code`` (int)
    :return: object exposing ``.text``, ``.status_code`` and ``.close()``
    """
    logging.debug("In mocked_requests_get() helper_function.")

    class MockResponse:
        """Minimal duck-typed substitute for requests.Response."""

        def __init__(self, **kw):
            logging.debug("In MockResponse __init__ method.")
            self.status_code = kw["status_code"]
            self.text = json.dumps(kw["text"])

        def close(self):
            logging.debug("In MockResponse close method.")
            return True

    return MockResponse(**kwargs)
def validate_vlans(start_vlan, end_vlan=""):
    """
    Normalise a VLAN range to the valid 1-4094 window.

    A missing end defaults to the start (single-VLAN range); a reversed
    pair is swapped; anything outside 1-4094 collapses to (1, 4094).

    :param start_vlan: (int) Lower VLAN number in range.
    :param end_vlan: (int) Upper VLAN number in range.
    :return: (int) start_vlan, (int) end_vlan
    """
    logging.debug("In validate_vlans() helper_function.")
    if end_vlan == "":
        end_vlan = start_vlan
    lo, hi = int(start_vlan), int(end_vlan)
    if hi < lo:
        start_vlan, end_vlan = end_vlan, start_vlan
        lo, hi = hi, lo
    if 0 < lo < 4095 and 0 < hi < 4095:
        return start_vlan, end_vlan
    return 1, 4094
|
[
"dmickels@cisco.com"
] |
dmickels@cisco.com
|
7036e36ad6e119dac97c2175715c6b24857595cf
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AnttechOceanbaseVerificationcodeVerifyResponse.py
|
2808330b22ff50c4c507563278735baa72cdba13
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 479
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AnttechOceanbaseVerificationcodeVerifyResponse(AlipayResponse):
    """Response wrapper for the OceanBase verification-code verify API."""
    def __init__(self):
        super(AnttechOceanbaseVerificationcodeVerifyResponse, self).__init__()
    def parse_response_content(self, response_content):
        # NOTE(review): the parsed dict is assigned but never used and the
        # method returns None — looks truncated relative to the generated
        # SDK pattern; confirm against the upstream generator output.
        response = super(AnttechOceanbaseVerificationcodeVerifyResponse, self).parse_response_content(response_content)
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
f5c468beecaa5a2685e310d47134afa02ab63714
|
386d5d4f8f102e701d02b326cd066f520e3dff9f
|
/ProjectApplication/project_core/migrations/0166_organisation_long_name_english.py
|
3e520cccaf379fa51b06988bbdeee96f5063231b
|
[
"MIT"
] |
permissive
|
Swiss-Polar-Institute/project-application
|
ae2561c3ae2c1d5412d165d959ce2e5886135e0a
|
7dc4a9f7e0f8d28c89977b85f99bc5e35ea77d43
|
refs/heads/master
| 2023-08-31T04:01:23.492272
| 2023-08-25T14:33:02
| 2023-08-25T14:33:02
| 206,330,401
| 7
| 5
|
MIT
| 2023-09-13T08:03:53
| 2019-09-04T13:49:39
|
Python
|
UTF-8
|
Python
| false
| false
| 496
|
py
|
# Generated by Django 3.2.3 on 2021-05-24 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add optional Organisation.long_name_english field."""
    dependencies = [
        ('project_core', '0165_callcareerstage_enabled'),
    ]
    operations = [
        migrations.AddField(
            model_name='organisation',
            name='long_name_english',
            field=models.CharField(blank=True, help_text='English name by which the organisation is known', max_length=100, null=True),
        ),
    ]
|
[
"jenny_t152@yahoo.co.uk"
] |
jenny_t152@yahoo.co.uk
|
09499b5a40b99525ef5398364dfabc50873929aa
|
d69995905dcd2522e53082c32c10f582b6779cba
|
/apps/testsuites/utils.py
|
046c1755163e1f7524679acbd2cd94c4a979ed31
|
[] |
no_license
|
liqi629/learn_nm_drf
|
010b35ab4254267a601a13f29e2ea3adb3be615e
|
ad0eef6fe2b338d613974850977de51b9d82ccc6
|
refs/heads/master
| 2023-02-10T14:33:33.521450
| 2021-01-13T09:14:23
| 2021-01-13T09:14:23
| 324,942,039
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import re
from datetime import datetime
from apps.testcases.models import Testcases
def modify_output(results):
    """Rewrite create_time/update_time in each result dict from the
    'YYYY-MM-DDTHH:MM:SS.ffffff' form to 'YYYY-MM-DD HH:MM:SS'.

    Mutates the dicts in place and returns them as a new list.
    """
    reformatted = []
    for item in results:
        for field in ('create_time', 'update_time'):
            found = re.search(r'(.*)T(.*)\..*?', item[field])
            item[field] = found.group(1) + ' ' + found.group(2)
        reformatted.append(item)
    return reformatted
def get_testcases_by_interface_ids(ids_list):
    """
    Collect the ids of all non-deleted test cases for the given interfaces.

    :param ids_list: iterable of interface primary keys
    :return: flat list of Testcases ids
    """
    one_list =[]
    for interface_id in ids_list:
        # values_list('id', flat=True) yields bare ids, not tuples.
        testcases_qs = Testcases.objects.values_list('id',flat=True).\
            filter(interface_id=interface_id,is_delete=False)
        one_list.extend(list(testcases_qs))
    return one_list
|
[
"liqi_629@163.com"
] |
liqi_629@163.com
|
cb5c4ccba717cd9c2920942e071419e95cf3aa0d
|
2d276785c3663d4798be462115291c4706dbd255
|
/Python从菜鸟到高手/chapter2/demo2.03.py
|
5e0bf41280f733cc7bad2b80a3e3d347a440b9a8
|
[] |
no_license
|
bupthl/Python
|
81c92433bd955663e6cda5fe7cab5ea3d067c3de
|
bdb33aeeb179a43100b9ef7129a925c63a133fd3
|
refs/heads/master
| 2022-02-21T11:02:40.195265
| 2019-08-16T05:49:18
| 2019-08-16T05:49:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
'''
--------《Python从菜鸟到高手》源代码------------
欧瑞科技版权所有
作者:李宁
如有任何技术问题,请加QQ技术讨论群:264268059
或关注“极客起源”订阅号或“欧瑞科技”服务号或扫码关注订阅号和服务号,二维码在源代码根目录
如果QQ群已满,请访问https://geekori.com,在右侧查看最新的QQ群,同时可以扫码关注公众号
“欧瑞学院”是欧瑞科技旗下在线IT教育学院,包含大量IT前沿视频课程,
请访问http://geekori.com/edu或关注前面提到的订阅号和服务号,进入移动版的欧瑞学院
“极客题库”是欧瑞科技旗下在线题库,请扫描源代码根目录中的小程序码安装“极客题库”小程序
关于更多信息,请访问下面的页面
https://geekori.com/help/videocourse/readme.html
'''
# Must be run on Windows ('cls' is a Windows shell command).
import os
import sys
# Temporarily rebind sys.stdout to a log file, run 'cls', then restore it.
# NOTE(review): os.system spawns a child process that writes to file
# descriptor 1 directly, so rebinding sys.stdout does not capture the
# child's output — confirm what this demo is intended to show.
f_handler=open('out.log', 'w')
oldstdout = sys.stdout
sys.stdout=f_handler
os.system('cls')
sys.stdout = oldstdout
|
[
"registercn@outlook.com"
] |
registercn@outlook.com
|
90f5d0ecdb0f132e76809d77dca276ad9a766253
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/aio/_base_client_async.py
|
06b3136394d128cf3c39652676f430c56596b3be
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,981
|
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Any, List, Mapping, Optional, Union, TYPE_CHECKING
from uuid import uuid4
from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential
from azure.core.pipeline.policies import (
ContentDecodePolicy,
AsyncBearerTokenCredentialPolicy,
AsyncRedirectPolicy,
DistributedTracingPolicy,
HttpLoggingPolicy,
UserAgentPolicy,
ProxyPolicy,
AzureSasCredentialPolicy,
RequestIdPolicy,
CustomHookPolicy,
NetworkTraceLoggingPolicy,
)
from azure.core.pipeline.transport import (
AsyncHttpTransport,
HttpRequest,
)
from .._generated.aio import AzureTable
from .._base_client import AccountHostsMixin, get_api_version, extract_batch_part_metadata
from .._authentication import SharedKeyCredentialPolicy
from .._constants import STORAGE_OAUTH_SCOPE
from .._error import RequestTooLargeError, TableTransactionError, _decode_error
from .._policies import StorageHosts, StorageHeadersPolicy
from .._sdk_moniker import SDK_MONIKER
from ._policies_async import AsyncTablesRetryPolicy
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
class AsyncTablesBaseClient(AccountHostsMixin):
    """Async base client shared by the Tables service/table clients.

    Owns the generated ``AzureTable`` pipeline client, maps the supported
    credential types onto pipeline policies, and implements the multipart
    batch ("transaction") send.
    """
    def __init__( # pylint: disable=missing-client-constructor-parameter-credential
        self,
        endpoint: str,
        *,
        credential: Optional[Union[AzureSasCredential, AzureNamedKeyCredential, "AsyncTokenCredential"]] = None,
        **kwargs: Any
    ) -> None:
        # AccountHostsMixin parses the endpoint/credential and prepares
        # self._policies (built via _configure_policies below).
        super(AsyncTablesBaseClient, self).__init__(endpoint, credential=credential, **kwargs) # type: ignore
        self._client = AzureTable(
            self.url,
            # Callers may inject a fully custom policy list via kwargs.
            policies=kwargs.pop('policies', self._policies),
            **kwargs
        )
        # Allow pinning an explicit service API version through kwargs.
        self._client._config.version = get_api_version(kwargs, self._client._config.version) # pylint: disable=protected-access
    async def __aenter__(self):
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *args):
        await self._client.__aexit__(*args)
    async def close(self) -> None:
        """This method is to close the sockets opened by the client.
        It need not be used when using with a context manager.
        """
        await self._client.close()
    def _configure_credential(self, credential):
        # type: (Any) -> None
        # Translate each supported credential flavour into its pipeline
        # policy; ``None`` leaves the client unauthenticated (e.g. SAS
        # already embedded in the URL).
        if hasattr(credential, "get_token"):
            # AAD token credential, duck-typed on get_token().
            self._credential_policy = AsyncBearerTokenCredentialPolicy( # type: ignore
                credential, STORAGE_OAUTH_SCOPE
            )
        elif isinstance(credential, SharedKeyCredentialPolicy):
            # Already a policy — use it verbatim.
            self._credential_policy = credential # type: ignore
        elif isinstance(credential, AzureSasCredential):
            self._credential_policy = AzureSasCredentialPolicy(credential) # type: ignore
        elif isinstance(credential, AzureNamedKeyCredential):
            self._credential_policy = SharedKeyCredentialPolicy(credential) # type: ignore
        elif credential is not None:
            raise TypeError("Unsupported credential: {}".format(credential))
    def _configure_policies(self, **kwargs):
        # Ordered pipeline. Note the credential policy sits before
        # redirect/retry so retried or redirected requests are re-signed.
        return [
            RequestIdPolicy(**kwargs),
            StorageHeadersPolicy(**kwargs),
            UserAgentPolicy(sdk_moniker=SDK_MONIKER, **kwargs),
            ProxyPolicy(**kwargs),
            self._credential_policy,
            ContentDecodePolicy(response_encoding="utf-8"),
            AsyncRedirectPolicy(**kwargs),
            StorageHosts(**kwargs),
            AsyncTablesRetryPolicy(**kwargs),
            CustomHookPolicy(**kwargs),
            NetworkTraceLoggingPolicy(**kwargs),
            DistributedTracingPolicy(**kwargs),
            HttpLoggingPolicy(**kwargs),
        ]
    async def _batch_send(self, *reqs: "HttpRequest", **kwargs) -> List[Mapping[str, Any]]:
        """Given a series of request, do a Storage batch call."""
        # Pop it here, so requests doesn't feel bad about additional kwarg
        policies = [StorageHeadersPolicy()]
        # Inner multipart/mixed "changeset" carries the individual
        # operations; the outer request wraps it with the batch boundary.
        changeset = HttpRequest("POST", None) # type: ignore
        changeset.set_multipart_mixed(
            *reqs, policies=policies, boundary="changeset_{}".format(uuid4())
        )
        request = self._client._client.post( # pylint: disable=protected-access
            url="https://{}/$batch".format(self._primary_hostname),
            headers={
                "x-ms-version": self.api_version,
                "DataServiceVersion": "3.0",
                "MaxDataServiceVersion": "3.0;NetFx",
                "Content-Type": "application/json",
                "Accept": "application/json"
            },
        )
        request.set_multipart_mixed(
            changeset,
            policies=policies,
            enforce_https=False,
            boundary="batch_{}".format(uuid4()),
        )
        pipeline_response = await self._client._client._pipeline.run(request, **kwargs) # pylint: disable=protected-access
        response = pipeline_response.http_response
        # TODO: Check for proper error model deserialization
        if response.status_code == 413:
            raise _decode_error(
                response,
                error_message="The transaction request was too large",
                error_type=RequestTooLargeError)
        if response.status_code != 202:
            raise _decode_error(response)
        parts_iter = response.parts()
        parts = []
        async for p in parts_iter:
            parts.append(p)
        # A 202 on the whole batch can still contain failed sub-responses;
        # the first non-2xx part determines the error raised.
        error_parts = [p for p in parts if not 200 <= p.status_code < 300]
        if any(error_parts):
            if error_parts[0].status_code == 413:
                raise _decode_error(
                    response,
                    error_message="The transaction request was too large",
                    error_type=RequestTooLargeError)
            raise _decode_error(
                response=error_parts[0],
                error_type=TableTransactionError,
            )
        return [extract_batch_part_metadata(p) for p in parts]
class AsyncTransportWrapper(AsyncHttpTransport):
    """Wrapper class that ensures that an inner client created
    by a `get_client` method does not close the outer transport for the parent
    when used in a context manager.
    """
    def __init__(self, async_transport):
        # The parent's transport; shared, never owned by this wrapper.
        self._transport = async_transport
    async def send(self, request, **kwargs):
        # Only send() is delegated; every lifecycle hook below is a no-op
        # so closing the inner client never tears down the shared transport.
        return await self._transport.send(request, **kwargs)
    async def open(self):
        pass
    async def close(self):
        pass
    async def __aenter__(self):
        pass
    async def __aexit__(self, *args): # pylint: disable=arguments-differ
        pass
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
feb64c06c982a14449c43ee53cc8a6183106fe90
|
093b7b3ce929fa1383d5c1571271807f171aec23
|
/rocket/entities/api/endpoint.py
|
971b53d78b7948b93078e3f8e4b2b12806ebba05
|
[] |
no_license
|
takaaki-mizuno/smart-rocket-cli
|
fa728a13bc80781aea182922f7a0ee2c546518b1
|
1b4a522c54e94857803f9be246f0c480e50dae3d
|
refs/heads/master
| 2020-03-21T02:06:54.115723
| 2018-06-25T22:43:32
| 2018-06-25T22:43:32
| 137,980,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
from .parameter import Parameter
from .response import Reference
class Endpoint:
    """One path+method operation parsed from an API spec.

    Construction eagerly parses the raw ``definition``: ``parameters``
    is filled with ``Parameter`` wrappers and ``response`` receives the
    ``Reference`` of a 2xx response carrying a schema (when several
    match, the last one in dict order wins, as before).
    """

    def __init__(self, path, method, definition, spec):
        self.path = path
        self.method = method
        self.definition = definition
        self.spec = spec
        self.parameters = []
        self.response = None
        self.parse()

    def parse(self):
        """Run both parsing passes over the raw definition."""
        self.parse_parameters()
        self.parse_response()

    def parse_parameters(self):
        """Wrap every raw parameter entry in a ``Parameter``."""
        if 'parameters' not in self.definition:
            return
        self.parameters.extend(
            Parameter(raw) for raw in self.definition['parameters']
        )

    def parse_response(self):
        """Record the schema reference of a successful (2xx) response."""
        if 'responses' not in self.definition:
            return
        for status, body in self.definition['responses'].items():
            if status[0:1] == '2' and 'schema' in body:
                self.response = Reference(body['schema']['$ref'])
|
[
"takaaki.mizuno@gmail.com"
] |
takaaki.mizuno@gmail.com
|
8d5c12675fbee82c4d20320e48951b0b9f1842e2
|
f716ec8240b775170283cb6d43da50d0ff3561b7
|
/testkraken/testing_functions/check_output.py
|
9eeb9c524b75d5c5bbe91ba9471dee63bc318ab0
|
[] |
no_license
|
jdkent/testkraken
|
63493df89a5ae427dab9da14fc794de96cda6cef
|
3b5d83f0eeaad7969902cfa867e6b4a1d4d7e691
|
refs/heads/master
| 2020-06-01T11:39:57.286709
| 2019-06-07T09:55:16
| 2019-06-07T09:55:16
| 190,766,540
| 0
| 0
| null | 2019-06-07T15:24:49
| 2019-06-07T15:24:49
| null |
UTF-8
|
Python
| false
| false
| 3,766
|
py
|
#/usr/bin/env python
from __future__ import division
import json
import os, inspect
from glob import glob
import pandas as pd
import numpy as np
import pdb
def creating_dataframe(files_list):
    """Read every JSON stat file in *files_list* into one DataFrame.

    Each file becomes a row labelled ``subject_<i>``; list-valued keys
    are flattened into ``<key>_voxels`` / ``<key>_volume`` columns.
    """
    outputmap = {0: 'voxels', 1: 'volume'}
    frame = pd.DataFrame()
    for i, fname in enumerate(files_list):
        with open(fname, 'rt') as fp:
            raw = json.load(fp)
        # in cwl i'm loosing the directory name, so subjects are
        # labelled by position instead of by path component.
        subject = "subject_{}".format(i)
        flattened = {}
        for key, value in raw.items():
            if isinstance(value, list):
                # Two-element lists are (voxel count, volume).
                for pos, item in enumerate(value):
                    flattened["%s_%s" % (key, outputmap[pos])] = item
            else:
                flattened[key] = value
        frame[subject] = pd.Series(flattened)
    # Transpose: subjects as rows, measurements as columns.
    return frame.T
def check_output(file_out, file_ref=None, name=None, **kwargs):
    """Compare actual vs. reference stats and write ``report_<name>.json``.

    Both arguments may be one path or a list of paths to the JSON stat
    files read by ``creating_dataframe``.  For a fixed set of voxel-count
    columns the per-subject relative error is computed; a subject is
    marked PASSED when every error is below 5%.

    NOTE(review): if file_out/file_ref is neither str nor list, the
    expected_files/output_files names are never bound and this raises
    UnboundLocalError — presumably callers always pass str or list;
    confirm.
    """
    if type(file_ref) is list:
        expected_files = file_ref
    elif type(file_ref) is str:
        expected_files = [file_ref]
    if type(file_out) is list:
        output_files = file_out
    elif type(file_out) is str:
        output_files = [file_out]
    df_exp = creating_dataframe(expected_files)
    df_out = creating_dataframe(output_files)
    #df_exp.to_csv('output/ExpectedOutput.csv')
    #df_out.to_csv('output/ActualOutput.csv')
    # DJ TOD: this doesn't work, check with the original repo
    #df_diff = df_exp - df_out
    #df_diff = df_diff.dropna()
    report_filename = "report_{}.json".format(name)
    out = {}
    # chosing just a few columns
    keys_test = ["white_voxels", "gray_voxels", "csf_voxels",
                 "Right-Hippocampus_voxels", "Right-Amygdala_voxels", "Right-Caudate_voxels"]
    out["index_name"] = list(df_exp.index)
    # One list of per-subject relative errors per tested column, keyed
    # like "re_white" or "re_R-Hippocampus".
    for key in keys_test:
        out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))] = []
    for subj in df_exp.index:
        for key in keys_test:
            if df_exp.loc[subj, key] != 0.:
                # Relative error against the reference value, rounded.
                out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))].append(round(
                    1. * abs(df_exp.loc[subj, key] - df_out.loc[subj, key]) / df_exp.loc[subj, key], 5))
            elif df_out.loc[subj, key] != 0.:
                # Reference is 0 but output is not: treated as maximal error.
                out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))].append(1.)
            else:
                # Both zero: perfect agreement.
                out["re_{}".format(key.replace("_voxels", "").replace("Right-", "R-"))].append(0.)
    out["regr"] = []
    # Subject passes regression when all of its relative errors are < 5%.
    for i, subj in enumerate(out["index_name"]):
        list_tmp = []
        for k in out.keys():
            if k not in ["index_name", "regr"]:
                list_tmp.append(out[k][i])
        try:
            assert max(list_tmp) < 0.05
            out["regr"].append("PASSED")
        except(AssertionError):
            out["regr"].append("FAILED")
    #out_max = {"max_diff": max(diff)}
    with open(report_filename, "w") as f:
        json.dump(out, f)
if __name__ == "__main__":
    # CLI wrapper: -out/-ref/-name are forwarded directly into
    # check_output() (nargs="+" yields lists, matching its contract).
    from argparse import ArgumentParser, RawTextHelpFormatter
    defstr = ' (default %(default)s)'
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument("-out", nargs="+", dest="file_out",
                        help="file with the output for testing")
    parser.add_argument("-ref", nargs="+", dest="file_ref",
                        help="file with the reference output")
    parser.add_argument("-name", dest="name",
                        help="name of the test provided by a user")
    args = parser.parse_args()
    check_output(**vars(args))
|
[
"djarecka@gmail.com"
] |
djarecka@gmail.com
|
533ca7f861f7a6a34ff0265d601d017f0f9835f7
|
b6d82335dfe93f86977e4cbafe592eff32536712
|
/src/aws_hashicorp_packer_reaper/schema.py
|
f028da830fb0a93c2da236b6aabc65f3a6ed1a36
|
[
"Apache-2.0"
] |
permissive
|
felixubierar/aws-hashicorp-packer-reaper
|
2bb9709b3c95c53d519fa5d009cbafc48232b1b4
|
9b29f6896f43a01f47d2a894059e4afa191a8ff2
|
refs/heads/master
| 2023-05-19T17:01:23.872830
| 2021-06-14T08:57:50
| 2021-06-14T08:57:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,835
|
py
|
import durations
from aws_hashicorp_packer_reaper.logger import log
from jsonschema.exceptions import ValidationError
from jsonschema import validators, Draft7Validator, FormatChecker, validators
# JSON schema for a reaper request: 'mode' (stop|terminate) and an age
# threshold are required; dry_run and tag filters are optional and get
# defaults injected by the extended validator below.
schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "mode": {
            "type": "string",
            "description": "of operations",
            "enum": ["stop", "terminate"],
        },
        "older_than": {
            "type": "string",
            "description": "period since launched",
            "format": "duration",
            "default": "2h",
        },
        "dry_run": {
            "type": "boolean",
            "description": "if you only want output",
            "default": False,
        },
        "tags": {
            "type": "array",
            "description": "to select EC2 instances with",
            "items": {"type": "string", "minLength": 1},
        },
    },
    "required": [
        "mode",
        "older_than",
    ],
}
@FormatChecker.cls_checks("duration")
def duration_checker(value) -> bool:
    """
    checks whether the `value` is a valid duration.
    >>> duration_checker({})
    False
    >>> duration_checker(1.0)
    False
    >>> duration_checker("2h")
    True
    >>> duration_checker("hundred days")
    False
    """
    # Non-strings can never be durations.
    if not isinstance(value, str):
        return False
    try:
        durations.Duration(value)
    except durations.exceptions.InvalidTokenError:
        return False
    return True
def extend_with_default(validator_class):
    """Return a copy of *validator_class* whose "properties" validation
    also writes each subschema's ``default`` into the instance.

    This follows the jsonschema recipe for default-filling validators:
    defaults are injected via ``setdefault`` before the wrapped
    "properties" validator runs, and its errors are re-yielded unchanged.
    """
    validate_properties = validator_class.VALIDATORS["properties"]

    def set_defaults(validator, properties, instance, schema):
        # Inject defaults first so validation sees the completed document.
        # Renamed loop variable: the original shadowed the builtin `property`.
        for prop, subschema in properties.items():
            if "default" in subschema:
                instance.setdefault(prop, subschema["default"])
        for error in validate_properties(
            validator,
            properties,
            instance,
            schema,
        ):
            yield error

    return validators.extend(
        validator_class,
        {"properties": set_defaults},
    )
# Module-level validator: Draft 7 with default-injection and the custom
# "duration" format checker wired in.
validator = extend_with_default(Draft7Validator)(schema, format_checker=FormatChecker())
def validate(request: dict) -> bool:
    """
    return True and completes the missing values if the dictionary matches the schema, otherwise False.
    >>> validate({"mode": "stoep"})
    False
    >>> validate({"mode": "stop", "older_than": "sdfsdfsf dagen"})
    False
    >>> x = {"mode": "stop"}
    >>> validate(x)
    True
    >>> print(x)
    {'mode': 'stop', 'older_than': '2h', 'dry_run': False}
    """
    try:
        # Mutates *request*: missing properties get their schema defaults.
        validator.validate(request)
    except ValidationError as err:
        # Failures are logged and reported as False, never raised.
        log.error("invalid request received: %s" % str(err.message))
        return False
    return True
|
[
"mark.van.holsteijn@gmail.com"
] |
mark.van.holsteijn@gmail.com
|
3d6fae9b8527d4f8823a630b37b8fecb7d2d8207
|
b9f21bc90eed396dde950c30a1b482be0fb8ba30
|
/AtCoder/ABC/114-B_1.py
|
85c142e193b383c2ee95f438be3c37a3edec97b1
|
[] |
no_license
|
nanigasi-san/nanigasi
|
127a21db1b31759908fd74cebabe240e5abf8267
|
5e3c3e78344dd9558cafe439beb272b9a80d0f3a
|
refs/heads/master
| 2020-04-03T18:57:40.132489
| 2019-06-19T15:03:38
| 2019-06-19T15:03:38
| 155,504,101
| 1
| 0
| null | 2019-02-20T09:40:05
| 2018-10-31T05:33:11
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
# AtCoder ABC 114 B: among all 3-consecutive-digit substrings of S,
# print the minimum absolute difference from 753.
S = input()
# One window per starting index. Using min() over a generator replaces
# the original build-list / sort / take-first approach (O(n) instead of
# O(n log n)) and drops the variable that shadowed the builtin `list`.
print(min(abs(int(S[i:i + 3]) - 753) for i in range(len(S) - 2)))
|
[
"nanigasi.py@gmail.com"
] |
nanigasi.py@gmail.com
|
a4f92fc07660a377a8f5ca8b05dec21c348b7a8e
|
2caa47f0bdb2f03469a847c3ba39496de315d992
|
/Contest/ABC086/c/main.py
|
1e87c7d1f6b031f11c2f42d5104c63354529a69a
|
[
"CC0-1.0"
] |
permissive
|
mpses/AtCoder
|
9023e44885dc67c4131762281193c24b69d3b6da
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
refs/heads/master
| 2023-03-23T17:00:11.646508
| 2021-03-20T12:21:19
| 2021-03-20T12:21:19
| 287,489,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
#!/usr/bin/env python3
# AtCoder ABC 086 C - Traveling: a walker must move exactly one grid
# step every tick; decide whether the plan of (t_i, x_i, y_i) visits is
# feasible. Input: n followed by n triples, read all at once from stdin.
n, *a = map(int, open(0).read().split())
a = [0]*3 + a  # prepend the start state t=0 at (0, 0) so deltas are uniform
for i in range(n):
    i *= 3
    # Absolute deltas of time, x and y between consecutive visits.
    t, x, y = map(lambda j:abs(a[i+3+j]-a[i+j]), [0,1,2])
    d = x+y
    # Infeasible if the Manhattan distance exceeds the time budget, or
    # if distance and time have different parity (d%2-t%2 is nonzero
    # exactly when the parities disagree — leftover time must be even).
    if d>t or d%2-t%2: print("No"); exit()
print("Yes")
|
[
"nsorangepv@gmail.com"
] |
nsorangepv@gmail.com
|
671f43d908baa3c118b7e3e44c09aee84552f4d5
|
ff58ba25d940ed34d9684efab04adef85d1e1c0f
|
/ENV/lib/python2.6/site-packages/gunicorn/app/pasterapp.py
|
388478181468378e0cb86f5300a8946ea4738159
|
[] |
no_license
|
afsmith/Kneto-Sello
|
e9046a81ff83652531adc55aab3f90f77af5b5be
|
a1b12daf8a04ef485ddcaa2944b2d87878a8cdd0
|
refs/heads/master
| 2021-03-27T17:31:23.830989
| 2013-06-04T07:29:58
| 2013-06-04T07:29:58
| 6,720,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,270
|
py
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import logging
import os
import pkg_resources
import sys
import ConfigParser
from paste.deploy import loadapp, loadwsgi
SERVER = loadwsgi.SERVER
from gunicorn.app.base import Application
from gunicorn.config import Config
class PasterBaseApplication(Application):
    """Shared logic for running gunicorn from a Paste Deploy ini file.

    Subclasses provide ``cfgurl``/``relpath`` (and usually ``cfgfname``);
    this base converts the Paste server context into a gunicorn config
    dict and hooks ini-file [loggers] sections into logging setup.
    (Python 2 code base.)
    """
    def app_config(self):
        """Build a gunicorn config dict from the Paste server section."""
        cx = loadwsgi.loadcontext(SERVER, self.cfgurl, relative_to=self.relpath)
        gc, lc = cx.global_conf.copy(), cx.local_conf.copy()
        cfg = {}
        host, port = lc.pop('host', ''), lc.pop('port', '')
        if host and port:
            cfg['bind'] = '%s:%s' % (host, port)
        elif host:
            cfg['bind'] = host
        cfg['workers'] = int(lc.get('workers', 1))
        cfg['umask'] = int(lc.get('umask', 0))
        cfg['default_proc_name'] = gc.get('__file__')
        # Copy every recognised gunicorn setting; local-section values
        # win over global ones because they are applied second.
        for k, v in gc.items():
            if k not in self.cfg.settings:
                continue
            cfg[k] = v
        for k, v in lc.items():
            if k not in self.cfg.settings:
                continue
            cfg[k] = v
        return cfg
    def configure_logging(self):
        # Prefer the ini file's [loggers] sections when present; fall
        # back to gunicorn's default logging otherwise.
        if hasattr(self, "cfgfname"):
            self.logger = logging.getLogger('gunicorn')
            # from paste.script.command
            parser = ConfigParser.ConfigParser()
            parser.read([self.cfgfname])
            if parser.has_section('loggers'):
                if sys.version_info >= (2, 6):
                    from logging.config import fileConfig
                else:
                    # Use our custom fileConfig -- 2.5.1's with a custom Formatter class
                    # and less strict whitespace (which were incorporated into 2.6's)
                    from gunicorn.logging_config import fileConfig
                config_file = os.path.abspath(self.cfgfname)
                fileConfig(config_file, dict(__file__=config_file,
                    here=os.path.dirname(config_file)))
                return
        super(PasterBaseApplication, self).configure_logging()
class PasterApplication(PasterBaseApplication):
    """``gunicorn_paster`` application: loads a WSGI app from the Paste
    Deploy ini file given as the single command-line argument."""
    def init(self, parser, opts, args):
        if len(args) != 1:
            parser.error("No application name specified.")
        cfgfname = os.path.normpath(os.path.join(os.getcwd(), args[0]))
        cfgfname = os.path.abspath(cfgfname)
        if not os.path.exists(cfgfname):
            parser.error("Config file not found: %s" % cfgfname)
        self.cfgurl = 'config:%s' % cfgfname
        self.relpath = os.path.dirname(cfgfname)
        self.cfgfname = cfgfname
        # Make the ini file's directory importable, mirroring paster.
        sys.path.insert(0, self.relpath)
        pkg_resources.working_set.add_entry(self.relpath)
        return self.app_config()
    def load(self):
        return loadapp(self.cfgurl, relative_to=self.relpath)
class PasterServerApplication(PasterBaseApplication):
    """Used when gunicorn runs as the *server* inside a paster-managed
    app (``use = egg:gunicorn#main``): configured programmatically from
    the paster global/local config instead of the command line."""
    def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
        self.cfg = Config()
        self.app = app
        self.callable = None
        gcfg = gcfg or {}
        cfgfname = gcfg.get("__file__")
        if cfgfname is not None:
            self.cfgurl = 'config:%s' % cfgfname
            self.relpath = os.path.dirname(cfgfname)
            self.cfgfname = cfgfname
        cfg = kwargs.copy()
        # Unix sockets keep the raw host string; TCP gets "host:port".
        if port and not host.startswith("unix:"):
            bind = "%s:%s" % (host, port)
        else:
            bind = host
        cfg["bind"] = bind
        if gcfg:
            for k, v in list(gcfg.items()):
                cfg[k] = v
            cfg["default_proc_name"] = cfg['__file__']
        try:
            for k, v in list(cfg.items()):
                if k.lower() in self.cfg.settings and v is not None:
                    self.cfg.set(k.lower(), v)
        except Exception, e:
            # Bad config is fatal for a server entry point: report and exit.
            sys.stderr.write("\nConfig error: %s\n" % str(e))
            sys.stderr.flush()
            sys.exit(1)
        self.configure_logging()
    def load_config(self):
        # Without an ini file there is nothing further to merge.
        if not hasattr(self, "cfgfname"):
            return
        cfg = self.app_config()
        for k,v in cfg.items():
            try:
                self.cfg.set(k.lower(), v)
            except:
                sys.stderr.write("Invalid value for %s: %s\n\n" % (k, v))
                raise
    def load(self):
        if hasattr(self, "cfgfname"):
            return loadapp(self.cfgurl, relative_to=self.relpath)
        return self.app
def run():
    """\
    The ``gunicorn_paster`` command for launching Paster compatible
    applications like Pylons or Turbogears2
    """
    from gunicorn.app.pasterapp import PasterApplication
    PasterApplication("%prog [OPTIONS] pasteconfig.ini").run()
def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
    """\
    A paster server.
    The entry point in your paster ini file should look like this:
    [server:main]
    use = egg:gunicorn#main
    host = 127.0.0.1
    port = 5000
    """
    from gunicorn.app.pasterapp import PasterServerApplication
    PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs).run()
|
[
"andrew.smith@kneto.fi"
] |
andrew.smith@kneto.fi
|
1382dcbea411dd5c7a5227efcd1b1545775e46c7
|
8ecf97e0f12037ccd0b63f265fddd3bf84229d7a
|
/keras/inception_autoencoder_predict.py
|
966fa0964b70a98e8e0ee7e382925390c8b2781a
|
[] |
no_license
|
ArifSohaib/video_collission_detection
|
158af02ce55d8e7f39b532417010b882106bdb2f
|
79ac145f4a3d0386ffac4f4fc2cc6efda056e53f
|
refs/heads/master
| 2021-09-11T01:36:09.085256
| 2018-04-05T19:44:40
| 2018-04-05T19:44:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,739
|
py
|
from inception_autoencoder import build_autoencoder
import numpy as np
import pickle
import matplotlib.pyplot as plt
from sklearn import preprocessing
def main():
    """Score period-1 features with the trained autoencoder, report
    reconstruction-error (MSE) statistics for impact vs. full frames,
    then estimate detection accuracy against previously confirmed
    impact frame indices loaded from a pickle."""
    impact_data = np.loadtxt('../data/features/impacts_period1.npy')
    full_data = np.loadtxt('../data/features/period1_full.npy')
    # Scale features to [0, 1].
    # NOTE(review): fit_transform is called separately on each array, so
    # the two use different scalings — presumably intentional; confirm.
    min_max_scaler = preprocessing.MinMaxScaler()
    impact_data = min_max_scaler.fit_transform(impact_data)
    full_data = min_max_scaler.fit_transform(full_data)
    model = build_autoencoder()
    model.load_weights('../data/weights/autoencoder_weights.h5')
    predict_impact = model.predict(impact_data)
    predict_full = model.predict(full_data)
    # Per-sample reconstruction error.
    mse_full = ((predict_full - full_data) ** 2).mean(axis=1)
    mse_impact = ((predict_impact - impact_data) ** 2).mean(axis=1)
    mean_mse = mse_full.mean()
    print("full mse avg {}".format(mean_mse))
    print("impact mse avg {}".format(mse_impact.mean()))
    print("full mse min {}".format(mse_full.min()))
    print("impact mse min {}".format(mse_impact.min()))
    print("full mse max {}".format(mse_full.max()))
    print("impact mse max {}".format(mse_impact.max()))
    plt.hist(mse_full, label='full_mse stats')
    plt.show()
    plt.hist(mse_impact, label='impact_mse stats')
    plt.show()
    # Medians of both error distributions; the impact median becomes the
    # detection threshold below.
    full_percentile = np.percentile(mse_full, 50)
    impact_percentile = np.percentile(mse_impact, 50);
    print("full mse percentile {}".format(full_percentile))
    print("impact mse percentile {}".format(impact_percentile))
    print("length of full data {}".format(len(mse_full)))
    pred_impact_idx = []
    #running the above statistics, we can say that if the mse is above the max of impact mse, then it is not an impact
    for idx, err in enumerate(mse_full):
        if err > impact_percentile:
            pred_impact_idx.append(idx)
    with open('../data/frames/frames_vid1_div5.pkl', 'rb') as f:
        confirmed_idx = pickle.load(f)
    confirmed_idx = sorted(confirmed_idx.values())
    """
    for each value in confirmed_idx we need 10 numbers before and 10 after it(totaling 29 * 20 = 580)
    """
    full_idx = []
    for idx in confirmed_idx:
        for i in range(-10, 10):
            full_idx.append(idx+i)
    true_count = 0
    false_pos = 0
    #to check accuracy, we can compare against idx's computed before
    idx_count = 0;
    for idx in pred_impact_idx:
        if idx in full_idx:
            true_count += 1
        else:
            false_pos += 1
    print("num predictions {}".format(len(pred_impact_idx)))
    print("true count {}".format(true_count))
    print("length of pred impacts {}".format(len(full_idx)))
    print("false pos {}".format(false_pos))
if __name__ == '__main__':
    main()
|
[
"arif_sohaib@outlook.com"
] |
arif_sohaib@outlook.com
|
490e204bc88e69d4ea255b9c9b4e172ee01ae582
|
eb26f2a53339cc9880c193225919bd37078113aa
|
/flask/mysite/app.py
|
1ce6098bfcafb1745e57a8640c7e316044ddd05d
|
[] |
no_license
|
nnyong/TIL-c9
|
c2590ea94c13221a45f274beb22a0f03b09bf6d4
|
2e2803ee60467ffbad7d9a704b0029476ebead0b
|
refs/heads/master
| 2020-04-17T17:17:53.798516
| 2019-05-02T03:08:56
| 2019-05-02T03:08:56
| 166,777,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
import os, csv
from flask import Flask, render_template, request
app=Flask(__name__)
@app.route('/')
def index():
    """Landing page: plain-text greeting."""
    return 'Hello World!'
@app.route('/greeting/<string:name>')
def greeting(name):
    """Greet the visitor by name (Korean)."""
    return f'반갑습니다! {name}님!'
@app.route('/cube/<int:num>')
def cube(num):
    """Return num**3 as plain text."""
    result=num**3
    return str(result)
@app.route('/html_file')
def html_file():
    return render_template('html_file.html')
@app.route('/hi/<name>')
def hi(name):
    return render_template('hi.html', name_in_html=name)
@app.route('/fruits')
def fruits():
    fruits=['apple','banana','mango','melon']
    return render_template('fruits.html',fruits=fruits)
@app.route('/send')
def send():
    """Render the guestbook submission form."""
    return render_template('send.html')
@app.route('/receive')
def receive():
    """Append a guestbook entry from query params and confirm it.

    NOTE(review): the CSV grows without a header row and inputs are not
    validated — fine for a tutorial, not for production.
    """
    # request.args
    # {'who':'junwoo','message':'hello'}
    who=request.args.get('who')
    message=request.args.get('message')
    with open('guestbook.csv','a',encoding='utf8',newline='') as f:
        writer=csv.DictWriter(f,fieldnames=['who','message'])
        writer.writerow({
            'who': who,
            'message': message
        })
    return render_template('receive.html',name=who, message=message)
@app.route('/guestbook')
def guestbook():
    """List every stored guestbook entry."""
    messages=[]
    with open('guestbook.csv','r',encoding='utf8',newline='') as f:
        reader=csv.DictReader(f)
        for row in reader:
            messages.append(row)
    return render_template('guestbook.html',messages=messages)
if __name__=='__main__':
    # Cloud9-style host/port taken from the environment.
    app.run(host=os.getenv('IP'), port=os.getenv('PORT'), debug=True)
|
[
"jo.youthful@gmail.com"
] |
jo.youthful@gmail.com
|
e9ec52b3a17bacb2dda1181371225ad96a07f778
|
90e4fe85b70221ae82f99ca6930da980ef8f674a
|
/Anul_1/Semestru_1/FundamentalsOfProg/Seminar/s08p1_I6/src/store/ui/console.py
|
e9bdf474e451dc086c98f9ea7956991ddff0ca45
|
[] |
no_license
|
stefangeorgescu970/university-assignments
|
108235e047b963efb6cd1b952f6b96849e1dc3d3
|
9253cc084b74a62035c96a4a2accfbba43812c16
|
refs/heads/master
| 2022-12-10T14:49:20.299356
| 2020-04-28T14:05:41
| 2020-04-28T14:05:41
| 259,648,446
| 0
| 0
| null | 2022-12-07T20:33:05
| 2020-04-28T13:43:02
|
C++
|
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
"""
@author: radu
"""
import traceback
from store.domain.dto import OrderDTO
from store.domain.validators import StoreException
from util.common import MyUtil
class Console(object):
    """Scripted text UI that exercises the store controllers end to end:
    seeds demo data, then prints products, orders and statistics."""

    def __init__(self, product_controller, order_controller, statistics_controller):
        self.__product_controller = product_controller
        self.__order_controller = order_controller
        self.__statistics_controller = statistics_controller

    def run_console(self):
        # TODO: replace this scripted demo with a menu or cmd based console
        self.__init_data()
        print("all products:")
        self.__print_all_products()
        print("all orders:")
        self.__print_all_orders()
        print("products filtered by name (name containing the string 'p'):")
        MyUtil.print_list(self.__product_controller.filter_products_by_name("p"))
        print("the cost of all orders is: ", self.__statistics_controller.compute_all_orders_cost())
        print("the orders with the cost greater than 2 is:")
        MyUtil.print_list(self.__statistics_controller.filter_orders(2))
        self.__print_sorted_orders()

    def __print_all_products(self):
        MyUtil.print_list(self.__product_controller.get_all())

    def __print_all_orders(self):
        MyUtil.print_list(self.__order_controller.get_all())

    def __init_data(self):
        # Demo fixtures; a StoreException is reported, not propagated.
        try:
            for args in ((1, "p1", 100), (2, "p2", 200), (3, "bla", 300)):
                self.__product_controller.add_product(*args)
            for args in ((1, 1, 2), (2, 1, 3), (3, 2, 4)):
                self.__order_controller.add_order(*args)
        except StoreException as se:
            print("exception when initializing data: ", se)
            traceback.print_exc()

    def __print_sorted_orders(self):
        print("the orders sorted descending by cost and ascending by name:")
        # 1-based position followed by the order, matching the original output.
        for position, order in enumerate(self.__statistics_controller.sort_orders(), start=1):
            print(position, order)
|
[
"stefan.georgescu.970@gmail.com"
] |
stefan.georgescu.970@gmail.com
|
b65f85e32d7186281a2604cc160bd674c86d13a5
|
7aecab27c231c5207f26a1682543b0d6c5093c06
|
/server/dancedeets/util/korean_dates.py
|
c6c4fe2fe48b14f79ae353c0f2e1153c24e04d9e
|
[] |
no_license
|
mikelambert/dancedeets-monorepo
|
685ed9a0258ea2f9439ae4ed47ebf68bb5f89256
|
4eff1034b9afd3417d168750ea3acfaecd20adc6
|
refs/heads/master
| 2022-08-10T07:16:32.427913
| 2018-04-15T22:05:58
| 2018-04-15T22:05:58
| 75,126,334
| 24
| 2
| null | 2022-07-29T22:28:45
| 2016-11-29T22:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,382
|
py
|
# -*-*- encoding: utf-8 -*-*-
import datetime
import re
DAWN = 6 # sun rises at 6am-ish
# Regex fragments for Korean date/time spans (Python 2 ur'' literals).
_DATETIME_SPAN_SEPARATOR = ur'(?:~|/|부터)' # '부터' = 'from'
# Day-only date, e.g. "14일" or "14(".
_D_DATE = ur'(?P<day>\d+)[일\(]'
# Optional month ("3월") followed by the day.
_MD_DATE = ur'(?:(?P<month>\d+)월\s*)?' + _D_DATE
# Optional year ("2016년") followed by month+day.
_YMD_DATE = ur'(?:(?P<year>\d+)[년녀]\s*)?' + _MD_DATE
# WEEKDAY = r'(?:\(.\)| ..일)?'
# Clock time: optional am/pm marker (오후/오전), hour ("7시"), then
# minutes ("30분") or "반" (= half past).
_AM_PM_TIME = ur'(?:(?P<ampm>오후|오전) )?(?P<hour>\d+)시 ?(?:(?P<minute>\d+)분|(?P<half>반))?'
# Either "새벽" (dawn) or an explicit clock time.
_TIME = ur'(?:(?P<dawn>새벽)|%s)' % _AM_PM_TIME
def _extract_date(m, date_default=None):
    # Build a date from the regex match; missing year/month/day groups
    # fall back to the corresponding field of date_default.
    return datetime.date(
        int(m.group('year') or date_default.year), int(m.group('month') or date_default.month), int(m.group('day') or date_default.day)
    )
def _extract_time(m, time_default=None):
    # Build a time from the regex match. (time_default is currently unused.)
    # "새벽" (dawn) maps to a fixed 6am.
    if m.group('dawn'):
        return datetime.time(DAWN)
    # "반" means half past; otherwise explicit minutes, defaulting to 0.
    if m.group('half'):
        minute = 30
    else:
        minute = int(m.group('minute') or 0)
    if unicode(m.group('ampm')) == u'오후':
        ampm_offset = 12  # 오후 = PM
    elif m.group('ampm') == u'오전':
        ampm_offset = 0  # 오전 = AM
    else:
        # No marker is treated as PM — presumably events are in the
        # afternoon/evening; TODO confirm.
        ampm_offset = 12
    hour = int(m.group('hour'))
    if hour == 12:
        # "12시" becomes 0 so the offset yields noon (PM) or midnight (AM).
        hour = 0
    return datetime.time(hour + ampm_offset, minute)
def parse_times(s):
    """Parse a Korean datetime-span string into ``(start, end)``.

    Returns datetime.datetime values when a time component was matched,
    plain datetime.date otherwise; ``end`` is None when no end part
    exists in the string.
    """
    # Split on the first span separator ("~", "/", "부터").
    elems = re.split(_DATETIME_SPAN_SEPARATOR, s, 2)
    if len(elems) == 1:
        start_str, end_str = elems[0], None
    else:
        start_str, end_str = elems[0], elems[1]
    start_date_match = re.search(_YMD_DATE, start_str)
    start_time_match = re.search(_TIME, start_str)
    # NOTE(review): if no date matches, _extract_date raises
    # AttributeError on the None match — presumably callers guarantee a
    # date is present; confirm.
    start_datetime = _extract_date(start_date_match)
    if start_time_match:
        start_datetime = datetime.datetime.combine(start_datetime, _extract_time(start_time_match))
    end_datetime = None
    if end_str:
        end_date_match = re.search(_YMD_DATE, end_str)
        end_time_match = re.search(_TIME, end_str)
        if end_date_match or end_time_match:
            if end_date_match:
                # Missing end-date fields inherit from the start.
                end_datetime = _extract_date(end_date_match, date_default=start_datetime)
            else:
                # Time-only end: reuse the start's date.
                if isinstance(start_datetime, datetime.datetime):
                    end_datetime = start_datetime.date()
                else:
                    end_datetime = start_datetime
            if end_time_match:
                end_datetime = datetime.datetime.combine(end_datetime, _extract_time(end_time_match))
    return (start_datetime, end_datetime)
|
[
"mlambert@gmail.com"
] |
mlambert@gmail.com
|
220ac4e18bfaea6b2a5fc122ae8bcc38836508db
|
fafb89a3552e4dbb47d134966462ef5f3f37f576
|
/KEMP/v0.6_ovelap_ok/fdtd3d/cpu/test/pbc_multi_plot.py
|
218f06c00053697b3fa19d4b987ec597ccaa7b61
|
[] |
no_license
|
EMinsight/fdtd_accelerate
|
78fa1546df5264550d12fba3cf964838b560711d
|
a566c60753932eeb646c4a3dea7ed25c7b059256
|
refs/heads/master
| 2021-12-14T03:26:52.070069
| 2012-07-25T08:25:21
| 2012-07-25T08:25:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,528
|
py
|
import numpy as np
import sys, os
sys.path.append( os.path.expanduser('~') )
from kemp.fdtd3d.cpu import QueueTask, Fields, Core, Pbc, IncidentDirect
# Visual check of periodic boundary conditions (Pbc) in a thin FDTD
# domain: the same experiment is repeated with the thin dimension along
# z, y and x, and the resulting field slices are shown side by side.
tmax = 150
tfunc = lambda tstep: np.sin(0.05 * tstep)
# plot
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('image', interpolation='nearest', origin='lower')
fig = plt.figure(figsize=(14,8))
# z-axis
nx, ny, nz = 180, 160, 2
fields = Fields(QueueTask(), nx, ny, nz)
Core(fields)
Pbc(fields, 'xyz')
# Two line sources driven with the same sinusoid.
IncidentDirect(fields, 'ey', (20, 0, 0), (20, -1, -1), tfunc)
IncidentDirect(fields, 'ex', (0, 20, 0), (-1, 20, -1), tfunc)
# March E/H fields for tmax steps, then wait for the queue to drain.
for tstep in xrange(1, tmax+1):
    fields.update_e()
    fields.update_h()
fields.enqueue_barrier()
ax1 = fig.add_subplot(2, 3, 1)
ax1.imshow(fields.get('ey')[:,:,nz/2].T, vmin=-1.1, vmax=1.1)
ax1.set_title('%s, ey[20,:,:]' % repr(fields.ns))
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax2 = fig.add_subplot(2, 3, 4)
ax2.imshow(fields.get('ex')[:,:,nz/2].T, vmin=-1.1, vmax=1.1)
ax2.set_title('%s, ex[:,20,:]' % repr(fields.ns))
ax2.set_xlabel('x')
ax2.set_ylabel('y')
# y-axis
nx, ny, nz = 180, 2, 160
fields = Fields(QueueTask(), nx, ny, nz)
Core(fields)
Pbc(fields, 'xyz')
IncidentDirect(fields, 'ez', (20, 0, 0), (20, -1, -1), tfunc)
IncidentDirect(fields, 'ex', (0, 0, 20), (-1, -1, 20), tfunc)
for tstep in xrange(1, tmax+1):
    fields.update_e()
    fields.update_h()
fields.enqueue_barrier()
ax1 = fig.add_subplot(2, 3, 2)
ax1.imshow(fields.get('ez')[:,ny/2,:].T, vmin=-1.1, vmax=1.1)
ax1.set_title('%s, ez[20,:,:]' % repr(fields.ns))
ax1.set_xlabel('x')
ax1.set_ylabel('z')
ax2 = fig.add_subplot(2, 3, 5)
ax2.imshow(fields.get('ex')[:,ny/2,:].T, vmin=-1.1, vmax=1.1)
ax2.set_title('%s, ex[:,:,20]' % repr(fields.ns))
ax2.set_xlabel('x')
ax2.set_ylabel('z')
# x-axis
nx, ny, nz = 2, 180, 160
fields = Fields(QueueTask(), nx, ny, nz)
Core(fields)
Pbc(fields, 'xyz')
IncidentDirect(fields, 'ez', (0, 20, 0), (-1, 20, -1), tfunc)
IncidentDirect(fields, 'ey', (0, 0, 20), (-1, -1, 20), tfunc)
for tstep in xrange(1, tmax+1):
    fields.update_e()
    fields.update_h()
fields.enqueue_barrier()
ax1 = fig.add_subplot(2, 3, 3)
ax1.imshow(fields.get('ez')[nx/2,:,:].T, vmin=-1.1, vmax=1.1)
ax1.set_title('%s, ez[:,20,:]' % repr(fields.ns))
ax1.set_xlabel('y')
ax1.set_ylabel('z')
ax2 = fig.add_subplot(2, 3, 6)
ax2.imshow(fields.get('ey')[nx/2,:,:].T, vmin=-1.1, vmax=1.1)
ax2.set_title('%s, ey[:,:,20]' % repr(fields.ns))
ax2.set_xlabel('y')
ax2.set_ylabel('z')
#plt.savefig('./png/%.6d.png' % tstep)
plt.show()
|
[
"kh.kim@kiaps.org"
] |
kh.kim@kiaps.org
|
b04305f109a7f6222d39d666b9d65f277cae3196
|
ce972e94fcdf19d6809d94c2a73595233d1f741d
|
/catkin_ws/build/tf/cmake/tf-genmsg-context.py
|
552ade3ed0568d977725f0742c6cd2ff5e746bcd
|
[] |
no_license
|
WilliamZipanHe/reward_shaping_ttr
|
cfa0e26579f31837c61af3e09621b4dad7eaaba2
|
df56cc0153147bb067bc3a0eee0e1e4e1044407f
|
refs/heads/master
| 2022-02-23T05:02:00.120626
| 2019-08-07T21:52:50
| 2019-08-07T21:52:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
# NOTE: machine-generated message-generation context for the 'tf'
# package — values are baked in at CMake configure time. Edit the
# template above and re-run CMake rather than editing this file.
messages_str = "/local-scratch/xlv/catkin_ws/src/geometry/tf/msg/tfMessage.msg"
services_str = "/local-scratch/xlv/catkin_ws/src/geometry/tf/srv/FrameGraph.srv"
pkg_name = "tf"
dependencies_str = "geometry_msgs;sensor_msgs;std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "tf;/local-scratch/xlv/catkin_ws/src/geometry/tf/msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/local-scratch/xlv/miniconda3/envs/py35_no_specific/bin/python3.5"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"xlv@cs-mars-01.cmpt.sfu.ca"
] |
xlv@cs-mars-01.cmpt.sfu.ca
|
c1f2cdad0c15be0461cfad4fc0ce8cad5a672d6f
|
eca90951b53822740812e40572e209728f71c261
|
/models.py
|
a109ee157f955568d9590b918a0010e3fee55a02
|
[] |
no_license
|
tungvx/reporting
|
b7117879be773fccd90dbdb36a9e1220edc1d202
|
98f54c821aad761c0ab0ab83a8faad232ece1b41
|
refs/heads/master
| 2021-01-16T18:20:46.613907
| 2012-03-25T13:42:06
| 2012-03-25T13:42:06
| 3,491,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# -*- coding: utf-8 -*-
import datetime
from django.db import models
from django import forms
class Upload(models.Model): #Upload files table in databases
    # Original name of the uploaded file.
    filename = models.CharField(max_length=255)
    # When the upload happened.
    upload_time = models.DateTimeField('time uploaded')
    # User-supplied description; also used as the display string below.
    description = models.CharField(max_length=255)
    # Path where the file content is stored on disk.
    filestore = models.CharField(max_length=255)
    def __unicode__(self):
        return self.description
class Spreadsheet_report(models.Model): # model to store the information about the spreadsheet used by user
    created_time = models.DateTimeField('time created')
    # User-supplied description; also used as the display string below.
    description = models.CharField(max_length=255)
    # Link to the source spreadsheet.
    spreadsheet_link = models.CharField(max_length=255)
    # Link to the generated report output.
    output_link = models.CharField(max_length=255)
    title = models.CharField(max_length=255)
    def __unicode__(self):
        return self.description
class upload_file_form(forms.Form): # Define a simple form for uploading excels file
    """Two-field form: a required description plus the file itself."""
    description = forms.CharField(max_length=255,required=True)
    file = forms.FileField(required=True,)
def handle_uploaded_file(f, location, filename):
    """Persist an uploaded file-like object to ``<location>/<filename>``.

    ``f`` must expose Django's ``chunks()`` iterator of byte chunks;
    ``location`` is an existing directory; ``filename`` is coerced to str.
    """
    #Save file upload content to uploaded folder
    # Bug fix: the original opened the file without a context manager and
    # leaked the descriptor if any chunk read/write raised; `with`
    # guarantees the handle is closed on every path.
    with open('%s/%s' % (location, str(filename)), 'wb') as fd:
        for chunk in f.chunks():
            fd.write(chunk)  # stream chunk-by-chunk; never loads whole file
class spreadsheet_report_form(forms.Form):
    """Form used to request a report from a spreadsheet link."""
    description = forms.CharField(max_length=255,required=True)
    spreadsheet_link = forms.CharField(max_length=255,required=False)
|
[
"toilatung90@gmail.com"
] |
toilatung90@gmail.com
|
ba265bb9d96f3ceeec3b311c1c36ce36f9c18206
|
f50114692187a054bf2627695c6380d5ac79a20c
|
/q0028.py
|
26642f410f3a67d77ed2d7e0af65fad57a49ecc3
|
[] |
no_license
|
petitepirate/interviewQuestions
|
c0cb8775932f90ff5c8e4ef80be468ef4155052f
|
209322c1f1ddbe8111dc2c5e9c35aaf787e0196a
|
refs/heads/master
| 2023-07-17T12:15:22.847440
| 2021-06-14T02:32:39
| 2021-06-14T02:32:39
| 286,884,022
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,685
|
py
|
# This problem was asked by Palantir.
# Write an algorithm to justify text. Given a sequence of words and an integer line length
# k, return a list of strings which represents each line, fully justified.
# More specifically, you should have as many words as possible in each line. There should
# be at least one space between each word. Pad extra spaces when necessary so that each line
# has exactly length k. Spaces should be distributed as equally as possible, with the extra
# spaces, if any, distributed starting from the left.
# If you can only fit one word on a line, then you should pad the right-hand side with spaces.
# Each word is guaranteed not to be longer than k.
# For example, given the list of words ["the", "quick", "brown", "fox", "jumps", "over",
# "the", "lazy", "dog"] and k = 16, you should return the following:
# ["the quick brown", # 1 extra space on the left
# "fox jumps over", # 2 extra spaces distributed evenly
# "the lazy dog"] # 4 extra spaces distributed evenly
# ________________________________________________________________________________________
# Solution
# It seems like the justification algorithm is independent from the groupings, so immediately
# we should figure out two things:
# How to group lines together so that it is as close to k as possible (without going over)
# Given a grouping of lines, justifying the text by appropriately distributing spaces
# To solve the first part, let's write a function group_lines that takes in all the words in
# our input sequence as well as out target line length k, and return a list of list of words
# that represents the lines that we will eventually justify. Our main strategy will be to
# iterate over all the words, keep a list of words for the current line, and because we want
# to fit as many words as possible per line, estimate the current line length, assuming only
# one space between each word. Once we go over k, then save the word and start a new line with
# it. So our function will look something like this:
import math
def min_line(words):
return ' '.join(words)
def group_lines(words, k):
'''
Returns groupings of |words| whose total length, including 1 space in between,
is less than |k|.
'''
groups = []
current_sum = 0
current_line = []
for _, word in enumerate(words):
# Check if adding the next word would push it over
# the limit. If it does, then add |current_line| to
# group. Also reset |current_line| properly.
if len(min_line(current_line + [word])) > k:
groups.append(current_line)
current_line = []
current_line.append(word)
# Add the last line to groups.
groups.append(current_line)
return groups
# Then, we'll want to actually justify each line. We know for sure each line we feed
# from group_lines is the maximum number of words we can pack into a line and no more.
# What we can do is first figure out how many spaces we have available to distribute
# between each word. Then from that, we can calculate how much base space we should
# have between each word by dividing it by the number of words minus one. If there are
# any leftover spaces to distribute, then we can keep track of that in a counter, and
# as we rope in each new word we'll add the appropriate number of spaces. We can't add
# more than one leftover space per word.
def justify(words, length):
'''
Precondition: |words| can fit in |length|.
Justifies the words using the following algorithm:
- Find the smallest spacing between each word (available_spaces / spaces)
- Add a leftover space one-by-one until we run out
'''
if len(words) == 1:
word = words[0]
num_spaces = length - len(word)
spaces = ' ' * num_spaces
return word + spaces
spaces_to_distribute = length - sum(len(word) for word in words)
number_of_spaces = len(words) - 1
smallest_space = math.floor(spaces_to_distribute / number_of_spaces)
leftover_spaces = spaces_to_distribute - \
(number_of_spaces * smallest_space)
justified_words = []
for word in words:
justified_words.append(word)
current_space = ' ' * smallest_space
if leftover_spaces > 0:
current_space += ' '
leftover_spaces -= 1
justified_words.append(current_space)
return ''.join(justified_words).rstrip()
# The final solution should just combine our two functions:
def justify_text(words, k):
return [justify(group, k) for group in group_lines(words, k)]
|
[
"msmeganmcmanus@gmail.com"
] |
msmeganmcmanus@gmail.com
|
ede19815e880082d79936d9cd70e6f8e3e80d2d2
|
0ceb9ffeb2c087b8ae75c1a1179387fe36379f17
|
/test_users.py
|
31f33f5ba427527d4314a2b07fc4c576bc50f3d0
|
[] |
no_license
|
chetat/eveno
|
47a3a99ebe4c5ea10252e1a21c45129e61b3e2ba
|
dbc138cde6e48039614cea52d3dc7bcad869a1dd
|
refs/heads/master
| 2022-12-09T13:51:46.923622
| 2021-03-09T10:21:25
| 2021-03-09T10:21:25
| 224,212,711
| 0
| 0
| null | 2022-09-16T18:20:10
| 2019-11-26T14:34:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
import os
import json
from app import create_app, sqlalchemy as db
from sqlalchemy import create_engine, text
from flask_sqlalchemy import SQLAlchemy
from app.config import TestingConfig
from models import initialize_db
import unittest
class UsersTestCase(unittest.TestCase):
    """This class represents the Event App test case"""
    def setUp(self):
        """Executed before each test.
        Define test variables and initialize app."""
        self.app = create_app(TestingConfig)
        self.client = self.app.test_client
        self.user = {
            "email": "yeku@gmail.com",
            "firstname": "Yeku Wilfred",
            "lastname": "chetat",
            "phone": "671357962",
            "password": "weezybaby"
        }
        with self.app.app_context():
            # create all tables
            db.create_all()
            initialize_db()
    def tearDown(self):
        """Drop every table so each test starts from a clean database."""
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
    def test_create_user(self):
        """POSTing a valid payload to /users should return HTTP 200."""
        res = self.client().post("api/v1/users", json=self.user)
        # Bug fix: assertTrue(res.status_code, 200) treated 200 as the *msg*
        # argument and passed for ANY non-zero status; assert the code itself.
        self.assertEqual(res.status_code, 200)
    def test_get_users(self):
        """Listing users should return HTTP 200."""
        res = self.client().get("api/v1/users")
        # Bug fix: same assertTrue misuse as above.
        self.assertEqual(res.status_code, 200)
    def test_invalid_credentials(self):
        """Authenticating with unknown credentials should yield HTTP 404."""
        res = self.client().post("api/v1/auth", json={"email": "yekuwilfred@gmailcom",
                                                      "password": "wybaby"
                                                      })
        self.assertEqual(res.status_code, 404)
|
[
"yekuwilfred@gmail.com"
] |
yekuwilfred@gmail.com
|
0cc046943de4f6e8e84509edc6055886e1602b1e
|
024a515e8741ecc88b4cc20c067a5ef7785375f2
|
/preproc_dataset.py
|
0241efc2f5bb8f421152bf5b2a64df8c226e9aa6
|
[
"MIT"
] |
permissive
|
PCJohn/maml
|
e3752a842557210e955f68479e0a056766647d54
|
17362ed5f9d85863ead5774c3e08e163c260bd16
|
refs/heads/master
| 2020-03-28T11:37:42.014350
| 2018-10-01T17:08:12
| 2018-10-01T17:08:12
| 148,232,484
| 0
| 0
| null | 2018-09-10T23:35:04
| 2018-09-10T23:35:03
| null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
import os
import numpy as np
from PIL import Image
# Which dataset to preprocess; selects the source directory and the
# per-image resize target below.
dataset = 'miniimagenet'
train_count = 10 # train samples per class
val_count = 590 # val samples per class
# Hard-coded source locations for the three supported datasets.
mnist_dir = '/home/prithvi/dsets/MNIST/trainingSet/'
omniglot_dir = '/home/prithvi/dsets/Omniglot/train'
miniimagenet_dir = '/home/prithvi/dsets/miniimagenet/train'
save_dir = './data'
if dataset == 'mnist':
    data_dir = mnist_dir
    size = (28,28)
elif dataset == 'omniglot':
    data_dir = omniglot_dir
    size = (28,28)
elif dataset == 'miniimagenet':
    data_dir = miniimagenet_dir
    size = (84,84)
if __name__ == '__main__':
    # Split every class directory under data_dir into meta-train /
    # meta-val subsets of resized images.
    train_dir = os.path.join(save_dir, 'metatrain')
    val_dir = os.path.join(save_dir, 'metaval')
    # Portable/safe replacement for os.system('mkdir ...'): no shell, no
    # injection risk, and a no-op when the directory already exists.
    os.makedirs(train_dir, exist_ok=True)
    os.makedirs(val_dir, exist_ok=True)
    for cls in os.listdir(data_dir):
        cls_dir = os.path.join(data_dir, cls)
        cls_train_dir = os.path.join(train_dir, cls)
        os.makedirs(cls_train_dir, exist_ok=True)
        cls_val_dir = os.path.join(val_dir, cls)
        os.makedirs(cls_val_dir, exist_ok=True)
        # Bug fix: on Python 3 map() returns a lazy iterator, so the
        # np.random.shuffle() and slicing below would raise; build a
        # concrete list of (resized image, file name) pairs instead.
        samples = [(Image.open(path).resize(size, resample=Image.LANCZOS),
                    os.path.split(path)[-1])
                   for path in (os.path.join(cls_dir, s)
                                for s in os.listdir(cls_dir))]
        np.random.shuffle(samples)
        train = samples[:train_count]
        val = samples[train_count:train_count + val_count]
        for img, fname in train:
            img.save(os.path.join(cls_train_dir, fname))
        for img, fname in val:
            img.save(os.path.join(cls_val_dir, fname))
|
[
"prithvichakra@gmail.com"
] |
prithvichakra@gmail.com
|
976fe04075eb0ae35fbceb4cf4f95288fd19f182
|
9a0ada115978e9600ad7f1eab65fcc8825f637cf
|
/work_in_progress/kezhi_paper/classification/intermedia_classification_WT_new.py
|
23736c730b2fe6d41790fd9dfaa294059b6a9fd6
|
[] |
no_license
|
ver228/work-in-progress
|
c1971f8d72b9685f688a10e4c5a1b150fa0812da
|
ef5baecc324da4550f81edb0513d38f039ee3429
|
refs/heads/master
| 2018-12-16T22:18:55.457290
| 2018-09-14T09:27:49
| 2018-09-14T09:27:49
| 56,165,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 06 16:16:01 2017
@author: kezhili
"""
import numpy as np
import tables
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
# NOTE(review): no_neuron, no_fea and names are not referenced by the
# __main__ block below -- presumably leftovers from an earlier experiment;
# confirm before removing.
no_neuron = 1040 # step size in the mesh
no_fea = 30
fea_no = 30
names = ["Nearest Neighbors", "RBF SVM", "Gaussian Process",
         "Decision Tree", "Random Forest"]
if __name__ == '__main__':
    # Load precomputed features (X) and labels (Y) from the HDF5 result file.
    fdata = 'interm_LR_WTmemory_interm_result_30features_WT.hdf5'
    with tables.File(fdata, 'r') as fid:
        X = fid.get_node('/X3')[:]
        Y = fid.get_node('/Y3')[:]
    #because of the fix of a previous error in the dataset,
    #where 'LSJ1' and 'L5J1' are actually the same class
    Y[97:107] = Y[107]
    # Each non-empty line of this file is 'True'/'False', one per feature
    # column: a boolean mask selecting the features to keep.
    with open('result_30_features.txt') as f:
        lines = f.read().split('\n')
    X_ind = [x == 'True' for x in lines if x]
    Xp = X[:,X_ind]
    cross_validation_fold = 5
    # Report mean/std cross-validated accuracy for increasing forest sizes.
    for n_estimators in [10, 100, 1000]:
        clf2 = RandomForestClassifier(n_estimators=n_estimators)
        c_val = cross_val_score(clf2, Xp, Y, cv = cross_validation_fold)
        print(np.mean(c_val), np.std(c_val))
|
[
"ver228@gmail.com"
] |
ver228@gmail.com
|
694c9e252cceb994dcb65e0a0c42be5b6500d395
|
10e89eb922a5c122079a55234169e5b0e7af0819
|
/histore/cli/base.py
|
06f81c1b370a21633cbbc13230aeb3a0d81faa09
|
[
"BSD-3-Clause"
] |
permissive
|
Sandy4321/histore
|
5753dd0008d1ae3400506181f22789aa9fdb43ba
|
d600052514a1c5f672137f76a6e1388184b17cd4
|
refs/heads/master
| 2023-04-15T07:57:49.762415
| 2021-04-28T11:12:31
| 2021-04-28T11:12:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
# This file is part of the History Store (histore).
#
# Copyright (C) 2018-2021 New York University.
#
# The History Store (histore) is released under the Revised BSD License. See
# file LICENSE for full license details.
"""Command line interface to interact with a manager for archives on the local
file system.
"""
import click
import os
import sys
from histore.archive.manager.persist import PersistentArchiveManager
from histore.cli.archive import create_archive, delete_archive, list_archives, rename_archive
from histore.cli.snapshot import checkout_snapshot, commit_snapshot, list_snapshots
import histore.config as config
# -- Init the archive manager -------------------------------------------------
@click.command(name='init')
@click.option(
    '-b', '--basedir',
    required=False,
    type=click.Path(file_okay=False, dir_okay=True),
    help='Base directory for archive files'
)
@click.option(
    '-c', '--dbconnect',
    required=False,
    help='Connect URL for the database'
)
def init_manager(basedir, dbconnect):
    """Initialize the archive manager directory.

    Falls back to ``config.BASEDIR()`` when no base directory is given and
    exits with status -1 if the directory exists and is non-empty.
    """
    # Test if the base directory exists and is empty.
    basedir = basedir if basedir is not None else config.BASEDIR()
    if os.path.isdir(basedir):
        if os.listdir(basedir):
            click.echo('Not an empty directory {}.'.format(basedir))
            sys.exit(-1)
    # Create instance of persistent archive manager to setup directories and
    # files. The instance itself is discarded; only its side effects matter.
    PersistentArchiveManager(
        basedir=basedir,
        dbconnect=dbconnect,
        create=True
    )
    click.echo("Initialized in {}.".format(os.path.abspath(basedir)))
# -- Create command group -----------------------------------------------------
@click.group()
def cli(): # pragma: no cover
    """Command line interface for HISTORE archive manager."""
    pass
# Register all subcommands (defined here and in the archive/snapshot
# modules) on the top-level group.
cli.add_command(init_manager)
cli.add_command(checkout_snapshot)
cli.add_command(commit_snapshot)
cli.add_command(create_archive)
cli.add_command(delete_archive)
cli.add_command(list_archives)
cli.add_command(list_snapshots)
cli.add_command(rename_archive)
|
[
"heiko.muller@gmail.com"
] |
heiko.muller@gmail.com
|
7b4029c8b2039d20fcfa67a38e0af84120540d04
|
21e35e3889cd0064474343a3b84aa289a01f8fac
|
/third_example_flask_app/classes.py
|
89d5863ab289c20518eb2bebec2bf9439ba3ca5f
|
[] |
no_license
|
EricSchles/learn_python
|
b8fbc2b38647efb8e7176ac0d20708ffe1691007
|
8a5eb76aa333253a6c01f76d36dacad6bcf931ea
|
refs/heads/master
| 2021-01-13T16:45:53.606240
| 2017-01-31T21:10:53
| 2017-01-31T21:10:53
| 77,006,920
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
class Integer:
    """A tiny mutable wrapper around a number with add/subtract helpers."""

    def __init__(self, number):
        # The current value held by this wrapper.
        self.number = number

    def add(self, other_integer):
        """Increase the stored value by *other_integer*."""
        self.number = self.number + other_integer

    def subtract(self, other_integer):
        """Decrease the stored value by *other_integer*."""
        self.number = self.number - other_integer

    def __str__(self):
        # repr() of a number is its plain decimal text, e.g. '12'.
        return repr(self.number)
if __name__ == '__main__':
    # Demo: 5 + 7 -> prints 12 via Integer.__str__.
    number = Integer(5)
    number.add(7)
    print(number)
|
[
"ericschles@gmail.com"
] |
ericschles@gmail.com
|
6c81ceed055c2d375b64725926e5208cb180263e
|
4e96f383d4703ad8ee58869ed91a0c8432c8a051
|
/Cura/Cura/plugins/VersionUpgrade/VersionUpgrade25to26/__init__.py
|
c74b3218b6ee2cb2b18f90149702826398f3ef7d
|
[
"GPL-3.0-only",
"LGPL-3.0-only"
] |
permissive
|
flight7788/3d-printing-with-moveo-1
|
b2dba26010c4fa31815bc1d2d0966161a8600081
|
7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0
|
refs/heads/Feature_Marlin_with_AlanBoy
| 2022-08-30T18:36:44.785058
| 2020-05-30T07:52:58
| 2020-05-30T07:52:58
| 212,583,912
| 0
| 0
|
MIT
| 2020-05-16T07:39:47
| 2019-10-03T13:13:01
|
C
|
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, TYPE_CHECKING
from . import VersionUpgrade25to26
if TYPE_CHECKING:
from UM.Application import Application
upgrade = VersionUpgrade25to26.VersionUpgrade25to26()
def getMetaData() -> Dict[str, Any]:
    """Declare the version mappings this plugin can upgrade (2.5 -> 2.6).

    Cura's VersionUpgradeManager consumes this structure: each
    ``version_upgrade`` entry maps (config type, old version) to
    (config type, new version, upgrade callable), and ``sources`` tells
    the manager where each config type lives and how to read its version.
    """
    return {
        "version_upgrade": {
            # From To Upgrade function
            ("preferences", 4000000): ("preferences", 4000001, upgrade.upgradePreferences),
            # NOTE: All the instance containers share the same general/version, so we have to update all of them
            # if any is updated.
            ("quality_changes", 2000000): ("quality_changes", 2000001, upgrade.upgradeInstanceContainer),
            ("user", 2000000): ("user", 2000001, upgrade.upgradeInstanceContainer),
            ("definition_changes", 2000000): ("definition_changes", 2000001, upgrade.upgradeInstanceContainer),
            ("machine_stack", 3000000): ("machine_stack", 3000001, upgrade.upgradeMachineStack),
        },
        "sources": {
            "quality_changes": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./quality"}
            },
            "preferences": {
                "get_version": upgrade.getCfgVersion,
                "location": {"."}
            },
            "user": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./user"}
            },
            "definition_changes": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./machine_instances"}
            },
            "machine_stack": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./machine_instances"}
            }
        }
    }
def register(app: "Application") -> Dict[str, Any]:
    """Plugin entry point: hand the upgrade object to Cura's registry."""
    return { "version_upgrade": upgrade }
|
[
"t106360212@ntut.org.tw"
] |
t106360212@ntut.org.tw
|
6d9ced454145fc2b7691e9934b6e754a3ceb726d
|
18536f8145457a193b976eec44ee92535f588e54
|
/tests/functional/s3api/test_select_object_content.py
|
87750a30e5124bb75e76a8d9613cd5c8052055b8
|
[
"Apache-2.0"
] |
permissive
|
jamsheedsaeed/awsapp
|
07e4ec6b9e07f679106db8e61f104ee9065e6af0
|
5498a3652b1d471a8695e14ca9739140e88a4b29
|
refs/heads/master
| 2023-01-01T23:44:16.181967
| 2020-06-19T06:56:48
| 2020-06-19T06:56:48
| 273,418,893
| 0
| 0
|
NOASSERTION
| 2022-12-26T20:16:39
| 2020-06-19T06:16:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,758
|
py
|
#!/usr/bin/env python
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import tempfile
import shutil
from awscli.testutils import BaseAWSCommandParamsTest
from awscli.testutils import BaseAWSHelpOutputTest
class TestGetObject(BaseAWSCommandParamsTest):
    """Functional tests for `aws s3api select-object-content`."""
    prefix = ['s3api', 'select-object-content']
    def setUp(self):
        super(TestGetObject, self).setUp()
        # The fake event stream below stands in for the real S3 response.
        self.parsed_response = {'Payload': self.create_fake_payload()}
        self._tempdir = tempfile.mkdtemp()
    def tearDown(self):
        super(TestGetObject, self).tearDown()
        shutil.rmtree(self._tempdir)
    def create_fake_payload(self):
        """Yield a synthetic SelectObjectContent event stream."""
        yield {'Records': {'Payload': b'a,b,c,d\n'}}
        # These next two events are ignored because they aren't
        # "Records".
        yield {'Progress': {'Details': {'BytesScanned': 1048576,
                                        'BytesProcessed': 37748736}}}
        yield {'Records': {'Payload': b'e,f,g,h\n'}}
        yield {'Stats': {'Details': {'BytesProcessed': 62605400,
                                     'BytesScanned': 1662276}}}
        yield {'End': {}}
    def test_can_stream_to_file(self):
        """Only Records payloads should be written to the output file."""
        filename = os.path.join(self._tempdir, 'outfile')
        cmdline = self.prefix[::]
        cmdline.extend(['--bucket', 'mybucket'])
        cmdline.extend(['--key', 'mykey'])
        cmdline.extend(['--expression', 'SELECT * FROM S3Object'])
        cmdline.extend(['--expression-type', 'SQL'])
        cmdline.extend(['--request-progress', 'Enabled=True'])
        cmdline.extend(['--input-serialization',
                        '{"CSV": {}, "CompressionType": "GZIP"}'])
        cmdline.extend(['--output-serialization', '{"CSV": {}}'])
        cmdline.extend([filename])
        expected_params = {
            'Bucket': 'mybucket',
            'Key': u'mykey',
            'Expression': 'SELECT * FROM S3Object',
            'ExpressionType': 'SQL',
            'InputSerialization': {'CSV': {}, 'CompressionType': 'GZIP'},
            'OutputSerialization': {'CSV': {}},
            'RequestProgress': {'Enabled': True},
        }
        stdout = self.assert_params_for_cmd(cmdline, expected_params)[0]
        # Nothing goes to stdout when streaming to a file.
        self.assertEqual(stdout, '')
        with open(filename, 'r') as f:
            contents = f.read()
        self.assertEqual(contents, (
            'a,b,c,d\n'
            'e,f,g,h\n'
        ))
    def test_errors_are_propagated(self):
        """A service error should surface as rc 254 with the error code."""
        self.http_response.status_code = 400
        self.parsed_response = {
            'Error': {
                'Code': 'CastFailed',
                'Message': 'Attempt to convert from one data type to another',
            }
        }
        cmdline = self.prefix + [
            '--bucket', 'mybucket',
            '--key', 'mykey',
            '--expression', 'SELECT * FROM S3Object',
            '--expression-type', 'SQL',
            '--request-progress', 'Enabled=True',
            '--input-serialization', '{"CSV": {}, "CompressionType": "GZIP"}',
            '--output-serialization', '{"CSV": {}}',
            os.path.join(self._tempdir, 'outfile'),
        ]
        expected_params = {
            'Bucket': 'mybucket',
            'Key': u'mykey',
            'Expression': 'SELECT * FROM S3Object',
            'ExpressionType': 'SQL',
            'InputSerialization': {'CSV': {}, 'CompressionType': 'GZIP'},
            'OutputSerialization': {'CSV': {}},
            'RequestProgress': {'Enabled': True},
        }
        self.assert_params_for_cmd(
            cmd=cmdline, params=expected_params,
            expected_rc=254,
            stderr_contains=(
                'An error occurred (CastFailed) when '
                'calling the SelectObjectContent operation'),
        )
class TestHelpOutput(BaseAWSHelpOutputTest):
    """Verify the customized help text for select-object-content."""
    def test_output(self):
        self.driver.main(['s3api', 'select-object-content', 'help'])
        # We don't want to be super picky because the wording may change
        # We just want to verify the Output section was customized.
        self.assert_contains(
            'Output\n======\n'
            'This command generates no output'
        )
        self.assert_not_contains('[outfile')
        self.assert_contains('outfile')
|
[
"jamsheedsaeed786@gmail.com"
] |
jamsheedsaeed786@gmail.com
|
d9fe6f00109d7ea449564f348bea0fbcf2feca43
|
001184c168b93118f0429b11bab55fe108928b5d
|
/Week 3- Programming Assignments/Orientation2.py
|
533af81246e273eec244ce942ae5e3e9300f2085
|
[] |
no_license
|
harrypotter0/ml-robot
|
caee1f9695427b8a83f64a41420f89b948fdb801
|
06e6672ba47fd73d4077ff5a6f48bdfafaea6597
|
refs/heads/master
| 2021-09-14T04:28:31.987037
| 2018-05-08T14:30:04
| 2018-05-08T14:30:04
| 115,803,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,177
|
py
|
# In this exercise, write a program that will
# run your previous code twice.
# Please only modify the indicated area below!
from math import *
import random
# Fixed landmark positions the robot measures distances to, and the side
# length of the (cyclic) square world.
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]]
world_size = 100.0
class robot:
    """A 2-D robot (or particle) in a cyclic square world.

    Pose is (x, y, orientation); motion and sensing can be corrupted by
    Gaussian noise. Relies on the module-level `landmarks` and
    `world_size` globals.
    """

    def __init__(self):
        # Start at a uniformly random pose inside the world.
        self.x = random.random() * world_size
        self.y = random.random() * world_size
        self.orientation = random.random() * 2.0 * pi
        self.forward_noise = 0.0
        self.turn_noise = 0.0
        self.sense_noise = 0.0

    def set(self, new_x, new_y, new_orientation):
        """Place the robot at an exact pose; ValueError if out of range."""
        # Python 3 fix: `raise ValueError, msg` is Python 2-only syntax and
        # a SyntaxError on Python 3; use the call form instead.
        if new_x < 0 or new_x >= world_size:
            raise ValueError('X coordinate out of bound')
        if new_y < 0 or new_y >= world_size:
            raise ValueError('Y coordinate out of bound')
        if new_orientation < 0 or new_orientation >= 2 * pi:
            raise ValueError('Orientation must be in [0..2pi]')
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)

    def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
        self.forward_noise = float(new_f_noise)
        self.turn_noise = float(new_t_noise)
        self.sense_noise = float(new_s_noise)

    def sense(self):
        """Return noisy distances from the robot to every landmark."""
        Z = []
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            dist += random.gauss(0.0, self.sense_noise)
            Z.append(dist)
        return Z

    def move(self, turn, forward):
        """Return a NEW robot advanced by (turn, forward) with motion noise."""
        if forward < 0:
            # Python 3 fix: call-form raise (see set()).
            raise ValueError('Robot cant move backwards')
        # turn, and add randomness to the turning command
        orientation = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
        orientation %= 2 * pi
        # move, and add randomness to the motion command
        dist = float(forward) + random.gauss(0.0, self.forward_noise)
        x = self.x + (cos(orientation) * dist)
        y = self.y + (sin(orientation) * dist)
        x %= world_size    # cyclic truncate
        y %= world_size
        # set particle
        res = robot()
        res.set(x, y, orientation)
        res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
        return res

    def Gaussian(self, mu, sigma, x):
        # calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
        return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))

    def measurement_prob(self, measurement):
        # calculates how likely a measurement should be
        prob = 1.0
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            prob *= self.Gaussian(dist, self.sense_noise, measurement[i])
        return prob

    def __repr__(self):
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
#myrobot = robot()
#myrobot.set_noise(5.0, 0.1, 5.0)
#myrobot.set(30.0, 50.0, pi/2)
#myrobot = myrobot.move(-pi/2, 15.0)
#print myrobot.sense()
#myrobot = myrobot.move(-pi/2, 10.0)
#print myrobot.sense()
#### DON'T MODIFY ANYTHING ABOVE HERE! ENTER/MODIFY CODE BELOW ####
# Ground truth: one robot takes a single landmark reading after one move.
myrobot = robot()
myrobot = myrobot.move(0.1, 5.0)
Z = myrobot.sense()
N = 1000
p = []
for i in range(N):
    x = robot()
    x.set_noise(0.05, 0.05, 5.0)
    p.append(x)
# Run the particle filter ntimes iterations: move, weight, resample.
ntimes = 28
for i in range(ntimes):
    p2 = []
    for i in range(N):
        p2.append(p[i].move(0.1, 5.0))
    p = p2
    # Importance weights: likelihood of the measurement per particle.
    w = []
    for i in range(N):
        w.append(p[i].measurement_prob(Z))
    # Resampling wheel: draw N particles proportionally to their weights.
    p3 = []
    index = int(random.random() * N)
    beta = 0.0
    mw = max(w)
    for i in range(N):
        beta += random.random() * 2.0 * mw
        while beta > w[index]:
            beta -= w[index]
            index = (index + 1) % N
        p3.append(p[index])
    p = p3
# Bug fix: `print p` is Python 2-only; the call form works on both 2 and 3.
print(p) #Leave this print statement for grading purposes!
|
[
"9654263057akashkandpal@gmail.com"
] |
9654263057akashkandpal@gmail.com
|
d91241e28a7781f4e1b8d3e0aaae4e0162f0622f
|
a3eccc652f83815318bdb033a33573c5b1e073e9
|
/nac/crm/views/add_lead.py
|
7d6cf4cdf3c36cb198817a0d24a038fb42afd38d
|
[] |
no_license
|
jsvelu/coms-dev--old
|
8139fa511e2985b4d71550f1c59402069d09edf3
|
de300ad6ef947d29380972a6efe809f4ef05d7e1
|
refs/heads/main
| 2023-07-17T20:44:36.101738
| 2021-09-04T21:56:38
| 2021-09-04T21:56:38
| 403,158,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
from authtools.views import resolve_url_lazy
from django.urls import reverse_lazy
from django.views.generic.edit import FormView
from rules.contrib.views import PermissionRequiredMixin
from crm.forms.lead import LeadForm
from customers.models import Customer
from dealerships.models import Dealership
class AddLeadView(PermissionRequiredMixin, FormView):
    """Form view that creates a new Customer record flagged as a lead."""
    template_name = 'crm/add_lead.html'
    form_class = LeadForm
    success_url = reverse_lazy('crm:lead_listing')
    raise_exception = True  # 403 instead of redirect when permission denied
    permission_required = 'customers.add_customer'
    def get_allowed_dealerships(self):
        """Return the dealerships this user may attach a lead to.

        NOTE(review): returns None when the user has neither permission,
        which would make get_form_kwargs() iterate over None -- confirm
        that permission_required above rules that case out.
        """
        dealer_choices = None
        if self.request.user.has_perm('customers.manage_self_and_dealership_leads_only'):
            dealer_choices = Dealership.objects.filter(dealershipuser=self.request.user)
        # 'manage_all_leads' wins when both permissions are present.
        if self.request.user.has_perm('crm.manage_all_leads'):
            dealer_choices = Dealership.objects.all()
        return dealer_choices
    def get_form_kwargs(self):
        """Inject the (id, name) dealership choices into the form."""
        kwargs = super(AddLeadView, self).get_form_kwargs()
        dealer_choices = [(dealership.id, dealership.name) for dealership in self.get_allowed_dealerships()]
        kwargs.update({'dealership_choices': dealer_choices})
        return kwargs
    def get_context_data(self, **kwargs):
        context = super(AddLeadView, self).get_context_data(**kwargs)
        context['sub_heading'] = 'Add Lead'
        return context
    def form_valid(self, form):
        """Mark the saved customer as a lead before completing the flow."""
        lead = form.save(commit=False)
        lead.lead_type = Customer.LEAD_TYPE_LEAD
        lead.save()
        return super(AddLeadView, self).form_valid(form)
|
[
"velu@qrsolutions.in"
] |
velu@qrsolutions.in
|
77388ef09898130d90f9cc56214733cdd9160d06
|
4b60c34ba37e7c0611257e7934791fb43d01e254
|
/src/Lib/pysparseSuperLU.py
|
2524ee2635966f53d120c079893e98ac0bbfb303
|
[] |
no_license
|
regmi/pysparse
|
0913ff69b5d07b58c20deb5b6f44caeaa8498d64
|
ebc2ad045382c69e6bb41217c9431e51736ac4a0
|
refs/heads/master
| 2021-01-01T16:20:10.075871
| 2010-04-09T22:56:51
| 2010-04-09T22:56:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,502
|
py
|
"""
A framework for solving sparse linear systems of equations using an LU
factorization, by means of the supernodal sparse LU factorization package
SuperLU ([DEGLL99]_, [DGL99]_, [LD03]_).
This package is appropriate for factorizing sparse square unsymmetric or
rectangular matrices.
See [SLU]_ for more information.
**References:**
.. [DEGLL99] J. W. Demmel, S. C. Eisenstat, J. R. Gilbert, X. S. Li and
J. W. H. Liu, *A supernodal approach to sparse partial pivoting*,
SIAM Journal on Matrix Analysis and Applications **20**\ (3),
pp. 720-755, 1999.
.. [DGL99] J. W. Demmel, J. R. Gilbert and X. S. Li,
*An Asynchronous Parallel Supernodal Algorithm for Sparse Gaussian
Elimination*, SIAM Journal on Matrix Analysis and Applications
**20**\ (4), pp. 915-952, 1999.
.. [LD03] X. S. Li and J. W. Demmel, *SuperLU_DIST: A Scalable
Distributed-Memory Sparse Direct Solver for Unsymmetric Linear
Systems*, ACM Transactions on Mathematical Software **29**\ (2),
pp. 110-140, 2003.
.. [SLU] http://crd.lbl.gov/~xiaoye/SuperLU
"""
# To look into:
# - allow other data types
__docformat__ = 'restructuredtext'
import pysparseMatrix as psm
import numpy
import resource
from directSolver import PysparseDirectSolver
from pysparse import superlu
def cputime():
return resource.getrusage(resource.RUSAGE_SELF)[0]
class PysparseSuperLUSolver( PysparseDirectSolver ):
"""
`PysparseSuperLUSolver` is a wrapper class around the SuperLu library for
the factorization of full-rank n-by-m matrices. Only matrices with real
coefficients are currently supported.
:parameters:
:A: The matrix to be factorized, supplied as a PysparseMatrix instance.
:keywords:
:symmetric: a boolean indicating that the user wishes to use symmetric
mode. In symmetric mode, ``permc_spec=2`` must be chosen and
``diag_pivot_thresh`` must be small, e.g., 0.0 or 0.1. Since
the value of ``diag_pivot_thresh`` is up to the user, setting
``symmetric`` to ``True`` does *not* automatically set
``permc_spec`` and ``diag_pivot_thresh`` to appropriate
values.
:diag_pivot_thresh: a float value between 0 and 1 representing the
threshold for partial pivoting (0 = no pivoting,
1 = always perform partial pivoting). Default: 1.0.
:drop_tol: the value of a drop tolerance, between 0 and 1, if an
incomplete factorization is desired (0 = exact factorization).
This keyword does not exist if using SuperLU version 2.0 and
below. In more recent version of SuperLU, the keyword is
accepted but has no effect. Default: 0.0
:relax: an integer controling the degree of relaxing supernodes.
Default: 1.
:panel_size: an integer specifying the maximum number of columns to form
a panel. Default: 10.
:permc_spec: an integer specifying the ordering strategy used during the
factorization.
0. natural ordering,
1. MMD applied to the structure of
:math:`\mathbf{A}^T \mathbf{A}`
2. MMD applied to the structure of
:math:`\mathbf{A}^T + \mathbf{A}`
3. COLAMD.
Default: 2.
.. attribute:: LU
A :class:`superlu_context` object encapsulating the factorization.
.. attribute:: sol
The solution of the linear system after a call to :meth:`solve`.
.. attribute:: factorizationTime
The CPU time to perform the factorization.
.. attribute:: solutionTime
The CPU time to perform the forward and backward sweeps.
.. attribute:: lunz
The number of nonzero elements in the factors L and U together after a
call to :meth:`fetch_lunz`.
"""
def __init__(self, A, **kwargs):
PysparseDirectSolver.__init__(self, A, **kwargs)
self.type = numpy.float
self.nrow, self.ncol = A.getShape()
t = cputime()
self.LU = superlu.factorize(A.matrix.to_csr(), **kwargs)
self.factorizationTime = cputime() - t
self.solutionTime = 0.0
self.sol = None
self.L = self.U = None
return
def solve(self, rhs, transpose = False):
"""
Solve the linear system ``A x = rhs``, where ``A`` is the input matrix
and ``rhs`` is a Numpy vector of appropriate dimension. The result is
placed in the :attr:`sol` member of the class instance.
If the optional argument ``transpose`` is ``True``, the transpose system
``A^T x = rhs`` is solved.
"""
if self.sol is None: self.sol = numpy.empty(self.ncol, self.type)
transp = 'N'
if transpose: transp = 'T'
t = cputime()
self.LU.solve(rhs, self.sol, transp)
self.solutionTime = cputime() - t
return
def fetch_lunz(self):
"""
Retrieve the number of nonzeros in the factors L and U together. The
result is stored in the member :attr:`lunz` of the class instance.
"""
self.lunz = self.LU.nnz
def fetch_factors(self):
"""
Not yet available.
"""
raise NotImplementedError
|
[
"regmisk@gmail.com"
] |
regmisk@gmail.com
|
5e3f127541774244db776a2ad4ca2f70cd1d14b9
|
dfdbc9118742bc09d7c7fe6fe42f53b7d1d7977a
|
/spacegame_ii/parralax.py
|
3ef67de9c8ce5c05576be44f185131d62e7cf4e6
|
[] |
no_license
|
602p/spacegame
|
d1c3a34233ed7c7128d5cbe4c470c168a84700ac
|
1350beeb6df2b65a0c041f512fa944cbae4dba2b
|
refs/heads/master
| 2021-01-10T19:28:19.220218
| 2015-04-27T20:37:14
| 2015-04-27T20:37:14
| 27,309,904
| 0
| 0
| null | 2015-04-20T15:12:11
| 2014-11-29T17:39:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
from __future__ import division
import pygame, random
import logging
module_logger=logging.getLogger("sg.parralax")
debug, info, warning, error, critical = module_logger.debug, module_logger.info, module_logger.warning, module_logger.error, module_logger.critical
class StarfieldLayer:
    """One parallax starfield layer: ``density`` square particles of a given
    color and size that scroll at 1/``speed`` of the camera movement.

    NOTE(review): this module uses Python 2 tuple parameters
    (``def bind(self, (xsize, ysize))``), so it is Python-2 only.
    """
    def __init__(self, density, color, size, speed):
        self.density=density
        self.speed=speed
        # A single small square surface reused for every star in the layer.
        self.particle_surface=pygame.Surface((size, size))
        self.particle_surface.fill(color)
    def bind(self, (xsize, ysize)):
        # (Re)attach the layer to a drawing area of the given size.
        self.particles=[]
        self.size=(xsize, ysize)
    def render(self, surface, (xpos, ypos)):
        # Draw all stars. Seeding with a fixed value (413) makes star
        # positions identical every frame; the caller's RNG state is
        # saved and restored so gameplay randomness is unaffected.
        i=0
        state=random.getstate()
        random.seed(413)
        while i!=self.density:
            # Reproducible base position for star number i.
            pos=(random.uniform(0, self.size[0]), random.uniform(0, self.size[1]))
            surface.blit(self.particle_surface,
                (
                    # Offset by the camera position scaled down by the layer
                    # speed, wrapped back into the bound area.
                    int((((xpos+pos[0])/self.speed)%self.size[0])),
                    int((((ypos+pos[1])/self.speed)%self.size[1]))
                )
            )
            i+=1
        random.setstate(state)
class ParralaxStarfieldScroller:
    """Scrolls a stack of StarfieldLayer objects against one shared camera
    position, producing a parallax depth effect."""
    def __init__(self, size, layers):
        self.layers=layers
        # Camera position shared by all layers; mutated in place by move().
        self.pos=[0,0]
        self.bindall(size)
        debug("Initilized ParralaxStarfieldScroller with "+str(len(layers))+" layers at []:"+str(size))
    def bindall(self, size):
        # Attach every layer to the same screen area.
        for layer in self.layers:
            layer.bind(size)
    def render(self, surface):
        # Draw layers in list order (back to front).
        for layer in self.layers:
            layer.render(surface, self.pos)
    def move(self, x, y):
        # Shift the camera by a relative offset.
        self.pos[0]+=x
        self.pos[1]+=y
    def move_to(self, x, y):
        # Jump the camera to an absolute position.
        self.pos[0]=x
        self.pos[1]=y
|
[
"louis@goessling.com"
] |
louis@goessling.com
|
fe3657f499c9f10b71a26ef58326f2f95d5634cb
|
6c4486ab599fd5dea9006e41cdb89db54b47b77c
|
/tests/products/NGP_OBLIC_Create_Ballpark.py
|
92f3d21ced7693e315f188073ea8a95c59c80077
|
[] |
no_license
|
kenito2050/Python-Page-Object-Framework-Example
|
28ba61cdc1498374be4fc088a1348e0acb754dc2
|
2a3a3e6c74dc7ec7c9acce41030e9487925b9b0c
|
refs/heads/master
| 2020-04-02T15:52:24.286208
| 2018-10-25T01:46:24
| 2018-10-25T01:46:24
| 154,587,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,830
|
py
|
from xml.etree import ElementTree as ET
import xlrd
import time
from pages.producer_center.ballpark.ballpark_Indication import BallPark_Indication
from pages.producer_center.ballpark.ballpark_PAF import BallPark_PAF
from pages.producer_center.ballpark.ballpark_download_send import BallPark_Download_Send
from pages.producer_center.products_programs_page import ProductsAndPrograms
from pages.service_center.agents_page import AgentsPage
from pages.service_center.login_page import LoginPage
from pages.service_center.navigation_bar import NavigationBar
from utilities.Environments.Environments import Environments
from utilities.state_capitals.state_capitals import StateCapitals
from utilities.zip_codes_state_capitals.zip_codes import ZipCodes
from utilities.Faker.Data_Generator import Data_Generator
from utilities.Date_Time_Generator.Date_Time_Generator import Date_Time_Generator
from config_globals import *
class TestCreateQuote():
    """End-to-end UI test: log in to the Service Center, impersonate an
    agent, and produce an NGP OBLIC "Ballpark" indication.

    Scenario data is read from the 4th worksheet of
    ``utilities/Excel_Sheets/Products/NGP_OBLIC.xlsx``.
    """

    def test_login_search_for_agent_create_quote(self, browser, env):
        # Workbook (product) name under utilities/Excel_Sheets/Products.
        Product = "NGP_OBLIC"
        driver = browser

        ## Directory Locations
        tests_directory = ROOT_DIR / 'tests'
        framework_directory = ROOT_DIR
        config_file_directory = CONFIG_PATH
        test_case_directory = framework_directory / 'utilities' / 'Excel_Sheets' / 'Products'
        test_results_directory = framework_directory / 'utilities' / 'Excel_Sheets' / 'Test_Results'

        # Scenario fields read from the worksheet.
        # NOTE(review): these globals appear to only be read locally in this
        # method — confirm nothing else depends on them before removing.
        global test_summary
        global test_scenario
        global effective_date
        global contract_class
        global agent
        global state
        global revenue
        global total_num_records
        global _OLD_scenario
        global limit
        global deductible

        # Open Test Scenario Workbook; Instantiate worksheet object
        # 0 - First Worksheet
        # 1 - Second Worksheet...etc
        wb = xlrd.open_workbook(str(test_case_directory / Product) + '.xlsx')
        sh = wb.sheet_by_index(3)

        ## Begin For Loop to iterate through Test Scenarios
        i = 1
        rows = sh.nrows
        empty_cell = False
        for i in range(1, sh.nrows):
            cell_val = sh.cell(i, 0).value
            if cell_val == '':
                # If Cell Value is empty, set empty_cell to True
                empty_cell = True
            else:
                # If Cell Value is NOT empty, set empty_cell to False
                empty_cell = False

            # Check to see if cell is NOT empty
            # If cell is not empty, read in the values
            if empty_cell == False:
                test_summary = sh.cell_value(i, 0)
                test_scenario = str(round(sh.cell_value(i, 1)))
                effective_date = sh.cell_value(i, 2)
                contract_class = sh.cell_value(i, 3)
                agent = sh.cell_value(i, 4)
                state = sh.cell_value(i, 5)
                revenue = str(round(sh.cell_value(i, 6)))
                total_num_records = (sh.cell_value(i, 7))
                _OLD_scenario = sh.cell_value(i, 8)
                limit = sh.cell_value(i, 9)
                deductible = sh.cell_value(i, 10)
            # Else, the cell is empty
            # End the Loop
            else:
                break

        # Create Instance of Data Generator
        dg = Data_Generator()
        # Create Company Name Value
        company_name_string = dg.create_full_company_name()
        # Create Street Address Value
        address_value = dg.create_street_address()
        city = StateCapitals.return_state_capital(state)
        postal_code = ZipCodes.return_zip_codes(state)

        # Create Instance of Date Time Generator
        dtg = Date_Time_Generator()
        # Create Today's Date
        date_today = dtg.return_date_today()

        # Access XML to retrieve login credentials
        tree = ET.parse(str(config_file_directory / 'resources.xml'))
        login_credentials = tree.getroot()
        username = (login_credentials[1][0].text)
        password = (login_credentials[1][1].text)

        ## Test Environment
        ## Select Appropriate URL based on the Environment Value (env)
        baseURL = Environments.return_environments(env)

        # Maximize Window; Launch URL
        driver.get(baseURL)
        driver.implicitly_wait(3)

        # Call Login methods from Pages.home.login_page.py
        lp = LoginPage(driver)
        lp.login(username, password)
        lp.click_login_button()

        nb = NavigationBar(driver)
        nb.click_agents()

        ap = AgentsPage(driver)
        ap.search_for_agent(agent)
        ap.click_submit_new_application_as_agent()

        pp = ProductsAndPrograms(driver)
        pp.click_ballpark()

        bp_PAF = BallPark_PAF(driver)
        # The Ballpark flow opens in a separate browser window.
        bp_PAF.switch_windows()
        bp_PAF.start_ballpark_enter_faker_company_name_valid_zip(company_name_string, postal_code)
        bp_PAF.select_contract_class(contract_class)
        bp_PAF.click_ballpark_button()
        bp_PAF.select_NGP_OBLIC()
        time.sleep(3)

        # Enter Ad Hoc Effective Date
        # bp_PAF.enter_effective_date(ad_hoc_effectiveDate)
        # Enter Today's Date as Effective Date
        bp_PAF.enter_current_date(date_today)
        time.sleep(3)
        bp_PAF.enter_revenue(revenue)
        bp_PAF.click_ballpark_button()

        bp_Indication = BallPark_Indication(driver)
        bp_Indication.click_Download_Send_Indication()

        bp_Download_Send = BallPark_Download_Send(driver)
        bp_Download_Send.input_email()
        bp_Download_Send.click_send_email()

        # Close Ballpark Window
        driver.close()
        # Switch to First Window (Service Center)
        driver.switch_to.window(driver.window_handles[0])
        # Wait
        driver.implicitly_wait(3)
        # Close Browser
        driver.quit()
|
[
"kvillarruel@nasinsurance.com"
] |
kvillarruel@nasinsurance.com
|
ce8bc8e8c5475a8d2ed27e43305f5fd02ca3d509
|
da481ac79daaa68df0219e92b0d80a1a7aed1bf6
|
/python/example/run_all_tests.py
|
747477c1ca24629cf95203798b913aa2369a40a8
|
[] |
no_license
|
PeterZhouSZ/diff_pd
|
5ebad8f8d361a3ac4b8c7fb25c923b85e01c8a31
|
45bf74bc1b601d0aba7cc5becc3d2582136b5c0b
|
refs/heads/master
| 2023-07-06T10:56:59.660391
| 2021-08-13T04:21:07
| 2021-08-13T04:21:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
import sys
sys.path.append('../')
from importlib import import_module
from py_diff_pd.common.common import print_ok, print_error
# Entry point: run every registered smoke/numerical test and print a summary.
# Exits non-zero if any test fails so CI can detect the failure.
if __name__ == '__main__':
    # If you want to add a new test, simply add its name here --- you can find their names from README.md.
    tests = [
        # Utilities.
        'render_quad_mesh',
        # Numerical check.
        'actuation_2d',
        'actuation_3d',
        'collision_2d',
        'deformable_backward_2d',
        'deformable_backward_3d',
        'deformable_quasi_static_3d',
        'pd_energy_2d',
        'pd_energy_3d',
        'pd_forward',
        'state_force_2d',
        'state_force_3d'
    ]
    failure_cnt = 0
    for name in tests:
        # By convention each test module <name> defines test_<name>(verbose=...)
        # that returns True on success.
        test_func_name = 'test_{}'.format(name)
        module_name = name
        test_func = getattr(import_module(module_name), test_func_name)
        if test_func(verbose=False):
            print_ok('[{}] PASSED.'.format(name))
        else:
            print_error('[{}] FAILED.'.format(name))
            failure_cnt += 1
    print('{}/{} tests failed.'.format(failure_cnt, len(tests)))
    if failure_cnt > 0:
        # Non-zero exit status marks the whole run as failed.
        sys.exit(-1)
|
[
"taodu@csail.mit.edu"
] |
taodu@csail.mit.edu
|
2958eecfff5d168b9defe4a24e536c251c58de46
|
db331fb24e5b95131413c8d5cebc880674dd30f7
|
/foundation/migrations/0009_title_img2.py
|
d49553924ebcef455145509d19762fb843f2a4e0
|
[] |
no_license
|
kmvit/personalsite
|
78fb18f428e3c95219d9145cb22bdefdf8896bf9
|
5da20e853055affdad4e6d21f36bae6cfe7507b7
|
refs/heads/master
| 2021-06-18T13:34:03.800622
| 2017-04-07T12:40:24
| 2017-04-07T12:40:24
| 27,366,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the optional image field ``img2`` to the ``Title`` model."""

    dependencies = [
        ('foundation', '0008_auto_20141201_2156'),
    ]

    operations = [
        migrations.AddField(
            model_name='title',
            name='img2',
            # upload_to is a bytes literal (b'media') — Python 2 era migration.
            field=models.ImageField(null=True, upload_to=b'media', blank=True),
            preserve_default=True,
        ),
    ]
|
[
"kmv-it@yandex.ru"
] |
kmv-it@yandex.ru
|
79b729366e1e6dd4733ec4c9c4f4199ddbfb9d6a
|
7b91755b1c777248050f3cadf23ed34d1f10adef
|
/Section3/14.py
|
86af53d07f05b5c224b8112a78a39b964bf71c05
|
[] |
no_license
|
JAntonioMarin/PythonBootcamp
|
ef2976be0204df44e0c56a521628c73a5d274008
|
6e4af15b725913d3fda60599792e17d7d43d61d2
|
refs/heads/master
| 2021-01-06T16:52:57.537678
| 2020-03-26T17:36:50
| 2020-03-26T17:36:50
| 241,406,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
# Basic string printing demos: quoting styles, escape sequences, and len().
print('hello')
print("world")
print('this is also a string')
print(" I'm going on a run ")
print('hello \nworld')  # \n inserts a newline
print('hello \tworld')  # \t inserts a tab
print(len('hello'))  # 5 characters
print(len('I am'))   # 4 characters (the space counts)

# Coding Exercise 2: Quick Print Check
print("Hello World")
|
[
"avalanch.psp@gmail.com"
] |
avalanch.psp@gmail.com
|
9591b223d52310912c74667e3148d1398f10c830
|
124fe233f9cc86898756b3c0fc2988c69001f670
|
/tests/test_stat.py
|
212f9eec83cf924d08398be7ee3032045c77594c
|
[
"MIT"
] |
permissive
|
jon-rd/jc
|
0c4e043ccc40fdf7a0771d48cec4d86321fc4588
|
1e18dd30a824b0463f0cad86e0da7094c47d34f9
|
refs/heads/master
| 2023-04-16T01:39:16.961164
| 2021-03-05T19:50:37
| 2021-03-05T19:50:37
| 351,145,638
| 0
| 0
|
MIT
| 2021-04-14T14:13:28
| 2021-03-24T16:14:42
| null |
UTF-8
|
Python
| false
| false
| 2,659
|
py
|
import os
import json
import unittest
import jc.parsers.stat
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
    """Fixture-driven regression tests for ``jc.parsers.stat``.

    Each platform has a pair of fixture files: the raw ``stat`` command
    output (``stat.out``) and the expected parsed result (``stat.json``).
    """

    def setUp(self):
        # input: raw `stat` output captured on each platform
        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/stat.out'), 'r', encoding='utf-8') as f:
            self.centos_7_7_stat = f.read()

        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/stat.out'), 'r', encoding='utf-8') as f:
            self.ubuntu_18_4_stat = f.read()

        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/stat.out'), 'r', encoding='utf-8') as f:
            self.osx_10_14_6_stat = f.read()

        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/freebsd12/stat.out'), 'r', encoding='utf-8') as f:
            self.freebsd12_stat = f.read()

        # output: expected parsed JSON for each capture above
        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/stat.json'), 'r', encoding='utf-8') as f:
            self.centos_7_7_stat_json = json.loads(f.read())

        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/stat.json'), 'r', encoding='utf-8') as f:
            self.ubuntu_18_4_stat_json = json.loads(f.read())

        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/stat.json'), 'r', encoding='utf-8') as f:
            self.osx_10_14_6_stat_json = json.loads(f.read())

        with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/freebsd12/stat.json'), 'r', encoding='utf-8') as f:
            self.freebsd12_stat_json = json.loads(f.read())

    def test_stat_nodata(self):
        """
        Test 'stat' with no data
        """
        self.assertEqual(jc.parsers.stat.parse('', quiet=True), [])

    def test_stat_centos_7_7(self):
        """
        Test 'stat /bin/*' on Centos 7.7
        """
        self.assertEqual(jc.parsers.stat.parse(self.centos_7_7_stat, quiet=True), self.centos_7_7_stat_json)

    def test_stat_ubuntu_18_4(self):
        """
        Test 'stat /bin/*' on Ubuntu 18.4
        """
        self.assertEqual(jc.parsers.stat.parse(self.ubuntu_18_4_stat, quiet=True), self.ubuntu_18_4_stat_json)

    def test_stat_osx_10_14_6(self):
        """
        Test 'stat /foo/*' on OSX 10.14.6
        """
        self.assertEqual(jc.parsers.stat.parse(self.osx_10_14_6_stat, quiet=True), self.osx_10_14_6_stat_json)

    def test_stat_freebsd12(self):
        """
        Test 'stat /foo/*' on FreeBSD12
        """
        self.assertEqual(jc.parsers.stat.parse(self.freebsd12_stat, quiet=True), self.freebsd12_stat_json)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
[
"kellyjonbrazil@gmail.com"
] |
kellyjonbrazil@gmail.com
|
3e8474ef201455aa5739a31fe47a4749d1b82850
|
e6f4e3afd16a7ee5c7a8fb61f7ed697ce88ef4c4
|
/Pro2/abcpp/abcpp/treeplot.py
|
c8a4ebaadf40cfd0ca32ddbb58e1a29142193621
|
[] |
no_license
|
xl0418/Code
|
01b58d05f7fae1a5fcfec15894ce0ed8c833fd1a
|
75235b913730714d538d6d822a99297da54d3841
|
refs/heads/master
| 2021-06-03T21:10:31.578731
| 2020-11-17T07:50:48
| 2020-11-17T07:50:48
| 136,896,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,549
|
py
|
import sys, os
import platform
# Make the project modules importable from the machine-specific checkout path.
if platform.system() == 'Windows':
    sys.path.append('C:/Liang/abcpp_master8/abcpp')
elif platform.system() == 'Darwin':
    sys.path.append('/Users/dudupig/Documents/GitHub/Code/Pro2/Python_p2')
from dvtraitsim_py import DVSim
from dvtraitsim_shared import DVTreeData, DVParam
import numpy as np
import matplotlib.pyplot as plt

# Simulation constants.
theta = 0  # optimum of natural selection
r = 1  # growth rate
Vmax = 1
scalar = 10000
K = 10e8
nu = 1 / (100 * K)
timegap = 100  # plot every timegap-th generation

# let's try to find a true simulation:
# trait evolution plot
# For each example tree, simulate trait evolution over a grid of
# (gamma, alpha) parameter pairs (upper triangle only) and save three
# figure grids: trait mean, population size and trait variance.
for no_tree in range(4, 23):
    gamma_vec = np.array([0, 0.001, 0.01, 0.1, 0.5, 1])
    a_vec = gamma_vec
    row_gamma = len(gamma_vec)
    count = 0
    tree = 'tree' + '%d' % no_tree
    example = 'example' + '%d' % no_tree
    if platform.system() == 'Windows':
        dir_path = 'c:/Liang/Googlebox/Research/Project2'
        files = dir_path + '/treesim_newexp/' + example + '/'
        td = DVTreeData(path=files, scalar=scalar)
    elif platform.system() == 'Darwin':
        # NOTE(review): on Darwin only `file` is assigned and `td` is never
        # created, so the code below would raise NameError — confirm.
        file = '/Users/dudupig/Documents/GitHub/Code/Pro2/abcpp/tree_data/' + example + '/'
    f1, axes1 = plt.subplots(row_gamma, row_gamma, figsize=(9, 9), sharey=True, sharex=True)  #
    f2, axes2 = plt.subplots(row_gamma, row_gamma, figsize=(9, 9), sharey=True, sharex=True)  #
    f3, axes3 = plt.subplots(row_gamma, row_gamma, figsize=(9, 9), sharey=True, sharex=True)  #
    label_a = (['$\\alpha$=0', '$\\alpha$=.001', '$\\alpha$=.01', '$\\alpha$=.1', '$\\alpha$=.5', '$\\alpha$=1'])
    label_gamma = (['$\gamma$=0', '$\gamma$=.001', '$\gamma$=.01', '$\gamma$=.1', '$\gamma$=.5', '$\gamma$=1'])
    xticks = (0, td.evo_time * scalar / timegap)
    xlabels = ['0', '150K']
    for index_g in range(len(gamma_vec)):
        gamma1 = gamma_vec[index_g]
        for index_a in range(len(a_vec)):
            a = a_vec[index_a]
            print(count)
            if index_a >= index_g:
                # Retry the stochastic simulation (up to 100 replicates)
                # until one survives to the full evolution time.
                for replicate in range(100):
                    obs_param = DVParam(gamma=gamma1, a=a, K=K, nu=nu, r=r, theta=theta, Vmax=1, inittrait=0, initpop=500,
                                        initpop_sigma=10.0, break_on_mu=False)
                    simresult = DVSim(td, obs_param)
                    if simresult['sim_time'] == td.sim_evo_time:
                        pic = 0
                        break
                    else:
                        pic = 1
                # if pic==0:
                evo_time, total_species = simresult['N'].shape
                evo_time = evo_time - 1
                trait_RI_dr = simresult['Z']
                population_RI_dr = simresult['N']
                population_RI_dr = population_RI_dr.astype(float)
                # Hide extinct lineages (population 0) from the plots.
                population_RI_dr[population_RI_dr == 0] = np.nan
                V_dr = simresult['V']
                num_lines = total_species
                x = np.arange(evo_time / timegap + 1)
                labels = []
                for i in range(1, num_lines + 1):
                    axes1[index_g, index_a].plot(x, trait_RI_dr[::timegap, i - 1])
                    axes2[index_g, index_a].plot(x, population_RI_dr[::timegap, i - 1])
                    axes3[index_g, index_a].plot(x, V_dr[::timegap, i - 1])
                axes1[index_g, index_a].set_xticks(xticks)
                axes1[index_g, index_a].set_xticklabels(xlabels, minor=False)
                axes2[index_g, index_a].set_xticks(xticks)
                axes2[index_g, index_a].set_xticklabels(xlabels, minor=False)
                axes3[index_g, index_a].set_xticks(xticks)
                axes3[index_g, index_a].set_xticklabels(xlabels, minor=False)
                if count in range(0, row_gamma):
                    # Top row of panels: label with the alpha value.
                    axes1[index_g, index_a].title.set_text(label_a[count])
                    axes2[index_g, index_a].title.set_text(label_a[count])
                    axes3[index_g, index_a].title.set_text(label_a[count])
                if count in ([5, 11, 17, 23, 29, 35]):
                    # Right-most column of panels: label with the gamma value.
                    axes1[index_g, index_a].set_ylabel(label_gamma[int(count / row_gamma)])
                    axes1[index_g, index_a].yaxis.set_label_position("right")
                    axes2[index_g, index_a].set_ylabel(label_gamma[int(count / row_gamma)])
                    axes2[index_g, index_a].yaxis.set_label_position("right")
                    axes3[index_g, index_a].set_ylabel(label_gamma[int(count / row_gamma)])
                    axes3[index_g, index_a].yaxis.set_label_position("right")
            else:
                # Lower triangle of the grid: leave the panel empty.
                axes1[index_g, index_a].plot([])
                axes2[index_g, index_a].plot([])
                axes3[index_g, index_a].plot([])
                axes1[index_g, index_a].axis('off')
                axes2[index_g, index_a].axis('off')
                axes3[index_g, index_a].axis('off')
            count += 1
    dir_fig = 'C:/Liang/Googlebox/Research/Project2/smc_treeuppertri/' + tree
    f1.text(0.84, 0.04, 'Generation', ha='center', fontsize=15)
    f1.text(0.04, 0.84, 'Trait mean', va='center', rotation='vertical', fontsize=15)
    f2.text(0.84, 0.04, 'Generation', ha='center', fontsize=15)
    f2.text(0.04, 0.84, 'Population size', va='center', rotation='vertical', fontsize=15)
    f3.text(0.84, 0.04, 'Generation', ha='center', fontsize=15)
    f3.text(0.04, 0.84, 'Trait variance', va='center', rotation='vertical', fontsize=15)
    f1.savefig(dir_fig + 'TP.png')
    plt.close(f1)
    f2.savefig(dir_fig + 'NP.png')
    plt.close(f2)
    f3.savefig(dir_fig + 'VP.png')
    plt.close(f3)
    plt.close('all')
|
[
"xl0418@gmail.com"
] |
xl0418@gmail.com
|
a823d40e45f948799c58daf72f5a9314e5da1b4c
|
25e481ef7fba79285f4c8a7fa2e81c8b2b7f9cce
|
/saleor/core/permissions.py
|
093869518cad2dc41e345780942a66464273b61a
|
[
"BSD-2-Clause"
] |
permissive
|
arslanahmd/Ghar-Tameer
|
59e60def48a14f9452dfefe2edf30e362878191d
|
72401b2fc0079e6d52e844afd8fcf57122ad319f
|
refs/heads/master
| 2023-01-31T04:08:26.288332
| 2018-06-07T18:02:01
| 2018-06-07T18:02:01
| 136,231,127
| 0
| 0
|
NOASSERTION
| 2023-01-11T22:21:42
| 2018-06-05T20:28:11
|
Python
|
UTF-8
|
Python
| false
| false
| 957
|
py
|
from django.contrib.auth.models import Permission
# Flat list of '<app_label>.<codename>' permission names that make up the
# staff/dashboard permission set. Only the codename part (after the dot)
# is used for the database lookup in get_permissions() below.
MODELS_PERMISSIONS = [
    'order.view_order',
    'order.edit_order',
    'product.view_category',
    'product.edit_category',
    'product.view_product',
    'product.edit_product',
    'product.view_properties',
    'product.edit_properties',
    'product.view_stock_location',
    'product.edit_stock_location',
    'sale.view_sale',
    'sale.edit_sale',
    'shipping.view_shipping',
    'shipping.edit_shipping',
    'site.edit_settings',
    'site.view_settings',
    'user.view_user',
    'user.edit_user',
    'user.view_group',
    'user.edit_group',
    'user.view_staff',
    'user.edit_staff',
    'user.impersonate_user',
    'voucher.view_voucher',
    'voucher.edit_voucher',
]
def get_permissions():
    """Return the Permission queryset matching MODELS_PERMISSIONS.

    Only the codename portion (after the '.') of each entry is used for the
    lookup; the related content types are prefetched so callers can display
    them without extra queries.
    """
    wanted_codenames = [entry.partition('.')[2] for entry in MODELS_PERMISSIONS]
    queryset = Permission.objects.filter(codename__in=wanted_codenames)
    return queryset.prefetch_related('content_type')
|
[
"arslanahmad085@gmail.com"
] |
arslanahmad085@gmail.com
|
9a9f2f15e46689e698a6cf1af6e148036e507196
|
0444e53f4908454e2e8ab9f70877ec76be9f872c
|
/reportng/migrations/0025_auto_20161204_2336.py
|
76552396407602970196277f9a8d5e618d06029d
|
[
"MIT"
] |
permissive
|
dedayoa/keepintouch
|
c946612d6b69a20a92617354fef2ce6407382be5
|
2551fc21bb1f6055ab0fc2ec040c6ba874c2838f
|
refs/heads/master
| 2020-03-22T12:53:44.488570
| 2018-08-17T13:00:51
| 2018-08-17T13:00:51
| 140,069,262
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-12-04 22:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``EmailDeliveryReport.sent_at`` nullable with verbose name 'Sent'."""

    dependencies = [
        ('reportng', '0024_auto_20161204_2123'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emaildeliveryreport',
            name='sent_at',
            field=models.DateTimeField(null=True, verbose_name='Sent'),
        ),
    ]
|
[
"dedayoa@gmail.com"
] |
dedayoa@gmail.com
|
f6200c41fbfc26030afb9f8196b7c6948fbc18be
|
ecf0d106831b9e08578845674a457a166b6e0a14
|
/OOP/inheritance_EXERCISE/restaurant/project/food/dessert.py
|
6f120937221db42a44dd44819bf082a0f5df016c
|
[] |
no_license
|
ivo-bass/SoftUni-Solutions
|
015dad72cff917bb74caeeed5e23b4c5fdeeca75
|
75612d4bdb6f41b749e88f8d9c512d0e00712011
|
refs/heads/master
| 2023-05-09T23:21:40.922503
| 2021-05-27T19:42:03
| 2021-05-27T19:42:03
| 311,329,921
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from project.food.food import Food
class Dessert(Food):
    """A dessert menu item: a Food with an additional calorie count."""

    def __init__(self, name: str, price: float, grams: float, calories: float):
        super().__init__(name, price, grams)
        # Name-mangled (double underscore) to keep the attribute private;
        # read access is provided via the `calories` property.
        self.__calories = calories

    @property
    def calories(self) -> float:
        # Read-only access to the calorie count.
        return self.__calories
|
[
"ivailo.ignatoff@gmail.com"
] |
ivailo.ignatoff@gmail.com
|
b0dd547c60357b7f4540d9f1064f30da3da49bfb
|
3b89c0a97ac6b58b6923a213bc8471e11ad4fe69
|
/python/CodingExercises/Median.py
|
9fca643773716391f6ec77c8b200529cd534e8b8
|
[] |
no_license
|
ksayee/programming_assignments
|
b187adca502ecf7ff7b51dc849d5d79ceb90d4a6
|
13bc1c44e1eef17fc36724f20b060c3339c280ea
|
refs/heads/master
| 2021-06-30T07:19:34.192277
| 2021-06-23T05:11:32
| 2021-06-23T05:11:32
| 50,700,556
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
def Median(ary):
    """Return the median of a list of numbers.

    Fixes over the previous version: the input list is no longer mutated
    (a sorted copy is used instead of in-place ``sort()``), the stray
    debug ``print`` has been removed, and an empty input now raises a
    clear ``ValueError`` instead of an ``IndexError``.

    Args:
        ary: a non-empty list (or sequence) of numbers.

    Returns:
        The middle element for an odd-length input, or the mean of the
        two middle elements (a float) for an even-length input.

    Raises:
        ValueError: if ``ary`` is empty.
    """
    if not ary:
        raise ValueError("Median() arg is an empty sequence")
    ordered = sorted(ary)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        # Odd count: the single middle element is the median.
        return ordered[mid]
    # Even count: average the two middle elements.
    return (ordered[mid - 1] + ordered[mid]) / 2
def main():
    """Exercise Median() on an even-length and an odd-length sample."""
    even_sample = [5, 89, 20, 64, 20, 45]
    print(Median(even_sample))
    odd_sample = [5, 89, 20, 64, 20, 45, 45, 23, 67, 32, 30]
    print(Median(odd_sample))
# Run the demo only when executed as a script (not on import).
if __name__=='__main__':
    main()
|
[
"kartiksayee@gmail.com"
] |
kartiksayee@gmail.com
|
66598ec0257be4760b1dc38c8228fa5050235c13
|
6929a33a7259dad9b45192ca088a492085ed2953
|
/solutions/0062-unique-paths/unique-paths.py
|
1ac2c0fb3f2cb34e18806ccd977c68cdb4bb37c1
|
[] |
no_license
|
moqi112358/leetcode
|
70366d29c474d19c43180fd4c282cc02c890af03
|
fab9433ff7f66d00023e3af271cf309b2d481722
|
refs/heads/master
| 2022-12-10T01:46:14.799231
| 2021-01-14T05:00:09
| 2021-01-14T05:00:09
| 218,163,960
| 3
| 0
| null | 2022-07-06T20:26:38
| 2019-10-28T23:26:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,747
|
py
|
# A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
# The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
#
# How many possible unique paths are there?
#
#
# Example 1:
#
#
# Input: m = 3, n = 7
# Output: 28
#
#
# Example 2:
#
#
# Input: m = 3, n = 2
# Output: 3
# Explanation:
# From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:
# 1. Right -> Down -> Down
# 2. Down -> Down -> Right
# 3. Down -> Right -> Down
#
#
# Example 3:
#
#
# Input: m = 7, n = 3
# Output: 28
#
#
# Example 4:
#
#
# Input: m = 3, n = 3
# Output: 6
#
#
#
# Constraints:
#
#
# 1 <= m, n <= 100
# It's guaranteed that the answer will be less than or equal to 2 * 109.
#
#
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count the distinct monotone paths through an m-by-n grid.

        The robot starts in the top-left corner, may only move right or
        down, and must reach the bottom-right corner.

        Args:
            m: number of columns (1 <= m <= 100).
            n: number of rows (1 <= n <= 100).

        Returns:
            The number of unique paths.
        """
        # Rolling one-row DP: row[j] is the number of paths reaching cell
        # (current_row, j). This replaces the previous O(m*n) table (and the
        # dead, commented-out exponential DFS) with O(m) memory.
        row = [1] * m  # first grid row: exactly one way to reach each cell
        for _ in range(1, n):
            for j in range(1, m):
                # Paths into (i, j) = paths from above + paths from the left.
                row[j] += row[j - 1]
        return row[m - 1]
|
[
"983028670@qq.com"
] |
983028670@qq.com
|
8c987f3e7fcca91265fa510ed17ea01cb6f62bab
|
b2487a96bb865cfa0d1906c4e66a4aea9b613ce0
|
/pynfb/setup.py
|
fa813e333cb6ac1dadf94028ece66e8e86523568
|
[] |
no_license
|
gurasog/nfb
|
1ccbbc7d507525cff65f5d5c756afd98ad86a7c6
|
9ff2c736c5d764f48d921bad3942c4db93390a5d
|
refs/heads/master
| 2022-12-14T23:06:56.523586
| 2020-09-07T07:36:01
| 2020-09-07T07:36:01
| 273,970,746
| 0
| 0
| null | 2020-06-21T19:30:38
| 2020-06-21T19:30:37
| null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from cx_Freeze import setup, Executable
# Build a frozen (standalone) executable of pynfb via cx_Freeze:
#   python setup.py build
setup(
    name = "pynfb",
    version = "0.1",
    description = "Python NFB",
    executables = [Executable("main.py")]
)
|
[
"smtnm@ya.ru"
] |
smtnm@ya.ru
|
65ba12265ec9faffe3d18ec718e81ba5893e0a1e
|
d659810b24ebc6ae29a4d7fbb3b82294c860633a
|
/aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/ScanCodeNotificationRequest.py
|
5835e1a20aefefe670ace4df8bd671a5268d659a
|
[
"Apache-2.0"
] |
permissive
|
leafcoder/aliyun-openapi-python-sdk
|
3dd874e620715173b6ccf7c34646d5cb8268da45
|
26b441ab37a5cda804de475fd5284bab699443f1
|
refs/heads/master
| 2023-07-31T23:22:35.642837
| 2021-09-17T07:49:51
| 2021-09-17T07:49:51
| 407,727,896
| 0
| 0
|
NOASSERTION
| 2021-09-18T01:56:10
| 2021-09-18T01:56:09
| null |
UTF-8
|
Python
| false
| false
| 5,343
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ScanCodeNotificationRequest(RpcRequest):
    """RPC request 'ScanCodeNotification' of the UniMkt product
    (API version 2018-12-12), sent over HTTPS POST.

    Auto-generated SDK style: every query parameter gets a get_/set_
    accessor pair that reads/writes the request's query-parameter dict.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ScanCodeNotification')
        self.set_protocol_type('https')
        self.set_method('POST')

        # Route the request to region-specific endpoints when available.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # --- query-parameter accessors (one get/set pair per API parameter) ---

    def get_RealCostAmount(self):
        return self.get_query_params().get('RealCostAmount')

    def set_RealCostAmount(self,RealCostAmount):
        self.add_query_param('RealCostAmount',RealCostAmount)

    def get_SalePrice(self):
        return self.get_query_params().get('SalePrice')

    def set_SalePrice(self,SalePrice):
        self.add_query_param('SalePrice',SalePrice)

    def get_CommodityId(self):
        return self.get_query_params().get('CommodityId')

    def set_CommodityId(self,CommodityId):
        self.add_query_param('CommodityId',CommodityId)

    def get_HolderId(self):
        return self.get_query_params().get('HolderId')

    def set_HolderId(self,HolderId):
        self.add_query_param('HolderId',HolderId)

    def get_DeviceType(self):
        return self.get_query_params().get('DeviceType')

    def set_DeviceType(self,DeviceType):
        self.add_query_param('DeviceType',DeviceType)

    def get_DeviceCode(self):
        return self.get_query_params().get('DeviceCode')

    def set_DeviceCode(self,DeviceCode):
        self.add_query_param('DeviceCode',DeviceCode)

    def get_ApplyPrice(self):
        return self.get_query_params().get('ApplyPrice')

    def set_ApplyPrice(self,ApplyPrice):
        self.add_query_param('ApplyPrice',ApplyPrice)

    def get_TaskId(self):
        return self.get_query_params().get('TaskId')

    def set_TaskId(self,TaskId):
        self.add_query_param('TaskId',TaskId)

    def get_OuterCode(self):
        return self.get_query_params().get('OuterCode')

    def set_OuterCode(self,OuterCode):
        self.add_query_param('OuterCode',OuterCode)

    def get_QueryStr(self):
        return self.get_query_params().get('QueryStr')

    def set_QueryStr(self,QueryStr):
        self.add_query_param('QueryStr',QueryStr)

    def get_Phase(self):
        return self.get_query_params().get('Phase')

    def set_Phase(self,Phase):
        self.add_query_param('Phase',Phase)

    def get_BizResult(self):
        return self.get_query_params().get('BizResult')

    def set_BizResult(self,BizResult):
        self.add_query_param('BizResult',BizResult)

    def get_TaskType(self):
        return self.get_query_params().get('TaskType')

    def set_TaskType(self,TaskType):
        self.add_query_param('TaskType',TaskType)

    def get_BrandUserId(self):
        return self.get_query_params().get('BrandUserId')

    def set_BrandUserId(self,BrandUserId):
        self.add_query_param('BrandUserId',BrandUserId)

    def get_Sex(self):
        return self.get_query_params().get('Sex')

    def set_Sex(self,Sex):
        self.add_query_param('Sex',Sex)

    def get_CostDetail(self):
        return self.get_query_params().get('CostDetail')

    def set_CostDetail(self,CostDetail):
        self.add_query_param('CostDetail',CostDetail)

    def get_ProxyUserId(self):
        return self.get_query_params().get('ProxyUserId')

    def set_ProxyUserId(self,ProxyUserId):
        self.add_query_param('ProxyUserId',ProxyUserId)

    def get_AlipayOpenId(self):
        return self.get_query_params().get('AlipayOpenId')

    def set_AlipayOpenId(self,AlipayOpenId):
        self.add_query_param('AlipayOpenId',AlipayOpenId)

    def get_BizType(self):
        return self.get_query_params().get('BizType')

    def set_BizType(self,BizType):
        self.add_query_param('BizType',BizType)

    def get_BrandNick(self):
        return self.get_query_params().get('BrandNick')

    def set_BrandNick(self,BrandNick):
        self.add_query_param('BrandNick',BrandNick)

    def get_V(self):
        return self.get_query_params().get('V')

    def set_V(self,V):
        self.add_query_param('V',V)

    def get_ChargeTag(self):
        return self.get_query_params().get('ChargeTag')

    def set_ChargeTag(self,ChargeTag):
        self.add_query_param('ChargeTag',ChargeTag)

    def get_Age(self):
        return self.get_query_params().get('Age')

    def set_Age(self,Age):
        self.add_query_param('Age',Age)

    def get_ChannelId(self):
        return self.get_query_params().get('ChannelId')

    def set_ChannelId(self,ChannelId):
        self.add_query_param('ChannelId',ChannelId)

    def get_Cid(self):
        return self.get_query_params().get('Cid')

    def set_Cid(self,Cid):
        self.add_query_param('Cid',Cid)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
2bebb31050c1eea8eec0d868ef0ad1ce49749f7d
|
38bee274b237e508175be4c37bf357621ed50195
|
/pde/pdes/allen_cahn.py
|
f1a80a46cb539fbec959f00de2c5a1ab654a3641
|
[
"MIT"
] |
permissive
|
anna-11/py-pde
|
26f0110266fdb21803e665447b1204bedb401d78
|
5b596af5f224e3ec2a7fbea8f87fab3896c19642
|
refs/heads/master
| 2023-06-17T09:13:34.026794
| 2021-07-09T12:10:36
| 2021-07-09T12:10:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,296
|
py
|
"""
A Allen-Cahn equation
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from typing import Callable # @UnusedImport
import numpy as np
from ..fields import ScalarField
from ..grids.boundaries.axes import BoundariesData
from ..tools.docstrings import fill_in_docstring
from ..tools.numba import jit, nb
from .base import PDEBase, expr_prod
class AllenCahnPDE(PDEBase):
    r"""A simple Allen-Cahn equation

    The mathematical definition is

    .. math::
        \partial_t c = \gamma \nabla^2 c - c^3 + c

    where :math:`c` is a scalar field and :math:`\gamma` sets the interfacial
    width.
    """

    # The right-hand side does not depend explicitly on time t.
    explicit_time_dependence = False
    interface_width: float

    @fill_in_docstring
    def __init__(self, interface_width: float = 1, bc: BoundariesData = "natural"):
        """
        Args:
            interface_width (float):
                The diffusivity of the described species
            bc:
                The boundary conditions applied to the field.
                {ARG_BOUNDARIES}
        """
        super().__init__()
        self.interface_width = interface_width
        self.bc = bc

    @property
    def expression(self) -> str:
        """str: the expression of the right hand side of this PDE"""
        return f"{expr_prod(self.interface_width, 'laplace(c)')} - c**3 + c"

    def evolution_rate(  # type: ignore
        self,
        state: ScalarField,
        t: float = 0,
    ) -> ScalarField:
        """evaluate the right hand side of the PDE

        Args:
            state (:class:`~pde.fields.ScalarField`):
                The scalar field describing the concentration distribution
            t (float): The current time point

        Returns:
            :class:`~pde.fields.ScalarField`:
            Scalar field describing the evolution rate of the PDE
        """
        assert isinstance(state, ScalarField), "`state` must be ScalarField"
        # gamma * laplace(c) - c**3 + c, with boundary conditions applied.
        laplace = state.laplace(bc=self.bc, label="evolution rate")
        return self.interface_width * laplace - state ** 3 + state  # type: ignore

    def _make_pde_rhs_numba(  # type: ignore
        self, state: ScalarField
    ) -> Callable[[np.ndarray, float], np.ndarray]:
        """create a compiled function evaluating the right hand side of the PDE

        Args:
            state (:class:`~pde.fields.ScalarField`):
                An example for the state defining the grid and data types

        Returns:
            A function with signature `(state_data, t)`, which can be called
            with an instance of :class:`~numpy.ndarray` of the state data and
            the time to obtained an instance of :class:`~numpy.ndarray` giving
            the evolution rate.
        """
        shape = state.grid.shape
        arr_type = nb.typeof(np.empty(shape, dtype=state.data.dtype))
        signature = arr_type(arr_type, nb.double)

        # Capture the parameter and the grid-specific Laplacian as locals so
        # the jitted closure does not reference `self`.
        interface_width = self.interface_width
        laplace = state.grid.get_operator("laplace", bc=self.bc)

        @jit(signature)
        def pde_rhs(state_data: np.ndarray, t: float) -> np.ndarray:
            """compiled helper function evaluating right hand side"""
            return interface_width * laplace(state_data) - state_data ** 3 + state_data  # type: ignore

        return pde_rhs  # type: ignore
|
[
"david.zwicker@ds.mpg.de"
] |
david.zwicker@ds.mpg.de
|
c2568db3ed9af7c59ba95dc30e283b4abd83d2a1
|
e9adf4bb294b22add02c997f750e36b6fea23cdc
|
/nw/nw_logic/nw_rules_bank.py
|
432c3a0479a0241af1dc0f9ec9c891ae0851037a
|
[
"BSD-3-Clause"
] |
permissive
|
bairoliyaprem/python-rules
|
5aa70dbf6d9efde0d95dda4b6397e36788c22cfa
|
ca7ab0addf60179ea61fddad6cd65c77a6792de1
|
refs/heads/master
| 2022-12-17T22:15:47.745660
| 2020-09-29T00:39:03
| 2020-09-29T00:39:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,863
|
py
|
import sqlalchemy
from sqlalchemy_utils import get_mapper
from python_rules.exec_row_logic.logic_row import LogicRow
from python_rules.rule import Rule
from python_rules.rule_bank.rule_bank import RuleBank
from nw.nw_logic import models
from nw.nw_logic.models import Customer, OrderDetail, Product, Order, Employee
def activate_basic_check_credit_rules():
"""
Issues function calls to activate check credit rules, below.
These rules are executed not now, but on commits
Order is irrelevant - determined by system based on dependency analysis
Their inclusion in classes is for doc / convenience, no semantics
These rules apply to all transactions (automatic re-use), eg.
* place order
* change Order Detail product, quantity
* add/delete Order Detail
* ship / unship order
* delete order
* move order to new customer, etc
"""
def units_in_stock(row: Product, old_row: Product, logic_row: LogicRow):
result = row.UnitsInStock - (row.UnitsShipped - old_row.UnitsShipped)
return result
def congratulate_sales_rep(row: Order, old_row: Order, logic_row: LogicRow):
if logic_row.ins_upd_dlt == "ins" or True: # logic engine fills parents for insert
sales_rep = row.SalesRep # type : Employee
if sales_rep is None:
logic_row.log("no salesrep for this order")
else:
logic_row.log(f'Hi, {sales_rep.Manager.FirstName}, congratulate {sales_rep.FirstName} on their new order')
Rule.constraint(validate=Customer,
as_condition=lambda row: row.Balance <= row.CreditLimit,
error_msg="balance ({row.Balance}) exceeds credit ({row.CreditLimit})")
Rule.sum(derive=Customer.Balance, as_sum_of=Order.AmountTotal,
where=lambda row: row.ShippedDate is None) # *not* a sql select sum...
Rule.sum(derive=Order.AmountTotal, as_sum_of=OrderDetail.Amount)
Rule.formula(derive=OrderDetail.Amount, as_expression=lambda row: row.UnitPrice * row.Quantity)
Rule.copy(derive=OrderDetail.UnitPrice, from_parent=Product.UnitPrice)
Rule.formula(derive=OrderDetail.ShippedDate, as_exp="row.OrderHeader.ShippedDate")
Rule.sum(derive=Product.UnitsShipped, as_sum_of=OrderDetail.Quantity,
where="row.ShippedDate is not None")
Rule.formula(derive=Product.UnitsInStock, calling=units_in_stock)
Rule.commit_row_event(on_class=Order, calling=congratulate_sales_rep)
Rule.count(derive=Customer.UnpaidOrderCount, as_count_of=Order,
where=lambda row: row.ShippedDate is None) # *not* a sql select sum...
Rule.count(derive=Customer.OrderCount, as_count_of=Order)
class InvokePythonFunctions: # use functions for more complex rules, type checking, etc (not used)
@staticmethod
def load_rules(self):
def my_early_event(row, old_row, logic_row):
logic_row.log("early event for *all* tables - good breakpoint, time/date stamping, etc")
def check_balance(row: Customer, old_row, logic_row) -> bool:
"""
Not used... illustrate function alternative (e.g., more complex if/else logic)
specify rule with `calling=check_balance` (instead of as_condition)
"""
return row.Balance <= row.CreditLimit
def compute_amount(row: OrderDetail, old_row, logic_row):
return row.UnitPrice * row.Quantity
Rule.formula(derive="OrderDetail.Amount", calling=compute_amount)
Rule.formula(derive="OrderDetail.Amount", calling=lambda Customer: Customer.Quantity * Customer.UnitPrice)
Rule.early_row_event(on_class="*", calling=my_early_event) # just for debug
Rule.constraint(validate="Customer", calling=check_balance,
error_msg="balance ({row.Balance}) exceeds credit ({row.CreditLimit})")
class DependencyGraphTests:
"""Not loaded"""
def not_loaded(self):
Rule.formula(derive="Tbl.ColA", # or, calling=compute_amount)
as_exp="row.ColB + row.ColC")
Rule.formula(derive="Tbl.ColB", # or, calling=compute_amount)
as_exp="row.ColC")
Rule.formula(derive="Tbl.ColC", # or, calling=compute_amount)
as_exp="row.ColD")
Rule.formula(derive="Tbl.ColD", # or, calling=compute_amount)
as_exp="row.ColE")
Rule.formula(derive="Tbl.ColE", # or, calling=compute_amount)
as_exp="xxx")
class UnusedTests:
"""Not loaded"""
def not_loaded(self):
Rule.constraint(validate="AbUser", # table is ab_user
calling=lambda row: row.username != "no_name")
Rule.count(derive=Customer.OrderCount, as_count_of=Order,
where="ShippedDate not None")
|
[
"valjhuber@gmail.com"
] |
valjhuber@gmail.com
|
7ca0ee05969af630f9b2b5a8871790f4991b5c08
|
d55f3f715c00bcbd60badb3a31696a1a629600e2
|
/students/maks/9/site2/page/management/commands/hello.py
|
014dfc85a65d4e56ef7b7b974c84bd8f9de0f150
|
[] |
no_license
|
zdimon/wezom-python-course
|
ea0adaa54444f6deaca81ce54ee8334297f2cd1a
|
5b87892102e4eb77a4c12924d2d71716b9cce721
|
refs/heads/master
| 2023-01-29T02:22:54.220880
| 2020-12-05T11:27:48
| 2020-12-05T11:27:48
| 302,864,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from django.core.management.base import BaseCommand, CommandError
from page.models import Page
class Command(BaseCommand):
def handle(self, *args, **options):
print('Hello command!!!')
Page.objects.all().delete()
page1 = Page()
page1.title = 'Index page'
page1.content = 'content content'
page1.save()
page2 = Page()
page2.title = 'Index page 2'
page2.content = 'content content 2'
page2.save()
|
[
"you@example.com"
] |
you@example.com
|
4df98b423cb6c082c08b54fdbf134f6143af4b37
|
cb6f314d0c8f1a943718caa46b47bbe5ef9908f5
|
/test/drawings/draw_hmab.py
|
21c985c2897367265b4cd012a1bfb34b55a9bbce
|
[
"MIT"
] |
permissive
|
yunx-z/soln-ml
|
d702d5afef7d9204c89bdcba44e26f89a68b8923
|
f97c991c2ad287e8d295d3058b4a9b1fd50d847b
|
refs/heads/master
| 2022-12-26T18:00:51.457311
| 2020-09-13T08:31:58
| 2020-09-13T08:31:58
| 296,199,472
| 0
| 0
|
MIT
| 2020-09-17T02:37:20
| 2020-09-17T02:37:20
| null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
import os
import sys
import time
import pickle
import argparse
import numpy as np
import autosklearn.classification
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
dataset_set = 'diabetes,spectf,credit,ionosphere,lymphography,pc4,' \
'messidor_features,winequality_red,winequality_white,splice,spambase,amazon_employee'
parser.add_argument('--datasets', type=str, default=dataset_set)
parser.add_argument('--mth', choices=['ours', 'ausk'], default='ours')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--algo_num', type=int, default=8)
parser.add_argument('--trial_num', type=int, default=100)
project_dir = './'
def plot(mth, dataset, algo_num, trial_num, seed):
if mth == 'ours':
save_path = project_dir + 'data/hmab_%s_%d_%d_%d.pkl' % \
(dataset, trial_num, algo_num, seed)
else:
save_path = project_dir + 'data/ausk_%s_%d.pkl' % (dataset, algo_num)
with open(save_path, 'rb') as f:
result = pickle.load(f)
print('Best validation accuracy: %.4f' % np.max(result[0]))
print('Final Rewards', result[0])
print('Time records', result[1])
print('Action Sequence', result[2])
print('-' * 30)
if __name__ == "__main__":
args = parser.parse_args()
dataset_str = args.datasets
dataset_list = list()
if dataset_str == 'all':
dataset_list = dataset_set
else:
dataset_list = dataset_str.split(',')
for dataset in dataset_list:
plot(args.mth, dataset, args.algo_num, args.trial_num, args.seed)
|
[
"1225646303@qq.com"
] |
1225646303@qq.com
|
dce5217f537b1368245960c5d19597735d145b4a
|
b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf
|
/001146StepikPyBegin/Stepik001146PyBeginсh07p01st01T01_for_20200420.py
|
e9b62e163ccebe48ce59797d086fef7a96ccb3fa
|
[
"Apache-2.0"
] |
permissive
|
SafonovMikhail/python_000577
|
5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4
|
f2dccac82a37df430c4eb7425b5d084d83520409
|
refs/heads/master
| 2022-12-08T10:53:57.202746
| 2022-12-07T09:09:51
| 2022-12-07T09:09:51
| 204,713,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
# реализация множественного ввода
for i in range(5):
num1 = int(input())
print("Квадрат числа равен:", num1 ** 2)
print("Цикл завершен")
|
[
"ms33@inbox.ru"
] |
ms33@inbox.ru
|
884a0b600ab0d1c224f04dbc9a3f59c3b47ca4f1
|
fcd965c9333ee328ec51bc41f5bc0300cc06dc33
|
/Coding Patterns/Fast & Slow Pointers/Palindrome LinkedList.py
|
8e1dc912eb07d240edfe2d100acd686917699403
|
[] |
no_license
|
henrylin2008/Coding_Problems
|
699bb345481c14dc3faa8bab439776c7070a1cb0
|
281067e872f73a27f76ae10ab0f1564916bddd28
|
refs/heads/master
| 2023-01-11T11:55:47.936163
| 2022-12-24T07:50:17
| 2022-12-24T07:50:17
| 170,151,972
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,389
|
py
|
# Problem Challenge 1: Palindrome LinkedList (medium)
# https://designgurus.org/path-player?courseid=grokking-the-coding-interview&unit=grokking-the-coding-interview_1628743582805_17Unit
# Problem Statement
#
# Given the head of a Singly LinkedList, write a method to check if the LinkedList is a palindrome or not.
#
# Your algorithm should use constant space and the input LinkedList should be in the original form once the algorithm
# is finished. The algorithm should have O(N)O(N) time complexity where ‘N’ is the number of nodes in the LinkedList.
#
# Example 1:
# Input: 2 -> 4 -> 6 -> 4 -> 2 -> null
# Output: true
#
# Example 2:
# Input: 2 -> 4 -> 6 -> 4 -> 2 -> 2 -> null
# Output: false
# Solution
#
# As we know, a palindrome LinkedList will have nodes values that read the same backward or forward. This means that
# if we divide the LinkedList into two halves, the node values of the first half in the forward direction should be
# similar to the node values of the second half in the backward direction. As we have been given a Singly LinkedList,
# we can’t move in the backward direction. To handle this, we will perform the following steps:
# 1. We can use the Fast & Slow pointers method similar to Middle of the LinkedList to find the middle node of the
# LinkedList.
# 2. Once we have the middle of the LinkedList, we will reverse the second half.
# 3. Then, we will compare the first half with the reversed second half to see if the LinkedList represents a
# palindrome.
# 4. Finally, we will reverse the second half of the LinkedList again to revert and bring the LinkedList back to its
# original form.
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def is_palindromic_linked_list(head):
if head is None or head.next is None:
return True
# find middle of the LinkedList
slow, fast = head, head
while fast is not None and fast.next is not None:
slow = slow.next
fast = fast.next.next
head_second_half = reverse(slow) # reverse the second half
# store the head of reversed part to revert back later
copy_head_second_half = head_second_half
# compare the first and the second half
while head is not None and head_second_half is not None:
if head.value != head_second_half.value:
break # not a palindrome
head = head.next
head_second_half = head_second_half.next
reverse(copy_head_second_half) # revert the reverse of the second half
if head is None or head_second_half is None: # if both halves match
return True
return False
def reverse(head):
prev = None
while head is not None:
next = head.next
head.next = prev
prev = head
head = next
return prev
def main():
head = Node(2)
head.next = Node(4)
head.next.next = Node(6)
head.next.next.next = Node(4)
head.next.next.next.next = Node(2)
print("Is palindrome: " + str(is_palindromic_linked_list(head)))
head.next.next.next.next.next = Node(2)
print("Is palindrome: " + str(is_palindromic_linked_list(head)))
main()
# Time Complexity
# The above algorithm will have a time complexity of O(N) where ‘N’ is the number of nodes in the LinkedList.
#
# Space Complexity
# The algorithm runs in constant space O(1).
|
[
"henrylin2008@yahoo.com"
] |
henrylin2008@yahoo.com
|
dcb3980fb3389f1967bc679b30b0ca7aa6b476c9
|
08d316151302f7ba4ae841c15b7adfe4e348ddf1
|
/reviewboard/hostingsvcs/tests/test_fogbugz.py
|
97cccab17520fd5b84e7a49e97c38afd16cd410c
|
[
"MIT"
] |
permissive
|
LloydFinch/reviewboard
|
aa8cd21fac359d49b3dfc5a68c42b857c0c04bd8
|
563c1e8d4dfd860f372281dc0f380a0809f6ae15
|
refs/heads/master
| 2020-08-10T20:02:32.204351
| 2019-10-02T20:46:08
| 2019-10-02T20:46:08
| 214,411,166
| 2
| 0
|
MIT
| 2019-10-11T10:44:55
| 2019-10-11T10:44:54
| null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
"""Unit tests for the FogBugz hosting service."""
from __future__ import unicode_literals
from reviewboard.hostingsvcs.testing import HostingServiceTestCase
class FogBugzTests(HostingServiceTestCase):
"""Unit tests for the FogBugz hosting service."""
service_name = 'fogbugz'
fixtures = ['test_scmtools']
def test_service_support(self):
"""Testing FogBugz service support capabilities"""
self.assertTrue(self.service_class.supports_bug_trackers)
self.assertFalse(self.service_class.supports_repositories)
def test_get_bug_tracker_field(self):
"""Testing FogBugz.get_bug_tracker_field"""
self.assertFalse(
self.service_class.get_bug_tracker_requires_username())
self.assertEqual(
self.service_class.get_bug_tracker_field(None, {
'fogbugz_account_domain': 'mydomain',
}),
'https://mydomain.fogbugz.com/f/cases/%s')
|
[
"christian@beanbaginc.com"
] |
christian@beanbaginc.com
|
a3ed4c22c27ef24b07b4ce76b60d632ea251f1f0
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/212/usersdata/265/87081/submittedfiles/av1_3.py
|
1d4a47463f8274d69023d764799f71ec143c55a0
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
# -*- coding: utf-8 -*-
import math
a = int(input('digite o valor de a: '))
b = int(input('digite o valor de b: '))
c = int(input('digite o valor de c: '))
i=2
j=2
k=2
while i<=a:
if a%i==0:
divisor_a=i
i=1+i
print(i)
while j<=b:
if b%j==0:
divisor_b=j
j=1+j
print(j)
while k<=c:
if c%k==0:
divisor_c=k
k=1+k
print(k)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1849bac316628694ea87ff00774d2816a84a04ce
|
6e4e395988c641856aa13aa3b68db838f0d47cc0
|
/Trying out DB-API.py
|
7a7c01c86e3f781089783e9aaf81459aec431027
|
[
"MIT"
] |
permissive
|
fatih-iver/Intro-to-Relational-Databases
|
2669d060fd2f9c8884e936e541373eecfbe8634e
|
28528132378436d6dd1f1bdec96d1e7e285b4e4d
|
refs/heads/master
| 2020-03-16T11:26:54.090473
| 2018-05-09T21:32:38
| 2018-05-09T21:32:38
| 132,648,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
# To see how the various functions in the DB-API work, take a look at this code,
# then the results that it prints when you press "Test Run".
#
# Then modify this code so that the student records are fetched in sorted order
# by student's name.
#
import sqlite3
# Fetch some student records from the database.
db = sqlite3.connect("students")
c = db.cursor()
query = "select name, id from students order by name;"
c.execute(query)
rows = c.fetchall()
# First, what data structure did we get?
print("Row data:")
print(rows)
# And let's loop over it too:
print()
print("Student names:")
for row in rows:
print(" ", row[0])
db.close()
|
[
"noreply@github.com"
] |
fatih-iver.noreply@github.com
|
fa1d83149490bbaaf6e2373771de847c30a4191b
|
10326daa458342fd992f4bd2f9c63c9469ae5a11
|
/.graal-git-repo
|
419b5f2ded2ecc3adce67832c5ddb0e5577ca1db
|
[
"MIT",
"CC-BY-SA-4.0"
] |
permissive
|
charig/black-diamonds
|
b08c1d9dab528a9c9c935414e6af4faf2d5773fb
|
0fd46caeec41f57b621143be1e4bd22d5a9a7348
|
refs/heads/master
| 2020-04-12T20:04:36.490970
| 2018-09-22T13:39:03
| 2018-09-22T13:39:03
| 162,725,323
| 0
| 0
|
MIT
| 2018-12-21T14:47:34
| 2018-12-21T14:47:34
| null |
UTF-8
|
Python
| false
| false
| 2,522
|
#!/usr/bin/python
# This is a self-updating script encoding the Graal+Truffle and mx repos and
# revision used for testing on CI. We don't use submodules to avoid a hard
# dependency that might bloat the repository of users of BlackDiamonds
#
# To checkout the repos, at the specified version for this version of the code,
# run `./graal-git-repo checkout`
# To update this script, so its revisions point to the latest versions of the
# configured repos, run `./graal-git-repo update-script-revs`
import sys
import os
# We use the following repositories
GRAAL_REPO_URL = "https://github.com/smarr/truffle.git"
MX_REPO_URL = "https://github.com/graalvm/mx.git"
# And these are the repo revisions we test against
GRAAL_REPO_REV = "a9fba6a775ffc60a90959d2eff4e66d15e9867a9"
MX_REPO_REV = "5bc7f83b9d66a31259b90933fcd0aa64d38b8d1e"
def update(lines, var, val):
for idx, line in enumerate(lines):
if line.startswith(var):
print("Updating " + var + " to " + val)
lines[idx] = var.ljust(15) + '= "' + val + '"\n'
break
def run(cmd):
print(cmd)
return os.popen(cmd).read()
if len(sys.argv) == 1:
print("To checkout the Graal+Truffle and MX dependencies use:")
print(" " + __file__ + " checkout")
print("To update the dependencies in this script use:")
print(" " + __file__ + " update-script-revs")
quit()
if sys.argv[1] == "update-script-revs":
graal_head_data = run("git ls-remote " + GRAAL_REPO_URL + " HEAD")
graal_head_rev = graal_head_data.split("\t")[0]
mx_head_data = run("git ls-remote " + MX_REPO_URL + " HEAD")
mx_head_rev = mx_head_data.split("\t")[0]
with open(__file__, 'r') as script_file:
content = script_file.readlines()
update(content, 'GRAAL_REPO_REV', graal_head_rev)
update(content, 'MX_REPO_REV', mx_head_rev)
with open(__file__, 'w') as script_file:
script_file.writelines(content)
def update_repo(folder, repo, rev):
folder = os.path.realpath(folder)
if not os.path.isdir(folder):
print("cloning " + repo)
print(run("git clone --depth 5000 " + repo + " " + folder))
run("git --git-dir=" + folder + "/.git --work-tree=" + folder +
" fetch --depth 5000")
print(run("git --git-dir=" + folder + "/.git --work-tree=" + folder +
" reset --hard " + rev))
if sys.argv[1] == "checkout":
update_repo("graal", GRAAL_REPO_URL, GRAAL_REPO_REV)
update_repo("mx", MX_REPO_URL, MX_REPO_REV)
|
[
"git@stefan-marr.de"
] |
git@stefan-marr.de
|
|
967c4163dd3b2b4b3fccef187338d4d020e0e693
|
7c63a96fad4257f4959ffeba0868059fc96566fb
|
/py/m_lutz-programming_python-4_ed/code/ch_01/step_06/01-cgi_basics/cgi-bin/cgi101.py
|
73cd3dab161b31965720666610c168715c174345
|
[
"MIT"
] |
permissive
|
ordinary-developer/education
|
b426148f5690f48e0ed4853adfc3740bd038b72c
|
526e5cf86f90eab68063bb7c75744226f2c54b8d
|
refs/heads/master
| 2023-08-31T14:42:37.237690
| 2023-08-30T18:15:18
| 2023-08-30T18:15:18
| 91,232,306
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
#!/usr/local/bin/python3
import cgi
form = cgi.FieldStorage()
print('Content-type: text/html\n')
print('<title>Reply Page</title>')
if not 'user' in form:
print('<h1>Who are you?</h1>')
else:
print('<h1>Hello <i>%s</i>!</h1>' % cgi.escape(form['user'].value))
|
[
"merely.ordinary.developer@gmail.com"
] |
merely.ordinary.developer@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.