blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7849122eb3fe8c33686282b0d31947253548e51b | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L31/31-35_MD_NVT_rerun/set_6.py | 8441267cd7f30a30dc5e9f08e15d3a37e6877812 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L31/MD_NVT_rerun/ti_one-step/31_35/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_6.in'
temp_pbs = filesdir + 'temp_6.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_6.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_6.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
02dda9d71cdceea323cc189f06c4e34e849415aa | f0aa307e12bf7ea74c7bee23830f016aeaf45dd8 | /tensor2tensor/data_generators/timeseries_test.py | 9daabe80b1a68bdd9c361272296b30955bbae4de | [
"Apache-2.0"
] | permissive | ShahNewazKhan/tensor2tensor | d94aaa0eea23e20fe1e483d27890939a7243d3b9 | ef91df0197d3f6bfd1a91181ea10e97d4d0e5393 | refs/heads/master | 2020-03-19T17:42:27.293415 | 2018-06-10T02:41:33 | 2018-06-10T02:41:33 | 136,773,457 | 0 | 0 | null | 2018-06-10T02:34:05 | 2018-06-10T02:34:05 | null | UTF-8 | Python | false | false | 1,873 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timeseries generators tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensor2tensor.data_generators import timeseries
import tensorflow as tf
class TimeseriesTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
cls.tmp_dir = tf.test.get_temp_dir()
shutil.rmtree(cls.tmp_dir)
os.mkdir(cls.tmp_dir)
def testTimeseriesToyProblem(self):
problem = timeseries.TimeseriesToyProblem()
problem.generate_data(self.tmp_dir, self.tmp_dir)
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, self.tmp_dir)
features = dataset.make_one_shot_iterator().get_next()
examples = []
exhausted = False
with self.test_session() as sess:
examples.append(sess.run(features))
examples.append(sess.run(features))
examples.append(sess.run(features))
examples.append(sess.run(features))
try:
sess.run(features)
except tf.errors.OutOfRangeError:
exhausted = True
self.assertTrue(exhausted)
self.assertEqual(4, len(examples))
self.assertNotEqual(
list(examples[0]["inputs"][0]), list(examples[1]["inputs"][0]))
if __name__ == "__main__":
tf.test.main()
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
a2169451d8f1b8e4c3a35108c6208337b798af78 | cca3f6a0accb18760bb134558fea98bb87a74806 | /abc175/F/main.py | a737fd1cdca034d435b425811277d4674b8061de | [] | no_license | Hashizu/atcoder_work | 5ec48cc1147535f8b9d0f0455fd110536d9f27ea | cda1d9ac0fcd56697ee5db93d26602dd8ccee9df | refs/heads/master | 2023-07-15T02:22:31.995451 | 2021-09-03T12:10:57 | 2021-09-03T12:10:57 | 382,987,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | #!/usr/bin/env python3
import sys
def solve(N: int, S: "List[str]", C: "List[int]"):
return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
S = [str()] * (N) # type: "List[str]"
C = [int()] * (N) # type: "List[int]"
for i in range(N):
S[i] = next(tokens)
C[i] = int(next(tokens))
solve(N, S, C)
if __name__ == '__main__':
main()
| [
"athenenoctus@gmail.com"
] | athenenoctus@gmail.com |
db042864a1223f46e3ce2426fa6200fe5f845b09 | 4910ef5677b0af1d5ee88dd422a1a65f8ad81413 | /whyis/autonomic/global_change_service.py | 2bbd467310b2daad084e27918b861e99a88b4ca3 | [
"Apache-2.0"
] | permissive | mobilemadman2/whyis | a0d3b0d88873955d7f50471ecb928f6cdb47ffb1 | 009fdfefc0962dbf1dd629c47d763720c6f20ba0 | refs/heads/master | 2020-06-27T00:43:01.894275 | 2019-07-30T23:22:42 | 2019-07-30T23:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | from __future__ import print_function
from builtins import str
import sadi
import rdflib
import setlr
from datetime import datetime
from .service import Service
from nanopub import Nanopublication
from datastore import create_id
import flask
from flask import render_template
from flask import render_template_string
import logging
import sys, traceback
import database
import tempfile
from depot.io.interfaces import StoredFile
from whyis.namespace import whyis
class GlobalChangeService(Service):
@property
def query_predicate(self):
return whyis.globalChangeQuery
| [
"gordom6@rpi.edu"
] | gordom6@rpi.edu |
1b30005c650309cac9e0140834da3c81572bba0f | e96deed00dd14a1f6d1ed7825991f12ea8c6a384 | /093. Restore IP Addresses.py | 3bbe1bad78b5fc89c3478ebae0c08eaec03f3737 | [] | no_license | borisachen/leetcode | 70b5c320abea8ddfa299b2e81f886cfeb39345c1 | 15e36b472a5067d17482dbd0d357336d31b35ff4 | refs/heads/master | 2021-01-19T17:07:46.726320 | 2020-11-16T04:30:52 | 2020-11-16T04:30:52 | 88,306,634 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | 93. Restore IP Addresses
Given a string containing only digits, restore it by returning all possible valid IP address combinations.
For example:
Given "25525511135",
return ["255.255.11.135", "255.255.111.35"]. (Order does not matter)
class Solution(object):
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
res = []
self.backtrack(s, res, 0, '', 0)
return res
def backtrack(self, ip, res, start, temp, count):
if count > 4:
return
if count == 4 and start==len(ip):
res.append(temp)
for i in range(1,4):
if start+i > len(ip):
break
next_block = ip[start:(start+i)]
# check for invalid blocks
if (next_block[0]=='0' and len(next_block)>1) or (i==3 and next_block > '255'):
continue
period = '.' if count < 3 else ''
a = "" if count == 3 else "."
self.backtrack(ip, res, start+i, temp+next_block+a, count+1)
class Solution(object):
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
solutions = []
self.restoreIp(ip=s, solutions=solutions, idx=0, restored="", count=0)
return solutions
def restoreIp(self, ip, solutions, idx, restored, count):
if count > 4:
return
if count==4 and idx==len(ip):
solutions.append(restored)
for i in range(1,4): # i is the number of digits to try for the next set. try i=1,2,3
if idx+i > len(ip): # if we are beyond the original ip, break the loop entirely
break
s = ip[idx:(idx+i)] # s = the current next value to be added
if (s[0]=='0' and len(s)>1) or (i==3 and s>='256'): # s is invalid if it starts with 0XX or is greater than 255
continue
a = "" if count == 3 else "." # what to append after s? either . or nothing depending on current count of .'s
self.restoreIp(ip, solutions, idx+i, restored+s+a, count+1)
public List<String> restoreIpAddresses(String s) {
List<String> solutions = new ArrayList<String>();
restoreIp(s, solutions, 0, "", 0);
return solutions;
}
private void restoreIp(String ip, List<String> solutions, int idx, String restored, int count) {
if (count > 4) return;
if (count == 4 && idx == ip.length()) solutions.add(restored);
for (int i=1; i<4; i++) {
if (idx+i > ip.length()) break;
String s = ip.substring(idx,idx+i);
if ((s.startsWith("0") && s.length()>1) || (i==3 && Integer.parseInt(s) >= 256)) continue;
restoreIp(ip, solutions, idx+i, restored+s+(count==3?"" : "."), count+1);
}
}
| [
"boris.chen@gmail.com"
] | boris.chen@gmail.com |
cf5eff96017123e59bba07e49d5d94addf5b3000 | dd8363acd9a028d9b6432936d72e7a5344077c20 | /plugins/modules/s3_logging.py | 16561ce89703400d898a79597cd3ab3f8a659732 | [] | no_license | gundalow-collections/amazon | 5d437c41af5e3cfa73731c9cd2c08b66c7480b43 | 23c743b63f58ba97960479e230b462fb1c90cc95 | refs/heads/master | 2020-08-28T04:53:02.641829 | 2019-10-25T19:40:04 | 2019-10-25T19:40:04 | 217,595,855 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''author: Rob White (@wimnat)
description:
- Manage logging facility of an s3 bucket in AWS
extends_documentation_fragment:
- ansible.amazon.aws
- ansible.amazon.ec2
module: s3_logging
options:
name:
description:
- Name of the s3 bucket.
required: true
state:
choices:
- present
- absent
default: present
description:
- Enable or disable logging.
target_bucket:
description:
- The bucket to log to. Required when state=present.
target_prefix:
default: ''
description:
- The prefix that should be prepended to the generated log files written to the
target_bucket.
short_description: Manage logging facility of an s3 bucket in AWS
version_added: '2.0'
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
s3_logging:
name: mywebsite.com
target_bucket: mylogs
target_prefix: logs/mywebsite.com
state: present
- name: Remove logging on an s3 bucket
s3_logging:
name: mywebsite.com
state: absent
'''
try:
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.exception import S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
def compare_bucket_logging(bucket, target_bucket, target_prefix):
bucket_log_obj = bucket.get_logging_status()
if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix:
return False
else:
return True
def enable_bucket_logging(connection, module):
bucket_name = module.params.get("name")
target_bucket = module.params.get("target_bucket")
target_prefix = module.params.get("target_prefix")
changed = False
try:
bucket = connection.get_bucket(bucket_name)
except S3ResponseError as e:
module.fail_json(msg=e.message)
try:
if not compare_bucket_logging(bucket, target_bucket, target_prefix):
# Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
try:
target_bucket_obj = connection.get_bucket(target_bucket)
except S3ResponseError as e:
if e.status == 301:
module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
else:
module.fail_json(msg=e.message)
target_bucket_obj.set_as_logging_target()
bucket.enable_logging(target_bucket, target_prefix)
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def disable_bucket_logging(connection, module):
bucket_name = module.params.get("name")
changed = False
try:
bucket = connection.get_bucket(bucket_name)
if not compare_bucket_logging(bucket, None, None):
bucket.disable_logging()
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
target_bucket=dict(required=False, default=None),
target_prefix=dict(required=False, default=""),
state=dict(required=False, default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
try:
connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
state = module.params.get("state")
if state == 'present':
enable_bucket_logging(connection, module)
elif state == 'absent':
disable_bucket_logging(connection, module)
if __name__ == '__main__':
main()
| [
"brian.coca+git@gmail.com"
] | brian.coca+git@gmail.com |
1dc1bf27b4946f2a8920cea5731c6958148fa159 | a54d5a5ae5ba352963f1166a29e1bb6c867157ab | /python/divides_evenly.py | 2b4eec4e705a704904ffcb8479439ddadd25217b | [] | no_license | alephist/edabit-coding-challenges | 06f573e90ffbd13bc54ecbdaa8e6a225aa44f5d8 | 35f1fc84848fc44e184aae1ae231a36319c1c81e | refs/heads/main | 2023-07-30T22:39:37.468756 | 2021-09-18T07:47:02 | 2021-09-18T07:47:02 | 341,467,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | """
Divides Evenly
Given two integers, a and b, return True if a can be divided evenly by b. Return False otherwise.
https://edabit.com/challenge/NRxWszQRw5JqSDmQS
"""
def divides_evenly(a: int, b: int) -> bool:
return a % b == 0
| [
"justin.necesito@gmail.com"
] | justin.necesito@gmail.com |
ebdf44d0b8071754b1394cf7bba558ebb4b9144c | c69e2b05b709a030c6f35244986df889d544adbf | /slackbot_ce/code_em/patrick/slacklib.py | 6770528924553d0a47a23fcf377fe93c8ed72d8b | [
"MIT"
] | permissive | wray/code_em | af325787d3f7a6dad68d28c72990c28e5baab4dd | 1500141828e0db8f4e13b0507398a65c2e0642cd | refs/heads/master | 2021-04-30T23:00:46.659505 | 2018-06-25T23:15:05 | 2018-06-25T23:15:05 | 68,224,253 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | # Put your commands here
COMMAND1 = "~~"
# Your handling code goes in this function
def handle_command(command):
"""
Determine if the command is valid. If so, take action and return
a response, if necessary.
"""
response = ""
if COMMAND1 in command:
response = "Huh?"
return response | [
"joe@techemstudios.com"
] | joe@techemstudios.com |
7de3feab7305a15bf87af61ebc2b63f2601479a7 | a111639e451d54e3284363e2ad2e3e328f711a94 | /structural/bridge/free_web_page.py | 6baede3f34d1508f259c2565d60a7eabee1044b3 | [] | no_license | TheProrok29/design_patterns_in_python | 33bc24c8853d3ffe13711b06ea11e8bbe4a5f391 | a68930ca60c431f93be596b7e4440daf7f4bc319 | refs/heads/master | 2021-02-18T14:31:05.465004 | 2020-04-03T10:07:40 | 2020-04-03T10:07:40 | 245,205,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from structural.bridge.web_page import WebPage
class FreeWebPage(WebPage):
def show_page(self):
image = self.fetcher.get_image()
snippet = self.fetcher.get_snippet()
ads = self.fetcher.get_ads()
full_version = self.fetcher.go_to_full_version()
print(snippet)
print(image)
print(ads)
print(full_version)
| [
"tomaszdbogacki@gmail.com"
] | tomaszdbogacki@gmail.com |
276da94c564d9de0962c4d7f29d8230ac4925eb8 | 9aa85999021da96ce0a7d76789c1298d174d1835 | /meet/migrations/0003_auto_20200128_1727.py | e4d4f1cf789a13902adda66a4e334e527bcc0455 | [] | no_license | m0bi5/ISTE-NITK_Website | 20b83a3a629836c33c7478c0af834f6f57e0e907 | 2e186bb1ba457c930f9b691cc5a5584b8e3c270c | refs/heads/master | 2022-11-24T03:02:49.354491 | 2020-07-24T15:43:44 | 2020-07-24T15:43:44 | 184,452,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 2.2.4 on 2020-01-28 17:27
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meet', '0002_auto_20200128_1714'),
]
operations = [
migrations.AlterField(
model_name='meet',
name='end_time',
field=models.TimeField(default=datetime.time(17, 27, 7, 937731)),
),
migrations.AlterField(
model_name='meet',
name='start_time',
field=models.TimeField(default=datetime.time(17, 27, 7, 937701)),
),
]
| [
"amodhshenoy@gmail.com"
] | amodhshenoy@gmail.com |
3598c364d465f9de29b3133708e16f1b6e8a21f9 | e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6 | /venv/Lib/site-packages/pybrain/structure/modules/samplelayer.py | a3096be9947029eb16c8a2d254b083c9143ce84e | [
"MIT"
] | permissive | ishatserka/MachineLearningAndDataAnalysisCoursera | cdf0f23a58617e17d6b938e3a9df17daae8585e4 | e82e772df2f4aec162cb34ac6127df10d14a625a | refs/heads/master | 2021-09-11T01:39:26.228392 | 2018-04-05T14:33:39 | 2018-04-05T14:33:39 | 117,153,454 | 0 | 0 | MIT | 2018-03-27T05:20:37 | 2018-01-11T21:05:33 | Python | UTF-8 | Python | false | false | 577 | py | #! /usr/bin/env python2.5
# -*- coding: utf-8 -*-
__author__ = ('Christian Osendorfer, osendorf@in.tum.de; '
'Justin S Bayer, bayerj@in.tum.de')
from scipy import random
from pybrain.structure.modules.neuronlayer import NeuronLayer
class SampleLayer(NeuronLayer):
"""Baseclass for all layers that have stochastic output depending on the
incoming weight."""
class BernoulliLayer(SampleLayer):
def _forwardImplementation(self, inbuf, outbuf):
outbuf[:] = inbuf <= random.random(inbuf.shape)
| [
"shatserka@gmail.com"
] | shatserka@gmail.com |
a6a7cf557aeeff0dfc96557ca96fa2ecf6f6a35d | a9c43c4b1a640841f1c9b13b63e39422c4fc47c2 | /test/tests/set_more.py | 3283c3eae471a229fed147eedfc564e625324313 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | lovejavaee/pyston | be5bd8393462be17259bcc40bf8f745e157d9793 | e8f0d9667c35db043add2f07a0ea7d23e290dd80 | refs/heads/master | 2023-05-01T17:42:35.616499 | 2015-04-07T08:10:44 | 2015-04-07T08:10:44 | 33,535,295 | 0 | 0 | NOASSERTION | 2023-04-14T02:16:28 | 2015-04-07T09:53:36 | Python | UTF-8 | Python | false | false | 225 | py | # expected: fail
print hasattr(set, "__ior__")
print hasattr(set, "__isub__")
print hasattr(set, "__iand__")
print hasattr(set, "__ixor__")
s1 = set() | set(range(3))
s2 = set(range(1, 5))
s3 = s1
s1 -= s2
print s1, s2, s3
| [
"kmod@dropbox.com"
] | kmod@dropbox.com |
4f16d28884277d1d267297d0bcd2663fcaf9a841 | 4c61666c08f3564459b2e9de65f1cef50ef7ce0a | /packages/vaex-server/vaex/server/service.py | 1732c9555f41448c83fd571e7be1f3a913fcc7a7 | [
"MIT"
] | permissive | nemochina2008/vaex | 558b42e9b65455228eb95a3c96e66c9aff39a4d6 | afdaea5568fd3b8f414ab6084d0bb53c5319b968 | refs/heads/master | 2022-05-20T16:39:14.959035 | 2020-04-13T06:30:47 | 2020-04-13T06:30:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | import concurrent.futures
import logging
import threading
import vaex
logger = logging.getLogger("vaex.server.service")
class Service:
def __init__(self, df_map):
self.df_map = df_map
def stop(self):
pass
def __getitem__(self, item):
return self.df_map[item]
def list(self):
"""Return a dict with dataframe information"""
return {name: {
'length_original': df.length_original(),
'column_names': df.get_column_names(strings=True),
'dtypes': {name: str("str" if df.dtype(name) == vaex.column.str_type else df.dtype(name)) for name in df.get_column_names(strings=True)},
'state': df.state_get()
} for name, df in self.df_map.items()
}
def _rmi(self, df, methodname, args, kwargs):
method = getattr(df, methodname)
return method(*args, **kwargs)
def execute(self, df, tasks):
assert df.executor.task_queue == []
for task in tasks:
df.executor.schedule(task)
df.execute()
return [task.get() for task in tasks]
class Proxy:
def __init__(self, service):
self.service = service
def __getitem__(self, item):
return self.service[item]
def stop(self):
return self.service.stop()
def list(self):
return self.service.list()
def _rmi(self, df, methodname, args, kwargs):
return self.service._rmi(df, methodname, args, kwargs)
class AsyncThreadedService(Proxy):
def __init__(self, service, thread_count, threads_per_job):
super().__init__(service)
self.threads_per_job = threads_per_job
self.thread_pool = concurrent.futures.ThreadPoolExecutor(thread_count)
self.thread_local = threading.local()
self.thread_pools = []
def stop(self):
self.thread_pool.shutdown()
for thread_pool in self.thread_pools:
thread_pool.shutdown()
def execute(self, df, tasks, progress=None):
def execute():
if not hasattr(self.thread_local, "executor"):
logger.debug("creating thread pool and executor")
self.thread_local.thread_pool = vaex.multithreading.ThreadPoolIndex(max_workers=self.threads_per_job)
self.thread_local.executor = vaex.execution.Executor(thread_pool=self.thread_local.thread_pool)
self.thread_pools.append(self.thread_local.thread_pool)
executor = self.thread_local.executor
try:
if progress:
executor.signal_progress.connect(progress)
df.executor = executor
return self.service.execute(df, tasks)
finally:
if progress:
executor.signal_progress.disconnect(progress)
return self.thread_pool.submit(execute)
| [
"maartenbreddels@gmail.com"
] | maartenbreddels@gmail.com |
d6cef3773f219850c2d45b864a285ebd437b6090 | 2e26bf9c44f349ee308e63e067d93da654daf69d | /python-scripts/scripts/10_find_files_recursively.py | 7952c3841a6d2c43fc75392b73d25433959ad049 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | RelativeTech/PYTHON_PRAC | 034e44484d63d50a9c4295aa7e1dc63ef786fb37 | 7fa145dece99089706460466a89901e00eef9d28 | refs/heads/master | 2023-06-04T18:59:45.059403 | 2021-06-07T19:40:10 | 2021-06-07T19:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | import fnmatch
import os
# constants
PATH = './'
PATTERN = '*.md'
def get_file_names(filepath, pattern):
matches = []
if os.path.exists(filepath):
for root, dirnames, filenames in os.walk(filepath):
for filename in fnmatch.filter(filenames, pattern):
# matches.append(os.path.join(root, filename)) # full path
matches.append(os.path.join(filename)) # just file name
if matches:
print("Found {} files:".format(len(matches)))
output_files(matches)
else:
print("No files found.")
else:
print("Sorry that path does not exist. Try again.")
def output_files(list_of_files):
for filename in list_of_files:
print(filename)
if __name__ == '__main__':
get_file_names('./', '*.py')
# 05:09:47|bryan@LAPTOP-9LGJ3JGS:[scripts] scripts_exitstatus:1[╗__________________________________________________________o>
#
# python3 10_find_files_recursively.py
# Found 31 files:
# 02_find_all_links.py
# 03_simple_twitter_manager.py
# 04_rename_with_slice.py
# 05_load_json_without_dupes.py
# 06_execution_time.py
# 07_benchmark_permissions_loading_django.py
# 08_basic_email_web_crawler.py
# 09_basic_link_web_crawler.py
# 10_find_files_recursively.py
# 11_optimize_images_with_wand.py
# 12_csv_split.py
# 13_random_name_generator.py
# 15_check_my_environment.py
# 16_jinja_quick_load.py
# 18_zipper.py
# 19_tsv-to-csv.py
# 20_restore_file_from_git.py
# 21_twitter_bot.py
# 22_git_tag.py
# 23_flask_session_test.py
# 24_sql2csv.py
# 25_ip2geolocation.py
# 26_stock_scraper.py
# 27_send_sms.py
# 28_income_tax_calculator.py
# 29_json_to_yaml.py
# 30_fullcontact.py
# 31_youtube_sentiment.py
# 32_stock_scraper.py
# 33_country_code.py
# 34_git_all_repos.py
# |05:10:14|bryan@LAPTOP-9LGJ3JGS:[scripts] scripts_exitstatus:0[╗__________________________________________________________o>
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
91d11f83078c6f58b08135d0c5f716c6a00ddffd | 5bf88a21ad382f75ee94cf98a481df162d519304 | /functional_testing/tutorial/tests.py | f372351b7f8ebf180bda99ed038156fe8c3ca1e8 | [] | no_license | VladyslavHnatchenko/pyramid_projects | 43dd3181599c822109f0f5e39f05c7393c721f7c | 2a727a7799845231f4ba61a8129d710938880f46 | refs/heads/master | 2022-06-10T22:41:01.483199 | 2020-05-08T05:44:10 | 2020-05-08T05:44:10 | 261,702,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | import unittest
from pyramid import testing
class TutorialViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_hello_world(self):
from tutorial import hello_world
request = testing.DummyRequest()
response = hello_world(request)
self.assertEqual(response.status_code, 200)
class TutorialFunctionalTests(unittest.TestCase):
def setUp(self):
from tutorial import main
app = main({})
from webtest import TestApp
self.testapp = TestApp(app)
def test_hello_world(self):
res = self.testapp.get('/', status=200)
self.assertIn(b'<h1>Whats up, Man!</h1>', res.body)
| [
"hnatchenko.vladyslav@gmail.com"
] | hnatchenko.vladyslav@gmail.com |
c284d3231f39051b27c7db6e856ea8c8fa9de65a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/b946e078bb904b47bf67863586198872.py | 8a22ddab6ef5cc2e22d14498cd235ee04d34eb67 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 661 | py | # -*- coding: utf-8 -*-
import re
"""
Bob answers 'Sure.' if you ask him a question.
He answers 'Woah, chill out!' if you yell at him.
He says 'Fine. Be that way!' if you address him without actually saying
anything.
He answers 'Whatever.' to anything else.
"""
def hey(text):
# Remove whitespace and check if string is empty
text = text.strip()
if not text:
return 'Fine. Be that way!'
# Check if string contains letters and is all uppercase
elif text.isupper():
return 'Woah, chill out!'
# Check if the string is a question
elif text.endswith("?"):
return 'Sure.'
else:
return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
a07065dd91263b9df6c81b2273dd18dce7104ca2 | a86e5de1a4a732172e4447d54fb96f62471fa450 | /internet/getfilegui_ftp.py | 308b819986ee148ae282c24d08b9f7ae287509c6 | [] | no_license | flwwsg/learnpy | 14c769ee5b59e62a9cd02095f00541ad5393517d | 915d71fbb53927f7063dd344f327f95cc0b25322 | refs/heads/master | 2020-06-30T23:34:19.134225 | 2017-01-18T13:43:54 | 2017-01-18T13:43:54 | 74,344,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | #!/usr/bin/env python3
from tkinter import Tk, mainloop
from tkinter.messagebox import showinfo
import getfile_ftp, os, sys, _thread
from form import Form
class FtpForm(Form):
def __init__(self):
root = Tk()
root.title(self.title)
labels = ['Server Name', 'Remote Dir', 'File Name',
'Local Dir', 'User Name?', 'Password?']
Form.__init__(self, labels, root)
self.mutex = _thread.allocate_lock()
self.threads = 0
def transfer(self, filename, servername, remotedir, userinfo):
try:
self.do_transfer(filename, servername, remotedir, userinfo)
print('%s of "%s" successful' % (self.mode, filename))
except :
print('%s of "%s" has failed:' % (self.mode, filename), end=' ')
print(sys.exc_info()[0], sys.exc_info()[1])
self.mutex.acquire()
self.threads -= 1
self.mutex.release()
def onSubmit(self):
Form.onSubmit(self)
localdir = self.content['Local Dir'].get()
remotedir = self.content['Remote Dir'].get()
servername = self.content['Server Name'].get()
filename = self.content['File Name'].get()
username = self.content['User Name?'].get()
password = self.content['Password?'].get()
userinfo = ()
if username and password:
userinfo = (username, password)
if localdir:
os.chdir(localdir)
self.mutex.acquire()
self.threads += 1
self.mutex.release()
ftpargs = (filename, servername, remotedir, userinfo)
_thread.start_new_thread(self.transfer, ftpargs)
showinfo(self.title, '%s of "%s" started' % (self.mode, filename))
def onCancel(self):
if self.threads == 0:
Tk().quit()
else:
showinfo(self.title,
'Cannot exit: %d threads running' % self.threads)
class FtpGetfileForm(FtpForm):
title = 'FtpGetfileGui'
mode = 'Download'
def do_transfer(self, filename, servername, remotedir, userinfo):
getfile_ftp.getfile(filename, servername, remotedir,
userinfo, verbose=False, refetch=True)
if __name__ == '__main__':
FtpGetfileForm()
mainloop() | [
"2319406132@qq.com"
] | 2319406132@qq.com |
963865d264c997cda532d2ed72b29f073ba51061 | 3ffeeae8a9a3245d8998d94aa08f680f00056cad | /226.翻转二叉树.py | c06af9247a396860bcc143a341de6a944f56d89f | [] | no_license | Ezi4Zy/leetcode | 6e293e5c07a7d8c3e38f9445ff24330134ef6c48 | 9d394cd2862703cfb7a7b505b35deda7450a692e | refs/heads/master | 2022-04-09T14:11:36.957861 | 2022-03-09T10:30:30 | 2022-03-09T10:30:30 | 57,290,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #
# @lc app=leetcode.cn id=226 lang=python
#
# [226] 翻转二叉树
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if root:
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
# @lc code=end
| [
"Ezi4zy@163.com"
] | Ezi4zy@163.com |
85451990d7639637c9028004b7716a90604a4d5e | 41d1e085dc3ec6c329b8d6443035e1e8a1c93bcc | /gridded/tests/test_ugrid/test_grid_manipulation.py | 61772aa3fa37c4e63d59b1f2e94ce2ec0254dcef | [
"Unlicense"
] | permissive | Ocean1125/gridded | 9252d3d89ecacc55c59a0ecf6fd60fe6ac0afd6e | 90cca5edf4c8d9a47914c2b6d6f78180d9c280a5 | refs/heads/master | 2023-05-15T13:21:34.144583 | 2021-06-03T21:50:01 | 2021-06-03T21:50:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | #!/usr/bin/env python
"""
Testing of various utilities to manipulate the grid.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import pytest
from .utilities import two_triangles, twenty_one_triangles
def test_build_face_face_connectivity_small(two_triangles):
    """Face-face connectivity of the two-triangle grid matches the known layout."""
    grid = two_triangles
    grid.build_face_face_connectivity()
    neighbors = grid.face_face_connectivity
    expected = ([-1, 1, -1], [-1, -1, 0])
    for face, row in enumerate(expected):
        assert np.array_equal(neighbors[face], row)
def test_build_face_face_connectivity_big(twenty_one_triangles):
    """Spot-check face-face connectivity on the 21-triangle sample grid."""
    ugrid = twenty_one_triangles
    ugrid.build_face_face_connectivity()
    face_face = ugrid.face_face_connectivity
    # -1 entries: no neighboring face across that edge (presumably boundary
    # edges -- consistent with the boundary-building tests below)
    assert face_face[0].tolist() == [-1, 3, 2]
    assert face_face[9].tolist() == [8, 10, 7]
    assert face_face[8].tolist() == [-1, 9, 6]
    assert face_face[15].tolist() == [14, 16, 13]
    assert face_face[20].tolist() == [19, -1, -1]
def test_build_edges(two_triangles):
    """build_edges produces the five unique edges of the two-triangle grid."""
    grid = two_triangles
    grid.build_edges()
    found = grid.edges
    # sort for an order-independent comparison
    found.sort(axis=0)
    expected = [[0, 1], [0, 2], [1, 2], [1, 3], [2, 3]]
    assert np.array_equal(found, expected)
def test_build_face_coordinates(two_triangles):
    """build_face_coordinates yields one (x, y) pair per face."""
    mesh = two_triangles
    mesh.build_face_coordinates()
    face_coords = mesh.face_coordinates
    expected = [(1.1, 0.76666667), (2.1, 1.43333333)]
    assert face_coords.shape == (2, 2)
    assert np.allclose(face_coords, expected)
def test_build_edge_coordinates(two_triangles):
    """build_edge_coordinates yields one (x, y) point per edge."""
    grid = two_triangles
    grid.build_edge_coordinates()
    coords = grid.edge_coordinates
    # five edges in the two-triangle grid -> five coordinate pairs
    assert coords.shape == (5, 2)
    assert np.allclose(coords, [[1.1, 0.1],
                                [2.6, 1.1],
                                [2.1, 2.1],
                                [0.6, 1.1],
                                [1.6, 1.1]])
def test_build_boundary_coordinates(two_triangles):
    """build_boundary_coordinates yields one (x, y) point per boundary."""
    grid = two_triangles
    # define the four outer edges of the two-triangle grid as boundaries
    grid.boundaries = [(0, 1), (0, 2), (2, 3), (1, 3)]
    grid.build_boundary_coordinates()
    coords = grid.boundary_coordinates
    assert coords.shape == (4, 2)
    assert np.allclose(coords, [[1.1, 0.1],
                                [0.6, 1.1],
                                [2.1, 2.1],
                                [2.6, 1.1]])
def test_build_boundaries_small(two_triangles):
    """build_boundaries finds the four outer edges of the small grid."""
    grid = two_triangles
    # connectivity is built first, as in the other boundary tests
    grid.build_face_face_connectivity()
    grid.build_boundaries()
    expected = [[0, 1], [1, 3], [2, 0], [3, 2]]
    assert sorted(grid.boundaries.tolist()) == expected
def test_build_boundaries_big(twenty_one_triangles):
    """build_boundaries finds all outer edges of the 21-triangle grid."""
    ugrid = twenty_one_triangles
    # connectivity is built first, mirroring test_build_boundaries_small
    ugrid.build_face_face_connectivity()
    ugrid.build_boundaries()
    # sort for an order-independent comparison
    boundaries = sorted(ugrid.boundaries.tolist())
    expected_boundaries = [[0, 1], [1, 5], [2, 0], [3, 6], [4, 3], [5, 11],
                           [6, 9], [7, 2], [9, 10], [10, 4], [11, 14], [12, 7],
                           [13, 12], [14, 16], [15, 13], [16, 18], [17, 15],
                           [18, 19], [19, 17]]
    assert boundaries == expected_boundaries
| [
"Chris.Barker@noaa.gov"
] | Chris.Barker@noaa.gov |
14eb64cd3a3bd8cedfcded8bc29dfff5f452c7c8 | b59372692c912ba17ec2e6812983663a6deccdaf | /.history/bsServer/models_20200502170425.py | 683a93190755a6adc35736cedb98242f994d2e94 | [] | no_license | nanjigirl/bs-server-project | 2d7c240ddf21983ed0439829a7995bde94082467 | 7863aed279b233d359c540c71fdd08ce8633976b | refs/heads/master | 2022-08-02T17:33:48.201967 | 2020-05-25T15:18:34 | 2020-05-25T15:18:34 | 261,204,713 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from django.db import models
# Create your models here.
#创建模型类
class User(models.Model):
id = models.AutoField(primary_key = True) #该字段可不写,它会自动补全
name = models.CharField(max_length = 30)
age = models.IntegerField()
sex = models.CharField(max_length = 2)
def _str_(self):
return "<User:{id=%s,name=%s,age=%s,sex=%s}>"\(self) | [
"chenxueb@yonyou.com"
] | chenxueb@yonyou.com |
4eee6f4fb6f52da831716c694e832f567a110bd2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1481.py | 60894a78cec6553a3757bede617c8af1cbf45e05 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | f = open('/home/cse/btech/cs1130260/workfile.txt')
sumit = open("/home/cse/btech/cs1130260/output.txt","w")
# Code Jam "Magic Trick": for each case, read two 4x4 grids plus the row the
# volunteer pointed to in each; the card is the value common to both rows.
num_cases = int(f.readline())
case_no = 1
while (num_cases > 0):
    # first arrangement: chosen row number (1-4), then the four grid rows
    first_choice = int(f.readline())
    first_rows = [f.readline() for _ in range(4)]
    # second arrangement, same layout
    second_choice = int(f.readline())
    second_rows = [f.readline() for _ in range(4)]
    b = first_rows[first_choice - 1].split()
    c = second_rows[second_choice - 1].split()
    # count common values between the two chosen rows; stop once two are
    # found, remembering the column of the last match in c
    match_count = 0
    last_match = None
    i = 0
    while i <= 3 and match_count < 2:
        j = 0
        while j <= 3 and match_count < 2:
            if b[i] == c[j]:
                last_match = j
                match_count += 1
            j += 1
        i += 1
    if match_count == 1:
        # exactly one common card: that is the volunteer's card
        sumit.write("Case #%s: %s\n" % (case_no, int(c[last_match])))
    if match_count == 2:
        # two or more candidates: the magician cannot know which card it was
        sumit.write("Case #%s: Bad Magician!\n" % (case_no))
    if match_count == 0:
        # no common card at all: the volunteer lied
        sumit.write("Case #%s: Volunteer cheated!\n" % (case_no))
    case_no += 1
    num_cases -= 1
sumit.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4f98552f0e559f617ec311f4d3261eceae59e4d2 | dfcbc3d04adda5925ff36f0708173cb7baa10e7e | /leetcode/145.py | 9993c2e7a9a0e8d47052b9fd01540bec850fad61 | [] | no_license | yanggelinux/algorithm-data-structure | 66a6fe9acdcacce226b9dbb85e6236776e7206e9 | 3b13b36f37eb364410b3b5b4f10a1808d8b1111e | refs/heads/master | 2021-07-25T14:14:18.521174 | 2020-09-04T08:20:24 | 2020-09-04T08:20:24 | 215,970,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | # -*- coding: utf8 -*-
"""
给定一个二叉树,返回它的 后序 遍历。
示例:
输入: [1,null,2,3]
1
\
2
/
3
输出: [3,2,1]
进阶: 递归算法很简单,你可以通过迭代算法完成吗?
后序遍历:对于树中的任意节点来说,先打印它的左子树,然后再打印它的右子树,最后打印它本身。
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def postorderTraversal(self, root):
"""
递归法
:type root: TreeNode
:rtype: List[int]
"""
res_list = []
if root is None:return res_list
res_left_list = self.postorderTraversal(root.left)
res_list += res_left_list
res_right_list = self.postorderTraversal(root.right)
res_list += res_right_list
res_list.append(root.val)
return res_list
def postorderTraversal2(self, root):
"""
递归法
:type root: TreeNode
:rtype: List[int]
"""
res_list = []
if root is None: return res_list
stack = [root]
stack1 = []
while stack:
tree_node = stack.pop()
#先检查左子节点,进栈
if tree_node.left is not None:
stack.append(tree_node.left)
#再检查右子节点进栈
if tree_node.right is not None:
stack.append(tree_node.right)
#获取stack1 的反序
stack1.append(tree_node)
while stack1:
res_list.append(stack1.pop().val)
return res_list
def postorderTraversal3(self, root):
"""
迭代法,标记颜色
:type root: TreeNode
:rtype: List[int]
"""
white,grey = 0,1
res_list = []
if root is None: return res_list
stack = [(white,root)]
while stack:
color,tree_node = stack.pop()
if tree_node is None:continue
if color == white:
#入栈方式,正好和 递归方式的顺序相反。
stack.append((grey,tree_node))
stack.append((white,tree_node.right))
stack.append((white,tree_node.left))
else:
res_list.append(tree_node.val)
return res_list
if __name__ == '__main__':
slt = Solution()
root = TreeNode(1)
root.right = TreeNode(2)
root.right.left = TreeNode(3)
print(slt.postorderTraversal3(root)) | [
"yangyang@ishansong.com"
] | yangyang@ishansong.com |
c75cb1f819a7c2b1caff1b2ab17f1b923f229602 | a2812fad2ff72d4769d136a4a79c320749bffe72 | /jesusanaya_blog/providers/blog_post.py | 99b6551e5d10f12053d069a0f7159931da5f8821 | [] | no_license | JesusAnaya/jesusanaya_blog | 7976ab2c3b1edc773d5c2e04674f865464033566 | fecc91c479caf8e7c0514fcdb85bcb9ad34f1308 | refs/heads/master | 2021-05-04T10:02:20.308204 | 2016-10-14T09:03:44 | 2016-10-14T09:03:44 | 51,033,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from jesusanaya_blog.services.storage import StorageService
from .base import Provider
class BlogPostProvider(Provider):
    """Provider for blog posts; wires up a StorageService from settings."""
    def __init__(self, dbsession, settings):
        super(BlogPostProvider, self).__init__(dbsession, settings)
        self.storage = StorageService(settings)
    def create(self, post_data):
        # TODO: not implemented yet
        pass
| [
"jesus.anaya.dev@gmail.com"
] | jesus.anaya.dev@gmail.com |
fc6c2a31ee7db5f9532e1462ad34bd6736bc43dc | 08cef372f61ba96b05e88a44a8528ac506633c51 | /Pantera/Tools/BLAST/__init__.py | 90ac553468320b5640596f8eea68f82f81856a4f | [] | no_license | xubeisi/Pantera | 322603575db54ff5e63c2fa3b4ef6b6c80376940 | b1315bbf8362ae514971b86824a144ef6b696d71 | refs/heads/master | 2021-06-22T21:58:43.048214 | 2017-08-27T23:23:29 | 2017-08-27T23:23:29 | 105,809,057 | 1 | 0 | null | 2017-10-04T19:25:27 | 2017-10-04T19:25:27 | null | UTF-8 | Python | false | false | 297 | py | __author__ = 'mahajrod'
from Pantera.Tools.BLAST.BLAST import *
from Pantera.Tools.BLAST.BLASTPlus import *
# Module-level singleton tool wrappers, ready to import elsewhere.
# NOTE(review): each assignment rebinds the imported class name to an
# instance (BLAST the class becomes BLAST the object), leaving the classes
# unreachable under these names -- presumably intentional, but confirm.
BLAST = BLAST()
BLASTn = BLASTn()
BLASTp = BLASTp()
BLASTPlus = BLASTPlus()
DustMasker = DustMasker()
BLASTDbCmd = BLASTDbCmd()
MakeBLASTDb = MakeBLASTDb()
Windowmasker = Windowmasker()
| [
"mahajrod@gmail.com"
] | mahajrod@gmail.com |
06978625b54d51d9560dc5732e5211749a1fa1fd | d1c67f2031d657902acef4411877d75b992eab91 | /test/test_drip_stat_integration.py | d8752d86bfdd76e55e14d2fec9e4dfbf4bd27a58 | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.drip_stat_integration import DripStatIntegration # noqa: E501
from swagger_client.rest import ApiException
class TestDripStatIntegration(unittest.TestCase):
    """Generated unit test stubs for the DripStatIntegration model."""
    def setUp(self):
        # no fixtures needed for the generated stub
        pass
    def tearDown(self):
        pass
    def testDripStatIntegration(self):
        """Test DripStatIntegration"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.drip_stat_integration.DripStatIntegration()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"john@oram.ca"
] | john@oram.ca |
78476fd94dbd2f8efaad66cfcdfdc067311dd1c7 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20200212/example_monogusa2/01fake_stream.py | 2dabee38e314697abf9b966624a9316bbb5a7e37 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 1,017 | py | import logging
from monogusa.events import EventParser, subscription
from monogusa.events import Message
logger = logging.getLogger(__name__)
@subscription.subscribe(Message)
def echo(ev: Message) -> None:
    """Print the content of every Message event (debug subscriber)."""
    print("!", ev.content)
def read():
    """Parse Message events from stdin (if piped) or a built-in sample.

    Each non-empty, non-comment line is parsed with EventParser and
    dispatched through ``subscription``.
    """
    import typing as t
    import sys
    import os
    import io

    def stream(default_or_io: t.Union[str, t.IO[str]]):
        # Not a TTY means data is being piped in: read from stdin directly.
        if not os.isatty(sys.stdin.fileno()):
            return sys.stdin
        # Otherwise fall back to the given default text (or a ready buffer).
        if isinstance(default_or_io, io.StringIO):
            o = default_or_io
        else:
            o = io.StringIO()
            o.write(default_or_io)
        # ensure the buffer ends with a newline so line iteration is uniform
        if not o.getvalue().endswith("\n"):
            o.write("\n")
        o.seek(0)
        return o

    p = EventParser(sep=",")
    for line in stream("Message, hello\nMessage, byebye"):
        line = line.strip()
        if not line:
            continue
        if line.startswith("#"):
            continue
        # NOTE(review): f-string prefix is pointless here -- the %r is meant
        # to be filled lazily by logging, not by the f-string.
        logger.debug(f"<- %r", line)
        ev = p.parse(line)
        subscription(ev)
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
43c1a434653a699d34d6d91683a508e50c615182 | aa3b7c6a81a323d2e17a1be7cb7ce90a20d6099a | /cproject/donation_form/api.py | 48cec882a58d24ee8f22fe9be7e58654cc1de176 | [] | no_license | gadia-aayush/sample | fdf00a4a890af6e4380b133cc64d7df89c1defff | 145b83206f9fb0972d19bef9229da0c1bf0aede0 | refs/heads/master | 2022-12-22T16:54:50.228277 | 2020-08-18T20:26:05 | 2020-08-18T20:26:05 | 288,516,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from donation_form.models import Donationreqs
from rest_framework import viewsets,permissions
from .serializers import DonationRquestsSerializers
#Donation_Request Viewset
class DonationRequestViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint over all Donationreqs records."""
    queryset = Donationreqs.objects.all()
    # AllowAny: no authentication required for any action
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = DonationRquestsSerializers
"gadia.aayush@gmail.com"
] | gadia.aayush@gmail.com |
0de1d75746071df0018fe7e5eb5ddb319207be71 | 11cd15da4be0eba77aee8edc64dbfcacd9f418c4 | /probleme22.py | 8fb7376bcba8d81369815d8f8a79c41b63d82ee5 | [] | no_license | mines-nancy-tcss5ac-2018/td1-YonasHassnaoui | 1bdc1af9872a6faacda5ba0479f8eafddc0ba8ee | 96c3e030fe1345429dd6d17af50e19d8f6383ea4 | refs/heads/master | 2020-03-30T23:38:42.027520 | 2018-10-05T11:00:08 | 2018-10-05T11:00:08 | 151,707,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | def alphabet(lettre):
res=0
al=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
for i in range(len(al)):
if al[i]==lettre:
res=i+1
return res
fichier=open('p022_names.txt','r')
for i in fichier:
print(i)
p=i.split(',')
print(p)
def solve():
res=0
for k in range(len(p)):
somme=0
for x in p[k]:
somme=somme+alphabet('x')
res=res+somme*k
return res
print(solve()) | [
"noreply@github.com"
] | mines-nancy-tcss5ac-2018.noreply@github.com |
32e84c96e90c012be5b6f4123a13f29014248c3a | 051c3ee44478265c4510530888335335ec9f7fdf | /ML_Applications/SVM/Mutants/code/SVM_rbf/DigitRecognitionApp_8.py | 3932c672a9a2b7163e2eba4970f01104b676b8ec | [] | no_license | PinjiaHe/VerifyML | b581c016012c62d8439adfce0caef4f098b36d5e | 3bd7c49e45720c1cdfe0af4ac7dd35b201056e65 | refs/heads/master | 2020-03-25T19:40:39.996370 | 2018-01-30T08:58:58 | 2018-01-30T08:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | """
Created on Fri May 26 15:20:01 2017
#Digit Recognition for V & V
#Following note added by RR
Note:
1. The actual digits data from the http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits is different than the one referred in this sklearn example
2. For more info, refer this link http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html and the above one.
3. The digits data referred by this Sklearn example can be downloaded from the following link.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/data/digits.csv.gz
"""
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
import numpy as np
import _pickle as cPickle
# Train an SVM on the 8x8 handwritten-digit data and evaluate on a test split.
digits = np.loadtxt('digits_Train.csv', delimiter=',')
# all columns but the last are the 64 flattened pixel values
digits_images_flat = digits[:,:(-1)]
digits_images = digits_images_flat.view()
digits_images.shape = ((-1), 8, 8)
# NOTE(review): training labels read column index 1 while the test labels
# below read the last column. This file lives under a "Mutants" directory,
# so this asymmetry is presumably the injected mutation -- confirm before
# "fixing" it.
digits_target = digits[:,(+1)].astype(np.int)
digits_test = np.loadtxt('digits_Test.csv', delimiter=',')
digits_test_images_flat = digits_test[:,:(-1)]
digits_test_images = digits_test_images_flat.view()
digits_test_images.shape = ((-1), 8, 8)
# test labels: last column, as expected
digits_test_target = digits_test[:,(-1)].astype(np.int)
images_and_labels = list(zip(digits_images, digits_target))
n_samples = len(digits_images)
classifier = svm.SVC(gamma=0.001)
classifier.fit(digits_images_flat, digits_target)
expected = digits_test_target
predicted = classifier.predict(digits_test_images_flat)
print('Classification report for classifier %s:\n%s\n' % (
    classifier, metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted))
print("accuracy:", metrics.accuracy_score(expected, predicted))
images_and_predictions = list(zip(digits_test_images, predicted))
# persist decision scores, confusion matrix and the fitted model
np.savetxt('output.txt', classifier.decision_function(digits_test_images_flat))
outputData = {'data_array': metrics.confusion_matrix(expected, predicted)}
with open('output.pkl', 'wb') as outputFile:
    cPickle.dump(outputData, outputFile)
with open('model.pkl', 'wb') as modelFile:
    cPickle.dump(classifier, modelFile)
"anurag.bms@gmail.com"
] | anurag.bms@gmail.com |
d620033debfba7430ebb3d0748e29d104a5f7713 | 764696896b3a0595f83dc7d108d79d23700d4573 | /pytest_relaxed/trap.py | 8f6baba442e4e1b42c6d3515273cda866a17ba12 | [
"BSD-2-Clause"
] | permissive | bitprophet/pytest-relaxed | 57f35b65c54f0ac86032050ac4f841772c755d1a | 5c18490316cbeebe9a4650a26218c10740950d8a | refs/heads/main | 2023-05-27T05:45:06.488119 | 2023-05-23T14:53:28 | 2023-05-23T14:53:28 | 87,468,490 | 31 | 9 | BSD-2-Clause | 2022-12-31T22:25:32 | 2017-04-06T19:45:24 | Python | UTF-8 | Python | false | false | 2,260 | py | """
Test decorator for capturing stdout/stderr/both.
Based on original code from Fabric 1.x, specifically:
* fabric/tests/utils.py
* as of Git SHA 62abc4e17aab0124bf41f9c5f9c4bc86cc7d9412
Though modifications have been made since.
"""
import io
import sys
from functools import wraps
class CarbonCopy(io.BytesIO):
    """
    A BytesIO that duplicates everything written to it into other writers.
    """

    def __init__(self, buffer=b"", cc=None):
        """
        ``cc`` may be a single file-like object or an iterable of them;
        every write to this instance is also forwarded to each of them.
        """
        super().__init__(buffer)
        if cc is None:
            targets = []
        elif hasattr(cc, "write"):
            targets = [cc]
        else:
            targets = cc
        self.cc = targets

    def write(self, s):
        # Normalize to bytes so the underlying BytesIO accepts it.
        data = s.encode("utf-8") if isinstance(s, str) else s
        super().write(data)
        # forward the same bytes to every carbon-copy target
        for target in self.cc:
            target.write(data)

    # Real sys.std(out|err) expose a ``buffer`` attribute in some
    # situations; mimic that by pointing it back at ourselves.
    @property
    def buffer(self):
        return self

    def getvalue(self):
        # Always hand strings back, even though bytes are stored.
        value = super().getvalue()
        return value.decode("utf-8") if isinstance(value, bytes) else value
def trap(func):
    """
    Decorator: capture sys.stdout/stderr while ``func`` runs, restoring
    the real streams afterwards.

    A combined-streams capture also appears at ``sys.stdall`` for the
    duration, resembling what a user would see at a terminal (out and err
    intermingled).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Another CarbonCopy even though it cc's nothing, for the
        # "write bytes, return strings" behavior.
        sys.stdall = CarbonCopy()
        orig_stdout, orig_stderr = sys.stdout, sys.stderr
        sys.stdout = CarbonCopy(cc=sys.stdall)
        sys.stderr = CarbonCopy(cc=sys.stdall)
        try:
            return func(*args, **kwargs)
        finally:
            sys.stdout = orig_stdout
            sys.stderr = orig_stderr
            del sys.stdall
    return wrapper
| [
"jeff@bitprophet.org"
] | jeff@bitprophet.org |
b38c5460f365eaedac5a460595d58f26f950814c | f0d521aad290086063c8928ebc064e021d63aa0c | /mlib/Mobigen/Common/Log/__init__.py | af22ed78642e24c02c685db408f255d5baa5012f | [] | no_license | mobigen/MSF_V2 | 9817cc97c2598c3f69b1192d4b186c3bc1761932 | 8a32adc3969d77455464a07523ced68564eb10af | refs/heads/master | 2023-04-27T20:14:16.020536 | 2022-02-18T08:17:10 | 2022-02-18T08:17:10 | 134,684,685 | 3 | 2 | null | 2023-04-18T22:47:33 | 2018-05-24T08:24:01 | Roff | UHC | Python | false | false | 2,938 | py | # -*- coding: cp949 -*-
import sys
import types
from traceback import *
from DummyLog import CDummyLog
from StandardLog import CStandardLog
from StandardErrorLog import CStandardErrorLog
from RotatingLog import CRotatingLog
from PipeLog import CPipeLog
from UDPLog import CUDPLog
__VERSION__ = "Release 2 (2005/10/21)"
# pysco.full() 모드에서 동작 가능하도록 수정.
#__VERSION__ = "Release 1 (2005/10/11)"
# pysco 충돌문제 해결
__LOG__ = None
#def Init(**args) :
def Init(userDefine = None) :
# 모듈 Import 정보를 조사한다.
impStepList = extract_stack()
if(len(impStepList)==0) :
# psyco.full()이 동작하는걸로 본다.
import psyco
frame = psyco._getemulframe()
impStepList = frame.f_code.co_names
# __main__ 이 아닌 곳에서 import 되는경우 __LOG__ 사용을 위해
# 임시로 Dummy Log 를 생성한다.
if(len(impStepList)!=2) :
curModule = __GetParentModule__()
if(curModule==None) :
sys.modules['__main__'].__dict__["__LOG__"] = CDummyLog()
return
if(curModule.__name__ != "__main__" and not curModule.__dict__.has_key("__LOG__")) :
curModule.__dict__["__LOG__"] = CDummyLog()
return
# __LOG__ 를 생성한다.
global __LOG__
if(userDefine != None) : __LOG__ = userDefine
else : __LOG__ = __InitMain__()
sys.modules["__main__"].__LOG__ = __LOG__
for subModuleName in sys.modules :
subModule = sys.modules[subModuleName]
if(type(subModule) == types.NoneType) : continue
if(not "Log" in subModule.__dict__) : continue
if(subModuleName == "__main__") : continue
# 하위 모듈에서 사용 가능하도록 __LOG__ 등록한다.
subModule.__LOG__ = __LOG__
def __Exception__(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty() or type == SyntaxError:
sys.__excepthook__(type, value, tb)
else:
if(__LOG__) :
__LOG__.PrintException(type, value, tb)
def AutoException() :
if __debug__:
sys.excepthook = __Exception__
def SetLevel(level) :
global __LOG__
if(__LOG__) : __LOG__.SetLevel(level)
def __InitMain__() :
return CStandardLog()
def __GetParentModule__(Test = 0) :
# impStepList[0] : __GetParentModlue__ 을 호출한 함수
# impStepList[1] : Log.py
# impStepList[2] : Log.py를 Import 한 modlue
try :
impStepList = extract_stack()
impStepList.reverse()
parentModulePath = impStepList[2][0]
except :
import psyco
frame = psyco._getemulframe(2)
parentModulePath = frame.f_code.co_filename
parentModule = None
for name in sys.modules :
moduleInfo = str(sys.modules[name])
if (moduleInfo.find(parentModulePath) != -1) :
parentModule = sys.modules[name] # 상위 모듈 획득
break
elif (moduleInfo.find("__main__") != -1 and \
moduleInfo.find("<frozen>") != -1) :
# freeze로 컴파일한경우...
parentModule = sys.modules[name] # 상위 모듈 획득
break
return parentModule
def Version() :
return __VERSION__
| [
"cheerupdi@gmail.com"
] | cheerupdi@gmail.com |
926854e5ba9d587446f3978075fb9e0f6484759c | 9633fb1796269d049aad814efc46ac1545c3b88d | /tetris.py | 239dce1c1bc3fe5fd9d561ba450374c4861dac61 | [] | no_license | Ajax12345/pygame | 90fea131736c87778965a11f4b8ed1fcce729576 | 0bfe055d3a1e3e16c5a1bed01b41044dd0671746 | refs/heads/master | 2021-01-21T08:24:07.355205 | 2017-12-04T14:06:57 | 2017-12-04T14:06:57 | 91,626,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,984 | py | import pygame
import random
import collections
import time
import sys
converter = {"tercoise":(0,238,238), 'yellow':(255,215,0), 'purple':(191,62,255), 'green':(127,255,0), 'red':(255,0,0), 'blue':(0, 0, 255), 'brown':(255,127,36)}
class Block(pygame.sprite.Sprite):
    """A single 40x40 colored square sprite positioned at (x, y)."""
    def __init__(self, x, y, color):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([40, 40])
        # color names are mapped to RGB tuples by the module-level converter
        self.image.fill(converter[color])
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x = x
class Rectangle(pygame.sprite.Sprite):
    """A 100x40 colored rectangle sprite positioned at (x, y)."""
    def __init__(self, x, y, color):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([100, 40])
        self.image.fill(converter[color])
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x =x
class LittleRectangle(pygame.sprite.Sprite):
    """An 80x40 colored rectangle sprite positioned at (x, y)."""
    def __init__(self, x, y, color):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([80, 40])
        self.image.fill(converter[color])
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x =x
class I:
    """The I tetromino: a single 100x40 tercoise bar."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.main_block = Rectangle(400, 30, 'tercoise')
        self.group.add(self.main_block)
    def __iter__(self):
        # iterate over the piece's sprites
        for sprite in self.group:
            yield sprite
class O:
    """The O tetromino: two stacked yellow rectangles."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = LittleRectangle(400, 30, 'yellow')
        self.bottom = LittleRectangle(400, 70, 'yellow')
        # add both halves directly instead of scanning __dict__ for them
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        for sprite in self.group:
            yield sprite
class T:
    """The T tetromino: a block centered above a 100x40 bar."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = Block(430, 30, 'purple')
        self.bottom = Rectangle(400, 70, 'purple')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        for sprite in self.group:
            yield sprite
class S:
    """The S tetromino: top bar offset right of the bottom bar."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = LittleRectangle(440, 30, 'green')
        self.bottom = LittleRectangle(400, 70, 'green')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        for sprite in self.group:
            yield sprite
class Z:
    """The Z tetromino: bottom bar offset right of the top bar."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = LittleRectangle(400, 30, 'red')
        self.bottom = LittleRectangle(440, 70, 'red')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        for sprite in self.group:
            yield sprite
class J:
    """The J tetromino: a block on the left end of the bottom bar."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = Block(400, 30, 'blue')
        self.bottom = Rectangle(400, 70, 'blue')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        for sprite in self.group:
            yield sprite
class L:
    """The L tetromino: a block on the right end of the bottom bar."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = Block(460, 30, 'brown')
        self.bottom = Rectangle(400, 70, 'brown')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        for sprite in self.group:
            yield sprite
class MainGame:
    """Tetris-like game loop: pieces fall, stack, and the game ends when
    the piece queue is exhausted. (Python 2 code: note the print statements.)
    """
    def __init__(self):
        # background image path is hard-coded to the author's machine
        self.image = pygame.image.load('/Users/jamespetullo/Desktop/maxresdefault.jpg')
        self.screen = pygame.display.set_mode((1100, 900))
        self.quit = False
        self.first_group = pygame.sprite.Group()
        self.block_types = {'I':I, 'O':O, 'T':T, 'S':S, 'Z':Z, 'J':J, 'L':L}
        self.game_clock = 1
        self.navigation_y = 0
        self.navigation_x = 0
        # NOTE(review): this empty group is immediately replaced on the
        # next line; the first assignment appears redundant.
        self.current_block = pygame.sprite.Group()
        self.current_block = L().group
        self.future_block = pygame.sprite.Group()
        self.final_blocks = pygame.sprite.Group()
        self.flag = False
        # queue of piece sprite-groups waiting to become the current block
        self.prioraty = collections.deque()
        self.current_time = time.time()
    def play(self):
        """Run the main event/update/draw loop until quit or game over."""
        pygame.init()
        self.screen.fill((255, 255, 255))
        while not self.quit:
            # --- input: left/right arrows set a horizontal nudge ---
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.quit = True
                if event.type == pygame.KEYDOWN:
                    print 'here111'
                    if event.key == pygame.K_RIGHT:
                        print "here"
                        self.navigation_x = 1
                    if event.key == pygame.K_LEFT:
                        print "here"
                        self.navigation_x = -1
            # --- physics: everything falls 3px; current block also moves
            # sideways by the pending nudge ---
            for sprite in self.current_block:
                sprite.rect.y += 3
                sprite.rect.x += 10*self.navigation_x
            for sprite in self.future_block:
                sprite.rect.y += 3
            for group in self.prioraty:
                for sprite in group:
                    sprite.rect.y += 3
            self.navigation_x = 0
            # every 70 ticks, enqueue a random new piece
            if self.game_clock%70 == 0:
                new_group = self.block_types[random.choice(self.block_types.keys())]()
                self.prioraty.append(new_group.group)
            '''
            for sprite in self.future_block:
                if any(pygame.sprite.collide_rect(i, sprite2) for sprite2 in self.final_blocks for i in self.future_block):
                    for s
            '''
            # queued pieces that touch the settled pile are frozen into it
            for group in self.prioraty:
                if any(pygame.sprite.collide_rect(i, sprite2) for sprite2 in self.final_blocks for i in group):
                    for sprite in group:
                        self.final_blocks.add(sprite)
            # the current piece settles when it hits the floor (y >= 700)
            # or collides with the pile; the next queued piece takes over
            for sprite in self.current_block:
                if sprite.rect.y >= 700 or any(pygame.sprite.collide_rect(i, sprite2) for sprite2 in self.final_blocks for i in self.current_block):
                    for sprite in self.current_block:
                        self.final_blocks.add(sprite)
                    try:
                        self.current_block = self.prioraty.popleft()
                    except IndexError:
                        # NOTE(review): "Congradulations" typo in the
                        # user-facing message (left unchanged here)
                        print "Congradulations! Game time was {} minutes".format(round(abs(self.current_time-time.time())/60, 2))
                        sys.exit()
                    break
            # --- draw: background, current piece, queued pieces, pile ---
            self.screen.blit(self.image, (0, 0))
            self.current_block.update()
            self.current_block.draw(self.screen)
            '''
            self.future_block.update()
            self.future_block.draw(self.screen)
            '''
            for group in self.prioraty:
                group.update()
                group.draw(self.screen)
            self.final_blocks.update()
            self.final_blocks.draw(self.screen)
            self.game_clock += 1
            pygame.display.flip()
if __name__ == '__main__':
    # build the game object and start the main loop
    tetris = MainGame()
    tetris.play()
| [
"noreply@github.com"
] | Ajax12345.noreply@github.com |
e7bbafa17d265718d8a42bc0251991aeb3da2de2 | 4fe1dc7170d2d44e2c9988c71b08f66d469ee4b8 | /Unit7/ej7.28.py | f48af0397e694f0f8fdcb886a9e279d206aa6a27 | [] | no_license | ftorresi/PythonLearning | 53c0689a6f3e7e219a6314a673a318b25cda82d1 | f2aeb5f81d9090a5a5aa69a8d1203688e9f01adf | refs/heads/master | 2023-01-12T00:40:05.806774 | 2020-11-13T14:33:08 | 2020-11-13T14:33:08 | 267,460,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py |
### Polynomials
#import numpy
class Polynomial:
    """Sparse polynomial stored as a dict mapping power -> coefficient."""

    def __init__(self, coefficients):
        self.coeff = coefficients

    def __call__(self, x):
        """Evaluate the polynomial at x."""
        total = 0
        for power, coef in self.coeff.items():
            total += coef * x**power
        return total

    def __add__(self, other):
        """Return self + other as a new Polynomial object."""
        summed = dict(self.coeff)
        for power, coef in other.coeff.items():
            summed[power] = summed.get(power, 0) + coef
        # drop terms whose coefficient is zero
        return Polynomial({p: c for p, c in summed.items() if c != 0})

    def __sub__(self, other):
        """Return self - other as a new Polynomial object."""
        diff = dict(self.coeff)
        for power, coef in other.coeff.items():
            diff[power] = diff.get(power, 0) - coef
        # drop terms whose coefficient is zero
        return Polynomial({p: c for p, c in diff.items() if c != 0})

    def __mul__(self, other):
        """Return self * other as a new Polynomial object.

        (Zero coefficients are kept here, matching add/sub only in the
        nonzero case -- same contract as before.)
        """
        product = {}
        for p1, c1 in self.coeff.items():
            for p2, c2 in other.coeff.items():
                key = p1 + p2
                product[key] = product.get(key, 0) + c1 * c2
        return Polynomial(product)
def test_Polynomial():
    """Verify Polynomial evaluation, +, -, and * against hand-computed values.

    Bug fix: the evaluation checks compared the *signed* difference against
    1e-14, so they passed for any result smaller than the target (e.g. a
    broken __call__ returning 0 would not be caught). They now use abs().
    """
    p1 = Polynomial({4: 1, 2: -2, 0: 3})
    p2 = Polynomial({0: 4, 1: 3})

    assert abs(p1(2) - 11) < 1e-14, "Bug in evaluating values"
    assert abs(p1(-10) - 9803) < 1e-14, "Bug in evaluating values"
    assert abs(p2(2) - 10) < 1e-14, "Bug in evaluating values"
    assert abs(p2(-10) + 26) < 1e-14, "Bug in evaluating values"

    p3 = p1 + p2
    p3_exact = Polynomial({0: 7, 1: 3, 2: -2, 4: 1})
    msg = 'p1 = %s, p2 = %s\np3=p1+p2 = %s\nbut wrong p3 = %s' % (p1, p2, p3_exact, p3)
    assert p3.coeff == p3_exact.coeff, msg
    # Coefficients here are integers, so == on the dicts is exact
    # (no round-off concerns).

    p4 = p1 * p2
    p4_exact = Polynomial({0: 12, 1: 9, 2: -8, 3: -6, 4: 4, 5: 3})
    msg = 'p1 = %s, p2 = %s\np4=p1*p2 = %s\ngot wrong p4 = %s' % (p1, p2, p4_exact, p4)
    assert p4.coeff == p4_exact.coeff, msg

    p5 = p1 - p2
    p5_exact = Polynomial({0: -1, 1: -3, 2: -2, 4: 1})
    msg = 'p1 = %s, p2 = %s\np5=p1-p2 = %s\nbut wrong p5 = %s' % (p1, p2, p5_exact, p5)
    assert p5.coeff == p5_exact.coeff, msg

    p6 = p2 - p1
    p6_exact = Polynomial({0: 1, 1: 3, 2: 2, 4: -1})
    msg = 'p1 = %s, p2 = %s\np6=p2-p1 = %s\nbut wrong p6 = %s' % (p1, p2, p6_exact, p6)
    assert p6.coeff == p6_exact.coeff, msg
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'verify':
test_Polynomial()
| [
"noreply@github.com"
] | ftorresi.noreply@github.com |
7cda5cdd83a59146eb5c1d6c5eb8be5261114aaf | c4e05230949efbd1ef858839850520ee94a87a58 | /musicbingo/gui/dialogbase.py | 0fb81331348d5a628a6d5e765f7eb7ed4df5e60c | [] | no_license | asrashley/music-bingo | bd33b883da9b6f88df506860475861daea63c6fb | f49d26900a10593a6f993b82d8d782b2e7367f84 | refs/heads/main | 2023-07-20T11:15:47.696132 | 2023-06-29T09:59:51 | 2023-07-05T16:48:41 | 125,717,777 | 1 | 1 | null | 2023-08-28T17:28:04 | 2018-03-18T11:26:17 | Python | UTF-8 | Python | false | false | 3,461 | py | """
A base class for creating dialog boxes
This class is based upon the code from:
http://effbot.org/tkinterbook/tkinter-dialog-windows.htm
"""
from abc import ABC, abstractmethod
from typing import Any, Optional, Protocol, Union
import tkinter as tk # pylint: disable=import-error
from .panel import Panel
class Focusable(Protocol):
    """
    Structural interface for any widget that supports ``focus_set``.
    """
    def focus_set(self) -> None:
        """
        Give this object keyboard focus.
        """
class DialogBase(tk.Toplevel, ABC):
    """
    Base class for modal dialog boxes.

    Subclasses implement body() to build their widgets and apply() to
    consume the entered values.  The constructor blocks (wait_window)
    until the dialog is dismissed; callers can then inspect self.result.
    """

    # Shared colour palette for dialog subclasses.
    NORMAL_BACKGROUND = '#FFF'
    ALTERNATE_BACKGROUND = '#BBB'
    NORMAL_FOREGROUND = "#343434"
    ALTERNATE_FOREGROUND = "#505024"
    # Font family is shared with the Panel UI.
    TYPEFACE = Panel.TYPEFACE

    def __init__(self, parent: tk.Tk, title: str, height: Union[str, float] = 0,
                 width: Union[str, float] = 0):
        # NOTE: this constructor blocks until the dialog is closed.
        super().__init__(parent, width=width, height=height)
        # Associate the dialog with its parent so it stays on top of it.
        self.transient(parent)
        self.title(title)
        self.parent = parent
        # Set by subclasses (typically in apply()) before the dialog closes.
        self.result: Optional[Any] = None
        if height and width:
            # Fixed-size dialog: pin the window geometry to width x height.
            self.geometry(f"{width}x{height}")
            body = tk.Frame(self, height=height, width=width)
        else:
            body = tk.Frame(self)
        focus = self.body(body)
        # Fall back to the dialog itself if body() names no focus widget.
        if focus:
            self.initial_focus = focus
        else:
            self.initial_focus = self
        body.pack(padx=5, pady=5)
        self.buttonbox()
        # Make the dialog modal: route all application events here.
        self.grab_set()
        # Closing via the window manager behaves like pressing Cancel.
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # Offset the dialog slightly from the parent's top-left corner.
        self.geometry(f"+{parent.winfo_rootx()+50}+{parent.winfo_rooty()+50}")
        self.initial_focus.focus_set()
        # Block until the dialog window is destroyed.
        self.wait_window(self)

    @abstractmethod
    def body(self, frame: tk.Frame) -> Optional[Focusable]:
        """
        create dialog body. return widget that should have
        initial focus.
        """
        return None

    def buttonbox(self):
        """
        add standard button box.
        override if you don't want the standard buttons
        """
        box = tk.Frame(self)
        btn = tk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
        btn.pack(side=tk.LEFT, padx=5, pady=5)
        btn = tk.Button(box, text="Cancel", width=10, command=self.cancel)
        btn.pack(side=tk.LEFT, padx=5, pady=5)
        # Keyboard shortcuts mirror the two buttons.
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()

    # pylint: disable=invalid-name, unused-argument
    def ok(self, event=None):
        """
        called when ok button is pressed
        """
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        # Hide the window before running apply() so the UI feels responsive.
        self.withdraw()
        self.update_idletasks()
        self.apply()
        self.cancel()

    # pylint: disable=unused-argument
    def cancel(self, event=None):
        """
        called when ok or cancel buttons are pressed, or window is closed
        """
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()

    def validate(self) -> bool:
        """
        Validate the fields in this dialog box
        """
        return True

    @abstractmethod
    def apply(self) -> None:
        """
        Called when OK button is pressed just before the dialog is
        closed. Used to make use of the fields in the dialog before it
        is closed.
        """
        # pylint: disable=unnecessary-pass
        pass
| [
"alex@ashley-family.net"
] | alex@ashley-family.net |
94806089d8e2bf6b6faa81a9841e2b0dfc88baf2 | cbedb18df0aaac810aeea87a2273edb15c1cf899 | /mixed bag/day9-10/394. Decode String (straight sol, but not easy to write).py | cb739c9f859f998c3e95e8efa8d9fe182117b893 | [] | no_license | kanglicheng/CodeBreakersCode | 71b833bb9f4c96d520c26f0044365dc62137a940 | 31f7f730227a0e10951e7468bad1b995cf2eafcb | refs/heads/master | 2023-08-07T20:32:05.267695 | 2020-09-14T14:36:25 | 2020-09-14T14:36:25 | 265,978,034 | 0 | 0 | null | 2020-05-22T00:05:29 | 2020-05-22T00:05:29 | null | UTF-8 | Python | false | false | 1,326 | py | class Solution:
def decodeString(self, s: str) -> str:
def rec(s, l, r, pair):
res = ""
i = l
num = 0
while i < r:
curChar = s[i]
if curChar.isnumeric():
j = i
numStr = ""
while s[j].isnumeric():
numStr += s[j]
j += 1
i = j
num = int(numStr)
elif s[i] == '[':
subStr = rec(s, i + 1, pair[i], pair)
for j in range(0, num):
res += subStr
i = pair[i] + 1
else:
res += s[i]
i += 1
return res
n = len(s)
stack, pair = [], dict()
for i in range(0, n):
if s[i] == '[':
stack.append(i)
elif s[i] == ']':
pair[stack.pop()] = i
return rec(s, 0, n, pair)
| [
"56766457+Wei-LiHuang@users.noreply.github.com"
] | 56766457+Wei-LiHuang@users.noreply.github.com |
92595caed1dd7131190d5a6ceabcea4944d68c2e | b4c6013f346e178222cc579ede4da019c7f8c221 | /src/main/python/idlelib/paragraph.py | f11bdaeb77ac38beb420366597c89ff90ac062fe | [
"BSD-3-Clause",
"OpenSSL",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"GPL-1.0-or-later",
"LicenseRef-scancode-unicode"
] | permissive | cafebabepy/cafebabepy | e69248c4f3d9bab00e93ee749d273bc2c9244f8d | 4ab0e67b8cd79f2ca7cab6281bc811d3b9bc69c1 | refs/heads/develop | 2022-12-09T21:14:56.651792 | 2019-07-01T09:05:23 | 2019-07-01T09:05:23 | 90,854,936 | 9 | 1 | BSD-3-Clause | 2018-01-02T02:13:51 | 2017-05-10T11:05:11 | Java | UTF-8 | Python | false | false | 7,277 | py | """Extension to format a paragraph or selection to a max width.
Does basic, standard text formatting, and also understands Python
comment blocks. Thus, for editing Python source code, this
extension is really only suitable for reformatting these comment
blocks or triple-quoted strings.
Known problems with comment reformatting:
* If there is a selection marked, and the first line of the
selection is not complete, the block will probably not be detected
as comments, and will have the normal "text formatting" rules
applied.
* If a comment block has leading whitespace that mixes tabs and
spaces, they will not be considered part of the same block.
* Fancy comments, like this bulleted list, aren't handled :-)
"""
import re
from idlelib.config import idleConf
class FormatParagraph:
    """IDLE extension: reformat the selection or current paragraph to a
    configured maximum width, preserving '#' comment prefixes."""

    # Menu registration consumed by the IDLE extension loader.
    menudefs = [
        ('format', [ # /s/edit/format dscherer@cmu.edu
            ('Format Paragraph', '<<format-paragraph>>'),
        ])
    ]

    def __init__(self, editwin):
        # editwin is the hosting editor window; its .text is a Tk Text widget.
        self.editwin = editwin

    def close(self):
        # Drop the editor reference so the window can be garbage collected.
        self.editwin = None

    def format_paragraph_event(self, event, limit=None):
        """Formats paragraph to a max width specified in idleConf.

        If text is selected, format_paragraph_event will start breaking lines
        at the max width, starting from the beginning selection.

        If no text is selected, format_paragraph_event uses the current
        cursor location to determine the paragraph (lines of text surrounded
        by blank lines) and formats it.

        The length limit parameter is for testing with a known value.
        """
        if limit is None:
            # The default length limit is that defined by pep8
            limit = idleConf.GetOption(
                'extensions', 'FormatParagraph', 'max-width',
                type='int', default=72)
        text = self.editwin.text
        first, last = self.editwin.get_selection_indices()
        if first and last:
            # Operate on the explicit selection.
            data = text.get(first, last)
            comment_header = get_comment_header(data)
        else:
            # No selection: locate the paragraph around the insert cursor.
            first, last, comment_header, data = \
                find_paragraph(text, text.index("insert"))
        if comment_header:
            newdata = reformat_comment(data, limit, comment_header)
        else:
            newdata = reformat_paragraph(data, limit)
        text.tag_remove("sel", "1.0", "end")

        if newdata != data:
            # Replace the paragraph as a single undoable operation.
            text.mark_set("insert", first)
            text.undo_block_start()
            text.delete(first, last)
            text.insert(first, newdata)
            text.undo_block_stop()
        else:
            # Nothing changed; just move the cursor past the paragraph.
            text.mark_set("insert", last)
        text.see("insert")
        # Tk convention: "break" stops further event processing.
        return "break"
def find_paragraph(text, mark):
    """Returns the start/stop indices enclosing the paragraph that mark is in.

    Also returns the comment format string, if any, and paragraph of text
    between the start/stop indices.

    `text` is a Tk Text widget; `mark` is a "line.column" index string.
    """
    lineno, col = map(int, mark.split("."))
    line = text.get("%d.0" % lineno, "%d.end" % lineno)

    # Look for start of next paragraph if the index passed in is a blank line
    while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.end" % lineno)
    first_lineno = lineno
    # A paragraph is bounded by lines whose comment prefix differs or
    # which are blank after the prefix is stripped.
    comment_header = get_comment_header(line)
    comment_header_len = len(comment_header)

    # Once start line found, search for end of paragraph (a blank line)
    while get_comment_header(line)==comment_header and \
              not is_all_white(line[comment_header_len:]):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.end" % lineno)
    last = "%d.0" % lineno

    # Search back to beginning of paragraph (first blank line before)
    lineno = first_lineno - 1
    line = text.get("%d.0" % lineno, "%d.end" % lineno)
    while lineno > 0 and \
              get_comment_header(line)==comment_header and \
              not is_all_white(line[comment_header_len:]):
        lineno = lineno - 1
        line = text.get("%d.0" % lineno, "%d.end" % lineno)
    first = "%d.0" % (lineno+1)
    return first, last, comment_header, text.get(first, last)
# This should perhaps be replaced with textwrap.wrap
def reformat_paragraph(data, limit):
"""Return data reformatted to specified width (limit)."""
lines = data.split("\n")
i = 0
n = len(lines)
while i < n and is_all_white(lines[i]):
i = i+1
if i >= n:
return data
indent1 = get_indent(lines[i])
if i+1 < n and not is_all_white(lines[i+1]):
indent2 = get_indent(lines[i+1])
else:
indent2 = indent1
new = lines[:i]
partial = indent1
while i < n and not is_all_white(lines[i]):
# XXX Should take double space after period (etc.) into account
words = re.split(r"(\s+)", lines[i])
for j in range(0, len(words), 2):
word = words[j]
if not word:
continue # Can happen when line ends in whitespace
if len((partial + word).expandtabs()) > limit and \
partial != indent1:
new.append(partial.rstrip())
partial = indent2
partial = partial + word + " "
if j+1 < len(words) and words[j+1] != " ":
partial = partial + " "
i = i+1
new.append(partial.rstrip())
# XXX Should reformat remaining paragraphs as well
new.extend(lines[i:])
return "\n".join(new)
def reformat_comment(data, limit, comment_header):
    """Return data reformatted to specified width with comment header."""
    header_len = len(comment_header)
    # Strip the comment prefix from every line before reflowing the text.
    stripped = "\n".join(line[header_len:] for line in data.split("\n"))
    # Reflow to the remaining width, but never narrower than 20 characters.
    body_width = max(limit - header_len, 20)
    wrapped = reformat_paragraph(stripped, body_width).split("\n")
    # A trailing empty element means the block ended with a newline; keep
    # that newline, but do not emit a prefixed empty comment line for it.
    trailer = ""
    if not wrapped[-1]:
        trailer = "\n"
        wrapped = wrapped[:-1]
    return '\n'.join(comment_header + line for line in wrapped) + trailer
def is_all_white(line):
    """Return True if line is empty or contains only whitespace."""
    return re.fullmatch(r"\s*", line) is not None
def get_indent(line):
    """Return the leading run of spaces and tabs from line."""
    stripped = line.lstrip(" \t")
    return line[:len(line) - len(stripped)]
def get_comment_header(line):
    """Return string with leading whitespace and '#' from line or ''.

    A null return indicates that the line is not a comment line. A non-
    null return, such as ' #', will be used to find the other lines of
    a comment block with the same indent.
    """
    match = re.match(r"[ \t]*#*", line)
    return "" if match is None else match.group()
if __name__ == "__main__":
    # Self-test: run this module's unit tests verbosely without calling
    # sys.exit (exit=False), so it is usable from an interactive session.
    import unittest
    unittest.main('idlelib.idle_test.test_paragraph',
                  verbosity=2, exit=False)
| [
"zh1bvtan1@gmail.com"
] | zh1bvtan1@gmail.com |
312dd6aaf8ba2a988707f09b3851cb85fa7bc812 | 2387cd7657d82b3102e7f8361496307c5f49a534 | /设计模式/python/singleton.py | b0ed87bf24fba431ef90934fc234f64cfcf4ba84 | [] | no_license | kelele67/ReadBooks | da36b25b01d008b2732ad4673e6b676ac7f25027 | 1cf5fc42e1a9edc32971fbcadb64b4b7a84481ad | refs/heads/master | 2021-01-19T14:48:56.932194 | 2017-10-16T10:11:35 | 2017-10-16T10:11:35 | 100,925,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | # import threading
# class Singleton(object):
# """Singleton"""
# instance = None
# lock = threading.RLock()
# @classmethod
# def __new__(cls):
# if cls.instance is None:
# cls.lock.acquire()
# if cls.instance is None:
# cls.instance = super(Singleton, cls).__new__(cls)
# cls.lock.release()
# return cls.instance
# if __name__ == '__main__':
# instance1 = Singleton()
# instance2 = Singleton()
# print (id(instance1) == id(instance2))
# class Singleton(object):
# def __new__(cls):
# if not hasattr(cls, '_instance'):
# cls._instance = super(Singleton, cls).__new__(cls)
# return cls._instance
# if __name__ == '__main__':
# class A(Singleton):
# def __init__(self, s):
# self.s = s
# a = A('apple')
# b = A('banana')
# print (id(a), a.s)
# print (id(b), b.s)
| [
"kingand67@outlook.com"
] | kingand67@outlook.com |
06c38aa52a024c092191fe02628c564ccab24845 | e92a3d0fb77120be99de6040cb6cd34eda0a95f4 | /Ветки в Git, словари, кортежи и множества/code/delete_all-words.py | 0f30acdcfd066b2f9022bb81ba363d691eac67d1 | [] | no_license | Python18Academy/python_first_level | 495f85631f5afc737aa156ef8ca0ea307340c322 | 9ce490da3108474b135a17086f4d11f2a3bbbe55 | refs/heads/master | 2023-09-04T17:00:36.920987 | 2021-03-31T18:44:37 | 2021-03-31T18:44:37 | 331,934,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # я знаю что вы люите подобные списки
spisok = ['skovorodka', 'stolick', 'istoria', 'skovorodka', 'stena', 'kartina']
while 'skovorodka' in spisok:
spisok.remove('skovorodka')
print(spisok)
# выведет только обрезанный список
| [
"isakura313@gmail.com"
] | isakura313@gmail.com |
6b5e7cbb93508bf3bf78ec609a82b00d5bd0e6c9 | 742f1c8301264d4f06fc1d389157613c57614b7f | /web_test/web.py | 3d455f2d3970d34edb9732ad9174c6b8a2f4bab2 | [
"MIT"
] | permissive | DieMyDarling/python-web-test | 93fe9be2fb372545346621a0a6bd5b3961eb9af4 | 31a2beb82c8ad50df481a7ce0541813a2088a2b6 | refs/heads/master | 2022-11-23T10:34:44.072430 | 2020-07-30T18:12:46 | 2020-07-30T18:12:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | from web_test.pages.duckduckgo import Duckduckgo
from web_test.pages.ecosia import Ecosia
from web_test.pages.github import Github
from web_test.pages.google import Google
"""
This module is optional.
Usually it makes sense to call it `app.py`,
but in the context of this template project, our app is "all web",
and the word "web" is already a good name describing exactly what we want.
The idea is to provide a one entry point to all PageObjects
So you can import just this entry point in your test:
from web_test.pages import web
and then fluently access any page:
web.ecosia
# ...
web.searchencrypt
# ...
web.duckduckgo
instead of direct import:
from web_test.pages.ecosia import ecosia
from web_test.pages.searchencrypt import searchencrypt
from web_test.pages.duckduckgo import duckduckgo
ecosia
# ...
searchencrypt
# ...
duckduckgo
Probably instead of:
web_test/web.py
you can use any of:
web_test/pages/web.py
web_test/pages/__init__.py
we type hint variables below to allow better IDE support,
e.g. for Quick Fix feature...
"""
# Eagerly-created shared page objects: one instance per page, accessed by
# tests through this module (e.g. web.duckduckgo, web.ecosia, web.google).
duckduckgo: Duckduckgo = Duckduckgo()
ecosia: Ecosia = Ecosia()
google: Google = Google()
"""
searchencrypt is "PageModule" not "PageObject"
that's we don't have to introduce a new variable for page's object
just an import is enough
There is one nuance though...
If we want the IDE in case of "quick fixing imports" to
show for us ability to directly import searchencrypt from web.py
then we have to do something like this:
from web_test.pages import searchencrypt as _searchencrypt
searchencrypt = _searchencrypt
But probably you will never need it;)
Hence keep things simple;)
"""
# Re-export the page module so it is reachable as web.searchencrypt.
from web_test.pages import searchencrypt

# Page object for GitHub, exposed alongside the search-engine pages.
github: Github = Github()
| [
"yashaka@gmail.com"
] | yashaka@gmail.com |
5e71698c0e44077c937447562f247b876aac103f | 625a3b84b86df1b0a61a7088373094e481b7502e | /simple_shop/wsgi.py | 8e65b23f6d90e12dc890866495e35847af8f8601 | [] | no_license | muremwa/simple-shop-api | 868dc4747fbdb0e6b3eca18969c373c2dd056b4f | ab96e04030de4f2833a4ede8834c4bce393e4528 | refs/heads/master | 2022-05-09T23:43:28.587469 | 2020-04-16T04:57:03 | 2020-04-16T04:57:03 | 256,277,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for simple_shop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Default the settings module; a deployment can override this by exporting
# DJANGO_SETTINGS_MODULE before the server process starts.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simple_shop.settings')

# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| [
"danmburu254@gmail.com"
] | danmburu254@gmail.com |
5218e4c00c5dad5ac8b32aa30d93dff669c14ca5 | ee1bd2a5c88989a43fee1d9b3c85c08d66392502 | /intro_to_statistics/class15_probability.py | 002c1a3723c3ee75f9b05865185001170828f69a | [] | no_license | shasky2014/PythonLearning | 46288bd915466110ee14b5ee3c390ae9b4f67922 | 04c06d06a2c3f1c4e651627fd6b224f55205c06f | refs/heads/master | 2021-06-27T13:14:10.730525 | 2020-10-09T07:54:31 | 2020-10-09T07:54:31 | 252,903,485 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | # Relative Probabilities 1
# Let's suppose we have a fair coin and we flipped it four times.
# p1= 4*0.5*0.5*0.5*0.5
# p2= 0.5
#
# print p1/p2
# FlipPredictor
# A coin is drawn at random from a bag of coins of varying probabilities
# Each coin has the same chance of being drawn
# Your class FlipPredictor will be initialized with a list of the probability of
# heads for each coin. This list of probabilities can be accessed as self.coins
# in the functions you must write. The function update will be called after every
# flip to enable you to update your estimate of the probability of each coin being
# the selected coin. The function pheads may be called and any time and will
# return your best estimate of the next flip landing on heads.
from __future__ import division
class FlipPredictor(object):
    """Bayesian estimator for which coin from a known bag is being flipped.

    coins[i] is the heads probability of coin i; probs[i] tracks the
    posterior probability that coin i is the one that was drawn.
    """

    def __init__(self, coins):
        self.coins = coins
        count = len(coins)
        # Uniform prior: every coin is equally likely to have been drawn.
        self.probs = [1 / count] * count

    def pheads(self):
        """Return the current best estimate that the next flip is heads."""
        total = 0
        for bias, weight in zip(self.coins, self.probs):
            total += bias * weight
        return total

    def update(self, result):
        """Bayes-update the per-coin posterior given one flip ('H' or 'T')."""
        evidence = self.pheads()
        if result == 'H':
            posterior = [bias * weight / evidence
                         for bias, weight in zip(self.coins, self.probs)]
        else:
            posterior = [(1 - bias) * weight / (1 - evidence)
                         for bias, weight in zip(self.coins, self.probs)]
        self.probs = posterior
# You need not change it
# You may add additional htmlprs cases or otherwise modify if desired
def test(coins, flips):
    """Run a predictor over a flip sequence; return the pheads estimate
    recorded after each flip."""
    predictor = FlipPredictor(coins)
    estimates = []
    for outcome in flips:
        predictor.update(outcome)
        estimates.append(predictor.pheads())
    return estimates
def maxdiff(l1, l2):
    """Return the largest elementwise absolute difference between l1 and l2
    (extra elements of the longer list are ignored, as with zip)."""
    return max(abs(a - b) for a, b in zip(l1, l2))
testcases = [
(([0.5, 0.4, 0.3], 'HHTH'), [0.4166666666666667, 0.432, 0.42183098591549295, 0.43639398998330553]),
(([0.14, 0.32, 0.42, 0.81, 0.21], 'HHHTTTHHH'),
[0.5255789473684211, 0.6512136991788505, 0.7295055220497553, 0.6187139453483192, 0.4823974597714815,
0.3895729901052968, 0.46081730193074644, 0.5444108434105802, 0.6297110187222278]),
(([0.14, 0.32, 0.42, 0.81, 0.21], 'TTTHHHHHH'),
[0.2907741935483871, 0.25157009005730924, 0.23136284577678012, 0.2766575695593804, 0.3296000585271367,
0.38957299010529806, 0.4608173019307465, 0.5444108434105804, 0.6297110187222278]),
(([0.12, 0.45, 0.23, 0.99, 0.35, 0.36], 'THHTHTTH'),
[0.28514285714285714, 0.3378256513026052, 0.380956725493104, 0.3518717367468537, 0.37500429586037076,
0.36528605387582497, 0.3555106542906013, 0.37479179323540324]),
(([0.03, 0.32, 0.59, 0.53, 0.55, 0.42, 0.65], 'HHTHTTHTHHT'),
[0.528705501618123, 0.5522060353798126, 0.5337142767315369, 0.5521920592821695, 0.5348391689038525,
0.5152373451083692, 0.535385450497415, 0.5168208803156963, 0.5357708613431963, 0.5510509656933194,
0.536055356823069])]
# Compare each run against its reference estimates; a tolerance of 1e-3
# absorbs floating-point differences between implementations.
for inputs, output in testcases:
    if maxdiff(test(*inputs), output) < 0.001:
        print('Correct')
    else:
        print('Incorrect')
| [
"249398363@qq.com"
] | 249398363@qq.com |
d584073eb7a7fc0392f9fc00e0573e29debd20dd | 66c6df450753acc7c41db5afe66abd35d5018c8c | /cliente Rujel/bin92.py | 76d7a6fac53865a674f5e3fd250b034be7f8ee9a | [] | no_license | hanmiton/CodigoCompletoEncriptacion | a33807d9470b538842751071031c9ce60951260f | efb7898af5d39025e98c82f1f71c8e9633cce186 | refs/heads/master | 2020-03-24T02:03:08.242655 | 2018-07-25T22:41:05 | 2018-07-25T22:41:05 | 142,360,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | import sys
import math
import random
import openpyxl
import xlsxwriter
from time import time
LAMBDA = 16 #security parameter
N = LAMBDA
P = LAMBDA ** 2
Q = LAMBDA ** 5
def principal(m1,m2):
    """Read integers from cifrado.xlsx, expand each into a padded bit list,
    encrypt the bits with fresh per-bit keys and return the encrypted rows.

    NOTE(review): the parameters m1 and m2 are never used in this body,
    and the call site passes undefined globals -- presumably leftovers
    from an earlier two-message version; confirm before relying on them.
    This file is Python 2 (print statements, list-returning map()).
    """
    numsA = []
    numsB = []
    aux = []
    numsAEncrypt = []
    numsBEncrypt = []
    keys= []
    # Workbook access via openpyxl; sheet 'Hoja1' holds the input numbers.
    doc = openpyxl.load_workbook('cifrado.xlsx')
    doc.get_sheet_names()
    hoja = doc.get_sheet_by_name('Hoja1')
    prueba = []
    for fila in hoja.rows:
        for columna in fila:
            # Bit lists (MSB first) for the cell value and the constant 0.
            boln1 = bin(int(columna.value))
            boln2 = bin(0)
            # Left-pad the shorter bit list with zeros to equal lengths.
            # NOTE(review): `aux` is never reset between cells, so the
            # padding accumulates across iterations -- looks like a bug;
            # confirm before reuse.
            if(len(boln1) > len(boln2)):
                print len(boln1) - len(boln2)
                for i in range(0, len(boln1) - len(boln2)):
                    aux.append(0)
                boln2 = aux + boln2
            else:
                print len(boln2) - len(boln1)
                for i in range(0, len(boln2) - len(boln1)):
                    aux.append(0)
                boln1 = aux + boln1
            # One fresh key per bit; map() returns a list under Python 2.
            key = map(keygen,boln1)
            boln1Encrypt = map(encrypt,key,boln1)
            #boln2Encrypt = map(encrypt,key,boln2)
            numsA.append(boln1)
            numsB.append(boln2)
            keys.append(key)
            numsAEncrypt.append(boln1Encrypt)
    return numsAEncrypt
    # NOTE(review): everything below is unreachable -- it follows the
    # unconditional return above, and it resets boln1Encrypt/boln2Encrypt
    # to empty lists before using them.  Kept verbatim for reference.
    boln1Encrypt = []
    boln2Encrypt = []
    sumEncrypt = []
    mulEnctypt = []
    res = []
    sumEncrypt = map(add,boln1Encrypt,boln2Encrypt)
    strEncriptSum = ''.join(str(e) for e in sumEncrypt)
    mulEnctypt = map(mult,boln1Encrypt, boln2Encrypt)
    resSuma = map (decrypt, key, sumEncrypt)
    strSuma = ''.join(str(e) for e in resSuma)
    workbook = xlsxwriter.Workbook('enc1.xlsx')
    worksheet = workbook.add_worksheet()
    i=2
    celda = 'A' + repr(i)
    celda2 = 'B' + repr(i)
    worksheet.write(celda, strEncriptSum)
    worksheet.write(celda2, str(len(sumEncrypt) ))
    workbook.close()
    decSuma = int(strSuma, 2)
    #start_time = time()
    resMult = map (decrypt, key, mulEnctypt)
    #elapsed_time = time() - start_time
    #return elapsed_time
    strMult = ''.join(str(e) for e in resMult)
    decMult = int(strMult, 2)
    return decSuma
def quot(z, p):
    """Quotient of z by p rounded to the nearest integer (floor division
    after adding half of p)."""
    # http://stackoverflow.com/questions/3950372/round-with-integer-division
    half = p // 2
    return (z + half) // p
def mod(z, p):
    """Remainder of z relative to the nearest multiple of p (balanced
    remainder, paired with quot)."""
    return z - p * quot(z, p)
def keygen(n):
    """Draw a random odd P-bit secret key by rejection sampling.

    The argument n is accepted but unused (kept for map() call sites).
    """
    while True:
        candidate = random.getrandbits(P)
        if candidate % 2 == 1:
            return candidate
def encrypt(key, aBit):
    """Encrypt one bit as key*q + even_noise + aBit, so the residue mod
    key has the bit as its parity."""
    random_multiple = random.getrandbits(Q)
    even_noise = 2 * random.getrandbits(N - 1)
    return key * random_multiple + even_noise + aBit
def decrypt(key, cipherText):
    """Recover the plaintext bit: balanced residue mod key, then parity."""
    residue = mod(cipherText, key)
    return residue % 2
def add(cipherText1, cipherText2):
    """Homomorphic addition of two ciphertexts (integer sum)."""
    total = cipherText1 + cipherText2
    return total
def mult(cipherText1, cipherText2):
    """Homomorphic multiplication of two ciphertexts (integer product)."""
    product = cipherText1 * cipherText2
    return product
def bin(numero):
    """Return the binary digits of a non-negative integer as a list of
    bits, most significant first; [0] for zero; an error string for
    negative input (preserved verbatim from the original, in Spanish).

    NOTE: this deliberately shadows the builtin bin(), matching the
    original module's interface.

    Fixes over the original: halving used int(math.floor(numero/2)),
    which goes through float division and silently corrupts values above
    2**53; integer floor division is exact for any size.  The unused
    parallel string `binario` and the manual reversal loop are gone.
    """
    if numero < 0:
        return " no se pudo convertir el numero. ingrese solo numeros positivos"
    if numero == 0:
        return [0]
    bits = []
    while numero > 0:
        bits.append(numero % 2)   # least significant bit first
        numero //= 2              # exact integer halving, safe for big ints
    bits.reverse()                # present most significant bit first
    return bits
if __name__ == '__main__':
    # The original read `principal(m1, m2)`, but no globals m1/m2 exist in
    # this module, so running the script raised NameError before doing any
    # work.  principal() never uses its parameters, so pass explicit
    # placeholders instead.
    principal(None, None)
"hanmilton_12@outlook.com"
] | hanmilton_12@outlook.com |
0df51dc4002580c130c48d9c16bdcd453f42d795 | 3665e5e6946fd825bb03b3bcb79be96262ab6d68 | /jc/parsers/route.py | 668ea3545e3157b06975c36ee8c9bd699778ec9a | [
"MIT",
"BSD-3-Clause"
] | permissive | philippeitis/jc | a28b84cff7fb2852a374a7f0f41151b103288f26 | d96b3a65a98bc135d21d4feafc0a43317b5a11fa | refs/heads/master | 2021-02-16T05:03:03.022601 | 2020-03-04T16:30:52 | 2020-03-04T16:30:52 | 244,969,097 | 0 | 0 | MIT | 2020-03-08T21:10:36 | 2020-03-04T18:01:38 | null | UTF-8 | Python | false | false | 4,085 | py | """jc - JSON CLI output utility route Parser
Usage:
specify --route as the first argument if the piped input is coming from route
Compatibility:
'linux'
Examples:
$ route -ee | jc --route -p
[
{
"destination": "default",
"gateway": "gateway",
"genmask": "0.0.0.0",
"flags": "UG",
"metric": 100,
"ref": 0,
"use": 0,
"iface": "ens33",
"mss": 0,
"window": 0,
"irtt": 0
},
{
"destination": "172.17.0.0",
"gateway": "0.0.0.0",
"genmask": "255.255.0.0",
"flags": "U",
"metric": 0,
"ref": 0,
"use": 0,
"iface": "docker",
"mss": 0,
"window": 0,
"irtt": 0
},
{
"destination": "192.168.71.0",
"gateway": "0.0.0.0",
"genmask": "255.255.255.0",
"flags": "U",
"metric": 100,
"ref": 0,
"use": 0,
"iface": "ens33",
"mss": 0,
"window": 0,
"irtt": 0
}
]
$ route -ee | jc --route -p -r
[
{
"destination": "default",
"gateway": "gateway",
"genmask": "0.0.0.0",
"flags": "UG",
"metric": "100",
"ref": "0",
"use": "0",
"iface": "ens33",
"mss": "0",
"window": "0",
"irtt": "0"
},
{
"destination": "172.17.0.0",
"gateway": "0.0.0.0",
"genmask": "255.255.0.0",
"flags": "U",
"metric": "0",
"ref": "0",
"use": "0",
"iface": "docker",
"mss": "0",
"window": "0",
"irtt": "0"
},
{
"destination": "192.168.71.0",
"gateway": "0.0.0.0",
"genmask": "255.255.255.0",
"flags": "U",
"metric": "100",
"ref": "0",
"use": "0",
"iface": "ens33",
"mss": "0",
"window": "0",
"irtt": "0"
}
]
"""
import jc.utils
import jc.parsers.universal
class info():
    """Parser metadata consumed by the jc framework."""
    version = '1.0'
    description = 'route command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'

    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux']
    # commands that invoke this parser through jc's "magic" syntax
    magic_commands = ['route']


__version__ = info.version
def process(proc_data):
    """
    Final processing to conform to the schema.

    Converts the numeric route-table fields from strings to integers in
    place (entries are mutated) and returns the same list.  Values that
    cannot be parsed become None.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        List of dictionaries. Structured data with the following schema:

        [
          {
            "destination":     string,
            "gateway":         string,
            "genmask":         string,
            "flags":           string,
            "metric":          integer,
            "ref":             integer,
            "use":             integer,
            "mss":             integer,
            "window":          integer,
            "irtt":            integer,
            "iface":           string
          }
        ]
    """
    # Hoisted out of the loop: the original rebuilt this list per entry.
    int_keys = ('metric', 'ref', 'use', 'mss', 'window', 'irtt')
    for entry in proc_data:
        for key in int_keys:
            if key in entry:
                try:
                    entry[key] = int(entry[key])
                except ValueError:
                    # Unparseable values are normalized to None.
                    entry[key] = None

    return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    raw_output = []

    # First line of `route` output is a title banner; the second is the
    # column header row, which simple_table_parse expects in lowercase.
    cleandata = data.splitlines()[1:]

    # Guard against empty/blank input: the original indexed cleandata[0]
    # unconditionally and raised IndexError on empty data.
    if cleandata:
        cleandata[0] = cleandata[0].lower()
        raw_output = jc.parsers.universal.simple_table_parse(cleandata)

    if raw:
        return raw_output
    else:
        return process(raw_output)
| [
"kellyjonbrazil@gmail.com"
] | kellyjonbrazil@gmail.com |
7e3ced9ace84a2042505115765dc9b9879f9ec7e | 55b4fe0a6616b30c128b51a9918605050ce49f6d | /migrate_reverb | 2a0df049882f7873eb0353b8db62d960747cadec | [] | no_license | samhaug/ScS_reverb_setup | 783a4fb7c942a598f18dc6c9e3544aa5e2bbcafe | 05e96b9f871d25a1e7b5e9284083167993f56cec | refs/heads/master | 2021-01-12T03:35:45.657459 | 2017-06-24T17:24:07 | 2017-06-24T17:24:07 | 78,234,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,844 | #!/home/samhaug/anaconda2/bin/python
'''
==============================================================================
File Name : migrate_reverb.py
Purpose : Perform a migration to detect reflection coefficients of mid mantle
discontinuities. Must have access to a lookup table, waveform glossary,
data stripped of zeroth-order discontinuities.
See eq (14) of 'A Study of mid-mantle layering beneath the Western Pacific'
1989, Revenaugh & Jordan.
Creation Date : 14-03-2017
Last Modified : Tue 14 Mar 2017 11:54:11 AM EDT
Created By : Samuel M. Haugland
==============================================================================
'''
import numpy as np
import obspy
import seispy
import h5py
from matplotlib import pyplot as plt
from scipy.signal import correlate
from scipy.signal import tukey
def main():
    """Run the ScS-reverberation migration for one hard-coded station.

    Loads the wavelet glossary and travel-time lookup table (HDF5),
    preprocesses the synthetic seismograms, correlates one trace against
    every wavelet and plots the summed reflection-coefficient estimate
    versus depth.  Paths, the trace index (idx=3) and the depth grid are
    hard-coded.
    """
    wvlt_glossary = h5py.File('/home/samhaug/work1/ScS_reverb_sims/wave_glossary/prem_568_FJ_20160130.h5','r')
    lkup = h5py.File('/home/samhaug/work1/ScS_reverb_sims/lookup_tables/NA_prem_568_20160130.h5','r')
    st = obspy.read('/home/samhaug/work1/ScS_reverb_sims/mineos/prem_568_FJ/st_T.pk')
    # Double integration, 1 Hz resampling and 15-75 s bandpass filtering.
    st.integrate().detrend().integrate().detrend()
    st.interpolate(1)
    st.filter('bandpass',freqmax=1/15.,freqmin=1/75.,zerophase=True)
    st = seispy.data.align_on_phase(st,phase=['ScSScS'],a_min=False)
    #st.differentiate()
    st.normalize()
    # Window every trace around the ScSScS arrival (-400 s to +2400 s).
    for idx,tr in enumerate(st):
        st[idx] = seispy.data.phase_window(tr,phase=['ScSScS'],window=(-400,2400))
    # Only trace 3 is migrated below.
    idx=3
    # Inverted Tukey tapers that would mute known arrival windows.
    # NOTE(review): `ones` is built but never applied -- the line that
    # multiplies it into the data is commented out below.
    ones = np.ones(len(st[idx].data))
    ones[387:425] = 1+(-1*tukey(425-387,0.3))
    ones[632:669] = 1+(-1*tukey(669-632,0.3))
    ones[1299:1343] = 1+(-1*tukey(1343-1299,0.3))
    ones[1561:1600] = 1+(-1*tukey(1600-1561,0.3))
    ones[2221:2278] = 1+(-1*tukey(2278-2221,0.3))
    ones[2466:2524] = 1+(-1*tukey(2524-2466,0.3))
    #plt.plot(st[idx].data)
    #plt.plot(ones)
    #plt.show()
    #st[idx].data *= ones
    #depth = np.arange(10,2800,2)
    #depth = np.arange(900,1000,10)
    # Single candidate discontinuity depth (km).
    depth = np.array([670])
    stat = st[idx].stats.station
    corr_dict,wave_e,wvlt_len = correlate_sig(st[idx],wvlt_glossary)
    R_list = []
    # Sum the energy-normalized reflection estimates over all wavelets
    # for each candidate depth (eq. 14, Revenaugh & Jordan 1989).
    for h in depth:
        h_R = 0
        for keys in corr_dict:
            ScS2 = lkup[stat+'/ScS2'][:]
            lkup_t = lkup[stat+'/'+keys][:]
            shift = int(wvlt_len/2.)-58
            h_R += find_R(corr_dict[keys],h,lkup_t,ScS2,shift=shift,data=st[idx].data)/wave_e[keys]
        R_list.append(h_R)
    # Depth profile of the summed reflection coefficient, with reference
    # lines at the 220, 400 and 670 km discontinuities.
    plt.plot(np.array(R_list),depth,lw=2)
    plt.ylim(depth.max(),depth.min())
    plt.axhline(220,color='k')
    plt.axhline(400,color='k')
    plt.axhline(670,color='k')
    plt.xlim(-10,10)
    plt.grid()
    plt.show()
def correlate_sig(tr, wvlt_glos):
    """Cross-correlate a trace with every wavelet in the glossary.

    Returns (corr_by_name, energy_by_name, n): per-wavelet 'same'-mode
    correlations, per-wavelet energies <w, w>, and the sample length of
    the last wavelet visited (as in the original, which assumes all
    wavelets share one length).
    """
    corr_by_name = {}
    energy_by_name = {}
    for name in wvlt_glos:
        wavelet = wvlt_glos[name]
        energy_by_name[name] = np.dot(wavelet, wavelet)
        corr_by_name[name] = correlate(tr.data, wavelet, mode='same')
    return corr_by_name, energy_by_name, len(wavelet)
def find_R(corr_sig, h, lkup, ScS2, **kwargs):
    """Read the reflection-coefficient estimate for depth h from a
    correlation trace.

    Looks up the reverberation travel time for depth h, shows a debug
    plot, and returns the correlation sample at that time relative to
    the ScS2 arrival (0 if the index falls outside the trace).

    Fix: the original ended with `corr *= 1./denominator(wvlt_glos)`,
    which was unreachable (both try/except paths return first) and
    referenced the undefined names `corr` and `wvlt_glos`; that dead
    statement is removed.

    :param corr_sig: 1-D correlation trace
    :param h: candidate discontinuity depth
    :param lkup: (depth, time) lookup array for this wavelet
    :param ScS2: (depth, time) lookup array for the ScS2 reference phase
    :param shift: optional sample offset (keyword, default 0)
    :param data: optional raw data for the debug plot (keyword)
    """
    shift = kwargs.get('shift', 0)
    data = kwargs.get('data', np.zeros(5))
    # Travel time at the tabulated depth closest to h.
    t = lkup[np.argmin(np.abs(lkup[:, 0] - h)), 1]
    # NOTE(review): the row index into ScS2 is chosen by lkup's depth
    # column, not ScS2's own -- presumably both tables share a depth
    # grid; confirm.
    ScS2_time = ScS2[np.argmin(np.abs(lkup[:, 0] - h)), 1]
    # Debug visualization; fires on every call (likely a leftover).
    plot_corr(t, corr_sig, data, ScS2_time, shift)
    try:
        # +400 accounts for the 400 s pre-ScSScS window offset.
        return corr_sig[int(t - ScS2_time + 400 + shift)]
    except IndexError:
        return 0
def plot_corr(t, corr_sig, data, ScS2_time, shift):
    """Debug plot: the correlation trace over the raw data, with a
    vertical marker at the sample where the estimate is read."""
    figure, axis = plt.subplots(figsize=(25, 6))
    marker = t - ScS2_time + 400 + shift
    axis.plot(corr_sig, lw=2)
    axis.plot(data, alpha=0.5, color='k')
    axis.axvline(marker)
    plt.tight_layout()
    plt.show()
def denominator(wvlt_glos):
    """Total wavelet energy: the sum of <w, w> over every wavelet in the
    glossary ([...] materializes h5py datasets as arrays)."""
    return sum(np.dot(wvlt_glos[name][...], wvlt_glos[name][...])
               for name in wvlt_glos)
# Run the migration immediately on execution (no __main__ guard).
main()
| [
"samhaug@umich.edu"
] | samhaug@umich.edu | |
5c5a167f3d78f3d568304c19afe7a914241562ad | 9597cb1a23e082cf8950408e7fce72a8beff6177 | /src/pipeline.py | 4cb6a56808ebe9440a34c8c428a9f6d11039dcfd | [] | no_license | xiaomi388/sydw_wsl | 5653c981b3720c6fd496b5a12966bb9cee604878 | 459bbeadb288b4fb65e5d816b19b749bd447df4c | refs/heads/master | 2021-03-24T12:29:22.643163 | 2017-10-04T15:00:33 | 2017-10-04T15:00:33 | 105,512,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | import os
import csv
class reportPipeline(object):
def __init__(self, parameters):
flag = os.path.exists('output.csv')
self.outfile = open('output.csv', 'a')
self.outcsv = csv.writer(self.outfile)
if not flag:
self.outcsv.writerow(parameters)
def save(self, arguments):
self.outcsv.writerow(arguments)
| [
"xiaomi388@gmail.com"
] | xiaomi388@gmail.com |
247fe433152dd7d1247fc2bfb4b7d841a962c1cc | e6ead9c9489c1b97fb63dabb60e8083a76fe7e76 | /program/sandboxv2/server/tcp/components/send.py | bfc4e94cf1b48c7e407623d63777ac08d7d9f29a | [] | no_license | montarion/morsecode | a313471c2ccd40c62fa7249897ff58407c1bb03d | eed29720c1bb6ade102d8e8a39b4b1b188737681 | refs/heads/master | 2021-04-30T11:45:30.576645 | 2018-02-12T15:13:12 | 2018-02-12T15:13:12 | 121,256,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from socket import *
from time import sleep
class send:
    """One-shot TCP sender: connects to `ip` on port 13334 at construction
    and pushes a single Morse-code string.

    NOTE: the socket is closed after the first send() call, so each
    instance can transmit exactly one message (original behavior kept).
    The class name keeps its lower-case spelling for existing callers.
    """

    def __init__(self, ip):
        """Open a TCP connection to ip:13334."""
        self.ss = socket(AF_INET, SOCK_STREAM)
        self.ss.connect(((ip), 13334))

    def send(self, code):
        """Transmit `code` (UTF-8 encoded) and close the connection.

        Fix: the original left the socket open if send() raised; the
        try/finally guarantees the file descriptor is released either way.
        """
        print('ready to send')
        try:
            self.ss.send(code.encode())
            print('sent')
        finally:
            self.ss.close()
"jamirograntsaan@gmail.com"
] | jamirograntsaan@gmail.com |
e4de52a01a45251293a26a1950fbbfb56fc8bd34 | f571590e3c1787d183e00b81c408362e65671f76 | /namestring.py | 0e6c17addc2a6ac9c5fbce45bc29ba1924217a11 | [] | no_license | neymarthan/project1 | 0b3d108dd8eb4b6fa5093525d469d978faf88b88 | 5e07f9dff181bb310f3ce2c7818a8c6787d4b116 | refs/heads/master | 2022-12-26T08:44:53.464398 | 2020-10-06T09:14:29 | 2020-10-06T09:14:29 | 279,528,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | filename=input('Enter the filename: ')
if filename.endswith('.txt'):
print('That is the name of text file.')
elif filename.endswith('.py'):
print('That is the name of a Python source file.')
elif filename.endswith('.doc'):
print('That is the name of word processing document.')
else:
print('Unknown file type.') | [
"INE-02@Admins-iMac-5.local"
] | INE-02@Admins-iMac-5.local |
267efccef26c76fef97df4a0bb5bda3924f48090 | af47797c9518e12a00a8de5a379d5fa27f579c40 | /newbeercellar/login.py | e483db50fe8883ead45c750312921ea650f795fe | [
"MIT"
] | permissive | atlefren/newbeercellar | 29526154e74b4c613061c01eb59815e08cf03f1b | fcf5a174f45a3d21ce9613b88977a88d9bae4aa5 | refs/heads/master | 2021-01-20T08:47:03.976431 | 2015-04-26T16:14:42 | 2015-04-26T16:14:42 | 29,885,741 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | from flask import current_app, redirect, url_for, session
from flask.ext.login import login_user, logout_user
from flask_googlelogin import USERINFO_EMAIL_SCOPE
from newbeercellar import login_manager, app, googlelogin
from models import User
from util import get_or_create_default_cellar
@app.route("/login")
def login():
    """Start the Google OAuth flow by redirecting to Google's login URL."""
    target = googlelogin.login_url(scopes=[USERINFO_EMAIL_SCOPE])
    return redirect(target)
# Send anonymous users hitting a protected view through the same flow.
login_manager.unauthorized_handler(login)
@app.route('/logout')
def logout():
logout_user()
session.clear()
return redirect(url_for('index'))
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: resolve a stored user id to a User row."""
    db = current_app.db_session
    return db.query(User).get(userid)
@app.route('/oauth2callback')
def create_or_update_user(token, userinfo, **params):
    """OAuth callback: upsert the Google account as a local User, log the
    user in, and land them on their default cellar."""
    if params.get('error', False):
        # Google reported an error; bail out to the front page.
        return redirect(url_for('index'))
    db = current_app.db_session
    user = db.query(User).filter(User.google_id == userinfo['id']).first()
    if user:
        # Existing account: refresh the display name only.
        user.name = userinfo['name']
    else:
        email = userinfo['email']
        user = User(
            google_id=userinfo['id'],
            name=userinfo['name'],
            email=email,
            # Local-part of the email, dots stripped, as the username.
            username=email.split('@')[0].replace('.', '')
        )
        db.add(user)
    db.commit()
    db.flush()
    login_user(user)
    cellar = get_or_create_default_cellar(user)
    return redirect(url_for(
        'view_cellar',
        username=user.username,
        cellar_id=cellar.id
    ))
| [
"atle@frenviksveen.net"
] | atle@frenviksveen.net |
cfc8baabe5fbc4634ddd6a8cb4267db77e24b358 | 000a4b227d970cdc6c8db192f4437698cb782721 | /python/helpers/typeshed/stubs/passlib/passlib/ifc.pyi | 80467bfea35e5977019150a5275c3534e3f1145c | [
"Apache-2.0",
"MIT"
] | permissive | trinhanhngoc/intellij-community | 2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d | 1d4a962cfda308a73e0a7ef75186aaa4b15d1e17 | refs/heads/master | 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 | Apache-2.0 | 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null | UTF-8 | Python | false | false | 1,061 | pyi | import abc
from abc import abstractmethod
from typing import Any
class PasswordHash(metaclass=abc.ABCMeta):
    """Stub for passlib's abstract hash-handler interface."""
    # True for handlers that mark an account as disabled rather than
    # storing a real hash (see DisabledHash).
    is_disabled: bool
    # Truncation policy attributes -- presumably how many secret bytes the
    # scheme actually uses and how over-long secrets are treated; semantics
    # per the passlib PasswordHash API docs.
    truncate_size: Any
    truncate_error: bool
    truncate_verify_reject: bool
    @classmethod
    @abstractmethod
    def hash(cls, secret, **setting_and_context_kwds): ...
    # Legacy spelling of hash() kept for backwards compatibility.
    @classmethod
    def encrypt(cls, *args, **kwds): ...
    @classmethod
    @abstractmethod
    def verify(cls, secret, hash, **context_kwds): ...
    # Returns a subclass customised with the given settings.
    @classmethod
    @abstractmethod
    def using(cls, relaxed: bool = ..., **kwds): ...
    @classmethod
    def needs_update(cls, hash, secret: Any | None = ...): ...
    @classmethod
    @abstractmethod
    def identify(cls, hash): ...
    @classmethod
    def genconfig(cls, **setting_kwds): ...
    @classmethod
    def genhash(cls, secret, config, **context) -> None: ...
    deprecated: bool
class DisabledHash(PasswordHash, metaclass=abc.ABCMeta):
    """Stub for handlers representing a disabled account instead of a hash."""
    is_disabled: bool
    # Wrap/unwrap an existing hash into its disabled form and back.
    @classmethod
    def disable(cls, hash: Any | None = ...): ...
    @classmethod
    def enable(cls, hash) -> None: ...
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
7a6660a4ebd2a02738361cb692acd4b4451abde7 | 4fc86f5c444f52619f9f748c9bad5bf3e0e2c0b2 | /megatron/data/test/test_indexed_dataset.py | 78622d275de5b38bbe02b1c1827d34036aff0eb1 | [
"MIT",
"Apache-2.0"
] | permissive | Xianchao-Wu/megatron2 | 95ea620b74c66e51f9e31075b1df6bb1b761678b | f793c37223b32051cb61d3b1d5661dddd57634bf | refs/heads/main | 2023-08-17T03:42:31.602515 | 2021-09-24T05:12:00 | 2021-09-24T05:12:00 | 330,527,561 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,739 | py | # This file isn't really a formal automated test, it's just a place to
# put some code used during development and manual testing of
# indexed_dataset.
import os
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, "../../../"))
from megatron.data import indexed_dataset
from megatron.tokenizer import build_tokenizer
import argparse
#import os
#import sys
import torch
#script_dir = os.path.dirname(os.path.realpath(__file__))
#sys.path.append(os.path.join(script_dir, "../../../"))
def test_indexed_dataset(args):
    """Print up to ``args.count`` documents of the dataset, detokenized."""
    dataset = indexed_dataset.make_dataset(args.data, args.dataset_impl)
    tokenizer = build_tokenizer(args)
    print('len(ds.doc_idx)={}'.format(len(dataset.doc_idx)))
    print('len(ds)={}'.format(len(dataset)))
    print('ds.doc_idx[-1]={}'.format(dataset.doc_idx[-1]))
    if dataset.supports_prefetch:
        # Prefetch everything up front (assumes the test dataset is small).
        dataset.prefetch(range(len(dataset)))
    if args.count > len(dataset.doc_idx) - 1:
        args.count = len(dataset.doc_idx) - 1
    for doc in range(args.count):
        begin = dataset.doc_idx[doc]
        finish = dataset.doc_idx[doc + 1]
        sentences = dataset[begin:finish]
        print(f"Document {doc}:")
        print("--------------")
        for sentence in sentences:
            assert len(sentence) > 0
            token_ids = sentence.data.tolist()
            print(tokenizer.detokenize(token_ids))
            print("---")
def test_indexed_dataset_get(args):
    """Exercise ``get`` with offset/length slicing on the first document."""
    dataset = indexed_dataset.make_dataset(args.data, args.dataset_impl)
    tokenizer = build_tokenizer(args)
    first_size = dataset.sizes[0]
    print(f"size: {first_size}")
    # Whole document.
    full = dataset.get(0)
    print(full)
    print(tokenizer.detokenize(full.data.tolist()))
    print("---")
    # Last 10 tokens.
    tail = dataset.get(0, offset=first_size - 10)
    print(tail)
    print(tokenizer.detokenize(tail.data.tolist()))
    # First 10 tokens.
    head = dataset.get(0, length=10)
    print(head)
    print(tokenizer.detokenize(head.data.tolist()))
    # 8 tokens starting at offset 2.
    middle = dataset.get(0, offset=2, length=8)
    print(middle)
    print(tokenizer.detokenize(middle.data.tolist()))
# def test_albert_dataset(args):
# # tokenizer = FullBertTokenizer(args.vocab, do_lower_case=True)
# # idataset = indexed_dataset.make_dataset(args.data, args.dataset_impl)
# # ds = AlbertDataset(idataset, tokenizer)
# ds = AlbertDataset.from_paths(args.vocab, args.data, args.dataset_impl,
# args.epochs, args.max_num_samples,
# args.masked_lm_prob, args.seq_length,
# args.short_seq_prob, args.seed)
# truncated = 0
# total = 0
# for i, s in enumerate(ds):
# ids = s['text']
# tokens = ds.tokenizer.convert_ids_to_tokens(ids)
# print(tokens)
# if i >= args.count-1:
# exit()
def main():
    """Parse CLI options, build args expected by megatron, run both tests."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, help='prefix to data files')
    parser.add_argument('--dataset-impl', type=str, default='infer',
                        choices=['lazy', 'cached', 'mmap', 'infer'])
    parser.add_argument('--count', type=int, default=10,
                        help='Number of samples/documents to print')
    # Tokenizer options mirror megatron.tokenizer.build_tokenizer's needs.
    group = parser.add_argument_group(title='tokenizer')
    group.add_argument('--tokenizer-type', type=str, required=True,
                       choices=['BertWordPieceLowerCase', 'BertWordPieceCase', 'BertWordPieceCaseJp',
                                'GPT2BPETokenizer', 'GPT2BPETokenizerJp', 'GPT2BPETokenizerJpMecab'],
                       help='What type of tokenizer to use.')
    group.add_argument('--vocab-file', type=str, default=None,
                       help='Path to the vocab file')
    group.add_argument('--merge-file', type=str, default=None,
                       help='Path to the BPE merge file (if necessary).')
    group.add_argument('--emoji-file', type=str, default=None,
                       help='Path to the emoji file for Japanese tokenization')
    group.add_argument('--mecab-dict-path', type=str, default=None,
                       help='path to the mecab dict file for japanese tokenization')
    parser.add_argument('--epochs', type=int, default=5,
                        help='Number of epochs to plan for')
    parser.add_argument('--max-num-samples', type=int, default=None,
                        help='Maximum number of samples to plan for')
    parser.add_argument('--masked-lm-prob', type=float, default=0.15,
                        help='probability of masking tokens')
    parser.add_argument('--seq-length', type=int, default=512,
                        help='maximum sequence length')
    parser.add_argument('--short-seq-prob', type=float, default=0.1,
                        help='probability of creating a short sequence')
    parser.add_argument('--seed', type=int, default=1234,
                        help='random seed')
    args = parser.parse_args()
    # Extra attributes build_tokenizer expects beyond the CLI options.
    args.rank = 0
    args.make_vocab_size_divisible_by = 128
    args.tensor_model_parallel_size = 1
    if args.dataset_impl == "infer":
        # Detect lazy/cached/mmap from the data files themselves.
        args.dataset_impl = indexed_dataset.infer_dataset_impl(args.data)
    # test_albert_dataset(args)
    print('-'*10 + 'test_indexed_dataset_get(args)' + '-'*10)
    test_indexed_dataset_get(args)
    print('-'*30)
    print('-'*10 + 'test_indexed_dataset(args)' + '-'*10)
    test_indexed_dataset(args)
if __name__ == "__main__":
main()
| [
"wuxianchao@gmail.com"
] | wuxianchao@gmail.com |
ee0b17ad5ca05895993677298cc9cc9d610b1be4 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/586613e9c4dd65b64bfcffb16c008562f967ba42-<test_cdist_calling_conventions>-bug.py | 98025efd20cb767648c554c73b6b99fa2470e902 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | def test_cdist_calling_conventions(self):
    # Exercise every metric on stored observation matrices of each dtype,
    # verifying all cdist calling conventions agree with each other.
    for eo_name in self.rnd_eo_names:
        # Non-contiguous views: reversed columns and a strided row subset.
        X1 = eo[eo_name][:, ::(- 1)]
        X2 = eo[eo_name][:(- 3):2]
        for metric in _metrics:
            if (verbose > 2):
                print('testing: ', metric, ' with: ', eo_name)
            if ((metric == 'yule') and ('bool' not in eo_name)):
                # yule is only defined for boolean data; skip other dtypes.
                continue
            self._check_calling_conventions(X1, X2, metric)
            # Metrics taking extra parameters are re-checked with those
            # parameters passed explicitly.
            if (metric == 'wminkowski'):
                w = (1.0 / X1.std(axis=0))
                self._check_calling_conventions(X1, X2, metric, w=w)
            elif (metric == 'seuclidean'):
                X12 = np.vstack([X1, X2]).astype(np.double)
                V = np.var(X12, axis=0, ddof=1)
                self._check_calling_conventions(X1, X2, metric, V=V)
            elif (metric == 'mahalanobis'):
                X12 = np.vstack([X1, X2]).astype(np.double)
                V = np.atleast_2d(np.cov(X12.T))
                VI = np.array(np.linalg.inv(V).T)
                self._check_calling_conventions(X1, X2, metric, VI=VI)
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
4222606e1a7a16d3a76ac4e443a33e885993f6a7 | 915496215d4c4ae4c952dc1839e73d112dab0460 | /manage/fabriccloud/vagrant.py | 3fa1f48de523f43f2eed7851619dd4db94fc96c7 | [] | no_license | nicholsn/simple-application-framework | 4afa6f71af68968e45364fd0691c54be9acf0f71 | a62f332bdebc9fff31ad58bdcb68cff990bdc663 | refs/heads/master | 2021-01-18T01:20:40.474399 | 2014-03-18T20:41:53 | 2014-03-18T20:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | __author__ = 'stonerri'
from base import *
def setDefaults():
    """Point fabric's env at the local Vagrant box (vagrant user, forwarded SSH port)."""
    env.user = 'vagrant'
    env.hosts = ['127.0.0.1']
    env.port = 2200
    # Ask Vagrant which private key it generated and reuse it for SSH.
    ssh_config = local('vagrant ssh-config | grep IdentityFile', capture=True)
    env.key_filename = ssh_config.split()[1]
def systemInformation():
    # Intentionally a no-op for the Vagrant target.
    pass
def sync():
    # No explicit sync step needed -- per the message, Vagrant's own folder
    # sharing covers it. (Python 2 print statement; file is py2-only.)
    print 'vagrant gets sync for free'
"stonerri@gmail.com"
] | stonerri@gmail.com |
d2c84c2cfac6a26d53336cc831694263d2a11349 | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/munition/component/shared_enhanced_charge_composition.py | eebfa2798168c09b5f44a3fe87ea1a262e586d88 | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 475 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the draft-schematic intangible for enhanced charge composition."""
    schematic = Intangible()
    schematic.template = "object/draft_schematic/munition/component/shared_enhanced_charge_composition.iff"
    schematic.attribute_template_id = -1
    schematic.stfName("string_id_table","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return schematic
"rwl3564@rit.edu"
] | rwl3564@rit.edu |
ef2f8af259d7490fa0b6e5f825bd56b216c07e92 | 597b82737635e845fd5360e191f323669af1b2ae | /08_full_django/login_registration_2/login_registration_2/wsgi.py | 663215d1fbccb9d37f487ac45f6c023f09112572 | [] | no_license | twknab/learning-python | 1bd10497fbbe181a26f2070c147cb2fed6955178 | 75b76b2a607439aa2d8db675738adf8d3b8644df | refs/heads/master | 2021-08-08T08:50:04.337490 | 2017-11-10T00:28:45 | 2017-11-10T00:28:45 | 89,213,845 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | """
WSGI config for login_registration project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "login_registration_2.settings")
application = get_wsgi_application()
| [
"natureminded@users.noreply.github.com"
] | natureminded@users.noreply.github.com |
1abac6f5607e39fabc23ddc8411528b74bb9765f | 5c94e032b2d43ac347f6383d0a8f0c03ec3a0485 | /Push/sysex.py | 6322a4228abd634fc27012bc7522b1d1ea89d65f | [] | no_license | Elton47/Ableton-MRS-10.1.13 | 997f99a51157bd2a2bd1d2dc303e76b45b1eb93d | 54bb64ba5e6be52dd6b9f87678ee3462cc224c8a | refs/heads/master | 2022-07-04T01:35:27.447979 | 2020-05-14T19:02:09 | 2020-05-14T19:02:09 | 263,990,585 | 0 | 0 | null | 2020-05-14T18:12:04 | 2020-05-14T18:12:03 | null | UTF-8 | Python | false | false | 3,655 | py | # uncompyle6 version 3.6.7
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.17 (default, Dec 23 2019, 21:25:33)
# [GCC 4.2.1 Compatible Apple LLVM 11.0.0 (clang-1100.0.33.16)]
# Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push/sysex.py
# Compiled at: 2020-01-09 15:21:34
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import group, in_range
from pushbase.touch_strip_element import TouchStripModes, TouchStripStates
# Common sysex header (F0 + Ableton/Push manufacturer and device bytes);
# every Push message below is built on top of it.
START = (240, 71, 127, 21)
# Display control: clear / write one of the four text lines.
CLEAR_LINE1 = START + (28, 0, 0, 247)
CLEAR_LINE2 = START + (29, 0, 0, 247)
CLEAR_LINE3 = START + (30, 0, 0, 247)
CLEAR_LINE4 = START + (31, 0, 0, 247)
WRITE_LINE1 = START + (24, 0, 69, 0)
WRITE_LINE2 = START + (25, 0, 69, 0)
WRITE_LINE3 = START + (26, 0, 69, 0)
WRITE_LINE4 = START + (27, 0, 69, 0)
# Handshake messages sent when the script attaches/detaches.
WELCOME_MESSAGE = START + (1, 1, 247)
GOOD_BYE_MESSAGE = START + (1, 0, 247)
# Pad sensitivity / parameter message prefixes (payload appended by callers).
ALL_PADS_SENSITIVITY_PREFIX = START + (93, 0, 32)
PAD_SENSITIVITY_PREFIX = START + (90, 0, 33)
PAD_PARAMETER_PREFIX = START + (71, 0, 9)
# Defaults consumed by make_pad_parameter_message below.
DEFAULT_PEAK_SAMPLING_TIME = 50
DEFAULT_AFTERTOUCH_THRESHOLD = 0
DEFAULT_AFTERTOUCH_GATE_TIME = 500
SET_AFTERTOUCH_MODE = START + (92, 0, 1)
POLY_AFTERTOUCH = (0, )
MONO_AFTERTOUCH = (1, )
MODE_CHANGE = START + (98, 0, 1)
def make_pad_parameter_message(aftertouch_threshold=DEFAULT_AFTERTOUCH_THRESHOLD, peak_sampling_time=DEFAULT_PEAK_SAMPLING_TIME, aftertouch_gate_time=DEFAULT_AFTERTOUCH_GATE_TIME):
    """Encode the pad parameters as the sysex payload: two 4-nibble values
    followed by the 7-bit aftertouch threshold."""
    assert 0 <= aftertouch_threshold < 128
    sampling_nibbles = to_bytes(peak_sampling_time, 4)
    gate_nibbles = to_bytes(aftertouch_gate_time, 4)
    return sampling_nibbles + gate_nibbles + (aftertouch_threshold,)
def to_sysex_int(number, unused_parameter_name):
    """Split *number* into four 4-bit nibbles, most significant first.

    The second argument only labels the value at the call site.
    """
    return tuple((number >> shift) & 15 for shift in (12, 8, 4, 0))
# Pad calibration message; each labelled value is packed as four nibbles.
CALIBRATION_SET = START + (87, 0, 20) + to_sysex_int(215, 'Preload Scale Factor') + to_sysex_int(1000, 'Recalibration Interval') + to_sysex_int(200, 'Stuck Pad Detection Threshold') + to_sysex_int(0, 'Stuck Pad NoteOff Threshold Adder') + to_sysex_int(200, 'Pad Ignore Time') + (247, )
# Standard MIDI identity request and the Push-specific reply prefix.
IDENTITY_ENQUIRY = (240, 126, 0, 6, 1, 247)
IDENTITY_PREFIX = (240, 126, 0, 6, 2, 71, 21, 0, 25)
# Challenge/response prefixes for the hardware dongle check.
DONGLE_ENQUIRY_PREFIX = START + (80, )
DONGLE_PREFIX = START + (81, )
def make_presentation_message(application):
    """Build the sysex message announcing Live's version to the hardware."""
    version = (
        application.get_major_version(),
        application.get_minor_version(),
        application.get_bugfix_version(),
    )
    return START + (96, 0, 4, 65) + version + (247,)
# Touch strip modes in hardware value order: the list index of a mode is
# the byte sent in make_touch_strip_mode_message.
TOUCHSTRIP_MODE_TO_VALUE = [
    TouchStripModes.CUSTOM_PITCHBEND,
    TouchStripModes.CUSTOM_VOLUME,
    TouchStripModes.CUSTOM_PAN,
    TouchStripModes.CUSTOM_DISCRETE,
    TouchStripModes.CUSTOM_FREE,
    TouchStripModes.PITCHBEND,
    TouchStripModes.VOLUME,
    TouchStripModes.PAN,
    TouchStripModes.DISCRETE,
    TouchStripModes.MODWHEEL]
def make_touch_strip_mode_message(mode):
    """Sysex message selecting the given touch strip behaviour."""
    mode_value = TOUCHSTRIP_MODE_TO_VALUE.index(mode)
    return START + (99, 0, 1, mode_value, 247)
TOUCHSTRIP_STATE_TO_VALUE = {TouchStripStates.STATE_OFF: 0,
TouchStripStates.STATE_HALF: 1,
TouchStripStates.STATE_FULL: 3}
def make_touch_strip_light_message(state):
    """Pack the per-LED touch strip states into an 8-byte sysex payload."""
    # Translate symbolic states to 2-bit hardware values.
    state = [ TOUCHSTRIP_STATE_TO_VALUE[s] for s in state ]
    group_size = 3
    # Pack each group of three 2-bit values into one 7-bit-safe byte
    # (value i shifted left by 2*i). Python 2 only: tuple-unpacking lambda
    # parameters and the bare builtin reduce.
    bytes = [ reduce(lambda byte, (i, state): byte | state << 2 * i, enumerate(state_group), 0) for state_group in group(state, group_size)
    ]
    return START + (100, 0, 8) + tuple(bytes) + (247, )
def to_bytes(number, size):
    u"""
    turns the given value into tuple of 4bit bytes,
    ordered from most significant to least significant byte
    """
    # number must fit in size nibbles (in_range is half-open on the right).
    assert in_range(number, 0, 1 << size * 4)
    # Walk the shift amounts from the top nibble down to 0 (py2 xrange).
    return tuple([ number >> offset & 15 for offset in xrange((size - 1) * 4, -1, -4) ])
"ahmed.emerah@icloud.com"
] | ahmed.emerah@icloud.com |
aa7b172ff58d05379109ded6c2595ea34f13b028 | 673317f52e04401fd8c5f89282b56120ad6315df | /src/pretix/plugins/ticketoutputpdf/signals.py | 4b1ba025fab0a71b28fe112addcedb642a853ef0 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | barseghyanartur/pretix | 0218a20bc0f7a1bac59aa03bc83448c33eccdbff | 05bafd0db5a9048f585cc8431b92851e15ba87eb | refs/heads/master | 2020-03-18T22:17:20.328286 | 2018-05-29T08:39:41 | 2018-05-29T08:39:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | from functools import partial
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from pretix.base.models import QuestionAnswer
from pretix.base.signals import ( # NOQA: legacy import
event_copy_data, layout_text_variables, register_data_exporters,
register_ticket_outputs,
)
from pretix.presale.style import ( # NOQA: legacy import
get_fonts, register_fonts,
)
@receiver(register_ticket_outputs, dispatch_uid="output_pdf")
def register_ticket_outputs(sender, **kwargs):
    """Signal receiver: expose the PDF ticket output provider.

    NOTE: the function deliberately reuses the name of the signal it is
    connected to; the decorator has already bound the signal object.
    """
    from .ticketoutput import PdfTicketOutput
    return PdfTicketOutput
@receiver(register_data_exporters, dispatch_uid="dataexport_pdf")
def register_data(sender, **kwargs):
    """Signal receiver: expose the "all tickets" PDF data exporter."""
    # Imported lazily inside the receiver rather than at module load.
    from .exporters import AllTicketsPDF
    return AllTicketsPDF
def get_answer(op, order, event, question_id):
    """Return the order position's answer to a question as HTML-ish text.

    Newlines become ``<br/>`` line breaks; missing answers yield "".
    """
    try:
        answer = op.answers.get(question_id=question_id)
    except QuestionAnswer.DoesNotExist:
        return ""
    return str(answer).replace("\n", "<br/>\n")
@receiver(layout_text_variables, dispatch_uid="pretix_ticketoutputpdf_layout_text_variables_questions")
def variables_from_questions(sender, *args, **kwargs):
    """Expose one ``question_<pk>`` layout text variable per event question."""
    variables = {}
    for question in sender.questions.all():
        key = 'question_{}'.format(question.pk)
        variables[key] = {
            'label': _('Question: {question}').format(question=question.question),
            'editor_sample': _('<Answer: {question}>').format(question=question.question),
            # Bind the pk now; get_answer(op, order, event) is filled in later.
            'evaluate': partial(get_answer, question_id=question.pk),
        }
    return variables
@receiver(signal=event_copy_data, dispatch_uid="pretix_ticketoutputpdf_copy_data")
def event_copy_data_receiver(sender, other, question_map, **kwargs):
    """When an event is copied, remap question pks inside the PDF layout."""
    layout = sender.settings.get('ticketoutput_pdf_layout', as_type=list)
    if not layout:
        return
    for element in layout:
        if element['type'] != 'textarea':
            continue
        content = element['content']
        if content.startswith('question_'):
            # Replace the old question pk with its copy's pk.
            old_pk = int(content[9:])
            element['content'] = 'question_{}'.format(question_map.get(old_pk, 0).pk)
    sender.settings.set('ticketoutput_pdf_layout', list(layout))
| [
"mail@raphaelmichel.de"
] | mail@raphaelmichel.de |
37a055a130168f8bb3f4a7ce8dc15be15511155c | ac81f7f0160571a7e601d9808d424d2c407573b6 | /0392-Is-Subsequence.py | 3c57686253a10c3e7ae98adb8e18d2e9278d9c57 | [] | no_license | nirmalnishant645/LeetCode | 61d74c152deb0e7fb991065ee91f6f7102d7bbc6 | 8bdb4583187ee181ca626063d7684dcc64c80be3 | refs/heads/master | 2022-08-14T04:10:23.110116 | 2022-07-13T06:27:18 | 2022-07-13T06:27:18 | 227,960,574 | 53 | 19 | null | 2021-01-16T17:26:30 | 2019-12-14T03:31:54 | Python | UTF-8 | Python | false | false | 1,103 | py | '''
Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
s = "abc", t = "ahbgdc"
Return true.
Example 2:
s = "axc", t = "ahbgdc"
Return false.
Follow up:
If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you want to check one by one to see if T has its subsequence. In this scenario, how would you change your code?
'''
class Solution:
    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True if s can be formed by deleting characters from t."""
        remaining = iter(t)
        # ``c in remaining`` consumes the iterator up to and including the
        # first match, so relative order is preserved automatically.
        return all(c in remaining for c in s)
| [
"noreply@github.com"
] | nirmalnishant645.noreply@github.com |
2ddee8e373f05456f1e32c5856b3f3f9bffed03c | 90b5a86b07745561267fde367259b9f48da3ca74 | /apps/categorys/migrations/0002_auto_20180507_1612.py | 6079d5926e69d480553e84fad584aec7caa588d6 | [] | no_license | enjoy-binbin/pyblog | fb8bfc6017595412850faf20ba4ce0c8e5ee761b | 47e93d67dbfd8acf58bfb7e2e15c0c6cce32ef6e | refs/heads/master | 2021-08-11T06:38:52.959002 | 2019-02-06T13:46:50 | 2019-02-06T13:46:50 | 166,924,461 | 2 | 0 | null | 2021-08-09T20:47:43 | 2019-01-22T03:59:51 | Python | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-05-07 16:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: runs after the app's initial schema migration and
    # re-declares Category.desc as a TextField with a Chinese verbose name.
    dependencies = [
        ('categorys', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='desc',
            field=models.TextField(verbose_name='分类描述'),
        ),
    ]
| [
"binloveplay1314@qq.com"
] | binloveplay1314@qq.com |
0d33b3146a5d87f4bd3f2197443f3de225c55f6e | 1e9ef3df6b65d53127f1858dbf625cd31874fb11 | /REST-api/rest/migrations/0002_auto_20161022_2237.py | 35388c20b14429bdd7fd8eda173603248d6e7f11 | [] | no_license | Matvey-Kuk/openbmp-as-path-planet | 9007c9b86009b4576638b53f45e35d429c1ed55f | ddd2c83861aa7540bd9eb83e75b5072f1e20d322 | refs/heads/master | 2021-01-12T14:58:48.600762 | 2016-10-23T02:16:00 | 2016-10-23T02:16:00 | 71,657,356 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-22 22:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: introduces the Prefix model, renames
    # PathUpdate.name to path, and links each PathUpdate to a Prefix.
    dependencies = [
        ('rest', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Prefix',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prefix', models.CharField(max_length=1000)),
            ],
        ),
        migrations.RenameField(
            model_name='pathupdate',
            old_name='name',
            new_name='path',
        ),
        migrations.AddField(
            model_name='pathupdate',
            name='prefix',
            # default=0 backfills existing rows once, then the default is dropped.
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='paths', to='rest.Prefix'),
            preserve_default=False,
        ),
    ]
| [
"motakuk@gmail.com"
] | motakuk@gmail.com |
42341246ba9ecfedf89645e2ee529b7cfc00baef | e79c4d4a633e8578ef8fbadd5937140ad1761e5b | /src/wellsfargo/models/transfers.py | a85e657729c878b01103128f49763595bd26e565 | [
"ISC"
] | permissive | thelabnyc/django-oscar-wfrs | ca9a4737e8b6575bde21705179c67212ad47df16 | 61548f074ffd7ce41e31b3bc9a571d569a8f8248 | refs/heads/master | 2023-05-26T14:41:43.366400 | 2023-05-17T17:35:20 | 2023-05-17T17:35:20 | 59,534,130 | 1 | 2 | ISC | 2022-12-08T05:06:17 | 2016-05-24T02:32:09 | Python | UTF-8 | Python | false | false | 3,002 | py | from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.functional import cached_property
from oscar.core.loading import get_model
from oscar.models.fields import NullCharField
from ..core.constants import (
TRANS_TYPE_AUTH,
TRANS_TYPES,
TRANS_STATUSES,
)
from .mixins import AccountNumberMixin
class TransferMetadata(AccountNumberMixin, models.Model):
    """
    Store WFRS specific metadata about a transfer
    """
    # User who requested the transfer, when known.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_("Requesting User"),
        related_name="wfrs_transfers",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    merchant_name = NullCharField(_("Merchant Name"), max_length=200)
    merchant_num = NullCharField(_("Merchant Number"), max_length=200)
    # Matches payment.Transaction.reference; the join key back to Oscar.
    merchant_reference = models.CharField(max_length=128, null=True)
    amount = models.DecimalField(decimal_places=2, max_digits=12)
    # One of TRANS_TYPES (e.g. authorization); see core.constants.
    type_code = models.CharField(
        _("Transaction Type"), choices=TRANS_TYPES, max_length=2
    )
    ticket_number = models.CharField(
        _("Ticket Number"), null=True, blank=True, max_length=12
    )
    # Kept (SET_NULL) even if the plan row is later deleted.
    financing_plan = models.ForeignKey(
        "wellsfargo.FinancingPlan",
        verbose_name=_("Plan Number"),
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
    )
    auth_number = models.CharField(
        _("Authorization Number"), null=True, blank=True, max_length=6, default="000000"
    )
    status = models.CharField(_("Status"), choices=TRANS_STATUSES, max_length=2)
    message = models.TextField(_("Message"))
    disclosure = models.TextField(_("Disclosure"))
    created_datetime = models.DateTimeField(_("Created"), auto_now_add=True)
    modified_datetime = models.DateTimeField(_("Modified"), auto_now=True)
    @classmethod
    def get_by_oscar_transaction(cls, transaction, type_code=TRANS_TYPE_AUTH):
        """Return the newest transfer recorded for an Oscar transaction, or None."""
        return (
            cls.objects.filter(merchant_reference=transaction.reference)
            .filter(type_code=type_code)
            .order_by("-created_datetime")
            .first()
        )
    @property
    def type_name(self):
        # Human-readable label for type_code; None for unknown codes.
        return dict(TRANS_TYPES).get(self.type_code)
    @property
    def status_name(self):
        return dict(TRANS_STATUSES).get(self.status)
    @property
    def financing_plan_number(self):
        return self.financing_plan.plan_number if self.financing_plan else None
    @cached_property
    def order(self):
        # Cached per instance: resolving the order hits the database twice.
        return self.get_order()
    def get_oscar_transaction(self):
        """Return the Oscar payment Transaction for this transfer, or None."""
        Transaction = get_model("payment", "Transaction")
        try:
            return Transaction.objects.get(reference=self.merchant_reference)
        except Transaction.DoesNotExist:
            return None
    def get_order(self):
        """Return the Oscar order this transfer paid for, or None."""
        transaction = self.get_oscar_transaction()
        if not transaction:
            return None
        return transaction.source.order
| [
"crgwbr@gmail.com"
] | crgwbr@gmail.com |
6a702164df517737cd177ab547688f038cb51aa1 | 57ddfddd1e11db649536a8ed6e19bf5312d82d71 | /AtCoder/ABC0/ABC038/ABC038-C.py | 5004183698b5aec8e51581e7b45a0868ffd3856f | [] | no_license | pgDora56/ProgrammingContest | f9e7f4bb77714dc5088c2287e641c0aa760d0f04 | fdf1ac5d1ad655c73208d98712110a3896b1683d | refs/heads/master | 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | n = int(input())
a = list(map(int, input().split()))
# Trailing 0 sentinel guarantees the final increasing run is flushed below.
a.append(0)
prev = 10**6
count = 0
# Every element is a run of length 1 by itself; start from n.
total = n
for v in a:
    if prev >= v:
        # An increasing run just ended after `count` extensions; it adds
        # count*(count+1)/2 longer subsegments. Use integer floor division
        # instead of int(count*(count+1)/2): no float round-trip, and exact
        # for arbitrarily large counts.
        total += count * (count + 1) // 2
        count = 0
    else:
        count += 1
    prev = v
print(total)
"doradora.prog@gmail.com"
] | doradora.prog@gmail.com |
68128ddef976c370dc87279299e61abaac08163a | 3f0410647ec6f7f597bb2adcc169b30c85c154bf | /dataduct/pipeline/default_object.py | 78d7251db70eb9c26c751ed787d2229b1fd9ebf8 | [
"Apache-2.0"
] | permissive | EverFi/dataduct | 5485239cc72a6aee4145634bf95a1dc5e67b28cd | 797cb719e6c2abeda0751ada3339c72bfb19c8f2 | refs/heads/staging | 2023-07-20T14:59:00.342480 | 2023-03-21T21:11:56 | 2023-03-21T21:11:56 | 96,341,718 | 3 | 0 | NOASSERTION | 2023-07-12T20:29:12 | 2017-07-05T16:54:48 | Python | UTF-8 | Python | false | false | 1,403 | py | """
Pipeline object class for default metadata
"""
from ..config import Config
from ..utils import constants as const
from .pipeline_object import PipelineObject
config = Config()
ROLE = config.etl['ROLE']
RESOURCE_ROLE = config.etl['RESOURCE_ROLE']
MAX_ACTIVE_INSTANCES = config.etl.get('MAX_ACTIVE_INSTANCES', const.ONE)
class DefaultObject(PipelineObject):
    """Default object added to all pipelines
    """
    def __init__(self, id, pipeline_log_uri, sns=None, scheduleType='cron',
                 failureAndRerunMode='CASCADE', **kwargs):
        """Constructor for the DefaultObject class

        Args:
            id(str): must be 'Default' for this class
            pipeline_log_uri(str): S3 URI under which pipeline logs are written
            sns(sns): notify on failure
            scheduleType(str): frequency type for the pipeline
            failureAndRerunMode(str): aws input argument for failure mode
            **kwargs(optional): Keyword arguments directly passed to base class

        Note:
            id must be Default for this object
        """
        # Forward **kwargs to the base class: the docstring always promised
        # this, but previously they were accepted and silently dropped.
        super(DefaultObject, self).__init__(
            id='Default',  # This should always have the default id
            scheduleType=scheduleType,
            failureAndRerunMode=failureAndRerunMode,
            role=ROLE,
            resourceRole=RESOURCE_ROLE,
            maxActiveInstances=MAX_ACTIVE_INSTANCES,
            pipelineLogUri=pipeline_log_uri,
            onFail=sns,
            **kwargs
        )
| [
"sb2nov@gmail.com"
] | sb2nov@gmail.com |
03e6521dbdeeba4a2d45ddaf701bf0485213ebac | d4df3c14cea021ab95dc208e915e88383f3c7371 | /Payload_Type/poseidon/mythic/agent_functions/jsimport.py | 755a354107c0d6348d572365a4a32d263f0633cf | [] | no_license | a32567901/poseidon | 79537ac7f082698137c7f77c746ecdc42ddc89f9 | 6c7d0bf52b67a952fa35a821d7c2d3e5a35cafd7 | refs/heads/master | 2023-06-18T06:25:17.575943 | 2021-07-12T20:44:03 | 2021-07-12T20:44:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from mythic_payloadtype_container.MythicCommandBase import *
import base64
import sys
import json
from mythic_payloadtype_container.MythicRPC import *
class JsImportArguments(TaskArguments):
    """Argument definition for ``jsimport``: a single JXA script file upload."""
    def __init__(self, command_line):
        super().__init__(command_line)
        self.args = {
            "file_id": CommandParameter(
                name="JXA Script to Load",
                type=ParameterType.File,
                description="Select the JXA Script to load into memory",
                ui_position=1
            ),
        }
    async def parse_arguments(self):
        # Operator input arrives as a JSON string; hydrate self.args from it.
        self.load_args_from_json_string(self.command_line)
class JsImportCommand(CommandBase):
    # Declarative metadata consumed by the Mythic framework.
    cmd = "jsimport"
    needs_admin = False
    help_cmd = "jsimport"
    description = "Upload a script into memory for use with jsimport_call"
    version = 1
    author = "@its_a_feature_"
    argument_class = JsImportArguments
    attributes = CommandAttributes(
        # uncomment when poseidon can dynamically compile commands
        supported_os=[SupportedOS.MacOS]
    )
    attackmapping = []
    async def create_tasking(self, task: MythicTask) -> MythicTask:
        """Store the uploaded script in Mythic's file service and rewrite the
        task's file_id argument to the stored file's agent id."""
        original_file_name = json.loads(task.original_params)["JXA Script to Load"]
        # Raw file bytes were parsed into the task args; register them with
        # the RPC file store (deleted after the agent fetches them once).
        response = await MythicRPC().execute("create_file", task_id=task.id,
                                             file=base64.b64encode(task.args.get_arg("file_id")).decode(),
                                             saved_file_name=original_file_name,
                                             delete_after_fetch=True,
                                             )
        if response.status == MythicStatus.Success:
            task.args.add_arg("file_id", response.response["agent_file_id"])
            task.display_params = "script " + original_file_name
        else:
            raise Exception("Error from Mythic: " + response.error)
        return task
    async def process_response(self, response: AgentResponse):
        # No agent-side output processing is required for this command.
        pass
"codybthomas@gmail.com"
] | codybthomas@gmail.com |
9ffe4224c93a6d260380414f96e07e3b4b1def64 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pygments/lexers/smv.py | a4cbf9455e3d5cfbee0f1fbdae8b7da6a7f39a21 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 2,773 | py | """
pygments.lexers.smv
~~~~~~~~~~~~~~~~~~~
Lexers for the SMV languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, Text
__all__ = ['NuSMVLexer']
class NuSMVLexer(RegexLexer):
"""
Lexer for the NuSMV language.
.. versionadded:: 2.2
"""
name = 'NuSMV'
aliases = ['nusmv']
filenames = ['*.smv']
mimetypes = []
tokens = {
'root': [
# Comments
(r'(?s)\/\-\-.*?\-\-/', Comment),
(r'--.*\n', Comment),
# Reserved
(words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR',
'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC',
'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC',
'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN',
'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF',
'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED',
'PREDICATES'), suffix=r'(?![\w$#-])'),
Keyword.Declaration),
(r'process(?![\w$#-])', Keyword),
(words(('array', 'of', 'boolean', 'integer', 'real', 'word'),
suffix=r'(?![\w$#-])'), Keyword.Type),
(words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword),
(words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize',
'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count',
'abs', 'max', 'min'), suffix=r'(?![\w$#-])'),
Name.Builtin),
(words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G',
'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF',
'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor',
'xnor'), suffix=r'(?![\w$#-])'),
Operator.Word),
(words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant),
# Names
(r'[a-zA-Z_][\w$#-]*', Name.Variable),
# Operators
(r':=', Operator),
(r'[-&|+*/<>!=]', Operator),
# Literals
(r'\-?\d+\b', Number.Integer),
(r'0[su][bB]\d*_[01_]+', Number.Bin),
(r'0[su][oO]\d*_[0-7_]+', Number.Oct),
(r'0[su][dD]\d*_[\d_]+', Number.Decimal),
(r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex),
# Whitespace, punctuation and the rest
(r'\s+', Text.Whitespace),
(r'[()\[\]{};?:.,]', Punctuation),
],
}
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
d6a19c14ed21fd7fff372aff3356a947433b7b86 | f543f74749ff6aa7731438cb1c33f01c7c6296b2 | /ZenPacks/community/zenSiebelCRM/routers.py | 5eda913bc6c7fa4119561975d408e0a962def296 | [
"Apache-2.0"
] | permissive | j053ph4/ZenPacks.community.zenSiebelCRM | 5ff2438cbf778a53b47dcf1d23fd068412232e41 | fcddf900ff0290fa646722060a40e315e857e439 | refs/heads/master | 2021-01-01T16:39:24.706506 | 2015-03-20T16:29:19 | 2015-03-20T16:29:19 | 2,608,202 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from ZenPacks.community.ConstructionKit.ClassHelper import *
class zenSiebelCRMRouter(ClassHelper.zenSiebelCRMRouter):
''''''
| [
"janderson@agero.com"
] | janderson@agero.com |
eca07f6edf533d18f3e673a87f6e9048c8363109 | f39f870107ebd13914220b862a62709f22cd778d | /src/runrex/schema.py | 375806ac12b85718f53d37e288fd3d4851050c2d | [
"MIT"
] | permissive | kpwhri/runrex | 36efd549009d4c3cc77a498934cdcb5f92748d8e | 68f7e67419cd6b87ed86d755a760b6c5fcbfb07d | refs/heads/master | 2023-04-02T07:06:59.226692 | 2023-03-24T18:51:46 | 2023-03-24T18:51:46 | 224,006,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,624 | py | import json
import jsonschema
try:
from ruamel import yaml
except ModuleNotFoundError:
yaml = False
JSON_SCHEMA = {
'type': 'object',
'properties': {
'corpus': {
'type': 'object',
'properties': {
'directory': {'type': 'string'},
'directories': {
'type': 'array',
'items': {'type': 'string'}
},
'version': {'type': 'string'}, # text or lemma
'connections': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {'type': 'string'}, # database name; path to CSV file
'encoding': {'type': 'string'}, # for CSV file
'driver': {'type': 'string'},
'server': {'type': 'string'},
'database': {'type': 'string'},
'name_col': {'type': 'string'},
'text_col': {'type': 'string'}
}
}
},
}
},
'annotation': {
'type': 'object',
'properties': {
'file': {'type': 'string'}
}
},
'annotations': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'file': {'type': 'string'}
}
}
},
'output': {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'kind': {'type': 'string'}, # sql, csv, etc.
'path': {'type': 'string'},
'driver': {'type': 'string'},
'server': {'type': 'string'},
'database': {'type': 'string'},
'ignore': {'type': 'boolean'},
'encoding': {'type': 'string'},
}
},
'select': {
'type': 'object',
'properties': {
'start': {'type': 'number'},
'end': {'type': 'number'},
'encoding': {'type': 'string'},
'filenames': {
'type': 'array',
'items': {'type': 'string'}
}
}
},
'algorithm': {
'type': 'object',
'names': {
'type': 'array',
'items': {'type': 'string'}
}
},
'loginfo': {
'type': 'object',
'properties': {
'directory': {'type': 'string'},
'ignore': {'type': 'boolean'},
'encoding': {'type': 'string'},
'kind': {'type': 'string'},
}
},
'skipinfo': {
'type': 'object',
'properties': {
'path': {'type': 'string'},
'rebuild': {'type': 'boolean'},
'ignore': {'type': 'boolean'},
}
},
'logger': {
'type': 'object',
'properties': {
'verbose': {'type': 'boolean'}
}
}
}
}
def myexec(code):
import warnings
warnings.warn('Executing python external file: only do this if you trust it')
import sys
from io import StringIO
temp_stdout = sys.stdout
sys.stdout = StringIO()
try:
# try if this is a expression
ret = eval(code)
result = sys.stdout.getvalue()
if ret:
result = result + ret
except:
try:
exec(code)
except:
# you can use <traceback> module here
import traceback
buf = StringIO()
traceback.print_exc(file=buf)
error = buf.getvalue()
raise ValueError(error)
else:
result = sys.stdout.getvalue()
sys.stdout = temp_stdout
return result
def get_config(path):
with open(path) as fh:
if path.endswith('json'):
return json.load(fh)
elif path.endswith('yaml') and yaml:
return yaml.load(fh)
elif path.endswith('py'):
return eval(myexec(fh.read()))
else:
raise ValueError('Unrecognized configuration file type: {}'.format(path.split('.')[-1]))
def validate_config(path):
conf = get_config(path)
jsonschema.validate(conf, JSON_SCHEMA)
return conf
| [
"dcronkite@gmail.com"
] | dcronkite@gmail.com |
fae05f2b90f2dbd3d408b281c80207daa203395c | 8ed1430279ae52fd950dd0afe88549a100001e26 | /qa/rpc-tests/test_framework/key.py | a36d6d71e08f3b53918ffad15038dbf82f0d3830 | [
"MIT"
] | permissive | mirzaei-ce/core-najafbit | 9fb70dbd4e17ec1635d7b886db17f8aab3f592bb | 6de34210a9ba9cc3f21fee631bc1a1f4d12d445d | refs/heads/master | 2021-08-11T08:53:58.165742 | 2017-11-13T13:00:14 | 2017-11-13T13:00:14 | 110,548,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,367 | py | # Copyright (c) 2011 Sam Rushing
#
# key.py - OpenSSL wrapper
#
# This file is modified from python-najafbitlib.
#
"""ECC secp256k1 crypto routines
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey(object):
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
return mb_sig.raw[:sig_size0.value]
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
| [
"mirzaei@ce.sharif.edu"
] | mirzaei@ce.sharif.edu |
4a8ff182ff64cd8354550c0be5e3c6d332c9e65e | fb5b258968d361e652e31a753b7729acea776470 | /tracker/extensions.py | 32059628cfe55f696c80043be6c0188722f973aa | [] | no_license | OpenDataServices/aid-transparency-tracker | ddbac46406dccd71b5b441e543e67d1819377da3 | d3d12dc3d038c24825374eb7aa74ed6e51266747 | refs/heads/master | 2020-06-14T00:53:28.939473 | 2019-06-30T15:18:55 | 2019-06-30T15:18:55 | 194,842,277 | 1 | 1 | null | 2019-07-02T10:30:35 | 2019-07-02T10:30:34 | null | UTF-8 | Python | false | false | 528 | py | """Extensions module. Each extension is initialized in the app factory located in app.py."""
from flask_caching import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_webpack import Webpack
from flask_wtf.csrf import CSRFProtect
from flask_security import Security
csrf_protect = CSRFProtect()
db = SQLAlchemy()
migrate = Migrate()
cache = Cache()
debug_toolbar = DebugToolbarExtension()
webpack = Webpack()
security = Security()
| [
"a.lulham@gmail.com"
] | a.lulham@gmail.com |
cd3649ce6be2f14862c05d40998542e059230fe7 | bcf88b912b9443c3326466c226f68a7e7ad5aa9d | /bdbag/fetch/transports/fetch_http.py | f55b0cd33f99c76fdd5b2177276a166c30c9865e | [
"Apache-2.0"
] | permissive | mvdbeek/bdbag | 33bc7e0275c720104af77654b0016024cb6ab012 | fe67b5bffc68b7dac823ce03d450ede3affccbef | refs/heads/master | 2020-03-25T05:17:09.646537 | 2018-07-12T03:58:06 | 2018-07-12T03:58:06 | 143,438,809 | 0 | 0 | null | 2018-08-03T14:42:27 | 2018-08-03T14:42:27 | null | UTF-8 | Python | false | false | 6,656 | py | import os
import datetime
import logging
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import certifi
from bdbag import urlsplit, get_typed_exception
import bdbag.fetch.auth.keychain as keychain
logger = logging.getLogger(__name__)
Kilobyte = 1024
Megabyte = 1024 ** 2
CHUNK_SIZE = 1024 * 10240
SESSIONS = dict()
HEADERS = {'Connection': 'keep-alive'}
def validate_auth_config(auth):
if not keychain.has_auth_attr(auth, 'auth_type'):
return False
if not keychain.has_auth_attr(auth, 'auth_params'):
return False
return True
def get_session(url, auth_config):
session = None
response = None
for auth in list((entry for entry in auth_config if hasattr(entry, 'uri') and (entry.uri.lower() in url.lower()))):
try:
if not validate_auth_config(auth):
continue
if auth.uri in SESSIONS:
session = SESSIONS[auth.uri]
break
else:
session = get_new_session()
if auth.auth_type == 'cookie':
if auth.auth_params and hasattr(auth.auth_params, 'cookies'):
cookies = auth.auth_params.cookies
for cookie in cookies:
name, value = cookie.split('=', 1)
session.cookies.set(name, value, domain=urlsplit(auth.uri).hostname, path='/')
SESSIONS[auth.uri] = session
break
# if we get here the assumption is that the auth_type is either http-basic or http-form
auth_uri = auth.uri
if keychain.has_auth_attr(auth, 'auth_uri'):
auth_uri = auth.auth_uri
if not (keychain.has_auth_attr(auth.auth_params, 'username') and
keychain.has_auth_attr(auth.auth_params, 'password')):
logging.warning(
"Missing required parameters [username, password] for auth_type [%s] for keychain entry [%s]" %
(auth.auth_type, auth.uri))
continue
if auth.auth_type == 'http-basic':
session.auth = (auth.auth_params.username, auth.auth_params.password)
auth_method = "post"
if keychain.has_auth_attr(auth.auth_params, 'auth_method'):
auth_method = auth.auth_params.auth_method.lower()
if auth_method == 'post':
response = session.post(auth_uri, auth=session.auth)
elif auth_method == 'get':
response = session.get(auth_uri, auth=session.auth)
else:
logging.warning("Unsupported auth_method [%s] for auth_type [%s] for keychain entry [%s]" %
(auth_method, auth.auth_type, auth.uri))
elif auth.auth_type == 'http-form':
response = session.post(auth_uri,
{auth.auth_params.username_field or "username": auth.auth_params.username,
auth.auth_params.password_field or "password": auth.auth_params.password})
if response.status_code > 203:
logger.warning(
'Authentication failed with Status Code: %s %s\n' % (response.status_code, response.text))
else:
logger.info("Session established: %s", auth.uri)
SESSIONS[auth.uri] = session
break
except Exception as e:
logger.warning("Unhandled exception during HTTP(S) authentication: %s" % get_typed_exception(e))
if not session:
url_parts = urlsplit(url)
base_url = str("%s://%s" % (url_parts.scheme, url_parts.netloc))
session = SESSIONS.get(base_url, None)
if not session:
session = get_new_session()
SESSIONS[base_url] = session
return session
def get_new_session():
session = requests.session()
retries = Retry(connect=5,
read=5,
backoff_factor=1.0,
status_forcelist=[500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))
return session
def get_file(url, output_path, auth_config, headers=None, session=None):
try:
if not session:
session = get_session(url, auth_config)
output_dir = os.path.dirname(os.path.abspath(output_path))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not headers:
headers = HEADERS
else:
headers.update(HEADERS)
logger.info("Attempting GET from URL: %s" % url)
r = session.get(url, headers=headers, stream=True, verify=certifi.where())
if r.status_code == 401:
session = get_session(url, auth_config)
r = session.get(url, headers=headers, stream=True, verify=certifi.where())
if r.status_code != 200:
logger.error('HTTP GET Failed for URL: %s' % url)
logger.error("Host %s responded:\n\n%s" % (urlsplit(url).netloc, r.text))
logger.warning('File transfer failed: [%s]' % output_path)
else:
total = 0
start = datetime.datetime.now()
logger.debug("Transferring file %s to %s" % (url, output_path))
with open(output_path, 'wb') as data_file:
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
data_file.write(chunk)
total += len(chunk)
elapsed_time = datetime.datetime.now() - start
total_secs = elapsed_time.total_seconds()
transferred = \
float(total) / float(Kilobyte) if total < Megabyte else float(total) / float(Megabyte)
throughput = str(" at %.2f MB/second" % (transferred / total_secs)) if (total_secs >= 1) else ""
elapsed = str("Elapsed time: %s." % elapsed_time) if (total_secs > 0) else ""
summary = "%.3f %s transferred%s. %s" % \
(transferred, "KB" if total < Megabyte else "MB", throughput, elapsed)
logger.info('File [%s] transfer successful. %s' % (output_path, summary))
return True
except requests.exceptions.RequestException as e:
logger.error('HTTP Request Exception: %s' % (get_typed_exception(e)))
return False
def cleanup():
for session in SESSIONS.values():
session.close()
SESSIONS.clear()
| [
"mikedarcy@users.noreply.github.com"
] | mikedarcy@users.noreply.github.com |
fb95f7509a541b20d0c87ac002317fa791d06f7b | 4c1c2e3a8882f58a895285232eddae337ddc1a3a | /tests/test_general.py | f0721fd9fa1cb43874e25eda16877495aa4b11b9 | [
"MIT"
] | permissive | ArtellaPipe/artellapipe-tools-renamer | b00472d94de48771b3de69d2706f2e37235736d2 | 2456f418e2b4cf853d228fcce27af94b4fe3185a | refs/heads/master | 2020-11-27T15:15:59.673296 | 2020-08-24T18:26:45 | 2020-08-24T18:26:45 | 229,507,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains general tests for artellapipe-tools-renamer
"""
import pytest
from artellapipe.tools.renamer import __version__
def test_version():
assert __version__.get_version()
| [
"tpovedatd@gmail.com"
] | tpovedatd@gmail.com |
2ccbe098d647f4aa3758b9746cb05213b6096eb2 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/2545. Sort the Students by Their Kth Score.py | 1938cef18d0c6419bf112383400d38432dc59634 | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | '''
There is a class with m students and n exams. You are given a 0-indexed m x n integer matrix score, where each row represents one student and score[i][j] denotes the score the ith student got in the jth exam. The matrix score contains distinct integers only.
You are also given an integer k. Sort the students (i.e., the rows of the matrix) by their scores in the kth (0-indexed) exam from the highest to the lowest.
Return the matrix after sorting it.
Example 1:
Input: score = [[10,6,9,1],[7,5,11,2],[4,8,3,15]], k = 2
Output: [[7,5,11,2],[10,6,9,1],[4,8,3,15]]
Explanation: In the above diagram, S denotes the student, while E denotes the exam.
- The student with index 1 scored 11 in exam 2, which is the highest score, so they got first place.
- The student with index 0 scored 9 in exam 2, which is the second highest score, so they got second place.
- The student with index 2 scored 3 in exam 2, which is the lowest score, so they got third place.
Example 2:
Input: score = [[3,4],[5,6]], k = 0
Output: [[5,6],[3,4]]
Explanation: In the above diagram, S denotes the student, while E denotes the exam.
- The student with index 1 scored 5 in exam 0, which is the highest score, so they got first place.
- The student with index 0 scored 3 in exam 0, which is the lowest score, so they got second place.
Constraints:
m == score.length
n == score[i].length
1 <= m, n <= 250
1 <= score[i][j] <= 105
score consists of distinct integers.
0 <= k < n
'''
from typing import *
import unittest
class Solution:
def sortTheStudents(self, score: List[List[int]], k: int) -> List[List[int]]:
return sorted(score, key=lambda x: -x[k])
class TestSolution(unittest.TestCase):
def test_case(self):
examples = (
(([[10,6,9,1],[7,5,11,2],[4,8,3,15]],2),[[7,5,11,2],[10,6,9,1],[4,8,3,15]]),
)
for first, second in examples:
self.assert_function(first, second)
def assert_function(self, first, second):
self.assertEqual(Solution().sortTheStudents(*first), second,
msg="first: {}; second: {}".format(first, second))
unittest.main() | [
"xiaohuanlin1993@gmail.com"
] | xiaohuanlin1993@gmail.com |
20742f51da8d73b241b268b3e8ad34a2a7ec71a6 | e8215b98dcf46417e720cc6ef4a0329474ae9b82 | /PHYS304/Transcendental.py | f3540b28e7fbf87080a8c19cdb8799a835c8501e | [] | no_license | rgkaufmann/PythonCodes | 2d47bab84ec851fc962598f613b1e666a14c8efd | a5d5cd993beabdb79897a05b35420ad82f438f51 | refs/heads/master | 2021-06-13T23:19:09.109162 | 2021-03-03T06:00:04 | 2021-03-03T06:00:04 | 162,771,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | import numpy as np
import matplotlib.pyplot as plt
def tangent(z):
return np.tan(z)
def transient(z):
return np.sqrt((8/z)**2-1)
zVals = np.linspace(np.pi, 8, 1000)
tanVals = tangent(zVals)
tranVals = transient(zVals)
zTransients = [4.16483091, 6.83067433]
tanTransients = tangent(zTransients)
plt.plot(zVals, tanVals, label='Tanget')
plt.plot(zVals, tranVals, label='Square Root')
plt.plot(zVals, np.abs(tanVals-tranVals), label='Absolute Value Difference')
plt.scatter(zTransients, tanTransients, label='Numerical Solutions')
plt.ylim(ymin=0, ymax=5)
plt.xlim(np.pi, 8)
plt.legend(loc='best')
plt.title('Graphical Representation of the Transcendental Equation')
plt.xlabel('z Values')
plt.show()
z0 = 4.16483091
z1 = 6.83067433
hbar = 1.0545718e-34
mass = 9.70938356e-31
#a = (8*hbar)/(np.sqrt(2*mass))
a=0.1
kappa0 = np.sqrt(8**2-z0**2)/a
kappa1 = np.sqrt(8**2-z1**2)/a
l0 = z0/a
l1 = z1/a
def HOWavefunction0(x):
# constant = ((mass)/(5*np.pi*hbar))**(1/4)
exponential = np.exp(-(mass)/(10*hbar)*x**2)
return exponential
def HOWavefunction1(x):
constant = ((mass)/(5*np.pi*hbar))**(1/4)
constant *= np.sqrt((2*mass)/(5*hbar))
exponential = np.exp(-(mass)/(10*hbar)*x**2)
return constant*x*exponential
def FSWWavefunction0Even(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa0*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.cos(l0*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = np.cos(l0*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = np.exp(kappa0*x[np.where(x<-a)])
return results
def FSWWavefunction0Odd(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa0*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.sin(l0*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = -1*np.sin(l0*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = -1*np.exp(kappa0*x[np.where(x<-a)])
return results
def FSWWavefunction1Even(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa1*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.cos(l1*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = np.cos(l1*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = np.exp(kappa1*x[np.where(x<-a)])
return results
def FSWWavefunction1Odd(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa1*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.sin(l1*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = -1*np.sin(l1*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = -1*np.exp(kappa1*x[np.where(x<-a)])
return results
xValues = np.linspace(-0.1, 0.1, 1000)
HO0 = HOWavefunction0(xValues)
HO1 = HOWavefunction1(xValues)
FSW0E = FSWWavefunction0Even(xValues)
FSW0O = FSWWavefunction0Odd(xValues)
FSW1E = FSWWavefunction1Even(xValues)
FSW1O = FSWWavefunction1Odd(xValues)
plt.plot(xValues, HO0)
plt.plot(xValues, FSW0E)
plt.plot(xValues, FSW0O)
plt.plot(xValues, np.abs(FSW0E+FSW0O))
plt.show()
plt.plot(xValues, HO1)
plt.plot(xValues, FSW1E)
plt.plot(xValues, FSW1O)
plt.plot(xValues, np.abs(FSW1E+FSW1O))
plt.show() | [
"ryankaufmannprof@gmail.com"
] | ryankaufmannprof@gmail.com |
817f9f6f9798e25518e5c410d73fac5f146c2faa | 84e4149b3571ff4abe5c27a66ecbde03c5afec3c | /chapter_09/section_3_3/test.py | 8de472d0bbe2646b49941b995e27abd2d498cb45 | [] | no_license | zhanlu-wm/Python-Crash-Course | 6efa04bd5c03e37394b3602d20e7ae57688836e7 | 043fe97b4acdf0008351fd0fdb045888e9bdd44d | refs/heads/master | 2021-07-18T18:34:32.435763 | 2017-10-23T15:27:17 | 2017-10-23T15:27:17 | 103,259,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from chapter_09.section_3_3.electric_car import ElectricCar
my_tesla = ElectricCar('tesla', 'model s', 2016)
print(my_tesla.get_descriptive_name())
my_tesla.describe_battery() | [
"ncu09wangming@163.com"
] | ncu09wangming@163.com |
c35ed578aa88d6f303bd3df648ce69e2bf6172a4 | a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c | /venv/Lib/site-packages/pandas/tests/frame/methods/test_nlargest.py | 7f9912e61e6e168237191bd9c0e95df2fc24b6ce | [] | no_license | mylonabusiness28/Final-Year-Project- | e4b79ccce6c19a371cac63c7a4ff431d6e26e38f | 68455795be7902b4032ee1f145258232212cc639 | refs/heads/main | 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ad76d46aae04dbd1db10b0a2b2882fd55b14de9a1307caf45c549f0b3d316f1a
size 6942
| [
"chuksajeh1@gmail.com"
] | chuksajeh1@gmail.com |
271db093eab9eef6514cf71cd5cdc33b0ebbebbe | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/PostProcessing/Effects/__init__.py | 4712566578e17fcdf1f5316527e1b33ae470f9e3 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,600 | py | # 2016.11.19 19:54:27 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/PostProcessing/Effects/__init__.py
"""PostProcessing.Effects python module
This module imports all Effects for ease-of-use by script programmers.
"""
s_effectFactories = {}
class implementEffectFactory:
def __init__(self, name, desc, *defaultArgs):
self.name = name
self.desc = desc
self.defaultArgs = defaultArgs
def __call__(self, f):
def callFn(*args):
if len(args) > 0:
return f(*args)
else:
return f(*self.defaultArgs)
fn = callFn
s_effectFactories[self.name] = [self.desc, fn]
return fn
def getEffectNames():
"""
This method returns a list of effect (names, descriptions)
used by the World Editor.
"""
ret = []
for key in sorted(s_effectFactories.iterkeys()):
desc = s_effectFactories[key][0]
ret.append((key, desc))
return ret
def effectFactory(name):
"""
This method builds a effect, given the corresponding factory name.
"""
return s_effectFactories[name][1]()
@implementEffectFactory('<new empty effect>', 'Create a new, empty effect.')
def empty():
e = Effect()
e.name = 'unnamed effect'
e.phases = []
return e
from DepthOfField import *
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\PostProcessing\Effects\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:54:27 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
e7e338dfe35947ecf54e68e2c21b2af79339e7a0 | ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7 | /syntactic_mutations/mnist/mutants/mutant28.py | 8c15391a262bd2fab6f30bcef4af3ed76cab9317 | [] | no_license | dlfaults/mutation_operators_evaluation | ea7f33459ba7bcf7d70092d9db8b40f9b338d516 | 7d1ff30e901931a46bf8908e9bb05cae3daa5f0f | refs/heads/master | 2020-12-27T15:45:07.262012 | 2020-02-03T12:22:01 | 2020-02-03T12:22:01 | 237,955,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
(img_rows, img_cols) = (28, 28)
def train(x_train, y_train, x_test, y_test, model_name):
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation=\
'relu', input_shape=\
input_shape))
model.add(Conv2D(64, (3, 4), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=\
keras.optimizers.Adadelta(), metrics=\
['accuracy'])
model.fit(x_train, y_train, batch_size=\
batch_size, epochs=\
epochs, verbose=\
1, validation_data=\
(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
return (score[0], score[1]) | [
"gunel71@gmail.com"
] | gunel71@gmail.com |
3f2754e1b1a8db1b55c2e9557da13f86471dabad | a7ab35ff204d7c4679ce8b1bf7da8fa363a7a708 | /algo_problems/q861-880/q873.py | 1015c2b49ca79f3a6b3e6bba1765132f41499876 | [] | no_license | lin13k/practice | c68e7270be2694cb9737c35af8beb19db7e30d65 | c3efe1a5839c3ff1c320c0fcfc7b65a9462f7b52 | refs/heads/master | 2021-01-19T11:05:43.521468 | 2018-11-12T09:42:29 | 2018-11-12T09:42:29 | 87,928,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | import collections
class Solution:
    """Longest Fibonacci-like subsequence (LeetCode 873), O(n^2) pair DP."""

    def lenLongestFibSubseq(self, A):
        """
        :type A: List[int]  strictly increasing positive integers
        :rtype: int  length of the longest Fibonacci-like subsequence
                     (0 when no subsequence of length >= 3 exists)
        """
        # Position of every value; A is strictly increasing, so values are unique.
        position = {value: idx for idx, value in enumerate(A)}
        # pair_len[(x, y)] = length of the Fibonacci-like run ending A[x], A[y];
        # any pair trivially forms a run of length 2.
        pair_len = collections.defaultdict(lambda: 2)
        best = 0
        for second in range(len(A)):
            for first in range(second):
                # Extend a run (first, middle) to (middle, second) when
                # A[first] + A[middle] == A[second] with first < middle < second.
                middle = position.get(A[second] - A[first])
                if middle is None or middle <= first:
                    continue
                extended = pair_len[(first, middle)] + 1
                pair_len[(middle, second)] = extended
                if extended > best:
                    best = extended
        return best
if __name__ == '__main__':
    # Ad-hoc smoke test: expected output is 5 (subsequence [1, 2, 3, 5, 8]).
    print(Solution().lenLongestFibSubseq([1, 2, 3, 4, 5, 6, 7, 8]))
| [
"lin.13k@gmail.com"
] | lin.13k@gmail.com |
55054610ae4183039b1acb0e4e418f03195a81c4 | 9d862dd68f8b4ea4e7de9397fef8592824c77449 | /app/top/api/rest/DeliveryTemplateDeleteRequest.py | b9f87c34c48381a9fc9ce9798903f521d14d4bb9 | [] | no_license | hi-noikiy/tmall-sku-outer_id | ffaca630dfb288ca33d962b8a050932d1047b9c8 | 1bcf29386a513bcb210bf5d91016e0dcb1ebc1ad | refs/heads/master | 2021-05-09T18:20:27.150316 | 2017-03-08T06:43:57 | 2017-03-08T06:43:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | '''
Created by auto_sdk on 2016.04.11
'''
from app.top.api.base import RestApi
class DeliveryTemplateDeleteRequest(RestApi):
  # Auto-generated (auto_sdk) request wrapper for the Taobao Open Platform
  # API 'taobao.delivery.template.delete'; regenerate rather than hand-edit.
  def __init__(self,domain='gw.api.taobao.com',port=80):
    RestApi.__init__(self,domain, port)
    # Required request parameter: id of the delivery template to delete.
    self.template_id = None
  def getapiname(self):
    # API method name the RestApi base class sends with the request.
    return 'taobao.delivery.template.delete'
| [
"1037096435@qq.com"
] | 1037096435@qq.com |
7aa16390fac2a04b1be129c306ea0507fc300de1 | 6f0ceee714bccf2a89c34a06aabd3bcb781a2fa4 | /tests/python/gpu/test_numpy_fallback.py | dc367b03139c0fcb872418d3db2dada7314dd41b | [
"Apache-2.0",
"MIT",
"Unlicense",
"BSL-1.0",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"OFL-1.0",
"BSD-2-Clause-Views",
"Zlib"
] | permissive | yajiedesign/mxnet | 5a495fd06dd1730c17d2d27d7e46c8a770847f17 | 8e5a16cf673db5aceb48d2cf7a0fc1abd0ee5e51 | refs/heads/master | 2021-03-30T22:37:18.603396 | 2020-10-23T06:40:17 | 2020-10-23T06:40:17 | 43,763,550 | 214 | 59 | Apache-2.0 | 2020-06-01T23:31:15 | 2015-10-06T16:36:40 | C++ | UTF-8 | Python | false | false | 4,728 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import
from distutils.version import StrictVersion
import sys
import pytest
import itertools
import numpy as _np
import platform
import mxnet as mx
import scipy.stats as ss
import scipy.special as scipy_special
from mxnet import np, npx
from mxnet.base import MXNetError
from mxnet.test_utils import assert_almost_equal, use_np, set_default_context
import os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import assertRaises
import random
from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf
from mxnet.numpy_op_signature import _get_builtin_op
from mxnet.util import numpy_fallback
set_default_context(mx.gpu(0))
@use_np
@pytest.mark.serial
def test_np_fallback_decorator():
    """Verify ``numpy_fallback``: official-NumPy code wrapped by the decorator
    must accept MXNet ndarrays, return results matching plain NumPy, keep
    outputs on the current (GPU) context, and reject mixed-context inputs.
    """
    @numpy_fallback
    def dnp_func(a, b=None, split_inputs=(), ret_type=list):
        """
        Dummy Doc:
        dnp_func is using the same np.xxx operators
        """
        ret_lst = []
        # unsupported indexing case
        ret_lst.append(a[:,a[1,:]>0])
        # unsupported operator
        ret_lst.append(np.nonzero(b))
        # unsupported operator case
        ret_lst.append(tuple(np.split(split_inputs[0], split_inputs[1])))
        return ret_type(ret_lst)
    def onp_func(a, b=None, split_inputs=(), ret_type=list):
        # Reference implementation on official NumPy for output comparison.
        ret_lst = []
        ret_lst.append(a[:,a[1,:]>0])
        ret_lst.append(_np.nonzero(b))
        ret_lst.append(tuple(_np.split(split_inputs[0], split_inputs[1])))
        return ret_type(ret_lst)
    def get_indices(axis_size):
        # Random sorted split points in (0, axis_size).
        # Fixed: was `axis_size is 0` -- identity comparison with an int
        # literal only works via CPython's small-int cache and raises a
        # SyntaxWarning since Python 3.8; equality is the intended check.
        if axis_size == 0:
            axis_size = random.randint(3, 6)
        samples = random.randint(1, axis_size - 1)
        indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
        indices = tuple(indices)
        return indices
    ret_type = list if random.uniform(0.0, 1.0) > 0.5 else tuple
    mx_a = np.array([[1,2,3],[3,4,5]])
    np_b = _np.random.uniform(size=(3, 4)) > 0.5
    mx_b = np.array(np_b, dtype=np_b.dtype)
    mx_c_len = random.randint(5, 20)
    mx_c = np.random.uniform(size=(mx_c_len,))
    mx_indices = np.array(get_indices(mx_c_len), dtype=np.int64)
    # The decorator must preserve the wrapped function's docstring untouched.
    assert dnp_func.__doc__ is not None
    assert 'onp' not in dnp_func.__doc__
    fallback_ret = dnp_func(mx_a, b=mx_b, split_inputs=(mx_c, mx_indices), ret_type=ret_type)
    onp_ret = onp_func(mx_a.asnumpy(), b=mx_b.asnumpy(), split_inputs=(mx_c.asnumpy(), mx_indices.asnumpy()), ret_type=ret_type)
    for fallback_out, onp_out in zip(fallback_ret, onp_ret):
        if isinstance(fallback_out, (list, tuple)):
            for fallback_item, onp_item in zip(fallback_out, onp_out):
                assert fallback_item.ctx == mx.context.current_context(), "incorrect output context %s vs desired %s" % (str(fallback_item.ctx), str(mx.context.current_context()))
                assert isinstance(fallback_item, np.ndarray)
                assert_almost_equal(fallback_item.asnumpy(), onp_item, rtol=1e-3, atol=1e-5, equal_nan=False)
        else:
            assert fallback_out.ctx == mx.context.current_context(), "incorrect output context %s vs desired %s" % (str(fallback_out.ctx), str(mx.context.current_context()))
            assert isinstance(fallback_out, np.ndarray)
            assert_almost_equal(fallback_out.asnumpy(), onp_out, rtol=1e-3, atol=1e-5, equal_nan=False)
    # does not support mixed-context inputs
    assertRaises(AssertionError, dnp_func, mx_a.as_in_ctx(npx.cpu(0)), b=mx_b, split_inputs=(mx_c, mx_indices), ret_type=ret_type)
    assertRaises(AssertionError, dnp_func, mx_a, b=mx_b,
                 split_inputs=(mx_c.as_in_ctx(npx.cpu(0)), mx_indices.as_in_ctx(npx.gpu(0))), ret_type=ret_type)
    @numpy_fallback
    def empty_ret_func():
        return
    # does not support functions with no return values
    assertRaises(ValueError, empty_ret_func)
| [
"noreply@github.com"
] | yajiedesign.noreply@github.com |
4d30ab38156924fdc3bad48d9760d204c8fdebdc | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/bugs/core_4165_test.py | d2422778b5f53e4956446b0f1a4dd1f69a715d89 | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 1,993 | py | #coding:utf-8
"""
ID: issue-4492
ISSUE: 4492
TITLE: Replace the hierarchical union execution with the plain one
DESCRIPTION:
JIRA: CORE-4165
FBTEST: bugs.core_4165
"""
import pytest
from firebird.qa import *
init_script = """
recreate table t1(id int);
recreate table t2(id int);
recreate table t3(id int);
commit;
insert into t1 select rand()*100 from rdb$types,rdb$types;
commit;
insert into t2 select * from t1;
insert into t3 select * from t1;
commit;
"""
db = db_factory(init=init_script)
test_script = """
set planonly;
set explain on;
select 0 i from t1
union all
select 1 from t1
union all
select 2 from t1
;
select 0 i from t2
union
select 1 from t2
union
select 2 from t2
;
select 0 i from t3
union distinct
select 1 from t3
union all
select 2 from t3
;
-- Note: values in 'record length' and 'key length' should be suppressed
-- because they contain not only size of field(s) but also db_key.
"""
act = isql_act('db', test_script, substitutions=[('record length.*', ''), ('key length.*', '')])
expected_stdout = """
Select Expression
-> Union
-> Table "T1" Full Scan
-> Table "T1" Full Scan
-> Table "T1" Full Scan
Select Expression
-> Unique Sort (record length: 52, key length: 8)
-> Union
-> Table "T2" Full Scan
-> Table "T2" Full Scan
-> Table "T2" Full Scan
Select Expression
-> Union
-> Unique Sort (record length: 44, key length: 8)
-> Union
-> Table "T3" Full Scan
-> Table "T3" Full Scan
-> Table "T3" Full Scan
"""
@pytest.mark.version('>=3.0')
def test_1(act: Action):
    # Run the scripted isql session and compare its sanitized output against
    # the expected plans; 'record length'/'key length' values are masked via
    # the substitutions configured on the act fixture.
    act.expected_stdout = expected_stdout
    act.execute()
    assert act.clean_stdout == act.clean_expected_stdout
| [
"pcisar@ibphoenix.cz"
] | pcisar@ibphoenix.cz |
5d25234ef3941728dcf320cbda2aeea1d13e6d35 | a672ac356faa8743a78703812ce41eb48fc0f99f | /tests/contrib/falcon/test_middleware.py | 68a9614099575934169190628353ac591579082e | [] | permissive | dailymotion/dd-trace-py | 2dd0b23aac89b60d7b40a74692e210a9b8778e94 | 47ecf1d805bbdff3579a7d644595ac083af04c70 | refs/heads/master | 2023-04-13T19:56:00.888553 | 2018-12-05T21:27:46 | 2018-12-05T21:27:46 | 84,096,497 | 0 | 1 | BSD-3-Clause | 2023-04-07T00:17:31 | 2017-03-06T16:38:57 | Python | UTF-8 | Python | false | false | 543 | py | from falcon import testing
from tests.test_tracer import get_dummy_tracer
from .app import get_app
from .test_suite import FalconTestCase
class MiddlewareTestCase(testing.TestCase, FalconTestCase):
    """Executes tests using the manual instrumentation so a middleware
    is explicitly added.

    The actual test methods come from the shared FalconTestCase mixin;
    this class only wires up the app under test.
    """
    def setUp(self):
        super(MiddlewareTestCase, self).setUp()
        # build a test app with a dummy tracer
        self._service = 'falcon'
        self.tracer = get_dummy_tracer()
        self.api = get_app(tracer=self.tracer)
| [
"emanuele.palazzetti@datadoghq.com"
] | emanuele.palazzetti@datadoghq.com |
e614b59b4db80db0b7fa97d34be01f68a4243409 | d4fa331d7d8a00865f99ee2c05ec8efc0468fb63 | /alg/unique_path.py | b28469b75a95f6b64b6d410929fd04451d54d2e0 | [] | no_license | nyannko/leetcode-python | 5342620c789a02c7ae3478d7ecf149b640779932 | f234bd7b62cb7bc2150faa764bf05a9095e19192 | refs/heads/master | 2021-08-11T04:11:00.715244 | 2019-02-05T15:26:43 | 2019-02-05T15:26:43 | 145,757,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
table = [[-1] * n for _ in range(m)]
table[0][0] = 1
def dfs(m, n):
if m < 0 or n < 0:
return 0
if m == 0 and n == 0:
return table[0][0]
if table[m][n] != -1:
return table[m][n]
else:
table[m][n] = dfs(m - 1, n) + dfs(m, n - 1)
return table[m][n]
def dfs1(m, n):
if table[m][n] == -1:
if m < 0 or n < 0:
return 0
if m == 0 and n == 0:
return table[0][0]
table[m][n] = dfs(m - 1, n) + dfs(m, n - 1)
return table[m][n]
return dfs1(m - 1, n - 1)
# Manual smoke test: a 3x2 grid has 3 distinct right/down paths.
a = Solution()
print(a.uniquePaths(3, 2))
| [
"9638293+nyannko@users.noreply.github.com"
] | 9638293+nyannko@users.noreply.github.com |
1bd9fba50c8d65cd8d1f749fbeb322b3d5990748 | 12a21462d6cdb37ff7336d498a75f578a8ec7959 | /lib/public/load_cases.py | 15ebcfc8940fd78877551e1317ae4af284186e82 | [
"MIT",
"Python-2.0"
] | permissive | bushidosds/MeteorTears | b1c23331aed6158662e6d544dbf71df2b10ef78d | cde3151b42e9ccae3c58e45233b637808c152571 | refs/heads/master | 2020-05-25T02:16:25.171245 | 2019-05-17T10:32:17 | 2019-05-17T10:32:17 | 187,575,106 | 1 | 0 | MIT | 2019-05-20T05:47:01 | 2019-05-20T05:47:01 | null | UTF-8 | Python | false | false | 1,611 | py | # -*- coding:utf-8 -*-
import yaml
from lib.utils import fp
from lib.public import logger
from lib.utils import exceptions
class LoadCase(object):
    # Loads every YAML case file found under ``path`` into dictionaries keyed
    # by a class name derived from the file name.
    def __init__(self, path: str = None):
        # Directory holding the case files; presumably any path accepted by
        # fp.iter_files -- TODO confirm against lib.utils.fp.
        self.path = path
    def get_all_files(self) -> list:
        """
        Return the list of all file paths under the configured directory.
        :Usage:
            get_all_files()
        """
        return fp.iter_files(self.path)
    @property
    def __get_files_name(self) -> list:
        """
        Return the bare file names (not paths) under the configured directory.
        :Usage:
            __get_files_name
        """
        return fp.iter_files(self.path, otype='name')
    def load_files(self) -> list:
        """
        Load every case file and return a list of {ClassName: parsed_yaml}
        dicts, where ClassName is derived from the file name
        (e.g. ``my_case.yml`` -> ``MyCase``).
        :Usage:
            load_files()
        """
        files_list = []
        for index, file in enumerate(self.get_all_files()):
            # "foo_bar.yml" -> "FooBar": drop the extension, title-case the
            # words, then remove the underscores.
            class_name = self.__get_files_name[index].split('.')[0].title().replace('_', '')
            try:
                with open(file, encoding='utf-8') as f:
                    files_list.append({class_name: yaml.safe_load(f)})
            except exceptions.JsonLoadingError as err:
                # A parse failure is logged and skipped so one bad file does
                # not abort the whole load.
                logger.log_error(
                    "Json file parsing error, error file: {0}, error message: {1}".format(
                        file, err))
        return files_list
class Containers(object):
    """Thin wrapper around a case descriptor dict, with a readable repr."""

    def __init__(self, crop: dict):
        self.crop = crop

    def __repr__(self):
        # Missing keys fall back to None, matching dict.get semantics.
        cls_name = self.crop.get('class_name')
        func_name = self.crop.get('func_name')
        return "Containers <{}->{}>".format(cls_name, func_name)
if __name__ == '__main__':
pass | [
"546464268@qq.com"
] | 546464268@qq.com |
fa21ef31448dccd7a96df6c42c7e27d93203474f | 9da8754002fa402ad8e6f25659978bd269bbcec8 | /src/426A/cdf_426A.py | acb2d5c61e1ef8068bbd8310cc8e909a4c400db3 | [
"MIT"
] | permissive | kopok2/CodeforcesSolutionsPython | a00f706dbf368ba0846c8ae86d4145b5dd3e1613 | 35bec0dbcff47765b123b5fe60476014376153df | refs/heads/master | 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | class CodeforcesTask426ASolution:
def __init__(self):
self.result = ''
self.n_s = []
self.mugs = []
def read_input(self):
self.n_s = [int(x) for x in input().split(" ")]
self.mugs = [int(x) for x in input().split(" ")]
def process_task(self):
self.mugs.sort()
self.result = "YES" if sum(self.mugs[:-1]) <= self.n_s[1] else "NO"
def get_result(self):
return self.result
if __name__ == "__main__":
    # Read the task from stdin, solve it, and print "YES"/"NO".
    Solution = CodeforcesTask426ASolution()
    Solution.read_input()
    Solution.process_task()
    print(Solution.get_result())
| [
"oleszek.karol@gmail.com"
] | oleszek.karol@gmail.com |
95d20cf93f789370811e7bf8b5254408503d39e9 | d95cfcee3fc5825d10d9c930baac94ebe7b9fa13 | /tests/test_pysubstringsearch.py | ece0467bba081ffd33ddde85efc22cb0720d05f8 | [
"MIT"
] | permissive | Intsights/PySubstringSearch | 191723bcd3ed699fe91f552d75f52a3a24f7a61f | 1f027986472c5b3e5d1d0e12e0cf7259def8df9a | refs/heads/master | 2023-01-22T12:18:29.351840 | 2023-01-10T06:41:03 | 2023-01-10T06:41:03 | 231,457,596 | 34 | 3 | MIT | 2023-01-10T06:41:05 | 2020-01-02T20:51:12 | C | UTF-8 | Python | false | false | 8,338 | py | import os
import tempfile
import unittest
import pysubstringsearch
class PySubstringSearchTestCase(
    unittest.TestCase,
):
    """End-to-end tests for pysubstringsearch: build an index from a list of
    strings, then assert which entries a substring search returns."""
    def assert_substring_search(
        self,
        strings,
        substring,
        expected_results,
    ):
        """Index ``strings`` into a temp file and check that searching for
        ``substring`` yields exactly ``expected_results`` (order ignored)."""
        try:
            with tempfile.TemporaryDirectory() as tmp_directory:
                index_file_path = f'{tmp_directory}/output.idx'
                writer = pysubstringsearch.Writer(
                    index_file_path=index_file_path,
                )
                for string in strings:
                    writer.add_entry(
                        text=string,
                    )
                writer.finalize()
                reader = pysubstringsearch.Reader(
                    index_file_path=index_file_path,
                )
                self.assertCountEqual(
                    first=reader.search(
                        substring=substring,
                    ),
                    second=expected_results,
                )
                # Best-effort cleanup of the index file; TemporaryDirectory
                # removes the directory anyway, so failures are ignored.
                try:
                    os.unlink(
                        path=index_file_path,
                    )
                except Exception:
                    pass
        except PermissionError:
            # Presumably tolerates platforms (e.g. Windows) where the open
            # index file cannot be deleted -- TODO confirm.
            pass
    def test_file_not_found(
        self,
    ):
        """Opening a reader on a missing index file must raise."""
        with self.assertRaises(
            expected_exception=FileNotFoundError,
        ):
            pysubstringsearch.Reader(
                index_file_path='missing_index_file_path',
            )
    def test_sanity(
        self,
    ):
        """Basic exact, prefix, and infix matches plus a miss."""
        strings = [
            'one',
            'two',
            'three',
            'four',
            'five',
            'six',
            'seven',
            'eight',
            'nine',
            'ten',
        ]
        self.assert_substring_search(
            strings=strings,
            substring='four',
            expected_results=[
                'four',
            ],
        )
        self.assert_substring_search(
            strings=strings,
            substring='f',
            expected_results=[
                'four',
                'five',
            ],
        )
        self.assert_substring_search(
            strings=strings,
            substring='our',
            expected_results=[
                'four',
            ],
        )
        self.assert_substring_search(
            strings=strings,
            substring='aaa',
            expected_results=[],
        )
    def test_edgecases(
        self,
    ):
        """Near-misses and a substring shared by several entries."""
        strings = [
            'one',
            'two',
            'three',
            'four',
            'five',
            'six',
            'seven',
            'eight',
            'nine',
            'ten',
            'tenten',
        ]
        self.assert_substring_search(
            strings=strings,
            substring='none',
            expected_results=[],
        )
        self.assert_substring_search(
            strings=strings,
            substring='one',
            expected_results=[
                'one',
            ],
        )
        self.assert_substring_search(
            strings=strings,
            substring='onet',
            expected_results=[],
        )
        self.assert_substring_search(
            strings=strings,
            substring='ten',
            expected_results=[
                'ten',
                'tenten',
            ],
        )
    def test_unicode(
        self,
    ):
        """Non-ASCII (Arabic) entries must be indexed and searched correctly."""
        strings = [
            'رجعوني عنيك لأيامي اللي راحوا',
            'علموني أندم على الماضي وجراحه',
            'اللي شفته قبل ما تشوفك عنيه',
            'عمر ضايع يحسبوه إزاي عليّ',
            'انت عمري اللي ابتدي بنورك صباحه',
            'قد ايه من عمري قبلك راح وعدّى',
            'يا حبيبي قد ايه من عمري راح',
            'ولا شاف القلب قبلك فرحة واحدة',
            'ولا داق في الدنيا غير طعم الجراح',
            'ابتديت دلوقت بس أحب عمري',
            'ابتديت دلوقت اخاف لا العمر يجري',
            'كل فرحه اشتاقها من قبلك خيالي',
            'التقاها في نور عنيك قلبي وفكري',
            'يا حياة قلبي يا أغلى من حياتي',
            'ليه ما قابلتش هواك يا حبيبي بدري',
            'اللي شفته قبل ما تشوفك عنيه',
            'عمر ضايع يحسبوه إزاي عليّ',
            'انت عمري اللي ابتدي بنورك صباحه',
            'الليالي الحلوه والشوق والمحبة',
            'من زمان والقلب شايلهم عشانك',
            'دوق معايا الحب دوق حبه بحبه',
            'من حنان قلبي اللي طال شوقه لحنانك',
            'هات عنيك تسرح في دنيتهم عنيه',
            'هات ايديك ترتاح للمستهم ايديه',
        ]
        self.assert_substring_search(
            strings=strings,
            substring='زمان',
            expected_results=[
                'من زمان والقلب شايلهم عشانك',
            ],
        )
        self.assert_substring_search(
            strings=strings,
            substring='في',
            expected_results=[
                'هات عنيك تسرح في دنيتهم عنيه',
                'التقاها في نور عنيك قلبي وفكري',
                'ولا داق في الدنيا غير طعم الجراح',
            ],
        )
        self.assert_substring_search(
            strings=strings,
            substring='حنان',
            expected_results=[
                'من حنان قلبي اللي طال شوقه لحنانك',
            ],
        )
        self.assert_substring_search(
            strings=strings,
            substring='none',
            expected_results=[],
        )
    def test_multiple_words_string(
        self,
    ):
        """A substring must match inside multi-word entries."""
        strings = [
            'some short string',
            'another but now a longer string',
            'more text to add',
        ]
        self.assert_substring_search(
            strings=strings,
            substring='short',
            expected_results=[
                'some short string',
            ],
        )
    def test_short_string(
        self,
    ):
        """Single-character queries against a two-character entry."""
        strings = [
            'ab',
        ]
        self.assert_substring_search(
            strings=strings,
            substring='a',
            expected_results=[
                'ab',
            ],
        )
    def test_multiple_strings(
        self,
    ):
        """search_multiple returns the union of matches for all substrings."""
        try:
            with tempfile.TemporaryDirectory() as tmp_directory:
                index_file_path = f'{tmp_directory}/output.idx'
                writer = pysubstringsearch.Writer(
                    index_file_path=index_file_path,
                )
                for string in [
                    'one',
                    'two',
                    'three',
                    'four',
                    'five',
                    'six',
                    'seven',
                    'eight',
                    'nine',
                    'ten',
                    'tenten',
                ]:
                    writer.add_entry(
                        text=string,
                    )
                writer.finalize()
                reader = pysubstringsearch.Reader(
                    index_file_path=index_file_path,
                )
                self.assertCountEqual(
                    first=reader.search_multiple(
                        substrings=[
                            'ee',
                            'ven',
                        ],
                    ),
                    second=[
                        'three',
                        'seven',
                    ],
                )
                # Best-effort cleanup; the temp directory is removed anyway.
                try:
                    os.unlink(
                        path=index_file_path,
                    )
                except Exception:
                    pass
        except PermissionError:
            pass
| [
"gal@intsights.com"
] | gal@intsights.com |
633e92496b35946000f6bd921841af0b78776164 | 0e8b6f94467c25dd2440f7e2ea1519244e689620 | /MokkaJobs/MokkaGridJobs.py | b46bd21da2de9d0ba74c112d8fce0236cb7084ce | [] | no_license | StevenGreen1/HighEnergyPhotonAnalysis | 97a661eaca2efd00472f1969855c724c9d505369 | 8a82ac57f56aad5bdbe99d4a5afb771592bc1725 | refs/heads/master | 2021-01-10T14:08:50.550184 | 2015-10-12T12:43:47 | 2015-10-12T12:43:47 | 43,491,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,854 | py | # Submit Mokka jobs to the grid: MokkaGridJobs.py
import re
import os
import sys
### ----------------------------------------------------------------------------------------------------
def setGearFile(mokkaSteeringTemplate,gearFile):
    """Return the steering template with GEAR_FILE_XXXX replaced by gearFile.

    Uses str.replace instead of re.sub: the placeholder is a literal, and
    re.sub would interpret backslashes / group references (e.g. ``\\g<0>``)
    inside the file name instead of inserting it verbatim.
    """
    return mokkaSteeringTemplate.replace('GEAR_FILE_XXXX', gearFile)
### ----------------------------------------------------------------------------------------------------
def setStartNumber(mokkaSteeringTemplate,startNumber):
    """Return the template with START_EVENT_NUMBER_XXXX set to startNumber.

    Literal replacement via str.replace (see setGearFile) instead of re.sub.
    """
    return mokkaSteeringTemplate.replace('START_EVENT_NUMBER_XXXX', str(startNumber))
### ----------------------------------------------------------------------------------------------------
def setOutputFile(mokkaSteeringTemplate,outputFile):
    """Return the template with OUTPUT_FILE_NAME_XXXX set to outputFile.

    Literal replacement via str.replace so backslashes in file names are
    inserted verbatim (re.sub treats them as escapes in the replacement).
    """
    return mokkaSteeringTemplate.replace('OUTPUT_FILE_NAME_XXXX', outputFile)
### ----------------------------------------------------------------------------------------------------
def getMokkaVersion(detectorConfigFile):
    """Execute the (Python 2) detector config script and return its
    ``MokkaVersion`` variable."""
    namespace = {}
    execfile(detectorConfigFile, namespace)
    return namespace['MokkaVersion']
### ----------------------------------------------------------------------------------------------------
def getMokkaSteeringFileTemplate(baseFileName,detectorConfigFile):
    """Read the Mokka steering-file template and fill in every detector
    placeholder from the config script (executed to obtain its variables).
    """
    config = {}
    execfile(detectorConfigFile, config)
    with open(baseFileName, 'r') as baseFile:
        mokkaSteeringTemplate = baseFile.read()
    # (placeholder, replacement) pairs, applied in the original order.
    replacements = [
        # Detector model and physics list.
        ('DETECTOR_MODEL_XXXX', config['DetectorModel']),
        ('PHYSICS_LIST_XXXX', config['PhysicsList']),
        # HCal geometry: absorber material, cell size, layer thicknesses,
        # and layer count.
        ('HCAL_ABSORBER_MATERIAL_XXXX', str(config['HCalAbsorberMaterial'])),
        ('HCAL_CELL_SIZE_XXXX', str(config['HCalCellSize'])),
        ('HCAL_ABSORBER_LAYER_THICKNESS_XXXX', str(config['HCalAbsorberLayerThickness'])),
        ('HCAL_SCINTILLATOR_LAYER_THICKNESS_XXXX', str(config['HCalScintillatorThickness'])),
        ('HCAL_NUMBER_OF_LAYERS_XXXX', str(config['NumberHCalLayers'])),
        # Coil extra size, has to be varied if expanding HCal.
        ('COIL_EXTRA_SIZE_XXXX', str(config['CoilExtraSize'])),
        # Tracker B field and TPC outer radius (= ECal inner radius).
        ('BFIELD_XXXX', str(config['BField'])),
        ('TPC_OUTER_RADIUS_XXXX', str(config['TPCOuterRadius'])),
        # Detailed shower mode flag.
        ('DETAILED_SHOWER_MODE_XXXX', config['DetailedShowerMode']),
    ]
    for placeholder, value in replacements:
        mokkaSteeringTemplate = re.sub(placeholder, value, mokkaSteeringTemplate)
    return mokkaSteeringTemplate
### ----------------------------------------------------------------------------------------------------
def getHEPEvtFiles(eventType, energy):
    """Query the DIRAC file catalogue for HEPEvt files of the given event
    type and energy, returning the listed entries (one per output line)."""
    query = ('dirac-ilc-find-in-FC /ilc JobDescription="HEPEvt" Energy=' +
             str(energy) + ' EvtType="' + eventType + '" > tmp.txt')
    os.system(query)
    with open('tmp.txt') as f:
        hepevtFiles = [line.strip() for line in f.readlines()]
    os.system('rm tmp.txt')
    return hepevtFiles
return hepevtFiles
### ----------------------------------------------------------------------------------------------------
| [
"sg1sg2sg3@hotmail.co.uk"
] | sg1sg2sg3@hotmail.co.uk |
ad1d6246f54fb89f7cf4d0bd153800016048e5b1 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/devices/v20170701/list_iot_hub_resource_keys.py | a406e6b6c9a9cbd4caa7847dbb84ed9eee6cb173 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 2,753 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListIotHubResourceKeysResult',
'AwaitableListIotHubResourceKeysResult',
'list_iot_hub_resource_keys',
]
@pulumi.output_type
class ListIotHubResourceKeysResult:
    """
    The list of shared access policies with a next link.

    Auto-generated by the Pulumi SDK generator; do not edit by hand.
    """
    def __init__(__self__, next_link=None, value=None):
        # NOTE: the isinstance checks only run for truthy values, so None/''
        # and None/[] pass through without a TypeError.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> str:
        """
        The next link.
        """
        return pulumi.get(self, "next_link")
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.SharedAccessSignatureAuthorizationRuleResponse']]:
        """
        The list of shared access policies.
        """
        return pulumi.get(self, "value")
class AwaitableListIotHubResourceKeysResult(ListIotHubResourceKeysResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator function,
        # which makes instances awaitable; awaiting one simply produces a
        # plain ListIotHubResourceKeysResult via the generator's return value.
        if False:
            yield self
        return ListIotHubResourceKeysResult(
            next_link=self.next_link,
            value=self.value)
def list_iot_hub_resource_keys(resource_group_name: Optional[str] = None,
                               resource_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotHubResourceKeysResult:
    """
    Use this data source to access information about an existing resource.

    Invokes the Azure `listIotHubResourceKeys` API (2017-07-01) through the
    Pulumi runtime and returns the shared access policies of the IoT hub.

    :param str resource_group_name: The name of the resource group that contains the IoT hub.
    :param str resource_name: The name of the IoT hub.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:devices/v20170701:listIotHubResourceKeys', __args__, opts=opts, typ=ListIotHubResourceKeysResult).value
    return AwaitableListIotHubResourceKeysResult(
        next_link=__ret__.next_link,
        value=__ret__.value)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
bd0f247b7986d7d12e7110d7fe7257f8dc06b819 | faabe34af6297530617395bcc6811350765da847 | /platforms/leetcode/DeleteNodesAndReturnForest.py | 4c40784b5befdda322e99dfa179d34fc32592d04 | [] | no_license | pqnguyen/CompetitiveProgramming | 44a542aea299bd553dd022a9e737e087285b8b6d | 27330e7ff79c4ac883d7e1fcdf2f0d30939c3f78 | refs/heads/master | 2021-07-21T12:15:47.366599 | 2021-06-27T14:58:48 | 2021-06-27T14:58:48 | 132,837,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 1110: delete the given values from a binary tree and return
    the roots of the resulting forest."""

    def delNodes(self, root: TreeNode, to_delete: List[int]) -> List[TreeNode]:
        doomed = set(to_delete)
        forest = []
        # The original root survives only if it is not itself deleted.
        if root and root.val not in doomed:
            forest.append(root)
        self.delNodesUtil(root, doomed, forest)
        return forest

    def delNodesUtil(self, root, to_delete, roots):
        """Post-order prune: drop deleted nodes, collecting orphaned children."""
        if root is None:
            return None
        root.left = self.delNodesUtil(root.left, to_delete, roots)
        root.right = self.delNodesUtil(root.right, to_delete, roots)
        if root.val not in to_delete:
            return root
        # This node is removed: its surviving children become new roots.
        for child in (root.left, root.right):
            if child:
                roots.append(child)
        root.left = root.right = None
        return None
| [
"pqnguyen1996@gmail.com"
] | pqnguyen1996@gmail.com |
c8b141e64d1719e48b961907b0984796c4450614 | 4ace4d5a94ab0db79562f1b23edd6011a89148c6 | /src/airflow-stubs/contrib/hooks/dingding_hook.pyi | dab4dfa1201d2ddd2dda0f3a426571174e3d6bae | [
"MIT"
] | permissive | viewthespace/mypy-stubs | 9abebc2eab2b46b2230842f06114673e1a4de052 | 182fa275c4a7011eb5345694b88229adbddcc999 | refs/heads/master | 2023-06-07T18:52:46.739560 | 2023-06-01T22:05:27 | 2023-06-01T22:05:45 | 236,780,299 | 0 | 0 | MIT | 2022-01-11T20:53:55 | 2020-01-28T16:23:07 | Python | UTF-8 | Python | false | false | 532 | pyi | from airflow import AirflowException as AirflowException
from airflow.hooks.http_hook import HttpHook as HttpHook
from typing import Any
class DingdingHook(HttpHook):
    # Type stub (.pyi) for airflow.contrib.hooks.dingding_hook.DingdingHook;
    # declares the public surface only, bodies are intentionally elided.
    message_type: Any
    message: Any
    at_mobiles: Any
    at_all: Any
    def __init__(self, dingding_conn_id: str = ..., message_type: str = ..., message: Any | None = ..., at_mobiles: Any | None = ..., at_all: bool = ..., *args, **kwargs) -> None: ...
    base_url: Any
    def get_conn(self, headers: Any | None = ...): ...
    def send(self) -> None: ...
| [
"andrew.marshall@vts.com"
] | andrew.marshall@vts.com |
7919332572d3089ed39adfdcfd8799e6e725cb1d | 9b8ca63a377e6f94cc6a970cc97a6f7f50932811 | /nomitang_affpart/main.py | 3c8900c633842101ee3c2057923d4f1ddb4697b7 | [
"Apache-2.0"
] | permissive | lester-lees/extra_addons_sz | 9b6d2400abe4707b7b18d9e2e9caf2fb366cf3a6 | cddaf972cf4ea64c553bcff0006eb006a115d5ee | refs/heads/master | 2021-01-06T20:43:28.782147 | 2017-08-07T06:51:45 | 2017-08-07T06:51:45 | 99,545,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
from openerp import http
from openerp.http import request
from openerp.addons.website.models.website import slug
class website_offlinestore(http.Controller):
    # OpenERP/Odoo website controller serving the affiliate-partner
    # thank-you page at /thanks.
    @http.route(['/thanks',], type='http', auth="public", website=True)
    def thanks(self, **post):
        # Render the thank-you template with an empty 'url' value
        # (presumably tolerated by the template -- TODO confirm).
        return request.website.render("nomitang_affpart.nt_thanks_website_view", {'url':''})
| [
"346994202@qq.com"
] | 346994202@qq.com |
6c54c77da5bee9778fd95a7b143658328e2d1e93 | f9b919ee04754978f739c5516434c581a47c5eec | /music/migrations/0001_initial.py | ac5da28c56ca2e8706d5e74ff2e9a56684a4e237 | [
"MIT"
] | permissive | Hadryan/Music-Genre-Classification-5 | 7f50b9ef6778f96751c4f68391b730603f39a4bc | f67316b0710b2e5ca52e924e7f8254aa7897751b | refs/heads/master | 2022-11-05T12:33:52.208512 | 2020-06-20T14:23:04 | 2020-06-20T14:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # Generated by Django 3.0.4 on 2020-05-06 09:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.4 (see file header): initial schema for
    # the music app, creating the Album and Song tables.
    initial = True
    dependencies = [
        # Album.user references the project's configured auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artist', models.CharField(max_length=250)),
                ('album_title', models.CharField(max_length=500)),
                ('genre', models.CharField(max_length=100)),
                ('album_logo', models.FileField(upload_to='')),
                ('is_favorite', models.BooleanField(default=False)),
                ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('song_title', models.CharField(max_length=250)),
                ('audio_file', models.FileField(default='', upload_to='')),
                ('is_favorite', models.BooleanField(default=False)),
                # Deleting an Album cascades to its Songs.
                ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Album')),
            ],
        ),
    ]
| [
"noreply@github.com"
] | Hadryan.noreply@github.com |
a5ff6eb348ef84d8446262dfaac54d71e4608979 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2345/60708/256477.py | 3cb0de279548277ee58180dc06b8c2254444f0e3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | N=eval(input())
for n in range(0,N):
l=eval(input())
listwrong=input().split(" ")
listresult=[]
listright=[]
for i in range(0,l):
listright.append(str(i+1))
for i in range(0,l-1):
if listwrong[i]==listwrong[i+1]:
listresult.append(listwrong[i])
break
if len(listresult)!=1:
listresult.append("0")
for i in range(0,l):
if listright[i]!=listwrong[i]:
listresult.append(listright[i])
break
if len(listresult)!=2:
listresult.append("0")
for j,item in enumerate(listresult):
if j!=len(listresult)-1:
print(item,end=" ")
else:
print(item,end="")
print("") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
578200ca81aacd2162a3509662c44dda5f63f3d6 | 33a50bb13812090a36257078522b798762978c66 | /top/api/rest/LogisticsAddressAddRequest.py | 7d798e9cee0dfaf1051e7c34d3b596dd71dc72e3 | [] | no_license | aa3632840/quanlin | 52ac862073608cd5b977769c14a7f6dcfb556678 | 2890d35fa87367d77e295009f2d911d4b9b56761 | refs/heads/master | 2021-01-10T22:05:14.076949 | 2014-10-25T02:28:15 | 2014-10-25T02:28:15 | 23,178,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | '''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class LogisticsAddressAddRequest(RestApi):
	"""Request wrapper for the ``taobao.logistics.address.add`` TOP API.

	Every request parameter is exposed as an instance attribute that
	starts out as ``None``; callers assign values before issuing the
	request through the RestApi machinery.
	"""

	def __init__(self, domain='gw.api.taobao.com', port=80):
		RestApi.__init__(self, domain, port)
		# All API parameters begin unset; the parent class serializes
		# whichever ones the caller fills in.
		for field_name in ('addr', 'cancel_def', 'city', 'contact_name',
		                   'country', 'get_def', 'memo', 'mobile_phone',
		                   'phone', 'province', 'seller_company',
		                   'zip_code'):
			setattr(self, field_name, None)

	def getapiname(self):
		"""Return the TOP method name this request maps to."""
		return 'taobao.logistics.address.add'
| [
"262708239@qq.com"
] | 262708239@qq.com |
6a9939922082aade9970368ecb9bd35d3ca06246 | 1297634c6641ec62c31cf30b8fabe1886aa8d9ea | /products_and_services_client/models/loan_interest_rate.py | afbcd7cfed79b47a5e2af0d0a10ef1bcfc3bb79d | [
"MIT"
] | permissive | pitzer42/opbk-br-quickstart | d77f19743fcc264bed7af28a3d956dbc2d20ac1a | b3f86b2e5f82a6090aaefb563614e174a452383c | refs/heads/main | 2023-03-04T13:06:34.205003 | 2021-02-21T23:41:56 | 2021-02-21T23:41:56 | 336,898,721 | 2 | 0 | MIT | 2021-02-07T22:03:15 | 2021-02-07T21:57:06 | null | UTF-8 | Python | false | false | 7,178 | py | # coding: utf-8
"""
API's OpenData do Open Banking Brasil
As API's descritas neste documento são referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from products_and_services_client.models.interest_rate_fee import InterestRateFee # noqa: F401,E501
class LoanInterestRate(InterestRateFee):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'applications': 'list[ApplicationRate]',
        'minimum_rate': 'str',
        'maximum_rate': 'str'
    }
    # Merge the parent model's declarations so inherited fields keep their
    # (de)serialization metadata.
    if hasattr(InterestRateFee, "swagger_types"):
        swagger_types.update(InterestRateFee.swagger_types)

    attribute_map = {
        'applications': 'applications',
        'minimum_rate': 'minimumRate',
        'maximum_rate': 'maximumRate'
    }
    if hasattr(InterestRateFee, "attribute_map"):
        attribute_map.update(InterestRateFee.attribute_map)

    def __init__(self, applications=None, minimum_rate=None, maximum_rate=None, *args, **kwargs):  # noqa: E501
        """LoanInterestRate - a model defined in Swagger"""  # noqa: E501
        self._applications = None
        self._minimum_rate = None
        self._maximum_rate = None
        self.discriminator = None
        # Assigning through the properties enforces the not-None checks.
        self.applications = applications
        self.minimum_rate = minimum_rate
        self.maximum_rate = maximum_rate
        InterestRateFee.__init__(self, *args, **kwargs)

    @property
    def applications(self):
        """Gets the applications of this LoanInterestRate.  # noqa: E501

        List of charge bands for the effective rate applied when the credit is contracted  # noqa: E501

        :return: The applications of this LoanInterestRate.  # noqa: E501
        :rtype: list[ApplicationRate]
        """
        return self._applications

    @applications.setter
    def applications(self, applications):
        """Sets the applications of this LoanInterestRate.

        List of charge bands for the effective rate applied when the credit is contracted  # noqa: E501

        :param applications: The applications of this LoanInterestRate.  # noqa: E501
        :type: list[ApplicationRate]
        """
        if applications is None:
            raise ValueError("Invalid value for `applications`, must not be `None`")  # noqa: E501

        self._applications = applications

    @property
    def minimum_rate(self):
        """Gets the minimum_rate of this LoanInterestRate.  # noqa: E501

        Minimum percentage charged (effective rate) in the reference month for the contracted loan. Up to 4 decimal places may be computed; all 4 decimal places must be filled in, even with zeros (percentage representation, e.g. 0.1500 represents 15%; the value 1 represents 100%)  # noqa: E501

        :return: The minimum_rate of this LoanInterestRate.  # noqa: E501
        :rtype: str
        """
        return self._minimum_rate

    @minimum_rate.setter
    def minimum_rate(self, minimum_rate):
        """Sets the minimum_rate of this LoanInterestRate.

        Minimum percentage charged (effective rate) in the reference month for the contracted loan. Up to 4 decimal places may be computed; all 4 decimal places must be filled in, even with zeros (percentage representation, e.g. 0.1500 represents 15%; the value 1 represents 100%)  # noqa: E501

        :param minimum_rate: The minimum_rate of this LoanInterestRate.  # noqa: E501
        :type: str
        """
        if minimum_rate is None:
            raise ValueError("Invalid value for `minimum_rate`, must not be `None`")  # noqa: E501

        self._minimum_rate = minimum_rate

    @property
    def maximum_rate(self):
        """Gets the maximum_rate of this LoanInterestRate.  # noqa: E501

        Maximum percentage charged (effective rate) in the reference month for the contracted loan. Up to 4 decimal places may be computed; all 4 decimal places must be filled in, even with zeros (percentage representation, e.g. 0.1500 represents 15%; the value 1 represents 100%)  # noqa: E501

        :return: The maximum_rate of this LoanInterestRate.  # noqa: E501
        :rtype: str
        """
        return self._maximum_rate

    @maximum_rate.setter
    def maximum_rate(self, maximum_rate):
        """Sets the maximum_rate of this LoanInterestRate.

        Maximum percentage charged (effective rate) in the reference month for the contracted loan. Up to 4 decimal places may be computed; all 4 decimal places must be filled in, even with zeros (percentage representation, e.g. 0.1500 represents 15%; the value 1 represents 100%)  # noqa: E501

        :param maximum_rate: The maximum_rate of this LoanInterestRate.  # noqa: E501
        :type: str
        """
        if maximum_rate is None:
            raise ValueError("Invalid value for `maximum_rate`, must not be `None`")  # noqa: E501

        self._maximum_rate = maximum_rate

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: only relevant if a model subclasses dict.
        if issubclass(LoanInterestRate, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LoanInterestRate):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"arthurpitzer@id.uff.br"
] | arthurpitzer@id.uff.br |
3a877527b811e3300cf094da3d9842cd84d9119f | 629606ef6e0ce252f74729ac60f57ca8805c3c78 | /hw_001_Django/hw_009_test1/venv/bin/pip | 35101f0af30f654e8feaaf103ad4040a374ec8aa | [] | no_license | LeeXyan/lxgzhw006 | cc31024874725f60b766c9d5d24c2dafc66b8de3 | 621a73544262df7e104806579242deeaa8dbe2c2 | refs/heads/master | 2021-10-10T17:41:52.381843 | 2019-01-15T00:25:08 | 2019-01-15T00:25:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | #!/home/lxgzhw/PythonWork/hw_001_Django/hw_009_test1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools console-script launcher: resolves pip's
# registered 'console_scripts' entry point and invokes it.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' or '.exe' suffix so pip sees a
    # clean program name in sys.argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"1156956636@qq.com"
] | 1156956636@qq.com | |
0d21f18658bf890b75fc662137cf8b561bcd829b | 059e13f143a56ffe091c3181000c6928a14e2931 | /gen_of_passwords/asgi.py | 4bfcef1cf41b7ba7a6afb7dd84b04e0bb3c4f9e2 | [] | no_license | bhobbs20/Password-Generator | 5b4fea8720c4b3f36bb129e87e3a0312247d17ea | 043be29289e9c217a9c2db2dacfd219f8bed11fc | refs/heads/master | 2022-12-29T05:18:45.905247 | 2020-10-13T19:21:27 | 2020-10-13T19:21:27 | 303,807,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
ASGI config for gen_of_passwords project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gen_of_passwords.settings')
application = get_asgi_application()
| [
"brianhobbs216@gmail.com"
] | brianhobbs216@gmail.com |
dcb68361f62778ad7774c7e26665ce66d6246006 | 0378a2f1adad86f439ce214ebfe2a904cda6eb41 | /badball/migrations/0059_auto_20181203_0912.py | f113b7a5add80e8fe4053ec5329a7e873956d941 | [] | no_license | jeremyjbowers/badball | 2035902b5f8d2bc05219af887bd3e1bfcb45192b | 323289ec871e0e7e98e397c9d528d83773c86f85 | refs/heads/master | 2020-12-20T22:08:53.083230 | 2020-01-25T23:09:11 | 2020-01-25T23:09:11 | 236,222,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # Generated by Django 2.0.8 on 2018-12-03 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('badball', '0058_team_championships'),
]
operations = [
migrations.AlterField(
model_name='tradereceipt',
name='picks',
field=models.ManyToManyField(blank=True, null=True, related_name='picks', to='badball.DraftPick'),
),
migrations.AlterField(
model_name='tradereceipt',
name='players',
field=models.ManyToManyField(blank=True, null=True, related_name='players', to='badball.Player'),
),
]
| [
"jeremyjbowers@gmail.com"
] | jeremyjbowers@gmail.com |
c33cd22e723b9f23db21dcc4d74c7254b66bddb4 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Programming_for_Computations/osc_odespy.py | 738ce8a9cd8f905bd423e40bb2c2d69a7eacb3ec | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 2,100 | py | """Use odespy to solve undamped oscillation ODEs."""
import odespy
from matplotlib.pyplot import \
plot, savefig, legend, xlabel, figure, title, hold, axis, show
def f(u, t, omega=2):
    """Right-hand side of u'' = -omega**2*u written as a first-order system.

    The state vector is ``[v, u]`` (velocity first, position second);
    the returned derivative is ``[v', u'] = [-omega**2*u, v]``.
    """
    velocity = u[0]
    position = u[1]
    return [-(omega**2) * position, velocity]
def compare(odespy_methods,
            omega,
            X_0,
            number_of_periods,
            time_intervals_per_period=20):
    """Solve the oscillator ODE with each solver and plot the solutions.

    odespy_methods: one Odespy solver class or a list of them.
    omega: angular frequency of the oscillator.
    X_0: initial displacement (initial velocity is 0).
    number_of_periods: total simulation length, in periods.
    time_intervals_per_period: time steps per period (resolution).

    Plots the last 6 periods of each numerical solution together with
    the exact solution X_0*cos(omega*t), and writes tmp.pdf/tmp.png.
    """
    from numpy import pi, linspace, cos
    P = 2*pi/omega          # length of one period
    dt = P/time_intervals_per_period
    T = number_of_periods*P

    # If odespy_methods is not a list, but just the name of
    # a single Odespy solver, we wrap that name in a list
    # so we always have odespy_methods as a list
    if type(odespy_methods) != type([]):
        odespy_methods = [odespy_methods]

    # Make a list of solver objects
    solvers = [method(f, f_args=[omega]) for method in
               odespy_methods]
    for solver in solvers:
        solver.set_initial_condition([0, X_0])

    # Compute the time points where we want the solution
    dt = float(dt)  # avoid integer division
    N_t = int(round(T/dt))
    time_points = linspace(0, N_t*dt, N_t+1)

    legends = []
    for solver in solvers:
        sol, t = solver.solve(time_points)
        # Solution layout assumes column 0 is velocity and column 1 is
        # position, matching f's [v, u] state ordering.
        v = sol[:,0]
        u = sol[:,1]

        # Plot only the last p periods
        p = 6
        m = p*time_intervals_per_period # no time steps to plot
        plot(t[-m:], u[-m:])
        hold('on')
        legends.append(solver.name())
        xlabel('t')
    # Plot exact solution too
    plot(t[-m:], X_0*cos(omega*t)[-m:], 'k--')
    legends.append('exact')
    legend(legends, loc='lower left')
    axis([t[-m], t[-1], -2*X_0, 2*X_0])
    title('Simulation of %d periods with %d intervals per period'
          % (number_of_periods, time_intervals_per_period))
    savefig('tmp.pdf'); savefig('tmp.png')
    show()
# Demo run: compare the Euler-Cromer and RK-Fehlberg solvers over 200
# periods at 240 intervals per period.
compare(
    odespy_methods=[
        odespy.EulerCromer,
        #odespy.BackwardEuler,
        odespy.RKFehlberg,
        ],
    omega=2, X_0=2,
    number_of_periods=200,
    time_intervals_per_period=240)
| [
"bb@b.om"
] | bb@b.om |
82e1ac853cc31945253c155f19edfcdbcc2ff2ce | 71e8bdddd84338bbb2d77934351d76251c2fd77d | /best-time-to-buy-and-sell-stock-iv.py | d811820af9e000057e587a39aa06076967a8a1a7 | [] | no_license | onestarshang/leetcode | 3da20fbec1b42d3565eb95a64ea3f30c29f1e1eb | 0a7aa09a2b95e4caca5b5123fb735ceb5c01e992 | refs/heads/master | 2021-01-09T06:00:06.018037 | 2016-12-17T16:17:49 | 2016-12-17T16:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | '''
https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iv/
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
'''
class Solution(object):
def maxProfit(self, k, prices): # O(kn) in worst case
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
n = len(prices)
if n == 0:
return 0
if k >= n / 2:
ans = 0
for i in xrange(1, n):
if prices[i] > prices[i - 1]:
ans += prices[i] - prices[i - 1]
return ans
d = [[0 for j in xrange(n)] for i in xrange(k + 1)]
for t in xrange(1, k + 1):
max_d = -(1 << 31)
for i in xrange(n):
# d[t][i] = max(d[t][i], d[t][i - 1], d[t - 1][j] + prices[i] - prices[j])
d[t][i] = max(d[t][i], d[t][i - 1], max_d + prices[i])
max_d = max(max_d, d[t - 1][i] - prices[i])
return d[k][n - 1]
if __name__ == '__main__':
f = Solution().maxProfit
assert f(2, [1, 4, 2]) == 3
| [
"irachex@gmail.com"
] | irachex@gmail.com |
c87a4af03eb9fa29c6d0bbf0cbccdba9ae574442 | c4d9bdeb5353c6dd014f7c3f8d1f6380a76402af | /pylibviso2/node.py | 322165a8990a261c071e8120c8f71250ff8322d9 | [] | no_license | AtlasBuggy/libviso2-python | 28f390b7f516d7abe5c3acfdff5544d47788f726 | 5ae736a2e7f2bbe362e839c8d1c9dd2340245909 | refs/heads/master | 2021-03-22T01:09:12.302707 | 2017-10-26T06:37:22 | 2017-10-26T06:37:22 | 108,196,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import time
from atlasbuggy.opencv import OpenCVPipeline
from .viso2 import Viso2Mono
class Viso2MonoPipeline(OpenCVPipeline):
    """OpenCV pipeline stage that runs libviso2 monocular visual
    odometry on incoming frames and broadcasts the estimated pose.
    """

    def __init__(self, f, cu, cv, width=None, height=None, enabled=True, logger=None):
        super(Viso2MonoPipeline, self).__init__(enabled, logger=logger)
        # Monocular odometry backend: focal length f, principal point
        # (cu, cv), and an optional fixed frame size.
        self.viso2 = Viso2Mono(f, cu, cv, width, height)

        self.pose_service = "pose"
        self.define_service(self.pose_service, message_type=tuple)

    def pipeline(self, image):
        """Feed one frame to viso2; broadcast (x, y, z) on success."""
        status, processed = self.viso2.update(image)
        if status:
            pose = (self.viso2.x, self.viso2.y, self.viso2.z)
            self.broadcast_nowait(pose, self.pose_service)
        return processed
| [
"woz4tetra@gmail.com"
] | woz4tetra@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.