blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 โ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 โ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c2d4f70f2f7b140cb2fb4fe4330a6d2fdb52a14 | d9eb516d18f7336c73d556cfcb4e38abc93a8248 | /nwpc_monitor_web/app/api/api_test.py | 4af8c71945144dbbe5aece3d22ced74d3565f95a | [] | no_license | cash2one/nwpc-monitor-platform | c555c544e676059ba05f063ef71065b8783fd9ff | ca7ad8751cd59f4f42337c52e50424ba06876987 | refs/heads/master | 2021-06-23T11:49:58.799933 | 2017-08-14T03:29:21 | 2017-08-14T03:29:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | from flask import request, jsonify, json
import gzip
from nwpc_monitor_web.app.api import api_app
@api_app.route("/test/gzip")
def gzip_hello_page():
    # Plain-text landing page describing the gzip request-body test endpoints.
    return "This page is used to test gzip on request body."
@api_app.route("/test/gzip/normal", methods=['POST'])
def get_normal_data():
    """Accept an uncompressed form POST; baseline for the gzip test."""
    message = request.form['message']  # touch the field so form parsing runs
    return jsonify({'status': 'ok'})
@api_app.route("/test/gzip/compress", methods=['POST'])
def get_gzip_data():
    """Accept a gzip-compressed JSON POST body and acknowledge it."""
    encoding = request.headers.get('content-encoding', '').lower()
    raw_body = request.data
    if encoding == 'gzip':
        print('decompress gzip data')
        decoded = gzip.decompress(raw_body).decode('utf-8')
        data = json.loads(decoded)
        message = data['message']
    return jsonify({'status': 'ok'})
| [
"perillaroc@gmail.com"
] | perillaroc@gmail.com |
f4803af085f5d10fc471178bdbd2010304291f53 | a3c68eafdb433c981f2b90e86f895e4f121c69fb | /lintcode/900-ไบๅๆ็ดขๆ ไธญๆๆฅ่ฟ็ๅผ.py | b2b3535ffd2d8a212daffa7bb99c8a41187b45fb | [] | no_license | Cassiexyq/Program-Exercise | ccc236ea76ca99ddc6fe0c4c47edebc3d557cfad | e962cc3add047d61df275dd3e22a091018fd964a | refs/heads/master | 2020-04-25T20:27:33.561226 | 2019-09-22T15:29:35 | 2019-09-22T15:29:35 | 173,050,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # -*- coding: utf-8 -*-
# @Author: xyq
"""
Definition of TreeNode:
Walk the tree: if the current node's value is less than the target, the closest
value is either the current value or one in the right subtree; if it is greater,
either the current value or one in the left subtree.
"""
class TreeNode:
    """Binary-search-tree node: a value plus left/right child links."""
    def __init__(self, val):
        self.val = val     # payload stored at this node
        self.left = None   # left child, None when absent
        self.right = None  # right child, None when absent
class Solution:
    """
    @param root: the given BST
    @param target: the given target
    @return: the value in the BST that is closest to the target
    """
    def closestValue(self, root, target):
        # Iterative walk along the BST search path for `target`: the
        # closest value must lie on that root-to-leaf path. On ties the
        # value seen first (closest to the root) is kept, matching the
        # recursive formulation's "<=" preference for the ancestor.
        best = root.val
        node = root
        while node is not None:
            if abs(node.val - target) < abs(best - target):
                best = node.val
            if node.val == target:
                return node.val
            node = node.right if node.val < target else node.left
        return best
| [
"cassiexuan_yq@163.com"
] | cassiexuan_yq@163.com |
21ee4ec4148cb4f5d1c6568bc725075124605545 | 851e32d7dbfc43737625c4bf23ef7e785732cfdf | /src/main.py | 98bbff032cfdf4ca5834b6acecb472dc850089d9 | [
"CC0-1.0"
] | permissive | ytyaru/Python.PySide2.QScrollArea.20200501084013 | 37d765de2845d24598ec9ac9bade07972464ca59 | 8a70445443e140ed8910cba4bdfcd7a6842ec692 | refs/heads/master | 2022-05-20T12:46:23.698689 | 2020-04-30T23:56:46 | 2020-04-30T23:56:46 | 260,343,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys, os
from PySide2.QtWidgets import*
from PySide2.QtGui import*
from PySide2 import QtCore
import PySide2
def get_img_path():
    """Return the path of res/img.png in this script's parent directory."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.dirname(script_dir)
    return os.path.join(project_root, 'res', 'img.png')
# Qt application and top-level window setup (the QApplication must exist
# before any widget is created).
application = QApplication(sys.argv)
window = QWidget()
window.setWindowTitle("QScrollArea")
# Load the demo image and scale it 20x taller so it overflows the window,
# which makes the scroll area actually scrollable.
picture = QPixmap(get_img_path())
picture = picture.scaledToHeight(picture.height() * 20, QtCore.Qt.FastTransformation)
#label = QLabel(window)
label = QLabel()
label.setPixmap(picture)
label.setGeometry(QtCore.QRect(0, 0, picture.width(), picture.height()))
#scroller = QScrollArea(window)
scroller = QScrollArea()
scroller.setWidget(label)  # the label with the huge pixmap becomes the scrolled content
layout = QGridLayout()
layout.addWidget(scroller, 0, 0)
window.setLayout(layout)
# Size the window to the scaled picture before showing it.
window.resize(picture.width(), picture.height())
window.show()
sys.exit(application.exec_())  # enter the Qt event loop; propagate its exit status
| [
"yttry0@gmail.com"
] | yttry0@gmail.com |
5228de27fb90b54abd559f265233cc629783ad00 | c3856325644c582625fef74ce798fcbe0355ad4d | /workflow/rules/VariantCall/Genotyping.smk | c791f4ca119de4043f5499d1c77a7f5223db291a | [] | no_license | mahajrod/AVAVpipe | fcd1eaa4e8214a23113334a9c63e3e15c0e6182f | 18354c343e2285b1086380f3aefce03a9083a463 | refs/heads/master | 2023-03-29T03:48:02.364703 | 2021-04-03T03:42:47 | 2021-04-03T03:42:47 | 345,326,778 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,844 | smk | localrules: create_sample_file
# Per-region GATK HaplotypeCaller run producing a per-sample gVCF chunk.
rule haplotypecaller_gvcf:
    input:
        region=reference_recalibration_region_dir_path / "intervals/region_{region_id}.list",
        bam=rules.applybsqr.output.bam,
        bai=rules.applybsqr.output.bai,
        reference=config["reference"],
    output:
        gvcf=temp(snpcall_dir_path / "{sample_id}/haplotypecaller_gvcf/{sample_id}.region_{region_id}.gvcf")
    log:
        std=log_dir_path / "{sample_id}/haplotypecaller_gvcf/haplotypecaller_gvcf.region_{region_id}.log",
        # BUGFIX: the path previously started with "/haplotypecaller_gvcf/...";
        # pathlib treats a leading "/" as an absolute component and discards
        # cluster_log_dir_path entirely (compare cluster_err below).
        cluster_log=cluster_log_dir_path / "haplotypecaller_gvcf/{sample_id}.haplotypecaller_gvcf.region_{region_id}.cluster.log",
        cluster_err=cluster_log_dir_path / "haplotypecaller_gvcf/{sample_id}.haplotypecaller_gvcf.region_{region_id}.cluster.err"
    benchmark:
        benchmark_dir_path / "{sample_id}/haplotypecaller_gvcf/haplotypecaller_gvcf.region_{region_id}.benchmark.txt"
    conda:
        "../../../%s" % config["conda_config"]
    resources:
        cpus=config["haplotypecaller_threads"],
        time=config["haplotypecaller_time"],
        mem=config["haplotypecaller_mem_mb"],
    threads: config["haplotypecaller_threads"]
    shell:
        "gatk --java-options '-Xmx{resources.mem}m' HaplotypeCaller -R {input.reference} -L {input.region} "
        " -ERC GVCF -G StandardAnnotation -G AS_StandardAnnotation -G StandardHCAnnotation"
        " -I {input.bam} -O {output} > {log.std} 2>&1"
# Concatenate the per-region gVCF chunks of one sample into a single gVCF.
rule merge_splited_gvcf:
    input:
        # every per-region gVCF produced by haplotypecaller_gvcf for this sample
        lambda wildcards: expand(snpcall_dir_path / "{sample_id}/haplotypecaller_gvcf/{sample_id}.region_{region_id}.gvcf",
                                 region_id=glob_wildcards("%s/intervals/region_{region_id}.list" % reference_recalibration_region_dir_path)[0],
                                 sample_id=[wildcards.sample_id])
    output:
        snpcall_dir_path / "{sample_id}/{sample_id}.gvcf"
    params:
        # shell glob that is listed (version-sorted) to build the ordered chunk list
        input_files=snpcall_dir_path / "{sample_id}/haplotypecaller_gvcf/*.gvcf",
        splited_gvcf_list=snpcall_dir_path / "{sample_id}/{sample_id}.splited_gvf_list",
        reference_dict=reference_dict_path
    log:
        std=log_dir_path / "{sample_id}.merge_splited_gvcf.log",
        cluster_log=cluster_log_dir_path / "{sample_id}.merge_splited_gvcf.cluster.log",
        cluster_err=cluster_log_dir_path / "{sample_id}.merge_splited_gvcf.cluster.err"
    benchmark:
        benchmark_dir_path / "{sample_id}/merge_splited_gvcf.benchmark.txt"
    conda:
        "../../../%s" % config["conda_config"]
    resources:
        cpus=config["merge_splited_gvcf_threads"],
        time=config["merge_splited_gvcf_time"],
        mem=config["merge_splited_gvcf_mem_mb"],
    threads: config["merge_splited_gvcf_threads"]
    shell:
        # "sort -V" orders region files numerically (region_2 before region_10)
        "ls {params.input_files} | sort -V > {params.splited_gvcf_list}; "
        " workflow/scripts/combine_same_sample_vcf.py -f {params.splited_gvcf_list} -o {output} > {log.std} 2>&1"
# Index the merged per-sample gVCF so downstream GATK tools can query it.
rule index_merged_gvcf:
    input:
        rules.merge_splited_gvcf.output
    output:
        snpcall_dir_path / "{sample_id}/{sample_id}.gvcf.idx"
    log:
        std=log_dir_path / "{sample_id}.index_merged_gvcf.log",
        cluster_log=cluster_log_dir_path / "{sample_id}.index_merged_gvcf.cluster.log",
        cluster_err=cluster_log_dir_path / "{sample_id}.index_merged_gvcf.cluster.err"
    benchmark:
        benchmark_dir_path / "{sample_id}/index_merged_gvcf.benchmark.txt"
    conda:
        "../../../%s" % config["conda_config"]
    resources:
        cpus=config["index_merged_gvcf_threads"],
        time=config["index_merged_gvcf_time"],
        mem=config["index_merged_gvcf_mem_mb"],
    threads: config["index_merged_gvcf_threads"]
    # BUGFIX: the command used "-I ${input}". Snakemake substitutes "{input}",
    # so "${input}" expanded to "$<path>" and passed a broken argument to gatk.
    shell:
        "gatk --java-options '-Xmx{resources.mem}m' IndexFeatureFile -I {input} > {log.std} 2>&1"
# Build the GATK "sample name map" TSV (one "sample<TAB>gvcf-path" line per
# sample), consumed by GenomicsDBImport. Listed in `localrules`: runs locally.
rule create_sample_file:
    input:
        expand(snpcall_dir_path /"{sample_id}/{sample_id}.gvcf", sample_id=config["sample_list"])
    output:
        sample_file=joint_snpcall_dir_path / "sample_file.tsv"
    run:
        with open(output.sample_file, "w") as out_fd:
            for sample in config["sample_list"]:
                # one line per sample: "<sample>\t<snpcall_dir>/<sample>/<sample>.gvcf"
                out_fd.write("{0}\t{1}/{0}/{0}.gvcf\n".format(sample, str(snpcall_dir_path)))
# Import all per-sample gVCFs into a GenomicsDB workspace for joint genotyping.
rule genomicsdbimport:
    input:
        gvcfs=expand(snpcall_dir_path/ "{sample_id}/{sample_id}.gvcf", sample_id=config["sample_list"]),
        gvcf_indexes=expand(snpcall_dir_path / "{sample_id}/{sample_id}.gvcf.idx", sample_id=config["sample_list"]),
        sample_file=rules.create_sample_file.output,
        interval_file=rules.prepare_genotyping_whitelist_intervals.output
    output:
        # NOTE(review): declared as directory() but the path names callset.json,
        # and the same path is passed to --genomicsdb-workspace-path below --
        # confirm GATK should receive the workspace directory, not the json.
        directory(joint_snpcall_dir_path / "gvcf_database/callset.json")
    params:
        batch_size=50,
        reader_threads=config["genomicsdbimport_reader_threads"],
        interval_threads=config["genomicsdbimport_interval_threads"],
    log:
        std=log_dir_path / "genomicsdbimport.log",
        cluster_log=cluster_log_dir_path / "genomicsdbimport.cluster.log",
        cluster_err=cluster_log_dir_path / "genomicsdbimport.cluster.err"
    benchmark:
        # BUGFIX: was "/genomicsdbimport.benchmark.txt"; joining an absolute
        # component makes pathlib discard benchmark_dir_path entirely.
        benchmark_dir_path / "genomicsdbimport.benchmark.txt"
    conda:
        "../../../%s" % config["conda_config"]
    resources:
        cpus=config["genomicsdbimport_reader_threads"] + config["genomicsdbimport_interval_threads"],
        time=config["genomicsdbimport_time"],
        mem=config["genomicsdbimport_mem_mb"],
    threads: config["genomicsdbimport_reader_threads"] + config["genomicsdbimport_interval_threads"]
    shell:
        " gatk --java-options '-Xmx{resources.mem}m' GenomicsDBImport --batch-size {params.batch_size} "
        " --sample-name-map {input.sample_file} "
        " --max-num-intervals-to-import-in-parallel {params.interval_threads}"
        " --reader-threads {params.reader_threads}"
        " --genomicsdb-workspace-path {output} "
        " -L {input.interval_file} > {log.std} 2>&1"
# Joint genotyping across all samples from the GenomicsDB workspace.
rule genotypegvcfs:
    input:
        database=rules.genomicsdbimport.output,
        reference=reference_path
    output:
        joint_snpcall_dir_path / "all_samples.vcf.gz"
    log:
        std=log_dir_path / "genotypegvcfs.log",
        cluster_log=cluster_log_dir_path / "genotypegvcfs.cluster.log",
        cluster_err=cluster_log_dir_path / "genotypegvcfs.cluster.err"
    benchmark:
        benchmark_dir_path / "genotypegvcfs.benchmark.txt"
    conda:
        "../../../%s" % config["conda_config"]
    resources:
        cpus=config["genotypegvcfs_threads"],
        time=config["genotypegvcfs_time"],
        mem=config["genotypegvcfs_mem_mb"],
    threads: config["genotypegvcfs_threads"]
    # NOTE(review): "gendb://{input.database}" expands to the genomicsdbimport
    # output path; GATK expects the workspace directory here -- confirm the
    # declared output of that rule matches what GenotypeGVCFs needs.
    shell:
        " gatk --java-options '-Xmx{resources.mem}m' GenotypeGVCFs -R {input.reference} "
        " -G StandardAnnotation -G AS_StandardAnnotation"
        " -V gendb://{input.database}"
        " -O {output} > {log.std} 2>&1"
| [
"mahajrod@gmail.com"
] | mahajrod@gmail.com |
ade1298871e666e27b391430372bd5836464737a | 3ff9821b1984417a83a75c7d186da9228e13ead9 | /No_0122_Best Time to Buy and Sell Stock II/by_price_gain_collection_based_on_list_and_sum.py | 2213134e6082ff8e4b717a78ad5b331cbdf3dd0a | [
"MIT"
] | permissive | brianchiang-tw/leetcode | fd4df1917daef403c48cb5a3f5834579526ad0c2 | 6978acfb8cb767002cb953d02be68999845425f3 | refs/heads/master | 2023-06-11T00:44:01.423772 | 2023-06-01T03:52:00 | 2023-06-01T03:52:00 | 222,939,709 | 41 | 12 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | '''
Description:
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: [7,1,5,3,6,4]
Output: 7
Explanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.
Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.
Example 2:
Input: [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are
engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
'''
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Maximum profit with unlimited buy/sell transactions.

        Any optimal trading schedule collects exactly every positive
        day-over-day price increase, so summing those gains gives the
        answer directly. Empty and single-element inputs yield 0.
        """
        # Sum the gains on the fly instead of buffering them in a list
        # (the previous version built price_gain and then summed it).
        return sum(
            prices[i + 1] - prices[i]
            for i in range(len(prices) - 1)
            if prices[i + 1] > prices[i]
        )


# n : the length of input list, prices.
## Time Complexity: O( n )
#
# The overhead in time is one scan over consecutive price pairs.
## Space Complexity: O( 1 )
#
# Gains are summed by a generator; no intermediate list is stored.
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'stock_sequence')


def test_bench():
    """Print maxProfit for the three examples from the problem statement."""
    cases = [
        TestEntry(stock_sequence=[7, 1, 5, 3, 6, 4]),
        TestEntry(stock_sequence=[1, 2, 3, 4, 5]),
        TestEntry(stock_sequence=[7, 6, 4, 3, 1]),
    ]
    # expected output:
    '''
    7
    4
    0
    '''
    for case in cases:
        print(Solution().maxProfit(prices=case.stock_sequence))
    return


if __name__ == '__main__':
    test_bench()
| [
"brianchiang1988@icloud.com"
] | brianchiang1988@icloud.com |
90e882a47d44fbce8782d95b4dffbb2d9f53edeb | eb6c8428a8ef76c310e224adf604a2090aa84407 | /dashboard/dashboard/pinpoint/handlers/new.py | 06da483f60e223aa5277d63354429a65b1b693f8 | [
"BSD-3-Clause"
] | permissive | saadmahboob/catapult | d5809669f9600aeb1e39ff09ced4206c61474913 | 123b9d8ec2a233053e908750d1ce04c7c16fc3d8 | refs/heads/master | 2021-01-20T06:08:06.482685 | 2017-08-25T21:39:52 | 2017-08-25T21:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import webapp2
from dashboard.api import api_auth
from dashboard.pinpoint.handlers import quest_generator
from dashboard.pinpoint.models import change
from dashboard.pinpoint.models import job as job_module
_ERROR_BUG_ID = 'Bug ID must be an integer.'  # user-visible validation message
class New(webapp2.RequestHandler):
  """Handler that cooks up a fresh Pinpoint job."""
  def post(self):
    # Error boundary: validation/auth failures are reported back to the
    # caller as a JSON {'error': ...} payload instead of an HTTP error page.
    try:
      self._CreateJob()
    except (api_auth.ApiAuthException, KeyError, TypeError, ValueError) as e:
      # NOTE(review): `e.message` is Python-2-only; on Python 3 this would
      # raise AttributeError -- confirm the runtime before porting.
      self._WriteErrorMessage(e.message)
  def _WriteErrorMessage(self, message):
    # Serialize the error message as the JSON response body.
    self.response.out.write(json.dumps({'error': message}))
  @api_auth.Authorize
  def _CreateJob(self):
    """Start a new Pinpoint job."""
    # 'auto_explore' arrives as the string '1' when enabled.
    auto_explore = self.request.get('auto_explore') == '1'
    bug_id = self.request.get('bug_id')
    # The two endpoints of the bisection, as raw request parameters.
    change_1 = {
        'base_commit': {
            'repository': self.request.get('start_repository'),
            'git_hash': self.request.get('start_git_hash')
        }
    }
    change_2 = {
        'base_commit': {
            'repository': self.request.get('end_repository'),
            'git_hash': self.request.get('end_git_hash')
        }
    }
    # Validate arguments and convert them to canonical internal representation.
    arguments, quests = quest_generator.GenerateQuests(self.request)
    bug_id = _ValidateBugId(bug_id)
    changes = _ValidateChanges(change_1, change_2)
    # Create job.
    job = job_module.Job.New(
        arguments=arguments,
        quests=quests,
        auto_explore=auto_explore,
        bug_id=bug_id)
    # Add changes.
    for c in changes:
      job.AddChange(c)
    # Put job into datastore.
    job_id = job.put().urlsafe()
    # Start job.
    job.Start()
    job.put()
    # TODO: Figure out if these should be underscores or lowerCamelCase.
    # TODO: They should match the input arguments.
    self.response.out.write(json.dumps({
        'jobId': job_id,
        'jobUrl': job.url
    }))
def _ValidateBugId(bug_id):
if not bug_id:
return None
try:
return int(bug_id)
except ValueError:
raise ValueError(_ERROR_BUG_ID)
def _ValidateChanges(change_1, change_2):
  """Convert the two raw change dicts into a pair of Change objects."""
  return tuple(change.Change.FromDict(d) for d in (change_1, change_2))
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
782b8859d7efbb57fdff7907d597c122d2c57d78 | 037a03d4b8b81bc39dc41cb4f3726f8297c8b672 | /0263.py | 4c1c7303e2e3fc208a7b7b71710861f2a6628ea9 | [] | no_license | Agchai52/Leetcode1 | ee3433ef6f6c3ddd800204c25a456dc7c3fd0053 | 9535d038bee690b7c7aeca352a4ab32d188684bb | refs/heads/master | 2021-08-22T02:59:45.632548 | 2020-05-21T00:31:45 | 2020-05-21T00:31:45 | 185,273,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
'''
1. num, mod = num / 2 or / 3 or / 5, %2 or %3 or %5
2. while num != 1, if mod == 1
3. num <= 0 return False
'''
if num <= 0: return False
if num == 1: return True
while num % 5 == 0:
num = num / 5
while num % 3 == 0:
num = num / 3
while num % 2 == 0:
num = num / 2
return True if num == 1 else False
| [
"noreply@github.com"
] | Agchai52.noreply@github.com |
72ecf1bbef78ab913ad0732a134a9125ac9af106 | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /leetcode/all-possible-full-binary-trees.py | 1253725a56852cb9657589735d8547ebbb789ceb | [] | no_license | nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def allPossibleFBT(self, N: int) -> List[TreeNode]:
if N % 2 == 0:
return []
if N == 1:
return [TreeNode()]
res = []
for left_count in range(1, N, 2):
right_count = N - 1 - left_count
if right_count < 1:
continue
left_trees = self.allPossibleFBT(left_count)
right_trees = self.allPossibleFBT(right_count)
for left in left_trees:
for right in right_trees:
res.append(TreeNode(left=left, right=right))
return res
| [
"sunghyo.jung@navercorp.com"
] | sunghyo.jung@navercorp.com |
5cf169c88f9617cf622480494fde3c9f3025f2db | a9d6a3b0fe418e4e5cc131ebc05f9b56c0e4543e | /chapter04-threads/chapter4_threading_function.py | 345f671d197b3a4c0d4f5338ee0982b387d82631 | [] | no_license | Kianqunki/Python_CorePythonApplicationsProgramming | 34a36ba64bdc303814de507c4fcfc3c81ff88b5f | 77263c1fde0d02aade180f7e73d2cdee1d170d58 | refs/heads/master | 2021-05-07T02:41:44.567088 | 2014-10-27T17:43:51 | 2014-10-27T17:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | import threading
from time import sleep, ctime
def loop(loop_id, secs):
print('start loop', loop_id, 'at:', ctime())
sleep(secs)
print('loop', loop_id, 'done at:', ctime())
def main():
print("starting time", ctime())
threads = []
t1 = threading.Thread(target=loop, args=(1, 4))
threads.append(t1)
t2 = threading.Thread(target=loop, args=(2, 2))
threads.append(t2)
# start threads
for t in threads:
t.start()
# wait for all threads to end
for t in threads:
t.join()
print("ending time", ctime())
if __name__=="__main__":
main()
| [
"georstef@gmail.com"
] | georstef@gmail.com |
50ff1ad07162521c9a9e7a755bc28535f6d0ee9e | 6d14f197ff95a6626cbb15588db1e60d37b93a81 | /ๆฐๅญ่ฝฌๅไธบๆฏไธชๆฐ็้ๅ้ๅฝ.py | 9cb61b09f461ad9edf82d9f23f5079ede61b742e | [] | no_license | sonichuang/My-py-file | 144b35bf4a7dd907fe7801802221421659671910 | 4cfd0bf7e0569f5b082d867a0798f3850493f204 | refs/heads/master | 2020-04-17T04:19:36.461705 | 2019-04-23T13:48:54 | 2019-04-23T13:48:54 | 166,225,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # ๆ็้ๅฝ็ๅๆณ
def get(x):
k = []
if x:
k.append(x%10)
return get(x//10) + k
else:
return k
t = int(input('input a number:'))
print(get(t))
| [
"huangshijinsonic@gmail.com"
] | huangshijinsonic@gmail.com |
0cd963e133be4653290a73653215a3e405c37c7b | 31f30239345e869e13111c1e5c5ec4ea032aa9ea | /0x01-warm_up/1-dictionary_of_list_of_dictionaries.py | fac1a43f2b91b5a2f41e6ddcd79db7b0ab34d99d | [] | no_license | Dkazem91/holbertonschool-interviews | a187970e8cb2750f72b06717331e13114a43ad8a | 0088b0b02cb8d06f118639d87c1b4de4b23ebcba | refs/heads/master | 2020-03-31T10:39:11.493231 | 2018-10-08T23:06:19 | 2018-10-08T23:06:19 | 152,143,116 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/python3
# dictionary of dictionaries and API calls
import json
import requests
if __name__ == "__main__":
f = open("todo_all_employees.json", "w")
send = {}
ids = requests.get('https://jsonplaceholder.typicode.com/users').json()
tasks = requests.get('https://jsonplaceholder.typicode.com/todos').json()
for person in ids:
username = person['username']
info = [
{'username': username, 'task': task['title'],
'completed': task['completed']} for task in tasks
if task['userId'] == person['id']]
send[person['id']] = info
json.dump(send, f)
| [
"Dkazemian@gmail.com"
] | Dkazemian@gmail.com |
53311970e736947d52fc92b4310677cf6ecd10da | 2a8abd5d6acdc260aff3639bce35ca1e688869e9 | /telestream_cloud_qc_sdk/test/test_active_format_descriptor_test.py | 8563ecbe7cb7d078aa53bde8370cca49e98e01a7 | [
"MIT"
] | permissive | Telestream/telestream-cloud-python-sdk | 57dd2f0422c83531e213f48d87bc0c71f58b5872 | ce0ad503299661a0f622661359367173c06889fc | refs/heads/master | 2021-01-18T02:17:44.258254 | 2020-04-09T11:36:07 | 2020-04-09T11:36:07 | 49,494,916 | 0 | 0 | MIT | 2018-01-22T10:07:49 | 2016-01-12T11:10:56 | Python | UTF-8 | Python | false | false | 1,585 | py | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.active_format_descriptor_test import ActiveFormatDescriptorTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestActiveFormatDescriptorTest(unittest.TestCase):
    """ActiveFormatDescriptorTest unit test stubs (auto-generated)."""
    def setUp(self):
        # No shared fixtures; instances are built per-test via make_instance.
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a test ActiveFormatDescriptorTest model instance.
            include_optional is a boolean: when False only required
            params are included, when True both required and
            optional params are included """
        # model = telestream_cloud_qc.models.active_format_descriptor_test.ActiveFormatDescriptorTest()  # noqa: E501
        if include_optional :
            return ActiveFormatDescriptorTest(
                allowed_formats = '0',
                reject_on_error = True,
                checked = True
            )
        else :
            # every field is optional, so the required-only instance is empty
            return ActiveFormatDescriptorTest(
        )
    def testActiveFormatDescriptorTest(self):
        """Smoke-test construction with and without optional params."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    unittest.main()
| [
"cloudsupport@telestream.net"
] | cloudsupport@telestream.net |
5af26f995f23f62d34708d8a1e9b80aced484962 | 99094cc79bdbb69bb24516e473f17b385847cb3a | /264.Ugly Number II/Solution.py | caf8e0c4c5129aa2b5f915a4478ce417918682d6 | [] | no_license | simonxu14/LeetCode_Simon | 7d389bbfafd3906876a3f796195bb14db3a1aeb3 | 13f4595374f30b482c4da76e466037516ca3a420 | refs/heads/master | 2020-04-06T03:33:25.846686 | 2016-09-10T00:23:11 | 2016-09-10T00:23:11 | 40,810,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,795 | py | __author__ = 'Simon'
class Solution(object):
# def nthUglyNumber(self, n):
# """
# :type n: int
# :rtype: int
# """
# OPT = []
# OPT.append(1)
# L1 = []
# L2 = []
# L3 = []
# L1.append(2)
# L2.append(3)
# L3.append(5)
# i = 1
# number = 0
# while i < n:
# # if L1[len(L1)-1] == L2[len(L2)-1] and L1[len(L1)-1] == L3[len(L3)-1]:
# # OPT.append(L1[len(L1)-1])
# # L1.append(L1[len(L1)-1]+2)
# # L2.append(L2[len(L2)-1]+3)
# # L3.append(L3[len(L2)-1]+5)
# # elif L1[len(L1)-1] == L2[len(L2)-1] and L1[len(L1)-1] != L3[len(L3)-1]:
# # OPT.append(L1[len(L1)-1])
# # L1.append(L1[len(L1)-1]+2)
# # L2.append(L2[len(L2)-1]+3)
# # elif L1[len(L1)-1] != L2[len(L2)-1] and L1[len(L1)-1] == L3[len(L3)-1]:
# # OPT.append(L1[len(L1)-1])
# # L1.append(L1[len(L1)-1]+2)
# # L3.append(L3[len(L3)-1]+5)
# number = min(L1[len(L1)-1], L2[len(L2)-1], L3[len(L3)-1])
# OPT.append(number)
# if L1[len(L1)-1] == number and L2[len(L2)-1] == number and L3[len(L3)-1] == number:
# L1.append(L1[len(L1)-1]+2)
# L2.append(L2[len(L2)-1]+3)
# L3.append(L3[len(L3)-1]+5)
# elif L1[len(L1)-1] == number and L2[len(L2)-1] == number:
# L1.append(L1[len(L1)-1]+2)
# L2.append(L2[len(L2)-1]+3)
# elif L2[len(L2)-1] == number and L3[len(L3)-1] == number:
# L2.append(L2[len(L2)-1]+3)
# L3.append(L3[len(L3)-1]+5)
# elif L1[len(L1)-1] == number and L3[len(L3)-1] == number:
# L1.append(L1[len(L1)-1]+2)
# L3.append(L3[len(L3)-1]+5)
# elif L1[len(L1)-1] == number:
# L1.append(L1[len(L1)-1]+2)
# elif L2[len(L2)-1] == number:
# L2.append(L2[len(L2)-1]+3)
# elif L3[len(L3)-1] == number:
# L3.append(L3[len(L3)-1]+5)
# i += 1
# print OPT
# return OPT[n-1]
def nthUglyNumber(self, n):
ugly = [1]
i2, i3, i5 = 0, 0, 0
while n > 1:
u2, u3, u5 = 2 * ugly[i2], 3 * ugly[i3], 5 * ugly[i5]
umin = min((u2, u3, u5))
if umin == u2:
i2 += 1
if umin == u3:
i3 += 1
if umin == u5:
i5 += 1
ugly.append(umin)
n -= 1
print ugly
return ugly[-1]
if __name__ == '__main__':
    S = Solution()
    print S.nthUglyNumber(11)  # Python 2 print statement; expected output: 15
"simonxu14@gmail.com"
] | simonxu14@gmail.com |
ef756af68074ab1bd26ee928f3f77842144103cb | c07aa8344b7adea209aa364e09241915fc189648 | /practice10e.py | 3aa6c09553d94cdda97332a3583bed938411c330 | [] | no_license | harmansehmbi/Project10 | 0c52763d63e29bb2698df28a28d1118de9f56fb0 | 06379f1c73a8c3224963801f589a4c773783c637 | refs/heads/master | 2020-06-12T10:50:49.458786 | 2019-06-28T13:14:29 | 2019-06-28T13:14:29 | 194,276,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Inheritance
class Parent:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
print(">> Parent Constructor Executed")
def showDetails(self):
print(">>Hello", self.fname, self.lname)
class Child(Parent): # Relationship --> IS-A
def __init__(self, vehicle, salary):
self.vehicle = vehicle
self.salary = salary
print(">> Child Constructor Executed ")
print("Parent Class Dictonary: ",Parent.__dict__)
print("Child Class Dictonary: ",Child.__dict__)
ch = Child("John", "Watson")
print(ch.__dict__)
ch.showDetails()
# Error will come | [
"51370954+harmansehmbi@users.noreply.github.com"
] | 51370954+harmansehmbi@users.noreply.github.com |
45c20f141092860fc3fb4106908badd0a2d6d07c | 156c892cadfc60adba7722bdb56cf6c6dac8e19b | /06_classification/architecture/models.py | 38034bfbf5e6c32851743a993e34dda7d98066de | [
"MIT"
] | permissive | krakowiakpawel9/computer-vision-course | ad263fbbf081bf733d172477b744fd2f872ae790 | 798e84aaf2520bac564d5e042ee48d5ba3491d08 | refs/heads/master | 2022-12-09T02:53:05.926564 | 2021-10-17T07:15:20 | 2021-10-17T07:15:20 | 225,365,578 | 2 | 5 | MIT | 2022-12-08T03:15:48 | 2019-12-02T12:04:27 | Jupyter Notebook | UTF-8 | Python | false | false | 838 | py | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
class LeNet5:
    """LeNet-5-style convolutional network with a single sigmoid output."""
    def __init__(self, input_shape):
        # input_shape: shape of one input sample, e.g. (height, width, channels)
        self.input_shape = input_shape
    def build(self):
        """Assemble and return the (uncompiled) Keras Sequential model."""
        layers = [
            Conv2D(filters=6, kernel_size=(3, 3), input_shape=self.input_shape, activation='relu'),
            MaxPooling2D(),
            Conv2D(filters=16, kernel_size=(3, 3), activation='relu'),
            MaxPooling2D(),
            Flatten(),
            Dense(units=120, activation='relu'),
            Dense(units=84, activation='relu'),
            Dense(units=1, activation='sigmoid'),
        ]
        model = Sequential()
        for layer in layers:
            model.add(layer)
        return model
"krakowiakpawel9@gmail.com"
] | krakowiakpawel9@gmail.com |
03b025fe02031ee7a8c0afe0bb4188aed0d4ff09 | 63ace5832d453e325681d02f6496a0999b72edcb | /examples/bip86.py | 73719ffcab8854740f85afef63d20bbbc690e01a | [
"MIT"
] | permissive | ebellocchia/bip_utils | c9ec04c687f4247e57434319e36b2abab78f0b32 | d15c75ddd74e4838c396a0d036ef6faf11b06a4b | refs/heads/master | 2023-09-01T13:38:55.567370 | 2023-08-16T17:04:14 | 2023-08-16T17:04:14 | 251,130,186 | 244 | 88 | MIT | 2023-08-23T13:46:19 | 2020-03-29T20:42:48 | Python | UTF-8 | Python | false | false | 1,324 | py | """Example of keys derivation using BIP86."""
from bip_utils import Bip39MnemonicGenerator, Bip39SeedGenerator, Bip39WordsNum, Bip44Changes, Bip86, Bip86Coins
ADDR_NUM: int = 5
# Generate random mnemonic
mnemonic = Bip39MnemonicGenerator().FromWordsNumber(Bip39WordsNum.WORDS_NUM_24)
print(f"Mnemonic string: {mnemonic}")
# Generate seed from mnemonic
seed_bytes = Bip39SeedGenerator(mnemonic).Generate()
# Construct from seed
bip86_mst_ctx = Bip86.FromSeed(seed_bytes, Bip86Coins.BITCOIN)
# Print master key
print(f"Master key (bytes): {bip86_mst_ctx.PrivateKey().Raw().ToHex()}")
print(f"Master key (extended): {bip86_mst_ctx.PrivateKey().ToExtended()}")
print(f"Master key (WIF): {bip86_mst_ctx.PrivateKey().ToWif()}")
# Derive BIP86 account keys: m/86'/0'/0'
bip86_acc_ctx = bip86_mst_ctx.Purpose().Coin().Account(0)
# Derive BIP86 chain keys: m/86'/0'/0'/0
bip86_chg_ctx = bip86_acc_ctx.Change(Bip44Changes.CHAIN_EXT)
# Derive addresses: m/86'/0'/0'/0/i
print("Addresses:")
for i in range(ADDR_NUM):
bip86_addr_ctx = bip86_chg_ctx.AddressIndex(i)
print(f" {i}. Address public key (extended): {bip86_addr_ctx.PublicKey().ToExtended()}")
print(f" {i}. Address private key (extended): {bip86_addr_ctx.PrivateKey().ToExtended()}")
print(f" {i}. Address: {bip86_addr_ctx.PublicKey().ToAddress()}")
| [
"54482000+ebellocchia@users.noreply.github.com"
] | 54482000+ebellocchia@users.noreply.github.com |
43d040790aea0b3922e3a5f563fcc1daac78441d | 05b0162d5ee7ab74f71ad4f21d5188a8735dfaef | /plugins/modules/device_administration_conditions_for_authentication_rule_info.py | 35d0d93dbca91131fa0f51ab962a1077fd52e574 | [
"MIT"
] | permissive | steinzi/ansible-ise | 567b2e6d04ce3ca6fbdbb6d0f15cd1913a1e215a | 0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0 | refs/heads/main | 2023-06-25T15:28:22.252820 | 2021-07-23T14:21:40 | 2021-07-23T14:21:40 | 388,820,896 | 0 | 0 | MIT | 2021-07-23T14:03:07 | 2021-07-23T14:03:06 | null | UTF-8 | Python | false | false | 2,785 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: device_administration_conditions_for_authentication_rule_info
short_description: Information module for Device Administration Conditions For Authentication Rule
description:
- Get all Device Administration Conditions For Authentication Rule.
version_added: '1.0.0'
author: Rafael Campos (@racampos)
options: {}
requirements:
- ciscoisesdk
seealso:
# Reference by Internet resource
- name: Device Administration Conditions For Authentication Rule reference
description: Complete reference of the Device Administration Conditions For Authentication Rule object model.
link: https://ciscoisesdk.readthedocs.io/en/latest/api/api.html#v3-0-0-summary
"""
EXAMPLES = r"""
- name: Get all Device Administration Conditions For Authentication Rule
cisco.ise.device_administration_conditions_for_authentication_rule_info:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
register: result
"""
RETURN = r"""
ise_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
type: dict
sample: >
{
"response": [
{
"conditionType": "string",
"isNegate": true,
"link": {
"href": "string",
"rel": "string",
"type": "string"
},
"description": "string",
"id": "string",
"name": "string",
"attributeName": "string",
"attributeId": "string",
"attributeValue": "string",
"dictionaryName": "string",
"dictionaryValue": "string",
"operator": "string",
"children": [
{
"conditionType": "string",
"isNegate": true,
"link": {
"href": "string",
"rel": "string",
"type": "string"
}
}
],
"datesRange": {
"endDate": "string",
"startDate": "string"
},
"datesRangeException": {
"endDate": "string",
"startDate": "string"
},
"hoursRange": {
"endTime": "string",
"startTime": "string"
},
"hoursRangeException": {
"endTime": "string",
"startTime": "string"
},
"weekDays": [
"string"
],
"weekDaysException": [
"string"
]
}
],
"version": "string"
}
"""
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
771fc6098f1802b8e35a0e783e947604ed0a2b8f | 2d276785c3663d4798be462115291c4706dbd255 | /Pythonไป่้ธๅฐ้ซๆ/chapter21/demo21.17.py | be5cc43e7fe719bc7ce925e483a866e7d4386c22 | [] | no_license | bupthl/Python | 81c92433bd955663e6cda5fe7cab5ea3d067c3de | bdb33aeeb179a43100b9ef7129a925c63a133fd3 | refs/heads/master | 2022-02-21T11:02:40.195265 | 2019-08-16T05:49:18 | 2019-08-16T05:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | '''
--------ใPythonไป่้ธๅฐ้ซๆใๆบไปฃ็ ------------
ๆฌง็็งๆ็ๆๆๆ
ไฝ่
๏ผๆๅฎ
ๅฆๆไปปไฝๆๆฏ้ฎ้ข๏ผ่ฏทๅ QQๆๆฏ่ฎจ่ฎบ็พค๏ผ264268059
ๆๅ
ณๆณจโๆๅฎข่ตทๆบโ่ฎข้
ๅทๆโๆฌง็็งๆโๆๅกๅทๆๆซ็ ๅ
ณๆณจ่ฎข้
ๅทๅๆๅกๅท๏ผไบ็ปด็ ๅจๆบไปฃ็ ๆ น็ฎๅฝ
ๅฆๆQQ็พคๅทฒๆปก๏ผ่ฏท่ฎฟ้ฎhttps://geekori.com๏ผๅจๅณไพงๆฅ็ๆๆฐ็QQ็พค๏ผๅๆถๅฏไปฅๆซ็ ๅ
ณๆณจๅ
ฌไผๅท
โๆฌง็ๅญฆ้ขโๆฏๆฌง็็งๆๆไธๅจ็บฟITๆ่ฒๅญฆ้ข๏ผๅ
ๅซๅคง้ITๅๆฒฟ่ง้ข่ฏพ็จ๏ผ
่ฏท่ฎฟ้ฎhttp://geekori.com/eduๆๅ
ณๆณจๅ้ขๆๅฐ็่ฎข้
ๅทๅๆๅกๅท๏ผ่ฟๅ
ฅ็งปๅจ็็ๆฌง็ๅญฆ้ข
โๆๅฎข้ขๅบโๆฏๆฌง็็งๆๆไธๅจ็บฟ้ขๅบ๏ผ่ฏทๆซๆๆบไปฃ็ ๆ น็ฎๅฝไธญ็ๅฐ็จๅบ็ ๅฎ่ฃ
โๆๅฎข้ขๅบโๅฐ็จๅบ
ๅ
ณไบๆดๅคไฟกๆฏ๏ผ่ฏท่ฎฟ้ฎไธ้ข็้กต้ข
https://geekori.com/help/videocourse/readme.html
'''
from flask import Flask,request,render_template
from flask_wtf import FlaskForm
from wtforms import TextField,SubmitField,validators
app = Flask(__name__)
app.secret_key ='sdjsldj4323sdsdfssfdf43434'
class ContactForm(FlaskForm):
firstname = TextField('ๅงๅ',[validators.Required('ๅงๅๅฟ
้กป่พๅ
ฅ')])
submit = SubmitField('ๆไบค')
@app.route('/', methods=['GET','POST'])
def contact():
form = ContactForm()
if request.method == 'POST':
if form.validate_on_submit() == False:
print(form.firstname.errors)
print('error')
return render_template('first.txt',form=form)
if __name__ == '__main__':
app.run(host = '0.0.0.0', port='1234')
| [
"registercn@outlook.com"
] | registercn@outlook.com |
220b3af5c34c9e5e93d20649552883d51ae2fd50 | 04ad0c1987ff1a2c57025fea93edc4db6ed0583b | /acctTracker/db_setup.py | c29255e45c4a3ccff6d429fb5114e0b29800b7d9 | [] | no_license | elronF/WebAppProject | 1ec08f00c934a5ea8a5172b882406144dfab6b78 | 3717243acc49249ff7a895063ec1cae1ffb5f65c | refs/heads/master | 2020-04-01T18:51:25.489294 | 2018-12-04T05:37:14 | 2018-12-04T05:37:14 | 153,519,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,833 | py | import sys
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
import psycopg2
Base = declarative_base()
class Owner(Base):
__tablename__ = 'owner'
id = Column(Integer, primary_key=True)
name = Column(String(25), unique=True, nullable=False)
@property
def serialize(self):
return {
'id': self.id,
'name': self.name,
}
class UserCred(Base):
__tablename__ = 'usercred'
id = Column(Integer, primary_key=True)
email = Column(String(100), unique=True)
@property
def serialize(self):
return {
'id': self.id,
'email': self.email,
}
class Account(Base):
__tablename__ = 'account'
id = Column(Integer, primary_key=True)
accountType = Column(String(10), nullable=False)
institution = Column(String(25), nullable=False)
@property
def serialize(self):
return {
'id': self.id,
'accountType': self.accountType,
'institution': self.institution,
}
class Stock(Base):
__tablename__ = 'stock'
id = Column(Integer, primary_key=True)
ticker = Column(String(8), nullable=False)
exchange = Column(String(8), nullable=False)
companyName = Column(String(50))
industry = Column(String(30))
description = Column(String(500))
account_id = Column(Integer, ForeignKey('account.id'))
account = relationship(Account)
user_id = Column(Integer, ForeignKey('usercred.id'))
usercred = relationship(UserCred)
__table_args__ = (
UniqueConstraint('ticker'),
)
@property
def serialize(self):
return {
'id': self.id,
'ticker': self.ticker,
'exchange': self.exchange,
'companyName': self.companyName,
'industry': self.industry,
'description': self.description,
}
# For later functionality
# class Transaction(Base):
# id = Column(Integer, primary_key = True)
# costBasis = Column(Integer, nullable = False)
# shareCount = Column(Integer, nullable = False)
# account_id = Column(Integer, ForeignKey('acccount.id'))
# stock_id = Column(Integer, ForeignKey('stock.id'))
# account = relationship(Account) # the transaction occurs in one account
# stock = relationship(Stock) # the transaction occurs with one stock
#engine = create_engine('sqlite:///tracker_v2.db')
engine = create_engine('postgresql://catalog:catpass@localhost/catalog')
Base.metadata.create_all(engine)
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
df2268355419277e266589f45e8bc92afdd65a95 | 5ac7ea7e67da7726dbf7cec59a7b45ece9c935f9 | /Module 8/15.1 method overloading.py | 9cfed92871c3c2f5750abd7ffb27327214cdfebc | [] | no_license | Vipulhere/Python-practice-Code | 61cc7a0246a28506c29857589e6b90dd5cbf952b | d47362241474527061e1beccb3d60f12ab7e2e5a | refs/heads/main | 2023-08-13T04:45:43.819247 | 2021-10-15T09:27:15 | 2021-10-15T09:27:15 | 313,358,144 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | class cars():
def __init__(self):
self.name="BMW"
def show_detail(self):
print(self.name)
class BMW(cars):
def __init__(self):
self.name="This is BMW Name"
def show_detail(self):
print(self.name)
c=cars()
c1=BMW()
c1.show_detail()
c.show_detail() | [
"noreply@github.com"
] | Vipulhere.noreply@github.com |
f08d9d453b49df74219dca035e8fc5c53064cb7a | 1ff0f9a3c6e33a3ec9d617fa5ebc0b33c38243b3 | /picoctf-2018/quackme_200/solution.py | e51b532b1ee6de9eb5dbcd0da832ec3ffedf9d0a | [] | no_license | james9909/PracticeCTF | 2187d6c26cf7387f32360d7e1c7b41211ff3e9b3 | dfa1f03c9d7a3c157a019b681bac04d2e1244fab | refs/heads/master | 2022-05-10T17:07:47.061155 | 2022-04-04T23:52:46 | 2022-04-04T23:52:46 | 34,630,061 | 37 | 14 | null | 2022-04-04T23:52:47 | 2015-04-26T20:30:27 | Python | UTF-8 | Python | false | false | 510 | py | enc = "\x29\x06\x16\x4f\x2b\x35\x30\x1e\x51\x1b\x5b\x14\x4b\x08\x5d\x2b\x50\x14\x5d\x00\x19\x17\x59\x52\x5d"
key = "You have now entered the Duck Web"
dec = ""
for x in range(len(enc)):
dec += chr(ord(enc[x]) ^ ord(key[x]))
print(dec)
"""
Browsing the disassembly, we see that our input is being xor'd with the string "You have now entered..."
and it's being compared to a secret buffer. Because of the properties of xor, we can reverse this operation
to decrypt the flag.
picoCTF{qu4ckm3_5f8d9c17}
"""
| [
"jameswang9909@hotmail.com"
] | jameswang9909@hotmail.com |
4f1c4c708baf91f15a2f05fd5a01db9058ece5a9 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/Tprime/TprimeToTHinc_M_1400_TuneZ2star_8TeV_madgraph_cff.py | 02f0cbcc3da031fd5d6f36b739437611a55b8e39 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 4,742 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'MSTP(1) = 4',
'MSEL=8 ! User defined processes',
'MWID(8)=2 !use width of tprime as defined by PMAS',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(25,1)=125.0! h0 masss',
'PMAS(8,1) = 1400.0D0 ! tprime quark mass',
'PMAS(8,2) = 14.0D0',
'PMAS(8,3) = 140.0D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(66,1)=0 ! g t4',
'MDME(67,1)=0 ! gamma t4',
'MDME(68,1)=0 ! Z0 t4'
'MDME(69,1)=0 ! W d',
'MDME(70,1)=0 ! W s',
'MDME(71,1)=0 ! W b',
'MDME(72,1)=0 ! W b4',
'KFDP(73,2)=6 ! defines h0 t (replace t4-default with t)',
'MDME(73,1)=1 ! h0 t ',
'MDME(74,1)=-1 ! H+ b',
'MDME(75,1)=-1 ! H+ b4',
'BRAT(66) = 0.0D0',
'BRAT(67) = 0.0D0',
'BRAT(68) = 0.0D0',
'BRAT(69) = 0.0D0',
'BRAT(70) = 0.0D0',
'BRAT(71) = 0.0D0',
'BRAT(72) = 0.0D0',
'BRAT(73) = 1.0D0',
'BRAT(74) = 0.0D0',
'BRAT(75) = 0.0D0',
'MDME(210,1)=1 !H decay into d dbar',
'MDME(211,1)=1 !H decay into u ubar',
'MDME(212,1)=1 !H decay into s sbar',
'MDME(213,1)=1 !H decay into c cbar',
'MDME(214,1)=1 !H decay into b bbar',
'MDME(215,1)=1 !H decay into t tbar',
'MDME(216,1)=-1 !Z decay into b4 b4bar',
'MDME(217,1)=-1 !Z decay into t4 t4bar',
'MDME(218,1)=1 !Z decay into e- e+',
'MDME(219,1)=1 !Z decay into mu- mu+',
'MDME(220,1)=1 !Z decay into tau- tau+',
'MDME(221,1)=-1 !Z decay into tau4 tau4bar',
'MDME(222,1)=1 !H decay into g g',
'MDME(223,1)=1 !H decay into gamma gamma',
'MDME(224,1)=1 !H decay into gamma Z0',
'MDME(225,1)=1 !H decay into Z0 Z0',
'MDME(226,1)=1 !H decay into W+ W-',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] | sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch |
cc6b69db17de0274380268b6fc80951fe0bfff87 | 651a296c8f45b5799781fd78a6b5329effe702a0 | /test_mat/sylvester_kac.py | 44297e14ba7d8cb43ac23f9c86e2cc1734fbb66c | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,631 | py | #! /usr/bin/env python
#
def sylvester_kac ( n ):
#*****************************************************************************80
#
## SYLVESTER_KAC returns the SYLVESTER_KAC matrix.
#
# Formula:
#
# If J = I - 1
# A(I,J) = N + 1 - I
# If J = I + 1
# A(I,J) = I
#
# Example:
#
# N = 5,
#
# 0 1 0 0 0
# 4 0 2 0 0
# 0 3 0 3 0
# 0 0 2 0 4
# 0 0 0 1 0
#
# Properties:
#
# A is generally not symmetric: A' /= A.
#
# A is tridiagonal.
#
# If N is odd, the eigenvalues are:
# -(N-1), -(N-3), ..., -2, 0, 2, ... (N-3), (N-1).
#
# If N is even, the eigenvalues are:
# -(N-1), -(N-3), ..., -1, +1, ..., (N-3), (N-1).
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 13 April 2015
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Paul Clement,
# A class of triple-diagonal matrices for test purposes,
# SIAM Review,
# Volume 1, 1959, pages 50-52.
#
# Parameters:
#
# Input, integer N, the number of rows and columns of A.
#
# Output, real A(N,N), the matrix.
#
import numpy as np
a = np.zeros ( ( n, n ) )
for i in range ( 0, n - 1 ):
a[i,i+1] = float ( i + 1 )
a[i+1,i] = float ( n - i - 1 )
return a
def sylvester_kac_determinant ( n ):
#*****************************************************************************80
#
## SYLVESTER_KAC_DETERMINANT computes the determinant of the SYLVESTER_KAC matrix.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 13 April 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the order of the matrix.
#
# Output, real VALUE, the determinant.
#
if ( ( n % 2 ) == 1 ):
value = 0.0
else:
value = 1.0
for i in range ( - n + 1, n + 1, 2 ):
value = value * float ( i )
return value
def sylvester_kac_eigen_right ( n ):
#*****************************************************************************80
#
## SYLVESTER_KAC_EIGEN_RIGHT: right eigenvectors of the SYLVESTER_KAC matrix.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 14 April 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the order of the matrix.
#
# Output, real V(N,N), the right eigenvectors.
#
import numpy as np
from r8_mop import r8_mop
b = np.zeros ( n - 1 )
for i in range ( 0, n - 1 ):
b[i] = float ( i + 1 )
c = np.zeros ( n - 1 )
for i in range ( 0, n - 1 ):
c[i] = float ( n - 1 - i )
v = np.zeros ( ( n, n ) )
for j in range ( 0, n ):
lam = - n + 1 + 2 * j
a = np.zeros ( n )
a[0] = 1.0
a[1] = - lam
for i in range ( 2, n ):
a[i] = - lam * a[i-1] - b[i-2] * c[i-2] * a[i-2]
bot = 1.0
v[0,j] = 1.0
for i in range ( 1, n ):
bot = bot * b[i-1]
v[i,j] = r8_mop ( i ) * a[i] / bot
return v
def sylvester_kac_eigenvalues ( n ):
#*****************************************************************************80
#
## SYLVESTER_KAC_EIGENVALUES returns the eigenvalues of the SYLVESTER_KAC matrix.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 15 April 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the order of the matrix.
#
# Output, real LAM(N), the eigenvalues.
#
import numpy as np
lam = np.zeros ( n )
i = 0
for i in range ( 0, n ):
lam[i] = float ( - n + 1 + 2 * i )
return lam
def sylvester_kac_inverse ( n ):
#*****************************************************************************80
#
## SYLVESTER_KAC_INVERSE returns the inverse of the SYLVESTER_KAC matrix.
#
# Example:
#
# N = 6:
#
# 0 1/5 0 -2/15 0 8/15
# 1 0 0 0 0 0
# 0 0 0 1/3 0 -4/3
# -4/3 0 1/3 0 0 0
# 0 0 0 0 0 1
# 8/15 0 -2/15 0 1/5 0
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 18 April 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the order of A.
#
# Output, real A(N,N), the matrix.
#
import numpy as np
from sys import exit
if ( ( n % 2 ) == 1 ):
print ''
print 'SYLVESTER_KAC_INVERSE - Fatal error!'
print ' The matrix is singular for odd N.'
exit ( 'SYLVESTER_KAC - Fatal error!' )
a = np.zeros ( ( n, n ) )
for i in range ( 0, n ):
if ( ( i % 2 ) == 0 ):
for j in range ( i, n - 1, 2 ):
if ( j == i ):
prod1 = 1.0 / float ( n - 1 - j )
prod2 = 1.0 / float ( 1 + j )
else:
prod1 = - prod1 * float ( j ) / float ( n - 1 - j )
prod2 = - prod2 * float ( n - j ) / float ( 1 + j )
a[i,j+1] = prod1
a[j+1,i] = prod2
return a
def sylvester_kac_test ( ):
#*****************************************************************************80
#
## SYLVESTER_KAC_TEST tests SYLVESTER_KAC.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 13 April 2015
#
# Author:
#
# John Burkardt
#
from r8mat_print import r8mat_print
print ''
print 'SYLVESTER_KAC_TEST'
print ' SYLVESTER_KAC computes the SYLVESTER_KAC matrix.'
m = 5
n = m
a = sylvester_kac ( n )
r8mat_print ( m, n, a, ' SYLVESTER_KAC matrix:' )
print ''
print 'SYLVESTER_KAC_TEST'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
sylvester_kac_test ( )
timestamp ( )
| [
"siplukabir@gmail.com"
] | siplukabir@gmail.com |
db11aeda9733268b92c31c9cff4627aa0b75d680 | 5dc33db00ba62bea9850315b5c72f1c0c741901e | /Python/JianzhiOffer/question6_1.py | b4593accaeec0c5d58d03577fe02e0b609470294 | [] | no_license | czx94/Algorithms-Collection | b8d0b06c60138e7d996c91643e3bdb3236462aed | 8fb6c1d947046dabd58ff8482b2c0b41f39aa988 | refs/heads/master | 2021-06-09T04:53:10.601593 | 2021-05-20T14:04:54 | 2021-05-20T14:04:54 | 181,836,499 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | '''
Print each node of a linkedlist from end to start
'''
from LinkedList import LinkedList
import numpy as np
# stack
def solution1(linkedlist):
print('Stack solution')
stack = []
root = linkedlist.root
while root:
stack.append(root.val)
root = root.next
while stack:
val = stack.pop()
print(val)
# recursive
def solution2(linkedlist):
print('Recursive solution')
def recursion(root):
if root.next:
recursion(root.next)
print(root.val)
root = linkedlist.root
recursion(root)
if __name__ == '__main__':
list_elements = list(np.random.choice(100, size=10, replace=False))
print(list_elements)
linkedlist = LinkedList(list_elements, 12)
solution1(linkedlist)
solution2(linkedlist)
| [
"czxplp@gmail.com"
] | czxplp@gmail.com |
62441cdd535497b26c470a8e6c58afebad67efc7 | 9b1446b26e81a79c303f9799fb6a91785c7adb03 | /.history/Code/dictogram_20200209185851.py | b2984114a1041e53aed656c00030a876b61bda19 | [] | no_license | SamirIngley/CS1.2-Tweet-Gen | 017ea15b1113881a156ff24682828bc654eb6c81 | bcd95fa63e05849cbf8e36230d8e31032b99daaa | refs/heads/master | 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 | Python | UTF-8 | Python | false | false | 4,706 | py | #!python
from __future__ import division, print_function # Python 2 and 3 compatibility
import random
class Dictogram(dict):
"""Dictogram is a histogram implemented as a subclass of the dict type."""
def __init__(self, word_list=None):
"""Initialize this histogram as a new dict and count given words."""
super(Dictogram, self).__init__() # Initialize this as a new dict
# Add properties to track useful word counts for this histogram
self.types = 0 # Count of distinct word types in this histogram
self.tokens = 0 # Total count of all word tokens in this histogram
# Count words in given list, if any
if word_list is not None:
for word in word_list:
self.add_count(word)
def add_count(self, word, count=1):
"""Increase frequency count of given word by given count amount."""
# TODO: Increase word frequency by count
if word in self:
self[word] += count
self.tokens += count
else:
self[word] = count
self.tokens += count
self.types += count
def frequency(self, word):
"""Return frequency count of given word, or 0 if word is not found."""
# TODO: Retrieve word frequency count
if word in self:
freq = self[word]
return freq
def sample(self):
"""Return a word from this histogram, randomly sampled by weighting
each word's probability of being chosen by its observed frequency."""
# TODO: Randomly choose a word based on its frequency in this histogram
percentages = []
total_wc = 0 # total word count
for item in histo:
total_wc += int(item[1])
for item in histo:
percent = (item[1] / total_wc) * 100 # calculate percentage based on freq / total
instance = (item[0], percent)
percentages.append(instance)
return percentages
def print_histogram(word_list):
print()
print('Histogram:')
print('word list: {}'.format(word_list))
# Create a dictogram and display its contents
histogram = Dictogram(word_list)
print('dictogram: {}'.format(histogram))
print('{} tokens, {} types'.format(histogram.tokens, histogram.types))
for word in word_list[-2:]:
freq = histogram.frequency(word)
print('{!r} occurs {} times'.format(word, freq))
print()
print_histogram_samples(histogram)
def print_histogram_samples(histogram):
print('Histogram samples:')
# Sample the histogram 10,000 times and count frequency of results
samples_list = [histogram.sample() for _ in range(10000)]
samples_hist = Dictogram(samples_list)
print('samples: {}'.format(samples_hist))
print()
print('Sampled frequency and error from observed frequency:')
header = '| word type | observed freq | sampled freq | error |'
divider = '-' * len(header)
print(divider)
print(header)
print(divider)
# Colors for error
green = '\033[32m'
yellow = '\033[33m'
red = '\033[31m'
reset = '\033[m'
# Check each word in original histogram
for word, count in histogram.items():
# Calculate word's observed frequency
observed_freq = count / histogram.tokens
# Calculate word's sampled frequency
samples = samples_hist.frequency(word)
sampled_freq = samples / samples_hist.tokens
# Calculate error between word's sampled and observed frequency
error = (sampled_freq - observed_freq) / observed_freq
color = green if abs(error) < 0.05 else yellow if abs(error) < 0.1 else red
print('| {!r:<9} '.format(word)
+ '| {:>4} = {:>6.2%} '.format(count, observed_freq)
+ '| {:>4} = {:>6.2%} '.format(samples, sampled_freq)
+ '| {}{:>+7.2%}{} |'.format(color, error, reset))
print(divider)
print()
def main():
import sys
arguments = sys.argv[1:] # Exclude script name in first argument
if len(arguments) >= 1:
# Test histogram on given arguments
print_histogram(arguments)
else:
# Test histogram on letters in a word
word = 'abracadabra'
print_histogram(list(word))
# Test histogram on words in a classic book title
fish_text = 'one fish two fish red fish blue fish'
print_histogram(fish_text.split())
# Test histogram on words in a long repetitive sentence
woodchuck_text = ('how much wood would a wood chuck chuck'
' if a wood chuck could chuck wood')
print_histogram(woodchuck_text.split())
if __name__ == '__main__':
main()
| [
"samir.ingle7@gmail.com"
] | samir.ingle7@gmail.com |
9e527a1512634636ccd9e40f263b0a7ab2ea8b6a | 61d08e23fbb62e16f7bd9d43673b1cf4e0558c37 | /miraScripts/mayaTools/lighting_tool/OF/tools/rename.py | c84e06e533fd45419ca04b08cfa200cd3c416973 | [] | no_license | jonntd/mira | 1a4b1f17a71cfefd20c96e0384af2d1fdff813e8 | 270f55ef5d4fecca7368887f489310f5e5094a92 | refs/heads/master | 2021-08-31T12:08:14.795480 | 2017-12-21T08:02:06 | 2017-12-21T08:02:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,064 | py | ๏ปฟimport os
import re
from Qt.QtWidgets import *
from Qt.QtCore import *
from Qt.QtGui import *
from Qt import __binding__
import maya.cmds as mc
def undo(func):
def _undo(*args, **kwargs):
try:
mc.undoInfo(ock=1)
result = func(*args, **kwargs)
except Exception, e:
raise e
else:
return result
finally:
mc.undoInfo(cck=1)
return _undo
def get_maya_win(module="PySide"):
"""
get a QMainWindow Object of maya main window
:param module (optional): string "PySide"(default) or "PyQt4"
:return main_window: QWidget or QMainWindow object
"""
import maya.OpenMayaUI as mui
prt = mui.MQtUtil.mainWindow()
if module == "PyQt":
import sip
from Qt.QtCore import *
main_window = sip.wrapinstance(long(prt), QObject)
elif module in ["PySide", "PyQt"]:
if __binding__ in ["PySide", "PyQt4"]:
import shiboken
elif __binding__ in ["PySide2", "PyQt5"]:
import shiboken2 as shiboken
from Qt.QtWidgets import *
main_window = shiboken.wrapInstance(long(prt), QWidget)
elif module == "mayaUI":
main_window = "MayaWindow"
else:
raise ValueError('param "module" must be "mayaUI" "PyQt4" or "PySide"')
return main_window
class Rename(QDialog):
def __init__(self, parent=None):
super(Rename, self).__init__(parent)
self.resize(500, 550)
self.setObjectName('Rename')
self.setWindowTitle('Rename')
self.setWindowFlags(Qt.Dialog | Qt.WindowMinimizeButtonHint)
self.path = None
layout = QVBoxLayout(self)
layout.setContentsMargins(5, 10, 5, 5)
layout_grp = QGroupBox('System')
layout_grp.setStyleSheet("QGroupBox{color:#00FF00;border: 1px solid #222222;"
"padding-top:15px;border-radius:2px;font-size: 15px}")
layout.addWidget(layout_grp)
main_layout = QVBoxLayout(layout_grp)
file_layout = QHBoxLayout()
file_label = QLabel('File Path')
self.file_le = QLineEdit()
file_layout.addWidget(file_label)
file_layout.addWidget(self.file_le)
self.lw = QListWidget()
self.lw.setSortingEnabled(True)
self.lw.setSelectionMode(QListWidget.ExtendedSelection)
bottom_layout = QHBoxLayout()
rename_layout = QGridLayout()
src_label = QLabel('Source Name')
src_label.setAlignment(Qt.AlignRight)
self.src_le = QLineEdit()
dst_label = QLabel('Destination Name')
dst_label.setAlignment(Qt.AlignRight)
self.dst_le = QLineEdit()
rename_layout.addWidget(src_label, 0, 0)
rename_layout.addWidget(self.src_le, 0, 1)
rename_layout.addWidget(dst_label, 1, 0)
rename_layout.addWidget(self.dst_le, 1, 1)
rename_btn_layout = QHBoxLayout()
rename_btn_layout.setAlignment(Qt.AlignTop)
self.rename_btn = QPushButton('Rename')
self.rename_btn.setFixedHeight(50)
rename_btn_layout.addWidget(self.rename_btn)
bottom_layout.addLayout(rename_layout)
bottom_layout.addLayout(rename_btn_layout)
add_grp = QGroupBox('Maya')
add_grp.setStyleSheet("QGroupBox{color:#00FF00;border: 1px solid #222222;"
"padding-top:15px;border-radius:2px;font-size: 15px}")
add_layout = QHBoxLayout(add_grp)
maya_rename_layout = QGridLayout()
maya_src_label = QLabel('Source Name')
maya_src_label.setAlignment(Qt.AlignRight)
self.maya_src_le = QLineEdit()
maya_dst_label = QLabel('Destination Name')
maya_dst_label.setAlignment(Qt.AlignRight)
self.maya_dst_le = QLineEdit()
maya_rename_layout.addWidget(maya_src_label, 0, 0)
maya_rename_layout.addWidget(self.maya_src_le, 0, 1)
maya_rename_layout.addWidget(maya_dst_label, 1, 0)
maya_rename_layout.addWidget(self.maya_dst_le, 1, 1)
add_layout.addLayout(maya_rename_layout)
self.maya_rename_btn = QPushButton('Rename')
self.maya_rename_btn.setFixedHeight(50)
add_layout.addWidget(self.maya_rename_btn)
main_layout.addLayout(file_layout)
main_layout.addWidget(self.lw)
main_layout.addLayout(bottom_layout)
layout.addWidget(add_grp)
self.set_signals()
def set_signals(self):
self.file_le.editingFinished.connect(self.set_item)
self.rename_btn.clicked.connect(self.do_rename)
self.maya_rename_btn.clicked.connect(self.maya_rename)
def set_item(self):
self.path = self.file_le.text()
if os.path.isdir(self.path):
self.lw.clear()
for file in os.listdir(self.path):
file_name = file.replace('\\', '/')
item = QListWidgetItem(file_name)
self.lw.addItem(item)
def do_rename(self):
if self.lw.selectedItems():
file_names = [item.text() for item in self.lw.selectedItems()]
src_name = self.src_le.text()
dst_name = self.dst_le.text()
if all((src_name, dst_name)):
for file_name in file_names:
old_file_name = os.path.join(self.path, file_name)
old_file_name = old_file_name.replace('\\', '/')
new_file_basename = re.sub(src_name, dst_name, file_name)
new_file_name = os.path.join(self.path, new_file_basename)
new_file_name = new_file_name.replace('\\', '/')
os.rename(old_file_name, new_file_name)
print "%s >> %s" % (old_file_name, new_file_name)
self.set_item()
@undo
def maya_rename(self, *args):
maya_src_name = self.maya_src_le.text()
maya_dst_name = self.maya_dst_le.text()
if all((maya_src_name, maya_dst_name)):
for file in mc.ls(type='file'):
texture_name = mc.getAttr('%s.fileTextureName' % file)
if maya_src_name in texture_name:
prifix_name, file_name = os.path.split(texture_name)
new_file_name = re.sub(maya_src_name, maya_dst_name, file_name)
new_texture_name = os.path.join(prifix_name, new_file_name)
new_texture_name = new_texture_name.replace('\\', '/')
if os.path.isfile(new_texture_name):
mc.setAttr('%s.fileTextureName' % file, new_texture_name, type='string')
@classmethod
def show_ui(cls):
if mc.window('Rename', q=1, exists=1):
mc.deleteUI('Rename')
atm = cls(get_maya_win())
atm.show()
def main():
Rename.show_ui()
if __name__ == '__main__':
main() | [
"276575758@qq.com"
] | 276575758@qq.com |
444d5c83bf5a92fecc3409cdda40126119132720 | edc1f1369794a4a1c499c6e9d5fe49a712657611 | /algorithms/leetcode_all/020.valid-parentheses/valid-parentheses.py | f3d37dfa31c05f969f5f8f1dda166ff88addb8e4 | [] | no_license | williamsyb/mycookbook | 93d4aca1a539b506c8ed2797863de6da8a0ed70f | dd917b6eba48eef42f1086a54880bab6cd1fbf07 | refs/heads/master | 2023-03-07T04:16:18.384481 | 2020-11-11T14:36:54 | 2020-11-11T14:36:54 | 280,005,004 | 2 | 0 | null | 2023-03-07T02:07:46 | 2020-07-15T23:34:24 | Python | UTF-8 | Python | false | false | 389 | py | class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
stack = []
d = ["()", "[]", "{}"]
for i in xrange(0, len(s)):
stack.append(s[i])
if len(stack) >= 2 and stack[-2]+stack[-1] in d:
stack.pop()
stack.pop()
return len(stack) == 0
| [
"william_sun1990@hotmail.com"
] | william_sun1990@hotmail.com |
b771b04cf2673c78434a7baa850db141197fe690 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_004_20180620133813.py | 06b0b160e198df5586c64a56f59f67a2ddb1b43a | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,961 | py | from random import randint
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, 6, 1, 4, 3, 7, 8, " "]
]
sudoku2 = [
[9, 8, 7, 4, 3, 2, 5, 6, 1]
[2, 4, 3, 5, 1, 6, 8, 7, 9]
[5, 6, 1, 7, 9, 8, 4, 3, 2]
[3, 9, 5, 6, 4, 7, 2, 1, 8]
[8, 2, 4, 3, 5, 1, 6, 9, 7]
[1, 7, 6, 2, 8, 9, 3, 4, 5]
[7, 1, 2, 8, 6, 3, 9, 5, 4]
[4, 3, 8, 9, 7, 5, 1, 2, 6]
[' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
def printSudoku():
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
spaceBar = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
i = i + 1
while True: # prints Sudoku until is solved
print("Your sudoku to solve:")
printSudoku()
print("Input 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
# vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
print(" Function reset() will be ready in Next Week")
else:
print("Error - wrong number format \n ")
continue
sudoku1[int(x[0])-1][int(x[2])-1] = int(x[4])
column1 = 0
column2 = 0
try:
i = 0
list = []
while i < 9:
column = 0
for item in sudoku1:
column = column + item[i]
list.append(column)
#p rint(list)
# print("Suma columny ", i, " = ", column)
i += 1
is45 = 0
for listElement in list:
if listElement == 45:
is45 = is45 + 1
# print("Ile kolumen OK", is45)
i = 0
for item in sudoku1:
if sum(item) == 45 and is45 == 9:
i = i + 1
if i == 9:
printSudoku()
print("@@@@@@@@@@ YOU WIN @@@@@@@@@@")
break
except TypeError:
print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
8eed19fb6d0c869d5b8a75af08b7271969d8fa28 | 1b596568ef6ced06173e60c71f01141682329ac4 | /log_example | 64493e12e5a625a4bf7201d031b380d159b2dc61 | [] | no_license | pfuntner/gists | 4eb1847ef22d3d9cb1e17e870a8434c376c4dbfc | 3322c922bd43480b4cc2759b1c31e5c76668c7ef | refs/heads/master | 2020-04-17T08:40:29.444378 | 2019-01-18T16:23:49 | 2019-01-18T16:23:49 | 166,421,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | #! /usr/bin/env python
import logging
import argparse
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', dest='verbose', action='count', help='Enable debugging - multiple uses prints more messages')
group.add_argument('--loglevel', dest='loglevel', action='store', help='Set log level: DEBUG, INFO, WARNING, ERROR, CRITICAL')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
log = logging.getLogger()
log.setLevel(args.loglevel or (logging.WARNING - (args.verbose or 0) * 10))
log.debug('This is a debugging message')
log.info('This is an informational message')
log.warn('This is a warning message')
log.error('This is an error message')
log.fatal('This is a fatal/critical message')
| [
"jpfuntne@cisco.com"
] | jpfuntne@cisco.com | |
139b90b03a2ada07657e2c3171fbe3ad6dcaa6e5 | 3043a21e89b67e7f3d1e420358ab6af41fc203b4 | /0x00-python_variable_annotations/9-element_length.py | d4bc71ac28747753dd3bba1aea9999c25a8bbdd4 | [] | no_license | zacwoll/holbertonschool-web_back_end | 846ece845725d702d8a6ee0a1696e6d823362fd6 | ece925eabc1d1e22055f1b4d3f052b571e1c4400 | refs/heads/main | 2023-05-20T12:54:07.215103 | 2021-06-03T21:21:53 | 2021-06-03T21:21:53 | 348,202,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | #!/usr/bin/env python3
""" duck type """
from typing import Iterable, Sequence, Tuple, List
def element_length(lst: Iterable[Sequence]) -> List[Tuple[Sequence, int]]:
""" some function """
return [(i, len(i)) for i in lst] | [
"zacwoll@gmail.com"
] | zacwoll@gmail.com |
249b96eba8bb51de0b655e2fd71e2e05b7cf0d86 | 0f07107b016d2aee64788966b9f0d322ac46b998 | /moya/tests/test_expressionrange.py | bea67e69ae54228897fd3b831ebbcd0022c27df1 | [
"MIT"
] | permissive | fkztw/moya | 35f48cdc5d5723b04c671947099b0b1af1c7cc7a | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | refs/heads/master | 2023-08-09T09:20:21.968908 | 2019-02-03T18:18:54 | 2019-02-03T18:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,481 | py | import unittest
from moya.context import Context
from moya.context.expressionrange import *
class TestExpressionRange(unittest.TestCase):
def test_exclusive_integer(self):
c = Context()
r = ExclusiveIntegerRange(c, 2, 4)
self.assertEqual(list(r), [2, 3])
self.assertEqual(len(r), 2)
self.assert_(1 not in r)
self.assert_(2 in r)
self.assert_(3 in r)
self.assert_(4 not in r)
self.assertEqual(r + 5, [2, 3, 5])
self.assertEqual(5 + r, [5, 2, 3])
self.assertEqual(list(r.keys()), [0, 1])
self.assertEqual(list(r.values()), [2, 3])
self.assertEqual(r.items(), [(0, 2), (1, 3)])
self.assertEqual(r[0], 2)
self.assertEqual(r[1], 3)
r = ExclusiveIntegerRange(c, 4, 2)
self.assertEqual(len(r), 2)
self.assertEqual(list(r), [4, 3])
self.assert_(1 not in r)
self.assert_(2 not in r)
self.assert_(3 in r)
self.assert_(4 in r)
self.assert_(5 not in r)
self.assertEqual(r + 5, [4, 3, 5])
self.assertEqual(5 + r, [5, 4, 3])
self.assertEqual(list(r.keys()), [0, 1])
self.assertEqual(list(r.values()), [4, 3])
self.assertEqual(list(r.items()), [(0, 4), (1, 3)])
self.assertEqual(r[0], 4)
self.assertEqual(r[1], 3)
def test_inclusive_integer(self):
c = Context()
r = InclusiveIntegerRange(c, 2, 4)
self.assertEqual(list(r), [2, 3, 4])
self.assertEqual(len(r), 3)
self.assert_(1 not in r)
self.assert_(2 in r)
self.assert_(3 in r)
self.assert_(4 in r)
self.assert_(5 not in r)
self.assertEqual(r + 5, [2, 3, 4, 5])
self.assertEqual(5 + r, [5, 2, 3, 4])
self.assertEqual(list(r.keys()), [0, 1, 2])
self.assertEqual(list(r.values()), [2, 3, 4])
self.assertEqual(list(r.items()), [(0, 2), (1, 3), (2, 4)])
self.assertEqual(r[0], 2)
self.assertEqual(r[1], 3)
self.assertEqual(r[2], 4)
r = InclusiveIntegerRange(c, 4, 2)
self.assertEqual(list(r), [4, 3, 2])
self.assertEqual(len(r), 3)
self.assert_(1 not in r)
self.assert_(2 in r)
self.assert_(3 in r)
self.assert_(4 in r)
self.assert_(5 not in r)
self.assertEqual(r + 5, [4, 3, 2, 5])
self.assertEqual(5 + r, [5, 4, 3, 2])
self.assertEqual(list(r.keys()), [0, 1, 2])
self.assertEqual(list(r.values()), [4, 3, 2])
self.assertEqual(list(r.items()), [(0, 4), (1, 3), (2, 2)])
self.assertEqual(r[0], 4)
self.assertEqual(r[1], 3)
self.assertEqual(r[2], 2)
def test_exclusive_character(self):
c = Context()
r = ExclusiveCharacterRange(c, "b", "d")
self.assertEqual(list(r), ["b", "c"])
self.assertEqual(len(r), 2)
self.assert_("a" not in r)
self.assert_("b" in r)
self.assert_("c" in r)
self.assert_("d" not in r)
self.assertEqual(r + "e", ["b", "c", "e"])
self.assertEqual("e" + r, ["e", "b", "c"])
self.assertEqual(list(r.keys()), [0, 1])
self.assertEqual(list(r.values()), ["b", "c"])
self.assertEqual(list(r.items()), [(0, "b"), (1, "c")])
self.assertEqual(r[0], "b")
self.assertEqual(r[1], "c")
r = ExclusiveCharacterRange(c, "d", "b")
self.assertEqual(list(r), ["d", "c"])
self.assertEqual(len(r), 2)
self.assert_("a" not in r)
self.assert_("b" not in r)
self.assert_("c" in r)
self.assert_("d" in r)
self.assert_("e" not in r)
self.assertEqual(r + "e", ["d", "c", "e"])
self.assertEqual("e" + r, ["e", "d", "c"])
self.assertEqual(list(r.keys()), [0, 1])
self.assertEqual(list(r.values()), ["d", "c"])
self.assertEqual(list(r.items()), [(0, "d"), (1, "c")])
self.assertEqual(r[0], "d")
self.assertEqual(r[1], "c")
def test_inclusive_character(self):
c = Context()
r = InclusiveCharacterRange(c, "b", "d")
self.assertEqual(list(r), ["b", "c", "d"])
self.assertEqual(len(r), 3)
self.assert_("a" not in r)
self.assert_("b" in r)
self.assert_("c" in r)
self.assert_("d" in r)
self.assert_("e" not in r)
self.assertEqual(r + "e", ["b", "c", "d", "e"])
self.assertEqual("e" + r, ["e", "b", "c", "d"])
self.assertEqual(list(r.keys()), [0, 1, 2])
self.assertEqual(list(r.values()), ["b", "c", "d"])
self.assertEqual(list(r.items()), [(0, "b"), (1, "c"), (2, "d")])
self.assertEqual(r[0], "b")
self.assertEqual(r[1], "c")
self.assertEqual(r[2], "d")
r = InclusiveCharacterRange(c, "d", "b")
self.assertEqual(list(r), ["d", "c", "b"])
self.assertEqual(len(r), 3)
self.assert_("a" not in r)
self.assert_("b" in r)
self.assert_("c" in r)
self.assert_("d" in r)
self.assert_("e" not in r)
self.assertEqual(r + "e", ["d", "c", "b", "e"])
self.assertEqual("e" + r, ["e", "d", "c", "b"])
self.assertEqual(list(r.keys()), [0, 1, 2])
self.assertEqual(list(r.values()), ["d", "c", "b"])
self.assertEqual(list(r.items()), [(0, "d"), (1, "c"), (2, "b")])
self.assertEqual(r[0], "d")
self.assertEqual(r[1], "c")
self.assertEqual(r[2], "b")
| [
"willmcgugan@gmail.com"
] | willmcgugan@gmail.com |
2c94b107dcb549b0b2b8d15b941c9e3eb7c61da7 | 6838d66f8dec23db63923f375438af2dd9fb22fe | /justaminute.py | 315de96a4935263987691525c56a328c3ba21444 | [] | no_license | xCiaraG/Kattis | 795ef7c463c60f362068f0d0e7c8ec8da4a69d19 | ec08aa588f61d78937fb3c63bdb803e999fc9c36 | refs/heads/master | 2021-01-22T21:27:54.736451 | 2020-10-25T17:44:09 | 2020-10-25T17:44:09 | 85,432,383 | 9 | 9 | null | 2020-10-25T17:44:10 | 2017-03-18T21:13:13 | Python | UTF-8 | Python | false | false | 315 | py | n = int(input())
total_minutes = 0
total_actual = 0
for i in range(0, n):
time = list(map(float, input().strip().split()))
total_minutes += time[0]*60
total_actual += time[1]
average = total_actual/total_minutes
if average <= 1:
print("measurement error")
else:
print("{:.9f}".format(average))
| [
"ciara.godwin3@mail.dcu.ie"
] | ciara.godwin3@mail.dcu.ie |
7e1090859245e3a7d13cb11ff24180715a977bab | 46577285b990bb2711cc718b99a24f78c53a7da7 | /๋ฐฑ์ค/์ง์ฌ๊ฐํ ๋ค๊ฐ์ ํฉ์งํฉ์ ๋ฉด์ ๊ตฌํ๊ธฐ.py | 17e6be380d969c06d18319c88204f087210604dc | [] | no_license | suwonraison900206/TIL | 890f02ff768c3a7d6ed647a27ba8da96b0cc06b4 | 2153623238bcc6965ec6983df9e7b207cc5aa361 | refs/heads/master | 2022-06-08T09:51:12.289608 | 2022-05-18T12:04:56 | 2022-05-18T12:04:56 | 235,004,266 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | temp = list(map(int,input().split()))
k = [[0]*10 for _ in range(10)]
print(k)
print(k[1][2])
cnt = 0
for j in range(1,4):
for q in range(2,4):
k[j][q] = 1
print(k)
for u in range(2,5):
for q in range(3,7):
k[u][q] = 1
print(k)
for x in range(3,6):
for q in range(1,5):
k[x][q] = 1
print(k)
for y in range(7,8):
for q in range(3,6):
k[y][q] = 1
print(k)
for o in range(10):
for p in range(10):
if k[o][p] == 1:
cnt = cnt +1
print(cnt) | [
"suwonraison@gmail.com"
] | suwonraison@gmail.com |
404b7a80d6b3cb46638300d4b2454c46500eef2d | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/300-tideGauge.py | c6d03e5fa08cd36ecff6936bf0d19713697e1ecb | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 300
y = 301
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg)+"_"+tg_name;
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
1f8d9044beaaeefc144c85c76b2f906444c20f2e | e57d7785276053332c633b57f6925c90ad660580 | /sdk/storage/azure-storage-blob/tests/test_ors_async.py | f60a213704044e8da44c35ef8dcca533770c122a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 4,206 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from devtools_testutils.storage.aio import AsyncStorageTestCase
from _shared.testcase import GlobalStorageAccountPreparer
from azure.storage.blob import BlobProperties
from azure.storage.blob.aio import BlobServiceClient
# ------------------------------------------------------------------------------
class AiohttpTestTransport(AioHttpTransport):
"""Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
"""
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
class StorageObjectReplicationTest(AsyncStorageTestCase):
SRC_CONTAINER = "test1"
DST_CONTAINER = "test2"
BLOB_NAME = "bla.txt"
# -- Test cases for Object Replication enabled account ----------------------------------------------
# TODO the tests will temporarily use designated account, containers, and blobs to check the OR headers
# TODO use generated account and set OR policy dynamically
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_ors_source(self, resource_group, location, storage_account, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
credential=storage_account_key,
transport=AiohttpTestTransport(connection_data_block_size=1024))
blob = bsc.get_blob_client(container=self.SRC_CONTAINER, blob=self.BLOB_NAME)
# Act
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertIsNotNone(props.object_replication_source_properties)
for replication_policy in props.object_replication_source_properties:
self.assertNotEqual(replication_policy.policy_id, '')
self.assertIsNotNone(replication_policy.rules)
for rule in replication_policy.rules:
self.assertNotEqual(rule.rule_id, '')
self.assertIsNotNone(rule.status)
self.assertNotEqual(rule.status, '')
# Check that the download function gives back the same result
stream = await blob.download_blob()
self.assertEqual(stream.properties.object_replication_source_properties,
props.object_replication_source_properties)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_ors_destination(self, resource_group, location, storage_account, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
credential=storage_account_key,
transport=AiohttpTestTransport(connection_data_block_size=1024))
blob = bsc.get_blob_client(container=self.DST_CONTAINER, blob=self.BLOB_NAME)
# Act
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertIsNotNone(props.object_replication_destination_policy)
# Check that the download function gives back the same result
stream = await blob.download_blob()
self.assertEqual(stream.properties.object_replication_destination_policy,
props.object_replication_destination_policy)
# ------------------------------------------------------------------------------
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
53d84ddbe8c7e20a0deb62c8a428f73af9ac2648 | 4677038af88d83a7c0ed693d1eba7ac371a83b7a | /pymworks/protocol/utils.py | 5c98ceae0294677ba19cd083e7d16a7160a2a0b6 | [] | no_license | jgattupa/pymworks | f0035bf4f234c6bdc7d3691b6b01055c25666db1 | 879d0b74f0179f138a543ce32426d3c3f24720c4 | refs/heads/master | 2021-01-18T20:49:37.971271 | 2014-08-01T23:26:34 | 2014-08-01T23:26:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | #!/usr/bin/env python
import os
from .. import utils
def load(fn):
return utils.ETree(file=fn)
def resolve_protocol(e):
if isinstance(e, (str, unicode)):
return load(os.path.expanduser(e))
return e
def parse_exp(v):
for c in ['(', ')', '+', '-', '/', '#GT', '#LT',
'#AND', '#OR', '#NOT', '#LE', '#GE', '==']:
v = v.replace(c, ' ')
return v.split()
def iter_nodes(e, stops=[]):
"""
stops : tag names
"""
if hasattr(e, 'getroot'):
for n in iter_nodes(e.getroot(), stops):
yield n
return
if any((f(e) for f in stops)):
return
yield e
for se in e:
for n in iter_nodes(se, stops):
yield n
| [
"brettgraham@gmail.com"
] | brettgraham@gmail.com |
b719c2a9f2ce469506e6d70bb6f58840df2c61e7 | 3c44ddbe867d953a5f27c8c073e1ea5e995b5873 | /deep_logistics/agent_storage.py | 978d15db27491456d332dd3f6d89b657071c14c5 | [] | no_license | cair/deep-warehouse | 37f6a3510638b36c276abb62b6b770d0ba6186af | 93cb7329c28733083b48ab6afd3de91676852175 | refs/heads/master | 2022-03-10T16:45:59.553325 | 2022-02-20T17:28:19 | 2022-02-20T17:28:19 | 167,932,576 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from deep_logistics.agent import Agent
class AgentStore:
def __init__(self, env):
self.env = env
self.agents = []
def __iter__(self):
return iter(self.agents)
def __getitem__(self, item):
return self.agents[item]
def __len__(self):
return len(self.agents)
def add_agent(self, cls=Agent, n=1):
if cls is None:
cls = Agent
for i in range(n):
self.agents.append(cls(self.env))
def is_terminal(self, agent=None):
if agent:
pass
for agent in self.agents:
if not agent.is_terminal():
return False
return True
| [
"per@sysx.no"
] | per@sysx.no |
0cc4192a5634a04f7b4bb3a9570016eade195fd2 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /models/TransT/variants/feature_pyramid/merged_cross_attention_block.py | 99d2fd7178cdf5dd29aa6a159fbf6f5803a5fcac | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | import torch.nn as nn
from .merged_cross_attention import PVTMergedCrossAttention
from .mlp import Mlp
from timm.models.layers import DropPath
class MergedCrossAttentionBlock(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super(MergedCrossAttentionBlock, self).__init__()
self.norm1 = norm_layer(dim)
self.attention_block = PVTMergedCrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, merged, z_H, z_W, x_H, x_W, x_pos, merged_k_pos):
x = x + self.drop_path(self.attn(self.norm1(merged), z_H, z_W, x_H, x_W, x_pos, merged_k_pos))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
| [
"linliting06@live.com"
] | linliting06@live.com |
f3d95decf351fcba2c1ca4ccd73a100f4237fddd | f08e50d55bbbb90e4c8f9a8811eaede98ede2694 | /erpbee/stock/report/item_prices/item_prices.py | 714657ea479826fb88f9efb5d3c54087178d2331 | [] | no_license | mohrezbak/erpbee | bc48472a99a7f4357aa7b82ff3a9c1a4c98ba017 | 1134156ad337fd472e14cf347479c17bd8db7b33 | refs/heads/main | 2023-02-12T01:32:07.858555 | 2021-01-08T17:25:23 | 2021-01-08T17:25:23 | 327,872,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,599 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import flt
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
conditions = get_condition(filters)
item_map = get_item_details(conditions)
pl = get_price_list()
last_purchase_rate = get_last_purchase_rate()
bom_rate = get_item_bom_rate()
val_rate_map = get_valuation_rate()
from erpbee.accounts.utils import get_currency_precision
precision = get_currency_precision() or 2
data = []
for item in sorted(item_map):
data.append([item, item_map[item]["item_name"],item_map[item]["item_group"],
item_map[item]["brand"], item_map[item]["description"], item_map[item]["stock_uom"],
flt(last_purchase_rate.get(item, 0), precision),
flt(val_rate_map.get(item, 0), precision),
pl.get(item, {}).get("Selling"),
pl.get(item, {}).get("Buying"),
flt(bom_rate.get(item, 0), precision)
])
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = [_("Item") + ":Link/Item:100", _("Item Name") + "::150",_("Item Group") + ":Link/Item Group:125",
_("Brand") + "::100", _("Description") + "::150", _("UOM") + ":Link/UOM:80",
_("Last Purchase Rate") + ":Currency:90", _("Valuation Rate") + ":Currency:80", _("Sales Price List") + "::180",
_("Purchase Price List") + "::180", _("BOM Rate") + ":Currency:90"]
return columns
def get_item_details(conditions):
"""returns all items details"""
item_map = {}
for i in frappe.db.sql("""select name, item_group, item_name, description,
brand, stock_uom from tabItem %s
order by item_code, item_group""" % (conditions), as_dict=1):
item_map.setdefault(i.name, i)
return item_map
def get_price_list():
"""Get selling & buying price list of every item"""
rate = {}
price_list = frappe.db.sql("""select ip.item_code, ip.buying, ip.selling,
concat(ifnull(cu.symbol,ip.currency), " ", round(ip.price_list_rate,2), " - ", ip.price_list) as price
from `tabItem Price` ip, `tabPrice List` pl, `tabCurrency` cu
where ip.price_list=pl.name and pl.currency=cu.name and pl.enabled=1""", as_dict=1)
for j in price_list:
if j.price:
rate.setdefault(j.item_code, {}).setdefault("Buying" if j.buying else "Selling", []).append(j.price)
item_rate_map = {}
for item in rate:
for buying_or_selling in rate[item]:
item_rate_map.setdefault(item, {}).setdefault(buying_or_selling,
", ".join(rate[item].get(buying_or_selling, [])))
return item_rate_map
def get_last_purchase_rate():
item_last_purchase_rate_map = {}
query = """select * from (
(select
po_item.item_code,
po.transaction_date as posting_date,
po_item.base_rate
from `tabPurchase Order` po, `tabPurchase Order Item` po_item
where po.name = po_item.parent and po.docstatus = 1)
union
(select
pr_item.item_code,
pr.posting_date,
pr_item.base_rate
from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
where pr.name = pr_item.parent and pr.docstatus = 1)
union
(select
pi_item.item_code,
pi.posting_date,
pi_item.base_rate
from `tabPurchase Invoice` pi, `tabPurchase Invoice Item` pi_item
where pi.name = pi_item.parent and pi.docstatus = 1 and pi.update_stock = 1)
) result order by result.item_code asc, result.posting_date asc"""
for d in frappe.db.sql(query, as_dict=1):
item_last_purchase_rate_map[d.item_code] = d.base_rate
return item_last_purchase_rate_map
def get_item_bom_rate():
"""Get BOM rate of an item from BOM"""
item_bom_map = {}
for b in frappe.db.sql("""select item, (total_cost/quantity) as bom_rate
from `tabBOM` where is_active=1 and is_default=1""", as_dict=1):
item_bom_map.setdefault(b.item, flt(b.bom_rate))
return item_bom_map
def get_valuation_rate():
"""Get an average valuation rate of an item from all warehouses"""
item_val_rate_map = {}
for d in frappe.db.sql("""select item_code,
sum(actual_qty*valuation_rate)/sum(actual_qty) as val_rate
from tabBin where actual_qty > 0 group by item_code""", as_dict=1):
item_val_rate_map.setdefault(d.item_code, d.val_rate)
return item_val_rate_map
def get_condition(filters):
"""Get Filter Items"""
if filters.get("items") == "Enabled Items only":
conditions = " where disabled=0 "
elif filters.get("items") == "Disabled Items only":
conditions = " where disabled=1 "
else:
conditions = ""
return conditions
| [
"bakhtiar2k6@gmail.com"
] | bakhtiar2k6@gmail.com |
1f8f1b351c221042799045a477a769f0c46e8f74 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /app/game/action/node/_fight_start_logic.py | 3d1c8be535b28283aed27bdd665e3c365f498c63 | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,524 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
้็จๅผๅงๆๆ็ธๅ
ณไปฃ็ ใ
"""
from app.game.component.fight.stage_factory import get_stage_by_stage_type
from app.game.redis_mode import tb_character_info
from gfirefly.server.logobj import logger
from app.battle.battle_unit import BattleUnit
#from app.battle.battle_process import BattlePVPProcess
from app.battle.server_process import pvp_start, pve_start, mine_start, mine_pvp_start, guild_pvp_start
from random import randint
from shared.utils.const import const
from shared.common_logic.feature_open import is_not_open, FO_FRIEND_SUPPORT
def pvp_process(player, line_up, red_units, blue_units, seed1, seed2, fight_type):
"""docstring for pvp_process"""
#save_line_up_order(line_up, player, current_unpar)
#player.fight_cache_component.awake_hero_units(blue_units)
#player.fight_cache_component.awake_hero_units(red_units)
if not blue_units:
return True
#unpar_type = player.line_up_component.unpar_type
#unpar_other_id = player.line_up_component.unpar_other_id
#red_unpar_data = dict(unpar_type=unpar_type, unpar_other_id=unpar_other_id)
if fight_type == const.BATTLE_PVP:
res = pvp_start(red_units, blue_units, {}, {},
seed1, seed2, player.base_info.level)
elif fight_type == const.BATTLE_MINE_PVP:
res = mine_pvp_start(red_units, blue_units, {}, {},
seed1, seed2, player.base_info.level)
elif fight_type == const.BATTLE_GUILD:
res = guild_pvp_start(red_units, blue_units, seed1, seed2)
logger.debug("pvp_process: %s" % res)
#fight_result = process.process()
return res
def pve_process_check(player, fight_result, steps, fight_type):
"""pve ๆ ก้ช"""
stage_info = player.fight_cache_component.stage_info
red_units = stage_info.get('red_units')
blue_groups = stage_info.get('blue_units')
#drop_num = stage_info.get('drop_num')
monster_unpara = stage_info.get('monster_unpara')
f_unit = stage_info.get('f_unit')
logger.debug("pve_process_check %s", red_units)
logger.debug("pve_process_check %s", blue_groups)
seed1 = player.fight_cache_component.seed1
seed2 = player.fight_cache_component.seed2
red_unpar_data = player.line_up_component.get_red_unpar_data()
blue_unpar_data = dict(blue_skill=monster_unpara, blue_skill_level=1)
if fight_type == const.BATTLE_PVE:
res = pve_start(red_units, blue_groups, red_unpar_data,
blue_unpar_data, f_unit, seed1, seed2, steps, player.base_info.level)
elif fight_type == const.BATTLE_MINE_PVE:
blue_units = blue_groups[0]
res = mine_start(red_units, blue_units, red_unpar_data,
blue_unpar_data, seed1, seed2, steps, player.base_info.level)
logger.debug("pve_start %s %s" % (res, fight_result))
return res[0] == fight_result, res[1], res[2], res[3], res[4]
def save_line_up_order(line_up, player, current_unpar, stage_id=0):
"""docstring for save_line_up_order"""
line_up_info = [] # {hero_id:pos}
for line in line_up:
line_up_info.append(line)
if len(line_up_info) != 6:
logger.error("line up order error %s !" % len(line_up_info))
return
logger.debug("line_up %s, current_unpar%s"% (line_up, current_unpar))
player.fight_cache_component.stage_id = stage_id
player.line_up_component.line_up_order = line_up_info
player.line_up_component.current_unpar = current_unpar
player.line_up_component.save_data(["line_up_order", "current_unpar"])
def pvp_assemble_units(red_units, blue_units, response):
    """Copy both sides' non-empty units into a pvp battle response."""
    sides = ((red_units, response.red), (blue_units, response.blue))
    for units, target in sides:
        # units maps slot number -> battle unit; empty slots are falsy
        for unit in units.values():
            if unit:
                assemble(target.add(), unit)
def pve_process(stage_id, stage_type, line_up, fid, player):
    """Start a PVE battle for the given stage.

    Args:
        stage_id: id of the stage to fight.
        stage_type: stage category used to resolve the concrete stage object.
        line_up: line up order.
        fid: friend id whose lord unit may assist.
        player: the acting player object.

    Returns the dict produced by fight_start (battle setup on success, or the
    failed check() result).
    """
    # NOTE(review): line_up is accepted but never used in this body —
    # presumably callers persist it via save_line_up_order; confirm.
    player.fight_cache_component.stage_id = stage_id
    stage = get_stage_by_stage_type(stage_type, stage_id, player)
    stage_info = fight_start(stage, fid, player)
    return stage_info
def fight_start(stage, fid, player):
    """Start a battle for *stage*.

    Validates the stage, snapshots it into the player's fight cache, rolls
    the battle units, and loads an optional friend-support unit.
    Returns a dict with result=True plus the battle setup, or the failed
    check() result.
    """
    # validate: stage unlocked, attempt limit not reached, etc.
    res = stage.check()
    if not res.get('result'):
        return res
    fight_cache_component = player.fight_cache_component
    fight_cache_component.stage_id = stage.stage_id
    fight_cache_component.stage = stage
    red_units, blue_units, drop_num, monster_unpara = fight_cache_component.fighting_start()
    # friend-support unit
    char_obj = tb_character_info.getObj(fid)
    lord_data = char_obj.hget('lord_attr_info')
    f_unit = None
    if lord_data and not is_not_open(player, FO_FRIEND_SUPPORT):
        info = lord_data.get('info')
        f_unit = BattleUnit.loads(info)
    else:
        # NOTE(review): this also logs when lord_data exists but the
        # friend-support feature is locked — message may be misleading.
        logger.debug('can not find friend id :%d' % fid)
    return dict(result=True,
                red_units=red_units,
                blue_units=blue_units,
                drop_num=drop_num,
                monster_unpara=monster_unpara,
                f_unit=f_unit,
                result_no=0)
def pve_assemble_units(red_units, blue_groups, response):
    """Fill a PVE battle response: one repeated entry per red unit, and one
    nested group (monster wave) per blue group."""
    for slot_no, red_unit in red_units.items():
        if not red_unit:
            continue  # empty formation slot
        red_add = response.red.add()
        assemble(red_add, red_unit)
    for blue_group in blue_groups:
        blue_group_add = response.blue.add()
        for slot_no, blue_unit in blue_group.items():
            if not blue_unit:
                continue
            blue_add = blue_group_add.group.add()
            assemble(blue_add, blue_unit)
    # Dead code kept for reference: unpar/skill fields once echoed back.
    # if blue_skill:
    #     response.monster_unpar = blue_skill
    # response.hero_unpar = red_skill
    # if red_skill in player.line_up_component.unpars:
    #     unpar_level = player.line_up_component.unpars[red_skill]
    #     response.hero_unpar_level = unpar_level
def pve_assemble_friend(f_unit, response):
    """Attach the supporting friend unit (if any) to the battle response."""
    if f_unit:
        friend = response.friend
        assemble(friend, f_unit)
# logger.debug('stage-entry response data: %s', response)  (comment was garbled CJK)
def assemble(unit_add, unit):
    """Copy one battle unit's combat attributes onto a response unit message.

    Args:
        unit_add: destination message with assignable scalar fields and a
            repeated ``break_skills`` field (supports ``append``).
        unit: source battle unit.
    """
    unit_add.no = unit.unit_no
    unit_add.quality = unit.quality
    for skill_no in unit.skill.break_skill_ids:
        unit_add.break_skills.append(skill_no)
    unit_add.hp = unit.hp
    unit_add.hp_max = unit.hp_max
    unit_add.atk = unit.atk
    unit_add.physical_def = unit.physical_def
    unit_add.magic_def = unit.magic_def
    unit_add.hit = unit.hit
    unit_add.dodge = unit.dodge
    unit_add.cri = unit.cri
    unit_add.cri_coeff = unit.cri_coeff
    unit_add.cri_ded_coeff = unit.cri_ded_coeff
    unit_add.block = unit.block
    unit_add.level = unit.level
    unit_add.break_level = unit.break_level
    unit_add.position = unit.position
    unit_add.is_boss = unit.is_boss
    unit_add.is_awake = unit.is_awake
    unit_add.origin_no = unit.origin_no  # was assigned twice; duplicate removed
    unit_add.is_break = unit.is_break
    unit_add.awake_level = unit.awake_level
    unit_add.power = int(unit.power)  # power is truncated to an int
def get_seeds():
    """Return two independent random battle seeds, each in [1, 100]."""
    return randint(1, 100), randint(1, 100)
| [
"zxzxck@163.com"
] | zxzxck@163.com |
7309d042603cb51df3e768ebbb4ad3420d5c3475 | 96d1876d2c18fdac86e8f84bb6684296122a8962 | /test.py | 8630c5c3f7d8ab2f88015f4dc4ea33198c798376 | [] | no_license | jschnab/real-estate-scraping | 756636922a02f0670a1e5b574bf7d7be9200ea02 | 8334470f3b49d1b983be56ccecba040b20028001 | refs/heads/master | 2023-03-15T04:00:38.278259 | 2020-10-24T02:12:28 | 2020-10-24T02:12:28 | 213,277,448 | 1 | 0 | null | 2020-10-24T02:12:29 | 2019-10-07T02:10:11 | Python | UTF-8 | Python | false | false | 651 | py | import sys
from tor_sqs_browser import Browser
from nytimes.browse import *
from nytimes.parse_soup import *
def main():
    """Run one pipeline stage of the NYTimes listings crawler.

    Usage: test.py {browse|harvest|extract}
    """
    crawler = Browser(
        base_url="https://www.nytimes.com",
        stop_test=is_last_page,
        get_browsable=wrapper_next_page,
        get_parsable=get_listings,
        get_page_id=get_listing_id,
        soup_parser=parse_webpage,
        config_file="nytimes.conf",
    )
    # dispatch table: unknown commands are silently ignored, as before
    dispatch = {
        "browse": lambda: crawler.browse(BEGIN_RENT_LISTINGS),
        "harvest": crawler.harvest,
        "extract": crawler.extract,
    }
    action = dispatch.get(sys.argv[1])
    if action is not None:
        action()


if __name__ == "__main__":
    main()
| [
"jonathan.schnabel31@gmail.com"
] | jonathan.schnabel31@gmail.com |
7871008023436bf19c0167767ce0c4bda40228cf | 75d4ad310511c1468fc596bffd64b68a2574cf95 | /blog/migrations/0010_auto_20200903_1825.py | bdf51e1058bca71f3f28258fc323e21c3108b114 | [] | no_license | famousowhedo/learnignite | 8fce5e4d55e9d19109c9ed1a32ae1ead6a3c1881 | c8cf8711d406849c11cf441babf12ff6887e6992 | refs/heads/master | 2022-12-07T11:31:27.504068 | 2020-09-05T03:32:24 | 2020-09-05T03:32:24 | 292,993,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Generated by Django 3.1 on 2020-09-03 18:25
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Alter Course.date to default to the current (timezone-aware) time."""

    dependencies = [
        ('blog', '0009_auto_20200903_1824'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course',
            name='date',
            # callable default: evaluated at row-creation time, not import time
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
821861404863ce42172f4e1f0ffdb84c1d1f38e1 | f1c2a606938392b6e66a86f20f5db060f1099c18 | /empapp_1/urls.py | 40d1e68a06ec60089b042350fe203599c947c2e7 | [] | no_license | aynulislam/Some_API_Create | 26393ecd3f61892517110282d5cd3aaf848eb066 | e8301a4d6861278dc1782329bc28cee2bc9dd855 | refs/heads/master | 2020-08-22T07:10:35.505533 | 2019-10-20T10:40:26 | 2019-10-20T10:40:26 | 216,344,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from django import views
from .views import DesignationAPIView, EmployeeAPIView
from django.urls import path
# URL routes for the employee app.
# NOTE(review): DesignationAPIView / EmployeeAPIView are passed without
# .as_view() — presumably they are function-based views; confirm.
urlpatterns = [
    path('Designation/', DesignationAPIView, name="Designation"),
    path('Employee/<int:pk>', EmployeeAPIView, name="Employee"),
]
| [
"noreply@github.com"
] | aynulislam.noreply@github.com |
ad52cd1a0e01e511094c6a0642d7dea089a298af | dea48ecac82d241e7960f52794eb8a29e5d2e428 | /jianzhioffer/ๆฐ็ป/ๆๅคง้ๅขๅบๅ้ฟๅบฆ_่ฟ็ปญandไธ่ฟ็ปญ.py | 8d261b7e2545d02289bb0dd6ebc1ecbac945b5bc | [] | no_license | yxx94/2020- | e2302bed32c5d7d1e8b559ef378fc60408687934 | e63431cfc3d8c8903bb383144dd0c5ed5d71aa5c | refs/heads/master | 2020-09-03T09:04:08.795099 | 2019-09-19T08:12:51 | 2019-09-19T08:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,483 | py | #coding=utf-8
##### ่ฆๆฑ้ๅขๅญๅบๅ่ฟ็ปญ
# ๆถๅค O(n)
a = [10, 80, 6, 3, 4, 7, 1, 5, 11, 2, 12, 30, 31]
concoll = [0]
for i in range(len(a) - 1):
if a[i] < a[i+1]:
count = concoll[-1] + 1
concoll.append(count)
else:
concoll.append(1)
maxcou = max(concoll)
print maxcou
##### ้ๅขๅญๅบๅๅฏไธ่ฟ็ปญ
'''
[1,6,2,3,7,5] ==> [1,2,3,5]
่ฟๅ็ๆฏๅบๅ็้ฟๅบฆ๏ผไธๆฏ้ๅขๅบๅ
ๆถๅค๏ผO(n^2)
'''
# ๅจๆ่งๅๅฐฑ้พๅจ ๆดๆฐๆกไปถๆไนๅ๏ผ๏ผ๏ผ
class Solution(object):
    """O(n^2) dynamic programming for the longest increasing subsequence."""

    def lengthOfLIS(self, nums):
        """Return the length of the longest strictly increasing subsequence."""
        if not nums:
            return 0
        n = len(nums)
        # best[i]: LIS length among subsequences ending exactly at index i
        best = [1] * n
        longest = 1
        for end in range(n):
            for prev in range(end):
                if nums[end] > nums[prev] and best[prev] + 1 > best[end]:
                    best[end] = best[prev] + 1
            longest = max(longest, best[end])
        return longest
s = Solution()
res = s.lengthOfLIS([10, 80, 6, 3, 4, 7, 1, 5, 11, 2, 12, 30, 31])
print res
# ๅฉ็จไบๅๆฅๆพ+dpๆๆณ๏ผ๏ผๆถๅคO(nlogn)
'''
dp[i]: ๆๆ้ฟๅบฆไธบi+1็้ๅขๅญๅบๅไธญ, ๆๅฐ็้ฃไธชๅบๅๅฐพๆฐ.
็ฑๅฎไน็ฅdpๆฐ็ปๅฟ
็ถๆฏไธไธช้ๅขๆฐ็ป, ๅฏไปฅ็จ maxL ๆฅ่กจ็คบๆ้ฟ้ๅขๅญๅบๅ็้ฟๅบฆ.
ๅฏนๆฐ็ป่ฟ่ก้ๅ, ไพๆฌกๅคๆญๆฏไธชๆฐnumๅฐๅ
ถๆๅ
ฅdpๆฐ็ป็ธๅบ็ไฝ็ฝฎ:
1. num > dp[maxL], ่กจ็คบnumๆฏๆๆๅทฒ็ฅ้ๅขๅบๅ็ๅฐพๆฐ้ฝๅคง, ๅฐnumๆทปๅ ๅ
ฅdp
ๆฐ็ปๅฐพ้จ, ๅนถๅฐๆ้ฟ้ๅขๅบๅ้ฟๅบฆmaxLๅ 1
2. dp[i-1] < num <= dp[i], ๅชๆดๆฐ็ธๅบ็dp[i] dp็ๆๆซๅ
็ด ๅฏไปฅๅๅฐ
ไฝๆญคๆถ็maxlenๆฏไธๅ็
'''
# ่ฟไธช็็ๅคชๅผบไบ๏ผ๏ผ๏ผ
def fun(nums):
    """Length of the longest strictly increasing subsequence, O(n log n).

    dp[i] holds the smallest possible tail value of any increasing
    subsequence of length i + 1; dp is therefore always sorted, which is
    what makes the inner binary search valid.

    Fix: the original used '/', which yields a float on Python 3 and then
    crashes with a non-integer list index; '//' keeps integer division.
    """
    l = len(nums)
    dp = [0] * l
    maxlen = 0
    for i in range(l):          # outer loop O(n); inner search O(log n)
        lo, hi = 0, maxlen
        while lo < hi:
            mid = (lo + hi) // 2
            if dp[mid] < nums[i]:
                lo = mid + 1
            else:
                hi = mid
        dp[lo] = nums[i]        # binary-insert nums[i] into dp
        if lo == maxlen:        # extended past every known tail -> longer LIS
            maxlen += 1
    return maxlen
print fun([10, 80, 6, 3, 4, 7, 1, 5, 11, 2, 12, 30, 31]) | [
"jiachen0212@163.com"
] | jiachen0212@163.com |
6dc0b8db25f11ecb1ec374db52f151526d3ce060 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_014/ch42_2020_03_29_22_30_07_552136.py | 44857793d4db8e0ea56474d72cd167cbf74ff5c8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | lista_palavras = [] * 10
palavra = input('Fale uma palavra: ')
while palavra != 'fim':
lista_palavras.append(palavra)
palavra = input('Fale outra palavra: ')
i = 0
while i < len(lista_palavras):
palavra = lista_palavras[i]
if len(palavra) > 1 and palavra[0] == 'a':
print (palavra)
i += 1 | [
"you@example.com"
] | you@example.com |
7b3d5a3df2d97f581290a02d843bcbf302fde66b | 3d020fd4cc9fe7f2d5e83850a6c81a4884b995b8 | /python/Hadronizer_TuneCP5_13TeV_HToZATo2L2B_pythia8_PSweights_cff.py | 5e6a00a0ba8d892dd27b95ac0dc8cc331a60de8b | [] | no_license | kjaffel/NanoGenScripts | 8cd0454f7d02346a4a6f83dd4088be30a923b05c | 1d368defa4cec85e462ef268acf0385b387c9d55 | refs/heads/master | 2023-07-15T04:23:48.099918 | 2021-08-25T11:33:07 | 2021-08-25T11:33:07 | 348,656,837 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py | import FWCore.ParameterSet.Config as cms
# Runs an external gridpack (path supplied via args at job time) to produce
# LHE events that the hadronizer below consumes.
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring(''),
    nEvents = cms.untracked.uint32(1000),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
# https://github.com/cms-sw/cmssw/blob/master/Configuration/Generator/python/Pythia8CommonSettings_cfi.py
# https://github.com/cms-sw/cmssw/blob/master/Configuration/Generator/python/MCTunes2017/PythiaCP5Settings_cfi.py
# https://github.com/cms-sw/cmssw/blob/master/Configuration/Generator/python/PSweightsPythia/PythiaPSweightsSettings_cfi.py
# Pythia8 hadronizer for H(h2) -> Z A(h3) with A -> b bbar, tune CP5,
# parton-shower weights enabled, 13 TeV.
generator = cms.EDFilter("Pythia8HadronizerFilter",
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1.0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    comEnergy = cms.double(13000.),
    PythiaParameters = cms.PSet(
        pythia8CP5SettingsBlock,
        pythia8PSweightsSettingsBlock,
        pythia8CommonSettingsBlock,
        processParameters = cms.vstring(
            'Higgs:useBSM = on',# allow BSM Higgs production
            '35:onMode = off' , # turn off all h2 decays
            '35:onIfMatch = 36 23', # turn on only h2 to h3 Z
            '35:isResonance = true',
            #'35:doForceWidth = on',
            '36:onMode = off' , # turn off all h3 decays
            '36:onIfAny = 5', # turn on only h3 to b b~
            '36:isResonance = true',
            #'36:doForceWidth = on',
            #'36:mWidth =0.01410862'
            #'23:onMode = off' , # turn off all Z decays
            #'23:onIfAny = 11 13 15', # turn on only decays Z to leptons
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CP5Settings',
                                    'pythia8PSweightsSettings',
                                    'processParameters'
                                    )
    )
)

# Sequence picked up by cmsDriver as the generation step.
ProductionFilterSequence = cms.Sequence(generator)
| [
"khawla.jaffel@cern.ch"
] | khawla.jaffel@cern.ch |
cc964cf2e9b24c79fcc02254a5474753061dcb50 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/3050/889003050.py | cd6e4689ea1607dfdb04791747ece216306af46f | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,343 | py | from bots.botsconfig import *
from records003050 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'QG',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'G42', MIN: 1, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 10},
{ID: 'G61', MIN: 0, MAX: 3},
{ID: 'G62', MIN: 1, MAX: 50},
{ID: 'NTE', MIN: 0, MAX: 300},
{ID: 'G43', MIN: 0, MAX: 1000},
{ID: 'G23', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 1, MAX: 99, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'G94', MIN: 0, MAX: 20, LEVEL: [
{ID: 'G95', MIN: 0, MAX: 99},
]},
{ID: 'LX', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'G46', MIN: 0, MAX: 20},
{ID: 'G94', MIN: 0, MAX: 20, LEVEL: [
{ID: 'G95', MIN: 0, MAX: 99},
]},
{ID: 'G45', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'G69', MIN: 0, MAX: 5},
{ID: 'G43', MIN: 0, MAX: 9999},
{ID: 'G51', MIN: 0, MAX: 10},
{ID: 'G23', MIN: 0, MAX: 1},
{ID: 'G62', MIN: 0, MAX: 10},
{ID: 'G22', MIN: 0, MAX: 1},
{ID: 'QTY', MIN: 0, MAX: 10},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
da2ab0c2dec52cad72d33019513e3341e899b3cb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02596/s431031550.py | 0f7aa0665c6229afa67fb320f03392e10fcc8d96 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | def solve(string):
n, k = 7, int(string)
if k % 2 == 0 or k % 5 == 0:
return "-1"
for i in range(k):
if not n % k:
return str(i + 1)
n = (10 * n + 7) % k
if __name__ == '__main__':
import sys
print(solve(sys.stdin.read().strip()))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
67e6772698fc2c5e6a89a0abecb9d5b7cc68ac26 | bd8bc57159de1e077b1dacddad4bd0dab5df57ae | /qrcodelist/migrations/0001_initial.py | 9073675e7997e68b235f0d8982d4a93878c0b897 | [
"Apache-2.0"
] | permissive | Klaymr/GreaterWMS | ac22e9c9395f2ba5d6f7535601743df39d113384 | 7ddf0e8bf3447edf7e27aea16afec6483fb21dbc | refs/heads/master | 2023-01-07T07:07:24.171838 | 2020-11-17T05:41:26 | 2020-11-17T05:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # Generated by Django 2.2.12 on 2020-07-19 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the QrCodeList table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='QrCodeList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=100, verbose_name='ไบ็ปด็ ไฟกๆฏ')),
                ('text_img', models.CharField(max_length=100, verbose_name='ไบ็ปด็ ้พๆฅ')),
                # auto_now_add: stamped once when the row is created
                ('create_time', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"singosgu@gmail.com"
] | singosgu@gmail.com |
7ce1a72eb9d888db890e4ad8374dde35e6d0897d | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/efa78b4ff4b4c68f4bce8a0c02c82134c4cb1bd6-<test_module_utils_basic_ansible_module__symbolic_mode_to_octal>-fix.py | 162d7965b94dc276ce7e786c4a606e6c6a653821 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | def test_module_utils_basic_ansible_module__symbolic_mode_to_octal(self):
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = None
am = basic.AnsibleModule(argument_spec=dict())
mock_stat = MagicMock()
mock_stat.st_mode = 16384
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a+rwx'), 511)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u+rwx,g+rwx,o+rwx'), 511)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o+rwx'), 7)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g+rwx'), 56)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u+rwx'), 448)
mock_stat.st_mode = 16895
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a-rwx'), 0)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u-rwx,g-rwx,o-rwx'), 0)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o-rwx'), 504)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g-rwx'), 455)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u-rwx'), 63)
mock_stat.st_mode = 16384
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a=rwx'), 511)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u=rwx,g=rwx,o=rwx'), 511)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o=rwx'), 7)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g=rwx'), 56)
self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u=rwx'), 448)
mock_stat.st_mode = 16384
self.assertRaises(ValueError, am._symbolic_mode_to_octal, mock_stat, 'a=foo') | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
bbe72c369e4f7997567ed223479d14a4db8e78c8 | 74eb4f8153804ab27fcfc8c1412d64607ebc1536 | /pyapprox/tests/test_approximate.py | 1b1edfb48e45b98aafe1375aff62e2bdd768ac56 | [
"MIT"
] | permissive | samtx/pyapprox | 2d3c09f35de58f51eb5e9346acbb65ca0e5c242b | c926d910e30fbcfed7d0621175d3b0268d59f852 | refs/heads/master | 2023-01-27T11:41:28.064288 | 2020-11-09T20:46:40 | 2020-11-09T20:46:40 | 269,163,376 | 0 | 0 | MIT | 2020-06-03T18:23:12 | 2020-06-03T18:23:11 | null | UTF-8 | Python | false | false | 7,820 | py | import unittest
from scipy import stats
from pyapprox.approximate import *
from pyapprox.benchmarks.benchmarks import setup_benchmark
import pyapprox as pya
class TestApproximate(unittest.TestCase):
    """Tests for pyapprox's approximate/adaptive_approximate front-ends:
    sparse grids, polynomial chaos (degree cross-validation and basis
    expansion) and Gaussian-process regression."""

    def setUp(self):
        # fixed seed so randomized training sets are reproducible
        np.random.seed(1)

    def test_approximate_sparse_grid_default_options(self):
        """Sparse grid with default options reproduces Ishigami to ~machine precision."""
        nvars = 3
        benchmark = setup_benchmark('ishigami',a=7,b=0.1)
        univariate_variables = [stats.uniform(0,1)]*nvars
        approx = adaptive_approximate(
            benchmark.fun,univariate_variables,'sparse_grid').approx
        nsamples = 100
        error = compute_l2_error(
            approx,benchmark.fun,approx.variable_transformation.variable,
            nsamples)
        assert error<1e-12

    def test_approximate_sparse_grid_user_options(self):
        """Sparse grid with user-supplied quad rule, refinement indicator and callback."""
        nvars = 3
        benchmark = setup_benchmark('ishigami',a=7,b=0.1)
        univariate_variables = benchmark['variable'].all_variables()
        errors = []
        def callback(approx):
            # record the L2 error after every refinement step
            nsamples = 1000
            error = compute_l2_error(
                approx,benchmark.fun,approx.variable_transformation.variable,
                nsamples)
            errors.append(error)
        univariate_quad_rule_info = [
            pya.clenshaw_curtis_in_polynomial_order,
            pya.clenshaw_curtis_rule_growth]
        # ishigami has same value at first 3 points in clenshaw curtis rule
        # and so adaptivity will not work so use different rule
        #growth_rule=partial(pya.constant_increment_growth_rule,4)
        #univariate_quad_rule_info = [
        #    pya.get_univariate_leja_quadrature_rule(
        #        univariate_variables[0],growth_rule),growth_rule]
        refinement_indicator = partial(
            variance_refinement_indicator,convex_param=0.5)
        # NOTE(review): 'verbose' appears twice in this dict literal; the
        # later value (0) silently wins.
        options = {'univariate_quad_rule_info':univariate_quad_rule_info,
                   'max_nsamples':300,'tol':0,'verbose':False,
                   'callback':callback,'verbose':0,
                   'refinement_indicator':refinement_indicator}
        approx = adaptive_approximate(
            benchmark.fun,univariate_variables,'sparse_grid',options).approx
        #print(np.min(errors))
        assert np.min(errors)<1e-3

    def test_approximate_polynomial_chaos_default_options(self):
        """Adaptive PCE with defaults reproduces Ishigami to ~machine precision."""
        nvars = 3
        benchmark = setup_benchmark('ishigami',a=7,b=0.1)
        # we can use different univariate variables than specified by
        # benchmark
        univariate_variables = [stats.uniform(0,1)]*nvars
        approx = adaptive_approximate(
            benchmark.fun,univariate_variables,method='polynomial_chaos').approx
        nsamples = 100
        error = compute_l2_error(
            approx,benchmark.fun,approx.variable_transformation.variable,
            nsamples)
        assert error<1e-12

    def test_cross_validate_pce_degree(self):
        """Degree cross-validation picks degree 2 and 3 for the two synthetic QoI."""
        num_vars = 2
        univariate_variables = [stats.uniform(-1,2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        degree=3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars,degree,1.0))
        num_samples = poly.num_terms()*2
        coef = np.random.normal(0,1,(poly.indices.shape[1],2))
        coef[pya.nchoosek(num_vars+2,2):,0]=0
        # for first qoi make degree 2 the best degree
        poly.set_coefficients(coef)
        train_samples=pya.generate_independent_random_samples(
            variable,num_samples)
        train_vals = poly(train_samples)
        true_poly=poly
        poly = approximate(
            train_samples,train_vals,'polynomial_chaos',
            {'basis_type':'hyperbolic_cross','variable':variable,
             'options':{'verbosity':3}}).approx
        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable,num_validation_samples)
        assert np.allclose(
            poly(validation_samples),true_poly(validation_samples))
        poly = copy.deepcopy(true_poly)
        approx_res = cross_validate_pce_degree(
            poly,train_samples,train_vals,1,degree+2)
        assert np.allclose(approx_res.degrees,[2,3])

    def test_pce_basis_expansion(self):
        """Expanding-basis PCE recovers a sparse synthetic expansion exactly."""
        num_vars = 2
        univariate_variables = [stats.uniform(-1,2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        degree,hcross_strength=7,0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars,degree,hcross_strength))
        num_samples = poly.num_terms()*2
        degrees = poly.indices.sum(axis=0)
        # decaying coefficients: higher-degree terms contribute less
        coef = np.random.normal(
            0,1,(poly.indices.shape[1],2))/(degrees[:,np.newaxis]+1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        I = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[I,0]=0
        I = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[I,1]=0
        poly.set_coefficients(coef)
        train_samples=pya.generate_independent_random_samples(
            variable,num_samples)
        train_vals = poly(train_samples)
        true_poly=poly
        poly = approximate(
            train_samples,train_vals,'polynomial_chaos',
            {'basis_type':'expanding_basis','variable':variable}).approx
        num_validation_samples = 100
        validation_samples = pya.generate_independent_random_samples(
            variable,num_validation_samples)
        # NOTE(review): the fresh validation set above is immediately
        # overwritten — validation happens on the training samples.
        validation_samples = train_samples
        error = np.linalg.norm(poly(validation_samples)-true_poly(
            validation_samples))/np.sqrt(num_validation_samples)
        assert np.allclose(
            poly(validation_samples),true_poly(validation_samples),atol=1e-8),\
            error

    def test_approximate_gaussian_process(self):
        """GP regression on a random RKHS function matches it closely off-sample."""
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.uniform(-1,2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        num_samples = 100
        train_samples=pya.generate_independent_random_samples(
            variable,num_samples)
        # Generate random function
        nu=np.inf#2.5
        kernel = Matern(0.5, nu=nu)
        X=np.linspace(-1,1,1000)[np.newaxis,:]
        alpha=np.random.normal(0,1,X.shape[1])
        train_vals = kernel(train_samples.T,X.T).dot(alpha)[:,np.newaxis]
        gp = approximate(
            train_samples,train_vals,'gaussian_process',{'nu':nu}).approx
        error = np.linalg.norm(gp(X)[:,0]-kernel(X.T,X.T).dot(alpha))/np.sqrt(
            X.shape[1])
        assert error<1e-5
        # import matplotlib.pyplot as plt
        # plt.plot(X[0,:],kernel(X.T,X.T).dot(alpha),'r--',zorder=100)
        # vals,std = gp(X,return_std=True)
        # plt.plot(X[0,:],vals[:,0],c='b')
        # plt.fill_between(
        #     X[0,:],vals[:,0]-2*std,vals[:,0]+2*std,color='b',alpha=0.5)
        # plt.plot(train_samples[0,:], train_vals[:,0],'ro')
        # plt.show()
if __name__== "__main__":
approximate_test_suite = unittest.TestLoader().loadTestsFromTestCase(
TestApproximate)
unittest.TextTestRunner(verbosity=2).run(approximate_test_suite)
| [
"29109026+jdjakem@users.noreply.github.com"
] | 29109026+jdjakem@users.noreply.github.com |
a89aad82400ab9f0b4905f0e287eaa219d14dfc4 | 9f24e060c7721f5fdc2195d8060f653db4e88a33 | /OpenCV-Python-Tutorial/cv-Tkinter-GUI/opencv-with-tkinter.py | 68d0215027290254f133eee43d448c208b9113d3 | [] | no_license | menzec/htht | 16052b8bad48d73a08388408f51ca6bed95c3861 | f3f19577534b05b4410bb37f6456aab4637d2e6d | refs/heads/master | 2020-04-26T03:56:14.115135 | 2019-03-01T09:55:06 | 2019-03-01T09:55:06 | 173,284,085 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | # -*- coding: utf-8 -*-
# @Time : 2018/2/8 15:56
# @Author : play4fun
# @File : opencv-with-tkinter.py
# @Software: PyCharm
"""
opencv-with-tkinter.py:
https://www.pyimagesearch.com/2016/05/23/opencv-with-tkinter/
ไธ้่ฆ
pip install image
"""
# import the necessary packages
from tkinter import *
from PIL import Image
from PIL import ImageTk
import tkinter.filedialog as tkFileDialog
import cv2
def select_image():
    """Prompt for an image file, compute its Canny edge map, and show both
    (original left, edges right) in the two Tkinter panels."""
    # grab a reference to the image panels
    global panelA, panelB
    # open a file chooser dialog and allow the user to select an input
    # image
    path = tkFileDialog.askopenfilename()
    # ensure a file path was selected
    if len(path) > 0:
        # load the image from disk, convert it to grayscale, and detect
        # edges in it
        image = cv2.imread(path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        edged = cv2.Canny(gray, 50, 100)
        # represents images in BGR order; however PIL represents
        # images in RGB order, so we need to swap the channels
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # convert the images to PIL format...
        image = Image.fromarray(image)
        edged = Image.fromarray(edged)
        # ...and then to ImageTk format
        image = ImageTk.PhotoImage(image)
        edged = ImageTk.PhotoImage(edged)
        # if the panels are None, initialize them
        if panelA is None or panelB is None:
            # the first panel will store our original image
            panelA = Label(image=image)
            # keep a reference on the widget so Tk doesn't garbage-collect it
            panelA.image = image
            panelA.pack(side="left", padx=10, pady=10)
            # while the second panel will store the edge map
            panelB = Label(image=edged)
            panelB.image = edged
            panelB.pack(side="right", padx=10, pady=10)
        # otherwise, update the image panels
        else:
            # update the pannels
            panelA.configure(image=image)
            panelB.configure(image=edged)
            panelA.image = image
            panelB.image = edged
# initialize the window toolkit along with the two image panels
root = Tk()
# panelA/panelB are module-level so select_image() can create/update them
panelA = None
panelB = None
# create a button, then when pressed, will trigger a file chooser
# dialog and allow the user to select an input image; then add the
# button the GUI
btn = Button(root, text="Select an image", command=select_image)
btn.pack(side="bottom", fill="both", expand="yes", padx="10", pady="10")
# kick off the GUI
root.mainloop()
| [
"menzc@outlook.com"
] | menzc@outlook.com |
c92b344b84e219edd8fde57641028f1dd92ef093 | 0a567d3417fbdf68fdbd1bf4ef9778518e898678 | /genderClassifier.py | 4cc96d01fa12228c898be846767061c24e56f48a | [] | no_license | maxberggren/sinus | 1d8890009f1eb28d49688883836d0ec2822163ad | 5f85858c0f48927486a3f96d8251ffedbfc3e553 | refs/heads/master | 2020-05-16T11:32:42.516246 | 2016-02-16T09:59:36 | 2016-02-16T09:59:36 | 24,930,665 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,441 | py | from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
import numpy as np
import dataset
import urllib2
import requests
from collections import OrderedDict
import config as c
import nltk
import codecs
from sklearn.cross_validation import train_test_split
# Get data: blogs with at least one post and a non-empty gender field.
db = dataset.connect(c.LOCATIONDB+ "?charset=utf8")
db.query("set names 'utf8'")
result = db.query("SELECT b.* FROM blogs b "
                  "WHERE (SELECT count(*) FROM posts p WHERE "
                  " p.blog_id=b.id) > 0 AND "
                  "character_length(b.gender) > 0")
data = []
label = []
for row in result:
    # concatenate all of a blog's posts into one document
    posts = db['posts'].find(blog_id=row['id'])
    text = ""
    for post in posts:
        text = text + u"\n\n" + post['text']
    if len(text) > 300:  # skip blogs with too little text
        data.append(text)
        # NOTE(review): despite the file name, the target is AGE here —
        # the gender labelling below is commented out. Confirm intent.
        label.append(row['age'])
    #if row['gender'] == "man":
    #    label.append(0)
    #    data.append(text)
    #elif row['gender'] == "kvinna":
    #    label.append(1)
    #    data.append(text)
label = np.asarray(label)
X_train, X_test, Y_train, Y_test = train_test_split(data, label, test_size=0.33, random_state=42)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
#print(type(data_train.data))
#print(type(data_train.target))
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
    #'vect__max_df': (0.5, 0.75, 1.0),
    #'vect__max_features': (None, 5000, 10000, 50000),
    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
    #'tfidf__use_idf': (True, False),
    'tfidf__norm': ('l1', 'l2'),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
    #'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
    # multiprocessing requires the fork to happen in a __main__ protected
    # block
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    t0 = time()
    grid_search.fit(X_train, Y_train)
    print("done in %0.3fs" % (time() - t0))
    print()
    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
    # held-out accuracy of the best estimator
    predicted = grid_search.predict(X_test)
    print(np.mean(predicted == Y_test))
    print("Done!")
| [
"maxberggren@gmail.com"
] | maxberggren@gmail.com |
4954cdbacf8c6acc46c5811e497d40545e3f10a8 | e2f507e0b434120e7f5d4f717540e5df2b1816da | /021-tuple-vs-list.py | ebe65a1d6fac2f2d4ba71e6b71c93f5ccf0db3c6 | [] | no_license | ash/amazing_python3 | 70984bd32ae325380382b1fe692c4b359ef23395 | 64c98940f8a8da18a8bf56f65cc8c8e09bd00e0c | refs/heads/master | 2021-06-23T14:59:37.005280 | 2021-01-21T06:56:33 | 2021-01-21T06:56:33 | 182,626,874 | 76 | 25 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # Tuples vs lists
# Similar but different
var1 = [2, 4, 6, 8] # list
var2 = (2, 4, 6, 8) # tuple
print(type(var1))
print(type(var2))
| [
"andy@shitov.ru"
] | andy@shitov.ru |
f5db7fd8a9f22cb63c83e38171bca4846959715a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02700/s876239680.py | d2f6a79dacbdfd6ebfea049548aad9f2217693ab | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | A, B, C, D = list(map(int, input().split()))
win = ''
while True:
C = C - B
if C <= 0:
win = 'Yes'
break
A = A - D
if A <= 0:
win = 'No'
break
print(win) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
88cf4246b277e81c908dad205921830cda1584cf | dd27efb2b54a9ca87985dc4969f647f30273aa72 | /Searching/Binary_search.py | becb8a6caf89e1a8467303526484e2197ffc3993 | [] | no_license | Prakashchater/Daily-Practice-questions | c47dd3deec16fa968be0b4adcd1cf296c21adcee | f60b3044c36e4fa14b140614e302ede16be778fd | refs/heads/main | 2023-04-18T18:44:52.813481 | 2021-05-09T18:17:31 | 2021-05-09T18:17:31 | 351,801,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #Recursive way
# def binary(arr,target,left, right):
# if right >= 1:
# mid = (left + right) // 2
# if arr[mid] == target:
# return mid
# elif arr[mid] > target:
# return binary(arr,target,left,mid-1)
# else:
# return binary(arr,target,mid+1,right)
# else:
# return -1
#Iterative Way
def binary(arr,target,left, right):
if right >= 1:
mid = (left + right) // 2
if arr[mid] == target:
return mid
elif arr[mid] > target:
right = mid -1
return right
else:
left = mid + 1
return left
else:
return -1
if __name__ == '__main__':
arr = [10,20,30,40,50]
target = 10
print(binary(arr,target,0,len(arr)-1))
| [
"prakashchater@gmail.com"
] | prakashchater@gmail.com |
637b64e91be718813de8efe9b8e820998e441c3a | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /plugins/module_utils/facts/virtual/freebsd.py | bdef3006c5b9317d74b3beb49bcc5f4bde19cb52 | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible_collections.notstdlib.moveitallout.plugins.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible_collections.notstdlib.moveitallout.plugins.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def get_virtual_facts(self):
virtual_facts = {}
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
if os.path.exists('/dev/xen/xenstore'):
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
if virtual_facts['virtualization_type'] == '':
virtual_product_facts = self.detect_virt_product('kern.vm_guest') or self.detect_virt_product(
'hw.hv_vendor') or self.detect_virt_product('security.jail.jailed')
virtual_facts.update(virtual_product_facts)
if virtual_facts['virtualization_type'] == '':
virtual_vendor_facts = self.detect_virt_vendor('hw.model')
virtual_facts.update(virtual_vendor_facts)
return virtual_facts
class FreeBSDVirtualCollector(VirtualCollector):
_fact_class = FreeBSDVirtual
_platform = 'FreeBSD'
| [
"wk@sydorenko.org.ua"
] | wk@sydorenko.org.ua |
6a37886dd8875a19269dd10c9aff30aad7e0a344 | b00873d36e44128ce30623da0ee3b556e4e3d7e7 | /data_structure/binary_tree_with_next.py | 38627859b41e07c735ae9f669cce37abcc3fc708 | [
"MIT"
] | permissive | Satily/leetcode_python_solution | b4aadfd1998877b5086b5423c670750bb422b2c8 | 3f05fff7758d650469862bc28df9e4aa7b1d3203 | refs/heads/master | 2021-07-18T07:53:10.387182 | 2021-07-17T06:30:09 | 2021-07-17T06:30:09 | 155,074,789 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | class Node:
def __init__(self, val, left, right, next):
self.val = val
self.left = left
self.right = right
self.next = next
| [
"houjiaxu@xiaomi.com"
] | houjiaxu@xiaomi.com |
6cea93fa60e30a2e6d8f45364e10ee8e09204506 | 137ba8a70dfcf94dfe7aeef1599341ecc06ca48f | /student_result/2018/03_data_analysis/23/makingStatistics.py | 19e0815625a1be0ea6eb549a59a92e0028581dfd | [] | no_license | smtamh/oop_python_ex | e1d3a16ade54717d6cdf1759b6eba7b27cfc974e | bd58ee3bf13dad3de989d5fd92e503d5ff949dd9 | refs/heads/master | 2020-09-22T08:35:19.847656 | 2019-11-13T02:47:06 | 2019-11-13T02:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,535 | py | import csv
import operator
from sys import exit
# file์ ์ด๊ธฐ ์ํด์ ์๋ํด๋ณด๊ณ , ์์ผ๋ฉด ์๋ ค์ฃผ๊ณ ์์ผ๋ฉด data๋ก ๋ณ๊ฒฝ
try:
f = open('2017๋
_3์_์ญ๋ณ_์ผ๋ณ_์๊ฐ๋๋ณ_์น์ฐจ์ธ์.csv', encoding='cp949')
except FileNotFoundError:
print("ํ์ผ์ด ์์ด์...")
exit(0)
else:
data = csv.reader(f)
try:
# ๊ณต๊ณต ๋ฐ์ดํฐ์ ์ฒซ์ค์ ์ด๊ฒ ์ด๋ค ๋ฐ์ดํฐ์ธ์ง ์ ํ์๋ ์นผ๋ผ์ค ๊ด๋ฆฌ์ฉ ๋ณ์
first_row = 0
# ๋ฐ์ดํฐ๋ฅผ ๋ถ์ํด์ ์ ์ฅํ set Type
date_set = set([])
station_set = set([])
time_list = []
table = {}
# ํ๊ฐ์ฉ ๊บผ๋ด์ ๋ฐ์ดํฐ ๋ถ์
for row in data:
if first_row < 1:
print(row)
for time in row[3:]:
time_list.append(time)
first_row += 1
continue
date_set.add(row[1])
station_set.add(row[0])
for station in station_set:
table[station] = {}
for time in time_list:
table[station][time] = 0
# ๋ ์ง์ ์ญ์ ์ ์ฅํ dictionary ๋ง๋ค๊ธฐ
date_count = {}
station_count = {}
for date in date_set:
date_count[date] = 0
for station in station_set:
station_count[station] = 0
f = open('2017๋
_3์_์ญ๋ณ_์ผ๋ณ_์๊ฐ๋๋ณ_์น์ฐจ์ธ์.csv', encoding='cp949')
data = csv.reader(f)
first_row = 0
for row in data:
if first_row < 1:
first_row += 1
continue
row_sum = 0
for count in row[3:]:
row_sum += int(count.replace(',', '')) # ์ซ์์ ์ผํ ์ง์ฐ๊ธฐ
# ํ์ํ๊ฑธ ์น๋ค ๋ํ์ธ์
date_count[row[1]] += row_sum
station_count[row[0]] += row_sum
maxnumber = 0 # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ๋ง์ ๋ ์ซ์
minnumber = 9999999999 # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ์ ์ ๋ ์ซ์
maxname = 'EMPTY' # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ๋ง์ ๋
minname = 'EMPTY' # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ์ ์ ๋
for date in date_set:
if date_count[date] > maxnumber: # maxnumber๋ณด๋ค ๋ง์ผ๋ฉด ๋ฃ์ผ์ธ์
maxnumber, maxname = date_count[date], date
if date_count[date] < minnumber: # minnumber๋ณด๋ค ์์ผ๋ฉด ๋ฃ์ผ์ธ์
minnumber, minname = date_count[date], date
maxnumber2 = 0 # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ๋ง์ ์ญ ์ซ์
minnumber2 = 9999999999 # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ์ ์ ์ญ ์ซ์
maxname2 = 'EMPTY' # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ๋ง์ ์ญ
minname2 = 'EMPTY' # ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ์ ์ ์ญ
for station in station_set:
if station_count[station] > maxnumber2: # maxnumber2๋ณด๋ค ๋ง์ผ๋ฉด ๋ฃ์ผ์ธ์
maxnumber2, maxname2 = station_count[station], station
if station_count[station] < minnumber2: # minnumber2๋ณด๋ค ์์ผ๋ฉด ๋ฃ์ผ์ธ์
minnumber2, minname2 = station_count[station], station
print('3์์ ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ๋ง์ ๋ ์ %s์ด๊ณ , ๊ทธ ์ธ์์ %d๋ช
์
๋๋ค.' % (maxname, maxnumber / 2)) # 2017๋
3์ 10์ผ์ ๋ฐ๊ทผํ ์ ๋ํต๋ น ํํต์ผ
print('3์์ ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ์ ์ ๋ ์ %s์ด๊ณ , ๊ทธ ์ธ์์ %d๋ช
์
๋๋ค.' % (minname, minnumber / 2))
print('3์์ ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ๋ง์ ์ญ์ %s์ด๊ณ , ๊ทธ ์ธ์์ %d๋ช
์
๋๋ค.' % (maxname2, maxnumber2 / 2))
print('3์์ ์น์ฐจ์ธ์์ด ๊ฐ์ฅ ์ ์ ์ญ์ %s์ด๊ณ , ๊ทธ ์ธ์์ %d๋ช
์
๋๋ค.' % (minname2, minnumber2 / 2))
except KeyboardInterrupt:
print("์ ์ ๋ฐฉํดํ์ญ๋๊น...")
| [
"kadragon@sasa.hs.kr"
] | kadragon@sasa.hs.kr |
f96f5becd7bd765e7f9622c7a824b9874bd1c601 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v3/services/payments_account_service_client_config.py | b7c380c80141520fd16fa559bd9870a52b0940d8 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 819 | py | config = {
"interfaces": {
"google.ads.googleads.v3.services.PaymentsAccountService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"ListPaymentsAccounts": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| [
"noreply@github.com"
] | fiboknacky.noreply@github.com |
42a9e04d357f8dfd10ea4d2e207ef19b805c6971 | 5c1ec474c579ad1280a2f6f71aaa5a9191f82f32 | /src/transmittals/migrations/0007_auto_20151104_1002.py | 2269a555c4ec5b82a8167e84e1180d3b1bef7ddb | [
"MIT"
] | permissive | Talengi/phase | bcc51837e6131fd65308728aa79380c37305d1a7 | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | refs/heads/master | 2022-12-09T15:42:14.459215 | 2021-06-28T08:12:21 | 2021-06-28T08:12:21 | 9,164,756 | 8 | 7 | NOASSERTION | 2022-12-08T01:18:49 | 2013-04-02T07:28:30 | Python | UTF-8 | Python | false | false | 713 | py | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('transmittals', '0006_auto_20151102_1511'),
]
operations = [
migrations.AlterModelOptions(
name='outgoingtransmittalrevision',
options={},
),
migrations.AlterModelOptions(
name='transmittalrevision',
options={},
),
migrations.AlterUniqueTogether(
name='outgoingtransmittalrevision',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='transmittalrevision',
unique_together=set([]),
),
]
| [
"thibault@miximum.fr"
] | thibault@miximum.fr |
bd2a0e28dded58ff6b977af4fc8276077205c093 | 4fa1a00bf3e3b6f7f5415a15ad93be6a702ff519 | /tests/unit/raptiformica/actions/spawn/test_start_compute_type.py | 8d5e8aba3d956e8e380dbc4ca35cb6f1c2f37dce | [
"MIT"
] | permissive | vdloo/raptiformica | 887a58f6842238c5fa62416b47439e9519e1e1bc | e2807e5e913312034161efcbd74525a4b15b37e7 | refs/heads/master | 2020-04-05T12:12:21.222189 | 2020-01-24T07:50:43 | 2020-01-24T07:59:44 | 64,013,772 | 21 | 2 | MIT | 2020-01-24T07:50:44 | 2016-07-23T11:45:47 | Python | UTF-8 | Python | false | false | 2,673 | py | from raptiformica.actions.spawn import start_compute_type
from tests.testcase import TestCase
class TestStartComputeType(TestCase):
def setUp(self):
self.retrieve_start_instance_config = self.set_up_patch(
'raptiformica.actions.spawn.retrieve_start_instance_config'
)
self.retrieve_start_instance_config.return_value = (
'https://github.com/vdloo/vagrantfiles',
'cd headless && vagrant up --provider=virtualbox',
"cd headless && vagrant ssh-config | grep HostName | awk '{print$NF}'",
"cd headless && vagrant ssh-config | grep Port | awk '{print$NF}'"
)
self.start_instance = self.set_up_patch(
'raptiformica.actions.spawn.start_instance'
)
self.get_first_server_type = self.set_up_patch(
'raptiformica.actions.spawn.get_first_server_type'
)
self.get_first_server_type.return_value = 'headless'
self.get_first_compute_type = self.set_up_patch(
'raptiformica.actions.spawn.get_first_compute_type'
)
self.get_first_server_type.return_value = 'docker'
def test_start_compute_type_retrieves_start_instance_config_for_default_types(self):
start_compute_type()
self.retrieve_start_instance_config.assert_called_once_with(
server_type=self.get_first_server_type.return_value,
compute_type=self.get_first_compute_type.return_value
)
def test_start_compute_type_starts_instance_of_the_default_compute_type(self):
start_compute_type()
self.start_instance.assert_called_once_with(
self.get_first_server_type.return_value,
self.get_first_compute_type.return_value,
*self.retrieve_start_instance_config.return_value
)
def test_start_compute_type_retrieves_start_instance_config_for_specified_types(self):
start_compute_type(server_type='htpc', compute_type='docker')
self.retrieve_start_instance_config.assert_called_once_with(
server_type='htpc',
compute_type='docker',
)
def test_start_compute_type_starts_instance_for_specified_compute_type(self):
start_compute_type(server_type='htpc', compute_type='docker')
self.start_instance.assert_called_once_with(
'htpc',
'docker',
*self.retrieve_start_instance_config.return_value
)
def test_start_compute_type_returns_connection_information(self):
ret = start_compute_type(server_type='htpc', compute_type='docker')
self.assertEqual(ret, self.start_instance.return_value)
| [
"rickvandeloo@gmail.com"
] | rickvandeloo@gmail.com |
9284ae9101f53194f31142295de5f7708c5fd96b | 462137348c3013fd1f389ae23557425d22497b36 | /24_days/Day 11/Question40.py | 6771623197488823cc33a9a153fa317d9179d323 | [] | no_license | wmemon/python_playground | f05b70c50c2889acd6353ba199fd725b75f48bb1 | 3b424388f92f81d82621645ee7fdbd4ac164da79 | refs/heads/master | 2022-11-23T05:09:30.828726 | 2020-07-28T12:47:03 | 2020-07-28T12:47:03 | 283,192,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | """
Write a program which accepts a string as input to print "Yes" if the string is
"yes" or "YES" or "Yes", otherwise print "No".
"""
# My use case will have some extra yes combinations.
if input("Please enter the string : ").upper() == "YES":
print("Yes")
else:
print("No")
| [
"wmemon100@gmail.com"
] | wmemon100@gmail.com |
d0d9ff6dff0239125de02f38a5cfc05731bd0d5c | f0181afd2eea9b086ce9487fb8d7fd949282140a | /matplotlib graphs/plot_16S_coverage.py | 19851813b19604658bd9e26bcdee383ff0ad8950 | [
"MIT"
] | permissive | linsalrob/EdwardsLab | 4a571676859c8b7238e733a0d3ad98ceb2e83c63 | 3c466acc07f1a56b575860ad26c92f900b272a53 | refs/heads/master | 2023-08-20T17:13:35.466103 | 2023-08-17T09:17:36 | 2023-08-17T09:17:36 | 25,702,093 | 36 | 25 | MIT | 2020-09-23T12:44:44 | 2014-10-24T18:27:16 | Python | UTF-8 | Python | false | false | 1,303 | py | import matplotlib.pyplot as plt
import numpy
x = []
y = []
with open('/home/redwards/Desktop/genus_species_analysis/ecoli_coverage.tsv', 'r') as fin:
#with open('/home/redwards/Desktop/genus_species_analysis/pseudo_coverage.txt', 'r') as fin:
for l in fin:
p=l.strip().split("\t")
x.append(float(p[0]))
y.append(float(p[1]))
fig = plt.figure()
ax = fig.add_subplot(111)
maxy = max(y)
ax.plot(x, y, color='r')
ax.plot(xs, kdepdf(xs), color='blue')
"""
These regions come from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2562909/
v1: 66-99
v2: 137-242
v3: 433-497
v4: 576-682
v5: 822-879
v6: 986-1043
v7: 1117-1173
v8: 1243-1294
"""
regions = [
[66,99], [137, 242],
[433, 497], [576, 682],
[822, 879], [986, 1043],
[1117, 1173], [1243, 1294]
]
illumina = [
[517, 809],
]
for r in illumina:
for x in range(r[0], r[1]):
ax.bar(x, maxy, color='lightgrey', edgecolor='lightgrey')
for r in regions:
for x in range(r[0], r[1]):
ax.bar(x, maxy, color='lightblue', edgecolor='lightblue')
ax.set_xlabel("Position in the E. coli 16S gene")
ax.set_ylabel("Coverage")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.show() | [
"raedwards@gmail.com"
] | raedwards@gmail.com |
b509123e763f848a534faef6ca7bf66fbac38cd5 | 49a259e069a8a5a930914dd338ecd8d80d58802c | /tests/settings.py | a49998d36d18c8bb6562037dab919dba8dfa5288 | [
"MIT"
] | permissive | furious-luke/django-test-without-migrations | 4d8e5e4abc32581ad3c891b4d5cdbd34344486ca | e8e01d23728480b1bb252ca9618d94ac50aa0acb | refs/heads/master | 2022-12-03T23:47:59.579641 | 2020-08-22T03:58:27 | 2020-08-22T03:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # coding: utf-8
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'tests.myapp',
'test_without_migrations',
)
SITE_ID=1,
SECRET_KEY='secret'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
| [
"henrique@bastos.net"
] | henrique@bastos.net |
8fd0aad80716033de237a858d69f03d893286add | e51742f3b8026d6ae44761445689ac47f1f2e495 | /generate_total_res.py | 0e2aefc56b4f39192a95a2a5c0507475062570e0 | [] | no_license | igridchyn/neuro-analysis | a2979692ea6a2c99dc0991bc371125f0ec6edb5d | c0b1bb6abc9b34328aa515ce4a9098f3cbc9f05a | refs/heads/main | 2023-06-26T09:48:46.250806 | 2021-07-29T09:34:53 | 2021-07-29T09:34:53 | 390,671,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | #!/usr/bin/env python
import os
from sys import argv
import struct
import numpy as np
if len(argv) < 3:
print 'USAGE: (1)<fet base> (2)<output file: total res>'
print 'GENERATE TOTAL RES FILE OUT OF THE SPIKE TIMESTAMPS IN THE FET FILES, NEEDS TETR CONFIG'
exit(0)
# read tetrode config - number of channels per tetrode
cwd = os.getcwd()
dirs = cwd.split('/')
ses = dirs[-1]
day = dirs[-2]
an = dirs[-3]
tconfpath = '/home/igor/code/ews/lfp_online/sdl_example/Res/tetr/tetr_' + an + '_' + day + '.conf'
if 'FULL' in ses:
tconfpath = tconfpath.replace('.conf', '_full.conf')
print 'WARNING: USING FULL TETRODE CONFIG!'
if not os.path.isfile(tconfpath):
print 'Cannot find the tetrode config at ', tconfpath
exit(1)
ftconf = open(tconfpath)
ftconf.readline()
nchan = []
for line in ftconf:
if len(line) > 4:
continue
else:
nchan.append(int(line))
print nchan
ares = []
#nfet = 12
tetr = 0
while os.path.isfile(argv[1] + str(tetr)):
print 'Read fet', tetr
nfet = nchan[tetr] * 2 + 4
f = open(argv[1] + str(tetr), 'rb')
tetr += 1
ares.append([])
while True:
buf = f.read(4*nfet)
if not buf:
break
if len(buf) < 4 * nfet:
print 'BUFFER CUT!'
continue
ar = struct.unpack('f'*nfet, buf)
# feats.append(ar)
buf = f.read(4)
if len(buf) < 4:
print 'BUFFER I CUT!'
continue
ares[-1].append(struct.unpack('i', buf)[0])
# DEBUG
#print ares[-1]
# merge res
ires = [0] * len(ares)
res = []
LR = len(ares)
DUM = 9000000000
for i in range(LR):
ares[i].append(DUM)
print 'LR = %d' % LR
while True:
ra = [ares[i][ires[i]] for i in range(LR)]
mn = min(ra)
am = np.argmin(ra)
ires[am] += 1
if mn == DUM:
break
else:
res.append(mn)
fout = open(argv[2], 'w')
for r in res:
fout.write(str(r) + '\n')
fout.close()
| [
"igor.gridchin@gmail.com"
] | igor.gridchin@gmail.com |
9b89017fe47bd60ff1ff9a950185dd96ca98b877 | 35e86125620183b15854ecca27a369b3ac17bf90 | /src/libs/send_email.py | 841ea79cd95328f244f8ae7174b72adeaf78247a | [
"Apache-2.0"
] | permissive | chu888chu888/Yearning | 4dfaa59abb4ce75316f519a0f46860c1385510f9 | 6813ced9cb333261da0481b1fa19698d6fd413d1 | refs/heads/master | 2020-03-16T07:40:24.778042 | 2018-05-07T13:13:22 | 2018-05-07T13:13:22 | 132,580,877 | 1 | 0 | Apache-2.0 | 2018-05-08T08:48:32 | 2018-05-08T08:48:32 | null | UTF-8 | Python | false | false | 5,358 | py | from libs import util
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
conf = util.conf_path()
from_addr = conf.mail_user
password = conf.mail_password
smtp_server = conf.smtp
smtp_port = conf.smtp_port
class send_email(object):
def __init__(self, to_addr=None):
self.to_addr = to_addr
def _format_addr(self, s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
def send_mail(self,mail_data=None, type=None):
if type == 0: #ๆง่ก
text = '<html><body><h1>Yearning ๅทฅๅๆง่ก้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>ๅฐๅ: <a href="%s">%s</a></p>' \
'<br><p>ๅทฅๅๅคๆณจ: %s</p>' \
'<br><p>็ถๆ: ๅทฒๆง่ก</p>' \
'<br><p>ๅคๆณจ: %s</p>' \
'</body></html>' %(
mail_data['workid'],
mail_data['to_user'],
mail_data['addr'],
mail_data['addr'],
mail_data['text'],
mail_data['note'])
elif type == 1: #้ฉณๅ
text = '<html><body><h1>Yearning ๅทฅๅ้ฉณๅ้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>ๅฐๅ: <a href="%s">%s</a></p>' \
'<br><p>็ถๆ: ้ฉณๅ</p>' \
'<br><p>้ฉณๅ่ฏดๆ: %s</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'],
mail_data['addr'],
mail_data['addr'],
mail_data['rejected'])
elif type == 2: ##ๆ้็ณ่ฏท
text = '<html><body><h1>Yearning ๆ้็ณ่ฏท้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>็ถๆ: ็ณ่ฏท</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'])
elif type == 3: ## ๆ้ๅๆ
text = '<html><body><h1>Yearning ๆ้ๅๆ้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>็ถๆ: ๅๆ</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'])
elif type == 4: ##ๆ้้ฉณๅ
text = '<html><body><h1>Yearning ๆ้้ฉณๅ้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>็ถๆ: ้ฉณๅ</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'])
elif type == 5: ##ๆฅ่ฏข็ณ่ฏท
text = '<html><body><h1>Yearning ๆฅ่ฏข็ณ่ฏท้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>็ถๆ: ๆไบค</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'])
elif type == 6: ##ๆฅ่ฏขๅๆ
text = '<html><body><h1>Yearning ๆฅ่ฏขๅๆ้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>็ถๆ: ๅๆ</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'])
elif type == 7: ##ๆฅ่ฏข้ฉณๅ
text = '<html><body><h1>Yearning ๆฅ่ฏข้ฉณๅ้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>็ถๆ: ้ฉณๅ</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'])
else: #ๆไบค
text = '<html><body><h1>Yearning ๅทฅๅๆไบค้็ฅ</h1>' \
'<br><p>ๅทฅๅๅท: %s</p>' \
'<br><p>ๅ่ตทไบบ: %s</p>' \
'<br><p>ๅฐๅ: <a href="%s">%s</a></p>' \
'<br><p>ๅทฅๅๅคๆณจ: %s</p>' \
'<br><p>็ถๆ: ๅทฒๆไบค</p>' \
'<br><p>ๅคๆณจ: %s</p>' \
'</body></html>' % (
mail_data['workid'],
mail_data['to_user'],
mail_data['addr'],
mail_data['addr'],
mail_data['text'],
mail_data['note'])
msg = MIMEText(text, 'html', 'utf-8')
msg['From'] = self._format_addr('Yearning_Admin <%s>' % from_addr)
msg['To'] = self._format_addr('Dear_guest <%s>' % self.to_addr)
msg['Subject'] = Header('Yearning ๅทฅๅๆถๆฏๆจ้', 'utf-8').encode()
server = smtplib.SMTP(smtp_server, int(smtp_port))
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [self.to_addr], msg.as_string())
server.quit()
| [
"392183@qq.com"
] | 392183@qq.com |
cd06efa75d8d5a00ea430b8b91512719d32770f0 | bb6ebff7a7f6140903d37905c350954ff6599091 | /third_party/WebKit/Source/modules/modules.gyp | 54427584d19150459e863f0f92c412ac4be1d441 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 6,397 | gyp | #
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'includes': [
'../build/scripts/scripts.gypi',
'../build/win/precompile.gypi',
'../bindings/modules/modules.gypi', # modules can depend on bindings/modules, but not on bindings
'modules.gypi',
],
'targets': [{
# GN version: //third_party/WebKit/Source/modules:modules
'target_name': 'modules',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/third_party/zlib/zlib.gyp:zlib',
'<(DEPTH)/third_party/sqlite/sqlite.gyp:sqlite',
'../config.gyp:config',
'../core/core.gyp:webcore',
'make_modules_generated',
],
'defines': [
'BLINK_IMPLEMENTATION=1',
'INSIDE_BLINK',
],
'include_dirs': [
# FIXME: Remove these once scripts generate qualified
# includes correctly: http://crbug.com/380054
'<(blink_core_output_dir)',
'<(blink_modules_output_dir)',
],
'sources': [
'<@(modules_files)',
'<@(bindings_modules_v8_generated_aggregate_files)',
],
'actions': [
{
# GN version: //third_party/WebKit/Source/modules:modules_fetch_polyfill
'action_name': 'FetchPolyfill',
'process_outputs_as_sources': 1,
'variables': {
'resources': [
'serviceworkers/polyfills/fetchPolyfill.js',
],
},
'inputs': [
'../build/scripts/make-file-arrays.py',
'<@(resources)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/blink/FetchPolyfill.h',
'<(SHARED_INTERMEDIATE_DIR)/blink/FetchPolyfill.cpp',
],
'action': [
'python',
'../build/scripts/make-file-arrays.py',
'--out-h=<(SHARED_INTERMEDIATE_DIR)/blink/FetchPolyfill.h',
'--out-cpp=<(SHARED_INTERMEDIATE_DIR)/blink/FetchPolyfill.cpp',
'--namespace=WebCore',
'<@(resources)',
],
},
{
# GN version: //third_party/WebKit/Source/modules:modules_cache_polyfill
'action_name': 'CachePolyfill',
'process_outputs_as_sources': 1,
'variables': {
'resources': [
'serviceworkers/polyfills/cachePolyfill.js',
],
},
'inputs': [
'../build/scripts/make-file-arrays.py',
'<@(resources)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/blink/CachePolyfill.h',
'<(SHARED_INTERMEDIATE_DIR)/blink/CachePolyfill.cpp',
],
'action': [
'python',
'../build/scripts/make-file-arrays.py',
'--out-h=<(SHARED_INTERMEDIATE_DIR)/blink/CachePolyfill.h',
'--out-cpp=<(SHARED_INTERMEDIATE_DIR)/blink/CachePolyfill.cpp',
'--namespace=WebCore',
'<@(resources)',
],
},
{
# GN version: //third_party/WebKit/Source/modules:modules_cache_storage_polyfill
'action_name': 'CacheStoragePolyfill',
'process_outputs_as_sources': 1,
'variables': {
'resources': [
'serviceworkers/polyfills/cacheStoragePolyfill.js',
],
},
'inputs': [
'../build/scripts/make-file-arrays.py',
'<@(resources)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/blink/CacheStoragePolyfill.h',
'<(SHARED_INTERMEDIATE_DIR)/blink/CacheStoragePolyfill.cpp',
],
'action': [
'python',
'../build/scripts/make-file-arrays.py',
'--out-h=<(SHARED_INTERMEDIATE_DIR)/blink/CacheStoragePolyfill.h',
'--out-cpp=<(SHARED_INTERMEDIATE_DIR)/blink/CacheStoragePolyfill.cpp',
'--namespace=WebCore',
'<@(resources)',
],
},
],
# Disable c4267 warnings until we fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, 4334, ]
},
{
# GN version: //third_party/WebKit/Source/modules:modules_testing
'target_name': 'modules_testing',
'type': 'static_library',
'dependencies': [
'../config.gyp:config',
'../core/core.gyp:webcore',
],
'defines': [
'BLINK_IMPLEMENTATION=1',
'INSIDE_BLINK',
],
'sources': [
'<@(modules_testing_files)',
],
},
{
# FIXME: should be in modules_generated.gyp
# GN version: //third_party/WebKit/Source/modules:make_modules_generated
'target_name': 'make_modules_generated',
'type': 'none',
'hard_dependency': 1,
'dependencies': [
#'generated_testing_idls',
'../core/core_generated.gyp:core_event_interfaces',
'../bindings/modules/generated.gyp:modules_event_generated',
'../config.gyp:config',
],
'sources': [
# bison rule
'../core/css/CSSGrammar.y',
'../core/xml/XPathGrammar.y',
],
'actions': [
],
}],
}
| [
"mrobbeloth@pdiarm.com"
] | mrobbeloth@pdiarm.com |
8f58b764cd10075c01b9acb32c25776d37936d9c | 6457989f738d52b8186628fa02c2243a569aaae9 | /golem/core/report.py | 1fbb265a59f0bc6251f03394b4f9bd8b724dc504 | [
"MIT"
] | permissive | matamehta/golem | 381941ff2d3e63bdffe81f8e52ff45f528ea7eb1 | 6314b31d8f0fb304a9f2d7dbcacf07d7ab3e01c6 | refs/heads/master | 2021-09-04T03:41:38.820326 | 2018-01-15T11:55:33 | 2018-01-15T11:55:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,073 | py | """Generate the report structure, json and screenshots"""
import json
import os
import uuid
def create_suite_execution_directory(workspace, project, suite_name, timestamp):
"""Create direcoty to store report for suite.
<workspace>/projects/<project>/reports/<suite_name>/<timestamp>/
"""
execution_directory = os.path.join(workspace, 'projects', project, 'reports',
suite_name, timestamp)
if not os.path.isdir(execution_directory):
try:
os.makedirs(execution_directory)
except:
pass
return execution_directory
def create_test_execution_directory(workspace, project, test_name, timestamp):
"""Create direcoty to store report for suite.
<workspace>/projects/<project>/reports/single_tests/<test_name>/<timestamp>/
"""
execution_directory = os.path.join(workspace, 'projects', project, 'reports',
'single_tests', test_name, timestamp)
if not os.path.isdir(execution_directory):
try:
os.makedirs(execution_directory)
except:
pass
return execution_directory
def create_report_directory(execution_directory, test_case_name, is_suite):
"""Create direcoty to store a single test report.
execution_directory takes the following format for suites:
<workspace>/projects/<project>/reports/<suite_name>/<timestamp>/
and this format for single tests
<workspace>/projects/<project>/reports/<suite_name>/<timestamp>/
The result for suites is:
<execution_directory>/<test_name>/<set_name>/
and for single tests is:
<execution_directory>/<set_name>/
"""
set_name = 'set_' + str(uuid.uuid4())[:6]
# create suite execution folder in reports directory
if is_suite:
report_directory = os.path.join(execution_directory, test_case_name, set_name)
else:
report_directory = os.path.join(execution_directory, set_name)
if not os.path.isdir(report_directory):
try:
os.makedirs(report_directory)
except:
pass
return report_directory
def generate_report(report_directory, test_case_name, test_data, result):
"""Generate the json report for a single test execution."""
json_report_path = os.path.join(report_directory, 'report.json')
short_error = ''
if result['error']:
short_error = '\n'.join(result['error'].split('\n')[-2:])
serializable_data = {}
for key, value in test_data.items():
try:
json.dumps('{"{}":"{}"}'.format(key, value))
serializable_data[key] = value
except:
serializable_data[key] = repr(value)
env_name = ''
if 'env' in test_data:
if 'name' in test_data.env:
env_name = test_data.env.name
browser = result['browser']
output_browser = result['browser']
if result['browser_full_name']:
output_browser = '{} - {}'.format(result['browser'], result['browser_full_name'])
elif browser == 'chrome-remote':
output_browser = 'chrome (remote)'
elif browser == 'chrome-headless':
output_browser = 'chrome (headless)'
elif browser == 'chrome-remote-headless':
output_browser = 'chrome (remote, headless)'
elif browser == 'firefox-remote':
output_browser = 'firefox (remote)'
# cast steps to str
steps = [str(x) for x in result['steps']]
report = {
'test_case': test_case_name,
'result': result['result'],
'steps': steps,
'description': result['description'],
'error': result['error'],
'short_error': short_error,
'test_elapsed_time': result['test_elapsed_time'],
'test_timestamp': result['test_timestamp'],
'browser': output_browser,
'test_data': serializable_data,
'environment': env_name,
'set_name': result['set_name']
}
with open(json_report_path, 'w', encoding='utf-8') as json_file:
json.dump(report, json_file, indent=4)
| [
"feo.luciano@gmail.com"
] | feo.luciano@gmail.com |
96a798e5581075e392ee35938448d26883a9f1f1 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /m8cikaKtFFsCZgsbL_2.py | fc662b71d5d16a6bd08875a495babe80ebc5882b | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,241 | py | """
Given a set of 3 jugs of water that have capacities of _a_ , _b_ , and _c_
liters, find the minimum number of operations performed before each jug has
_x_ , _y_ , and _z_ liters. Only jug C will start completely filled.
An operation is any of the following: A jug is emptied, a jug is filled, or
water is poured from one jug to another until one of the jugs is either empty
or full.
For example, jugs "A", "B", and "C" with capacities of 3, 5, and 8, where jugs
"A" and "B" start empty and "C" has the full 8, require 2 operations to reach
the state of 0, 3, and 5 liters in the jugs.
Create a function that, given an array of jug capacities `[A, B, C]` and an
goal state array `[x, y, z]`, returns the minimum number of operations needed
to reach the goal state. If the inputs are invalid or there is no solution,
return `"No solution."`
### Examples
waterjug([3, 5, 8], [0, 3, 5]) โ 2
waterjug([1, 3, 4], [0, 2, 2]) โ 3
waterjug([8, 17, 20], [0, 10, 10]) โ 9
waterjug([4, 17, 22], [2, 5, 15]) โ "No solution."
waterjug([3, 5, 8], [0, 0, 9]) โ "No solution."
### Notes
* The amount of water in a jug can never exceed the capacity of that jug.
* The total liters in the goal state must be equal to the capacity of jug "C".
"""
def process(state,i,others,capacities):
'''
Returns the jug states possible from permissible changes to state[i]
'''
possibles = []
temp = list(state)
temp[i] = 0 # can always empty it!
possibles.append(tuple(temp))
for pos in others:
temp = list(state)
pourable = min(temp[i],capacities[pos] - temp[pos])
temp[i] -= pourable
temp[pos] += pourable
possibles.append(tuple(temp)) # water poured from i.
โ
return possibles
def get_states(state, capacities):
'''
Returns a list of all the states possible from this one, based on
permissible actions
'''
states = []
size = len(state)
for i in range(size):
if state[i]: # it's got some water
others = sorted(set(range(size)) - {i})
states += process(state,i,others,capacities)
โ
return states
โ
def waterjug(capacities, target):
'''
Returns the minimum number of moves to reach target from the start state,
or 'No solution' if not possible, given constraints and operations as per
the instructions.
'''
CAPS = {i:jug for i, jug in enumerate(capacities)}
start = (0,0,CAPS[2])
target = tuple(target)
if sum(target) > start[2]:
return 'No solution.'
โ
q = [start]
visited = set()
path = {start:0}
โ
while q:
state = q.pop(0)
visited.add(state)
if state == target:
count = 0
current = target
while current != start:
count += 1
current = path[current]
โ
return count
โ
for next_state in get_states(state,CAPS): # all states possible from here
if next_state not in visited and next_state not in q:
path[next_state] = state # show it came from here
q.append(next_state)
โ
return 'No solution.' # did not find target
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0eee46f7d9008342f548f607d4c34cb4c7a6f240 | 17ef6c9ead83c2a2c18fe029ae3f6ba90d57b8f4 | /supervised_learning/0x02-tensorflow/7-evaluate.py | 1dbd2a4d21f495287ce86a4dbf4648887c3bcae0 | [] | no_license | shincap8/holbertonschool-machine_learning | ede0c2be6df44f91c125c4497cf5ac1b90f654fe | cfc519b3290a1b8ecd6dc94f70c5220538ee7aa0 | refs/heads/master | 2023-03-26T07:00:10.238239 | 2021-03-18T04:39:01 | 2021-03-18T04:39:01 | 279,436,819 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | #!/usr/bin/env python3
"""evaluates the output of a neural network"""
import tensorflow as tf
def evaluate(X, Y, save_path):
"""evaluates the output of a neural network"""
with tf.Session() as sess:
saver = tf.train.import_meta_graph(save_path + ".meta")
saver.restore(sess, save_path)
x = tf.get_collection("x")[0]
y = tf.get_collection("y")[0]
y_pred = tf.get_collection("y_pred")[0]
accuracy = tf.get_collection("accuracy")[0]
loss = tf.get_collection("loss")[0]
feed_dict = {x: X, y: Y}
forwardP = sess.run(y_pred, feed_dict)
acc = sess.run(accuracy, feed_dict)
losses = sess.run(loss, feed_dict)
return (forwardP, acc, losses)
| [
"shincap8@gmail.com"
] | shincap8@gmail.com |
0c4e3ecfd3618f83151f657b725d8b650a772191 | c6d852e5842cf6f74123445d20ff03876377ae26 | /lemon/python22/lemon_10_190906_ๆไปถๅๅผๅธธ/lemon_190906_็ปไน .py | 84c1e3a5697fe35286db96156072cd549231c8a0 | [] | no_license | songyongzhuang/PythonCode_office | 0b3d35ca5d58bc305ae90fea8b1e8c7214619979 | cfadd3132c2c7c518c784589e0dab6510a662a6c | refs/heads/master | 2023-02-13T14:06:10.610935 | 2021-01-14T09:11:32 | 2021-01-14T09:11:32 | 327,183,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,853 | py | # --*-- coding : utf-8 --*--
# Project : python_lemon_ไฝไธ
# Current file : lemon_190906.py
# Author : ๅคงๅฃฎ
# Create time : 2019-09-07 07:46
# IDE : PyCharm
# TODO ๆ้ฟๅพ่ฆ๏ผ่ฟๆญฅๅพ็๏ผๅ ๆฒน๏ผ
"""
# ๅฏผๅ
ฅๆจกๅ๏ผ่ทฏๅพๅค็
import os
# ่ทๅ็ฐๅจ็ๆไปถๅคน่ทฏๅพ
# ๅจๅช้่ฟ่กๅฐฑๆๅฐ่ฟ่ก็่ทฏๅพ๏ผๅจๅช้่ฟ่ก็python, ๅฐฑๆฏ่ทๅ่ฟไธชๆไปถๅคน
# ่ทๅๅฝๅ็ๅทฅไฝ็ฎๅฝ๏ผ่ฟ็งๆนๅผๅพๅฐไฝฟ็จ
# print(os.getcwd())
# ๆไปถ็็ปๅฏน่ทฏๅพ
# ็ปๅฏนไธไผๅ๏ผ้ๅธธๆ็จ
a = os.path.abspath(__file__)
# ่ทๅ่ทฏๅพ็ๆไปถๅคน่ทฏๅพ
b = os.path.dirname(os.path.abspath(__file__))
# print(os.path.dirname(a))
# ๅๅปบๆไปถ
# ่ทฏๅพๆผๆฅ
# ๅญ็ฌฆไธฒๆผๆฅ
# print('ๅญ็ฌฆไธฒๆผๆฅ๏ผ', end='')
# c = os.path.join(b, 'data', 'ddd')
# ่ทฏๅพๆผๆฅ
# print('่ทฏๅพๆผๆฅ๏ผ', end='')
# ไธไธชๅๆๆ ่กจ็คบ่ฝฌ็งป๏ผ้่ฆๅไธคไธช
# ่ฟ้่ฆๅค็ๆไฝ็ณป็ป๏ผ่ฟๆๅๆๆ
# print(b + '\\')
# ๅๅปบๆไปถๅคน, mkdir, ่ฆไธๅฑไธๅฑๅๅปบ
# os.mkdir(c)
# ๅคๆญๆไปถๆฏๅฆๅญๅจ
# os.path.exists()
# c = os.path.join(b, 'data', 'ddd')
# c_01 = os.path.dirname(c)
# if os.path.exists(os.path.dirname(c)):
# os.mkdir(c_01)
# else:
# print("ๆไปถๅคนไธๅญๅจ")
# ๅๅปบไธไธชๆไปถ
# os.mkdir(os.path.join(b, 'data'))
# ๅคๆญๆไธช่ทฏๅพๆฏๅฆๅญๅจ ่ฟๅ็ๆฏๅธๅฐ็ฑปๅ
# print(os.path.exists(os.path.join(b, 'data')))
# ๅคๆญๆฏๅฆๆฏไธไธชๆไปถๅคน
# print(os.path.isdir(os.path.join(b, 'data')))
# ๅคๆญๆฏๅฆๆฏไธไธชๆไปถ
# print(os.path.isfile(os.path.join(b, 'data')))
"""
""" ๅ
็ฝฎๅฝๆฐ
a = print('hello')
print(a) # None
mylist = ['ๅๅ', 'ๆข
ๆข
', 'ๆฃๆฃ็ณ']
c = mylist.append('ๅๅ')
print(c) # None
# print(mylist.append('ๅฐ้จ')) # None
# ๅๆถ่ทๅๅผๅ็ดขๅผ
for index, w in enumerate(mylist):
print(index, w)
"""
# -------------------- ๆไปถๆไฝ --------------------------------
"""
# ๆไปถ่ฏปๅ
# ๅ
็ฝฎๅฝๆฐ
# open()
# f = open('demo.txt', encoding='utf-8')
# ่ฏปๆไปถ็ๅ
ๅฎน
# print(f.read())
# ๅๆไปถ
# f = open('new_fil.txt', mode='r', encoding='utf-8')
# f.write('ไบบ็่ฆ่ฏป๏ผๆ็จpython')
# print(f.readlines())
# ๅ
ณ้ญๆไปถ๏ผๅฝๆง่กไบๆๅผๆไปถๆไฝ๏ผไธๅฎ่ฆ่ฎฐๅพๅ
ณ้ญ
f.close()
# with open('demo.txt', mode='r', encoding='utf-8') as f:
# print(f.read())
"""
# -------------------- ๅผๅธธๅค็ --------------------------------
# ๅผๅธธๅค็
try:
1/0
except ZeroDivisionError: # ้่ฏฏ็ฑปๅไธๅฎ่ฆ่ฐ่ฏ๏ผไธ่ฝ็จๅซ็ไปฃๆฟ
print('ไธ่ฝ็ๅฐๅงๅง')
except IndexError:
print('ไธ่ฝๅปๆ็ฏฎ็')
# try้ๅฐ้่ฏฏไปฃ็ ๅฐฑ็ปๆญขๅฐฑไธๆง่ก๏ผๅคๆญ้่ฏฏ็ฑปไผผๆฏๅฆๆฏexceptๅ้ข็้่ฏฏ็ฑปๅ
# ๆพๅฏนๅบ็้่ฏฏ็ฑปๅ๏ผๅคๆญๆๆ็except๏ผๆพๅฐๅฐฑ่ฟ่กไธ้ข็ไปฃ็ ๏ผๆฒกๆๅฐฑๆๆๆ็except้ฝๆง่กๅฎๆฏ
| [
"songyongzhuang9@163.com"
] | songyongzhuang9@163.com |
9f0c61d141e15d5bda4ae5c8a3d3a1dc93e46a3a | 9c6d3acf0843446c2c26a2cf701031946576482e | /pywhich | 3019c3d0b57b2ef7cf6ced55d48f4fa9b712df25 | [] | no_license | kumar303/toolbox | 5165c55358e108ca4370372ff564f2959b741f97 | 9ed0af6f11f15b248fc36f9d87c45a55b4178a84 | refs/heads/master | 2021-07-08T17:56:22.177215 | 2016-09-19T04:17:07 | 2016-09-19T04:17:07 | 6,014,625 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,791 | #!/usr/bin/env python
import sys
import subprocess
import os
import optparse
import inspect
from pprint import pprint
def print_path():
print "PYTHONPATH: %s" % os.getenv('PYTHONPATH', None)
pprint(sys.path)
def main():
"""prints __file__ for module w/ various options"""
p = optparse.OptionParser(
usage=('%prog module[.path]' + "\n\n" + inspect.getdoc(main)))
p.add_option('-e', '--edit', action='store_true',
help='Edit the py file if there is one (will open the directory if it\'s a module)')
p.add_option('--verbose', action='store_true',
help='show path info when module is not found')
p.add_option('-p','--path', action='store_true',
help='just print path info (for the inquisitive types)')
p.add_option('--debug', action='store_true',
help='show the traceback when there are import errors')
p.add_option('-V','--version', action='store_true',
help="also look for and print module.__version__ or module.VERSION")
(options, args) = p.parse_args()
if options.path:
print_path()
else:
try:
namepath, = args
except ValueError:
p.error('incorrect args')
div = namepath.rfind('.')
if div != -1:
name = namepath[0:div]
fromlist = [namepath[div+1:]]
else:
name = namepath
fromlist = []
try:
mod = __import__(name, globals(), locals(), fromlist)
except ImportError:
if options.debug:
raise
print >>sys.stderr, "module '%s' not found" % namepath
if options.verbose:
print_path()
else:
if len(fromlist):
mod = getattr(mod, fromlist[0])
print mod.__file__
if options.version:
if hasattr(mod, '__version__'):
print mod.__version__
elif hasattr(mod, 'VERSION'):
# django
print mod.VERSION
else:
print >>sys.stderr, "no version info available"
if options.edit:
f = mod.__file__
if f.endswith('pyc'):
f = f[0:-1]
if f.endswith('__init__.py'):
f = f[0:-len('__init__.py')]
# can't use EDITOR here because that will launch
# a blocking editor for commit messages, etc
cmd = '%s "%s"' % ('mate', f)
if options.verbose:
print "will edit with: %s" % cmd
subprocess.call(cmd, shell=True, env={'PATH':os.environ['PATH']})
if __name__ == '__main__':
main() | [
"kumar.mcmillan@gmail.com"
] | kumar.mcmillan@gmail.com | |
1ff0410d1fad86254f59a327b50ce3a9b6d0eb9a | 5532cef9f8f9aa5fc70967dae7bff6f854872333 | /๋คํธ์ํฌ.py | 3df35a38d3a28b44f803072f3c7c46dfb062ef60 | [] | no_license | shinhaeran/Algorithm | e279903a98b6fa70ac0ffefc106ea9476d05a71a | 63147d9a6b6057fe359899d1074ec3e3f55ffc68 | refs/heads/master | 2021-06-20T05:47:23.052542 | 2021-01-14T16:34:27 | 2021-01-14T16:34:27 | 164,627,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | def solution(n, computers):
answer = 0
from collections import deque
q = deque()
visit = [False for _ in range(n)]
for j in range(n):
if visit[j] == False:
q.appendleft(j)
visit[j] = True
while q:
node = q.pop()
for i in range(n):
if computers[node][i]==1 and visit[i] == False:
visit[i] = True
q.appendleft(i)
answer += 1
return answer | [
"haeran97@naver.com"
] | haeran97@naver.com |
77f63e4b110f5ab90eee9fa693cb875e91c470d1 | c67bd5f97fb35b2a5df95cb431f48780cdcb4d55 | /functional_tests/management/commands/create_session.py | a59471cb1ebb651b794d7156434e65f45ebd9175 | [] | no_license | ibrobabs/tdd | 6cd2a2d0d6992938a24ccd800311b7531703728d | 368d521039bf3b70b81eab286f45ebe3eed313eb | refs/heads/master | 2021-08-22T08:50:10.116194 | 2017-11-29T20:12:03 | 2017-11-29T20:12:03 | 111,754,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, SESSION_KEY, get_user_model
from django.contrib.sessions.backends.db import SessionStore
from django.core.management.base import BaseCommand
User = get_user_model()
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('email')
def handle(self, *args, **options):
session_key = create_pre_authenticated_session(options['email'])
self.stdout.write(session_key)
def create_pre_authenticated_session(email):
user = User.objects.create(email=email)
session = SessionStore()
session[SESSION_KEY] = user.pk
session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
session.save()
return session.session_key | [
"babskolawole@gmail.com"
] | babskolawole@gmail.com |
a4eed4214bc25f6e769da2017dbeee161ec01dc5 | 70450f0c551adf47b450468e424f4f90bebfb58d | /dataclasses/resources/test/test_linear_tree.py | 48f2e0e7be39bb6c93defc05bc95c21772df6453 | [
"MIT"
] | permissive | hschwane/offline_production | ebd878c5ac45221b0631a78d9e996dea3909bacb | e14a6493782f613b8bbe64217559765d5213dc1e | refs/heads/master | 2023-03-23T11:22:43.118222 | 2021-03-16T13:11:22 | 2021-03-16T13:11:22 | 280,381,714 | 0 | 0 | MIT | 2020-07-17T09:20:29 | 2020-07-17T09:20:29 | null | UTF-8 | Python | false | false | 3,145 | py | #!/usr/bin/env python
import sys, os
infiles = sys.argv[1:]
if len(infiles) == 0:
infiles = [os.path.expandvars('$I3_TESTDATA/dataclasses/9036_coinc.i3.bz2')]
import unittest, math
from icecube import icetray, dataio, dataclasses
from icecube.dataclasses import I3LinearizedMCTree, I3MCTree
class LinearTreeTest(unittest.TestCase):
def setUp(self):
self.mctree = self.frame['I3MCTree']
self.re_mctree = self.frame['I3LinearizedMCTree']
def testSize(self):
self.assertEquals(len(self.mctree), len(self.re_mctree))
def assertEquivalent(self, d1, d2, tol=None):
"""One-stop shop for asserting equality to within a tolerance"""
if math.isnan(d1) and math.isnan(d2):
pass
else:
if tol is None:
self.assertEquals(d1, d2)
elif abs(d1-d2) >= tol:
raise AssertionError("|%s - %s| >= %s" % (d1, d2, tol))
def assertEqualParticle(self, p1, p2):
self.assertEquivalent(p1.dir.zenith, p2.dir.zenith)
self.assertEquivalent(p1.dir.azimuth, p2.dir.azimuth)
self.assertEquivalent(p1.pos.x, p2.pos.x, 5e-1)
self.assertEquivalent(p1.pos.y, p2.pos.y, 5e-1)
self.assertEquivalent(p1.pos.z, p2.pos.z, 5e-1)
self.assertEquivalent(p1.time, p2.time, 1e-2)
self.assertEquivalent(p1.energy, p2.energy, 1e-2)
self.assertEquivalent(p1.length, p2.length)
self.assertEquivalent(p1.speed, p2.speed)
self.assertEquals(p1.type, p2.type)
self.assertEquals(p1.location_type, p2.location_type)
self.assertEquals(p1.shape, p2.shape)
self.assertEquals(p1.fit_status, p2.fit_status)
self.assertEquals(p1.major_id, p2.major_id)
self.assertEquals(p1.minor_id, p2.minor_id)
def testEquivalence(self):
for i, (raw, reco) in enumerate(zip(self.mctree, self.re_mctree)):
try:
self.assertEqualParticle(raw, reco)
except AssertionError:
print(i)
print(raw)
print(reco)
print(self.mctree)
print(self.re_mctree)
raise
from I3Tray import I3Tray
# first round: convert and serialize
tray = I3Tray()
outfile = 'i3linearizedmctree_tmp.i3.bz2'
tray.AddModule('I3Reader', 'reader', filenamelist=infiles)
tray.AddModule('Keep', 'keeper', Keys=['I3MCTree'])
# force re-serialization of original I3MCTree to ensure that particle IDs
# in the tree are unique
def clone(frame):
mctree = frame['I3MCTree']
del frame['I3MCTree']
frame['I3MCTree'] = mctree
frame['I3LinearizedMCTree'] = I3LinearizedMCTree(mctree)
tray.Add(clone, Streams=[icetray.I3Frame.DAQ])
tray.AddModule('I3Writer', 'writer',
Streams=[icetray.I3Frame.DAQ, icetray.I3Frame.Physics],
# DropOrphanStreams=[icetray.I3Frame.DAQ],
filename=outfile)
tray.Execute(100)
# second round: read back and test
tray = I3Tray()
tray.AddModule('I3Reader', 'reader', filenamelist=[outfile])
tray.AddModule(icetray.I3TestModuleFactory(LinearTreeTest), 'testy',
Streams=[icetray.I3Frame.DAQ])
tray.AddModule('Delete', 'kill_mctree', Keys=['I3MCTree'])
outfile = 'i3linearizedmctree_compact.i3.bz2'
tray.AddModule('I3Writer', 'writer',
Streams=[icetray.I3Frame.DAQ, icetray.I3Frame.Physics],
# DropOrphanStreams=[icetray.I3Frame.DAQ],
filename=outfile)
tray.Execute(100)
| [
"aolivas@umd.edu"
] | aolivas@umd.edu |
7da4176db0a193e31b87080155d1f1c5431f0cb0 | 673bf701a310f92f2de80b687600cfbe24612259 | /misoclib/com/liteeth/test/mac_core_tb.py | fe6451bd52e926d0514ca3b1c34b58697fdbc224 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mogorman/misoc | d78340a9bf67feaede20e8cac473bcfddbd186a3 | 4ec49e2aadcff0c3ca34ebd0d35013d88f4d3e1f | refs/heads/master | 2021-01-18T05:38:39.670977 | 2015-03-10T05:37:52 | 2015-03-10T05:37:52 | 30,672,191 | 1 | 0 | null | 2015-02-11T22:05:05 | 2015-02-11T22:05:05 | null | UTF-8 | Python | false | false | 2,217 | py | from migen.fhdl.std import *
from migen.bus import wishbone
from migen.bus.transactions import *
from migen.sim.generic import run_simulation
from misoclib.com.liteeth.common import *
from misoclib.com.liteeth.mac.core import LiteEthMACCore
from misoclib.com.liteeth.test.common import *
from misoclib.com.liteeth.test.model import phy, mac
class TB(Module):
def __init__(self):
self.submodules.phy_model = phy.PHY(8, debug=False)
self.submodules.mac_model = mac.MAC(self.phy_model, debug=False, loopback=True)
self.submodules.core = LiteEthMACCore(phy=self.phy_model, dw=8, with_hw_preamble_crc=True)
self.submodules.streamer = PacketStreamer(eth_phy_description(8), last_be=1)
self.submodules.streamer_randomizer = AckRandomizer(eth_phy_description(8), level=50)
self.submodules.logger_randomizer = AckRandomizer(eth_phy_description(8), level=50)
self.submodules.logger = PacketLogger(eth_phy_description(8))
# use sys_clk for each clock_domain
self.clock_domains.cd_eth_rx = ClockDomain()
self.clock_domains.cd_eth_tx = ClockDomain()
self.comb += [
self.cd_eth_rx.clk.eq(ClockSignal()),
self.cd_eth_rx.rst.eq(ResetSignal()),
self.cd_eth_tx.clk.eq(ClockSignal()),
self.cd_eth_tx.rst.eq(ResetSignal()),
]
self.comb += [
Record.connect(self.streamer.source, self.streamer_randomizer.sink),
Record.connect(self.streamer_randomizer.source, self.core.sink),
Record.connect(self.core.source, self.logger_randomizer.sink),
Record.connect(self.logger_randomizer.source, self.logger.sink)
]
def gen_simulation(self, selfp):
selfp.cd_eth_rx.rst = 1
selfp.cd_eth_tx.rst = 1
yield
selfp.cd_eth_rx.rst = 0
selfp.cd_eth_tx.rst = 0
for i in range(8):
packet = mac.MACPacket([i for i in range(64)])
packet.target_mac = 0x010203040506
packet.sender_mac = 0x090A0B0C0C0D
packet.ethernet_type = 0x0800
packet.encode_header()
yield from self.streamer.send(packet)
yield from self.logger.receive()
# check results
s, l, e = check(packet, self.logger.packet)
print("shift "+ str(s) + " / length " + str(l) + " / errors " + str(e))
if __name__ == "__main__":
run_simulation(TB(), ncycles=4000, vcd_name="my.vcd", keep_files=True)
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
d820de41a632d5b4955afd754238bc982d0cf9b1 | 288a00d2ab34cba6c389b8c2444455aee55a8a95 | /tests/expect-fail23/recipe-578426.py | 60637cc14c38e9c26bd1f1a842b644f93d3eea1e | [
"BSD-2-Clause"
] | permissive | JohannesBuchner/pystrict3 | ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb | 18b0dd369082422f9bf0f89c72e7acb53a49849c | refs/heads/master | 2023-08-14T06:37:37.954880 | 2023-07-13T11:16:38 | 2023-07-13T11:16:38 | 268,571,175 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | # Logistic Map Fractal
# http://en.wikipedia.org/wiki/Logistic_map
# FB - 20130118
import random
import math
from PIL import Image, ImageDraw
imgx = 800; imgy = 800
image = Image.new("RGB", (imgx, imgy))
draw = ImageDraw.Draw(image)
pixels = image.load()
maxIt = 256
xa = -0.5; xb = 1.5
ya = -1.0; yb = 1.0
zEsc = 1000
r = random.random() + 3.0
def f(z):
for i in range(maxIt):
z = r * z * (1.0 - z)
if abs(z) > zEsc: break
return z
maxAbsX = 0.0; maxAbsY = 0.0; maxAbsZ = 0.0
percent = 0
for ky in range(imgy):
pc = 100 * ky / (imgy - 1)
if pc > percent: percent = pc; print('%' + str(percent))
y0 = ya + (yb - ya) * ky / (imgy - 1)
for kx in range(imgx):
x0 = xa + (xb - xa) * kx / (imgx - 1)
z = f(complex(x0, y0))
if abs(z.real) > maxAbsX: maxAbsX = abs(z.real)
if abs(z.imag) > maxAbsY: maxAbsY = abs(z.imag)
if abs(z) > maxAbsZ: maxAbsZ = abs(z)
percent = 0
for ky in range(imgy):
pc = 100 * ky / (imgy - 1)
if pc > percent: percent = pc; print('%' + str(percent))
y0 = ya + (yb - ya) * ky / (imgy - 1)
for kx in range(imgx):
x0 = xa + (xb - xa) * kx / (imgx - 1)
z = f(complex(x0, y0))
v0 = int(255 * abs(z.real) / maxAbsX)
v1 = int(255 * abs(z.imag) / maxAbsY)
v2 = int(255 * abs(z) / maxAbsZ)
v3 = int(255 * abs(math.atan2(z.imag, z.real)) / math.pi)
v = v3 * 256 ** 3 + v2 * 256 ** 2 + v1 * 256 + v0
colorRGB = int(16777215 * v / 256 ** 4)
red = int(colorRGB / 65536)
grn = int(colorRGB / 256) % 256
blu = colorRGB % 256
pixels[kx, ky] = (red, grn, blu)
draw.text((0, 0), "r = " + str(r), (0, 255, 0))
image.save("LogisticMapFractal.png", "PNG")
| [
"johannes.buchner.acad@gmx.com"
] | johannes.buchner.acad@gmx.com |
1e34c89001d9c56791e2c92e76576ca73596f8a4 | 1e013dc5f0de0f61e27f2867557803a01c01f4da | /Language/python/python_source_analysis/10/1_if_control.py | d03c33afb41a9966807a16eef551686442994b67 | [] | no_license | chengyi818/kata | a2941ce8675c6e7a47169a0eae4c757d3f6f5bf9 | a7cb7ad499037bcc168aaa0eaba857b33c04ef14 | refs/heads/master | 2023-04-10T18:39:09.518433 | 2023-01-08T15:22:12 | 2023-01-08T15:22:12 | 53,040,540 | 1 | 0 | null | 2023-03-25T00:46:51 | 2016-03-03T10:06:58 | C++ | UTF-8 | Python | false | false | 167 | py | a = 1
if a > 10:
print("a > 10")
elif a <= -2:
print("a <= -2")
elif a != 1:
print("a != 1")
elif a == 1:
print("a == 1")
else:
print("unknown a")
| [
"chengyi818@foxmail.com"
] | chengyi818@foxmail.com |
98dfeb6a3b6a918f46e6f92af1d4b1d42784d49f | 8839bd1f2e35726b6c8066985690fa2fa86b09a6 | /3.pyWiFi-ESP32/1.ๅบ็กๅฎ้ช/2.ๆ้ฎ/main.py | 91baf700c6b37572d1b5dd3430a59b957bf8fbdf | [
"MIT"
] | permissive | elektrik-elektronik-muhendisligi/MicroPython-Examples-1 | a9532b06aba470f7f26f841929f4fb145549f70b | f7b08e95ff73e3417af21918c9c6bcf2f83281c6 | refs/heads/master | 2021-05-25T22:58:36.207098 | 2020-04-01T09:50:53 | 2020-04-01T09:50:53 | 253,956,073 | 1 | 0 | null | 2020-04-08T01:39:46 | 2020-04-08T01:39:45 | null | UTF-8 | Python | false | false | 615 | py | '''
ๅฎ้ชๅ็งฐ๏ผๆ้ฎ
็ๆฌ๏ผv1.0
ๆฅๆ๏ผ2019.8
ไฝ่
๏ผ01Studio
่ฏดๆ๏ผ้่ฟๆ้ฎๆนๅLED็ไบฎ็ญ็ถๆ
'''
from machine import Pin
import time
LED=Pin(2,Pin.OUT) #ๆๅปบLEDๅฏน่ฑก,ๅผๅง็็ญ
KEY=Pin(0,Pin.IN,Pin.PULL_UP) #ๆๅปบKEYๅฏน่ฑก
state=0 #LEDๅผ่็ถๆ
while True:
if KEY.value()==0: #ๆ้ฎ่ขซๆไธ
time.sleep_ms(10) #ๆถ้คๆๅจ
if KEY.value()==0: #็กฎ่ฎคๆ้ฎ่ขซๆไธ
state=not state #ไฝฟ็จnot่ฏญๅฅ่้~่ฏญๅฅ
LED.value(state) #LED็ถๆ็ฟป่ฝฌ
while not KEY.value(): #ๆฃๆตๆ้ฎๆฏๅฆๆพๅผ
pass
| [
"237827161@qq.com"
] | 237827161@qq.com |
b91a8a0d33e8b2ae6e9ad85643f0c5c3958d728b | a7bdecf855e98bd859ddd7275415b150b8fca2e3 | /OLDBOY/work/atm/atm/core/auth.py | ec5509375b03298c7092df6a2062cd78cfb13969 | [] | no_license | qgw258/python | dfa0cb21e4438f2e36d64d716753bf8ae45892b8 | 266d873dadd9bd60a42f43334cd92037d1aaf1cd | refs/heads/master | 2023-02-16T01:43:16.611105 | 2020-12-09T08:59:27 | 2020-12-09T08:59:27 | 281,035,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | #!_*_coding:utf-8_*_
import os
from core import db_handler
from conf import settings
from core import logger
import json
import time
def login_required(func):
"้ช่ฏ็จๆทๆฏๅฆ็ปๅฝ"
def wrapper(*args,**kwargs):
#print('--wrapper--->',args,kwargs)
if args[0].get('is_authenticated'):
return func(*args,**kwargs)
else:
exit("User is not authenticated.")
return wrapper
def acc_auth(account,password):
'''
account auth func
:param account: credit account number
:param password: credit card password
:return: if passed the authentication , retun the account object, otherwise ,return None
'''
db_path = db_handler.db_handler(settings.DATABASE)
account_file = "%s/%s.json" %(db_path,account)
print(account_file)
if os.path.isfile(account_file):
with open(account_file,'r') as f:
account_data = json.load(f)
if account_data['password'] == password:
exp_time_stamp = time.mktime(time.strptime(account_data['expire_date'], "%Y-%m-%d"))
if time.time() >exp_time_stamp:
print("\033[31;1mAccount [%s] has expired,please contact the back to get a new card!\033[0m" % account)
else: #passed the authentication
return account_data
else:
print("\033[31;1mAccount ID or password is incorrect!\033[0m")
else:
print("\033[31;1mAccount [%s] does not exist!\033[0m" % account)
def acc_auth2(account,password):
'''
ไผๅ็่ฎค่ฏๆฅๅฃ
:param account: credit account number
:param password: credit card password
:return: if passed the authentication , retun the account object, otherwise ,return None
'''
db_api = db_handler.db_handler()
data = db_api("select * from accounts where account=%s" % account)
if data['password'] == password:
exp_time_stamp = time.mktime(time.strptime(data['expire_date'], "%Y-%m-%d"))
if time.time() > exp_time_stamp:
print("\033[31;1mAccount [%s] has expired,please contact the back to get a new card!\033[0m" % account)
else: # passed the authentication
return data
else:
print("\033[31;1mAccount ID or password is incorrect!\033[0m")
def acc_login(user_data,log_obj):
'''
account login func
:user_data: user info data , only saves in memory
:return:
'''
retry_count = 0
while user_data['is_authenticated'] is not True and retry_count < 3 :
account = input("\033[32;1maccount:\033[0m").strip()
password = input("\033[32;1mpassword:\033[0m").strip()
auth = acc_auth2(account, password)
if auth: #not None means passed the authentication
user_data['is_authenticated'] = True
user_data['account_id'] = account
#print("welcome")
return auth
retry_count +=1
else:
log_obj.error("account [%s] too many login attempts" % account)
exit()
| [
"admin@example.com"
] | admin@example.com |
e6f4170cfd5261d3f2545655d590490c7a07f63c | ca5de66d73dcf62b3d3f9014f4577ed3119054c5 | /ivy/debugger.py | a521676b0bc0dc0397a710ef815d44c612f3eea6 | [
"Apache-2.0"
] | permissive | MZSHAN/ivy | 172dea365e8a903d3548b786cbb0f10d4fd03406 | 02ac7e8ee2202563049116186fe6595313d65cc1 | refs/heads/master | 2023-08-20T19:02:57.389728 | 2021-10-23T12:34:54 | 2021-10-23T12:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,458 | py | # global
import ivy
import pdb
import logging
# local
from ivy.wrapper import _wrap_or_unwrap_methods, NON_WRAPPED_METHODS
queue_timeout = None
debug_mode_val = False
# Methods #
def _wrap_method_for_debugging(fn):
if hasattr(fn, '__name__') and (fn.__name__[0] == '_' or fn.__name__ in
set(NON_WRAPPED_METHODS + ['has_nans', 'is_array', 'value_is_nan', 'reduce_sum',
'to_scalar'])):
return fn
if hasattr(fn, 'wrapped_for_debugging') and fn.wrapped_for_debugging:
return fn
def _method_wrapped(*args, **kwargs):
def _check_nans(x):
if ivy.is_array(x) and ivy.has_nans(x):
if debug_mode_val == 'exception':
raise Exception('found nans in {}'.format(x))
else:
logging.error('found nans in {}'.format(x))
pdb.set_trace()
return x
ivy.nested_map(args, _check_nans)
ivy.nested_map(kwargs, _check_nans)
ret = fn(*args, **kwargs)
ivy.nested_map(ret, _check_nans)
return ret
if hasattr(fn, '__name__'):
_method_wrapped.__name__ = fn.__name__
_method_wrapped.wrapped_for_debugging = True
_method_wrapped.inner_fn = fn
return _method_wrapped
def _unwrap_method_from_debugging(method_wrapped):
if not hasattr(method_wrapped, 'wrapped_for_debugging') or not method_wrapped.wrapped_for_debugging:
return method_wrapped
return method_wrapped.inner_fn
def _wrap_methods_for_debugging():
return _wrap_or_unwrap_methods(_wrap_method_for_debugging)
def _unwrap_methods_from_debugging():
return _wrap_or_unwrap_methods(_unwrap_method_from_debugging)
# Mode #
def set_debug_mode(debug_mode_in='exception'):
assert debug_mode_in in ['breakpoint', 'exception']
global debug_mode_val
debug_mode_val = debug_mode_in
global queue_timeout
queue_timeout = ivy.queue_timeout()
ivy.set_queue_timeout(None)
_wrap_methods_for_debugging()
def set_breakpoint_debug_mode():
set_debug_mode('breakpoint')
def set_exception_debug_mode():
set_debug_mode('exception')
def unset_debug_mode():
global debug_mode_val
debug_mode_val = False
_unwrap_methods_from_debugging()
global queue_timeout
ivy.set_queue_timeout(queue_timeout)
def debug_mode():
return debug_mode_val
| [
"daniel.lenton11@imperial.ac.uk"
] | daniel.lenton11@imperial.ac.uk |
7f8ebaaad81f62f16eb72bd6727799abbb2751f5 | 387ad3775fad21d2d8ffa3c84683d9205b6e697d | /testsuite/trunk/epath/print_ep_001.py | 96f85a9378956c0aaaa711313dcef7b4fefbfba3 | [] | no_license | kodiyalashetty/test_iot | 916088ceecffc17d2b6a78d49f7ea0bbd0a6d0b7 | 0ae3c2ea6081778e1005c40a9a3f6d4404a08797 | refs/heads/master | 2020-03-22T11:53:21.204497 | 2018-03-09T01:43:41 | 2018-03-09T01:43:41 | 140,002,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #!/usr/bin/env python
"""
(C) Copyright IBM Corp. 2008
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This
file and program are licensed under a BSD style license. See
the Copying file included with the OpenHPI distribution for
full licensing terms.
Authors:
Suntrupth S Yadav <suntrupth@in.ibm.com>
"""
# oh_print_ep: Zero elements entity path testcase.
import unittest
from openhpi import *
class TestSequence(unittest.TestCase):
def runTest(self):
offsets = 0
ep=SaHpiEntityPathT()
err = oh_print_ep(ep, offsets)
self.assertEqual (err!=None,True)
if __name__=='__main__':
unittest.main()
| [
"suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26"
] | suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26 |
163dabeaddbcfe77c740efbd2d6099cfe3bdcd9b | 49c986c1a7351d51fd6654f09d4493fa8dffc6cd | /์ด์ฝํ
/C13/20-2.py | c5c84ac371ceb22fa3466ee9728353ba24690976 | [] | no_license | beomseok-kang/Algorithms-Python | 53ca2cdbb718e2817eb0086662aea55cd3868559 | e06a015e3a8c909643e2fa60808d5157fbb7c3bd | refs/heads/master | 2022-12-24T04:42:04.586375 | 2020-10-03T09:09:23 | 2020-10-03T09:09:23 | 291,429,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | from itertools import combinations
n = int(input())
board = []
teachers = []
spaces = []
for i in range(n):
board.append(list(input().split()))
for j in range(n):
if board[i][j] == 'T':
teachers. append((i, j))
if board[i][j] == 'X':
spaces.append((i, j))
def watch(x, y, direction):
if direction == 0: # ์ผ์ชฝ ๊ฐ์
while y >= 0:
if board[x][y] == 'S':
return True
if board[x][y] == 'O':
return False
y -= 1
if direction == 1: # ์ค๋ฅธ์ชฝ ๊ฐ์
while y < n:
if board[x][y] == 'S':
return True
if board[x][y] == 'O':
return False
y += 1
if direction == 2: # ์์ชฝ ๊ฐ์
while x >= 0:
if board[x][y] == 'S':
return True
if board[x][y] == 'O':
return False
x -= 1
if direction == 3:
while x < n:
if board[x][y] == 'S':
return True
if board[x][y] == 'O':
return False
x += 1
return False
def process():
for x, y in teachers:
for i in range(4):
if watch(x, y, i):
return True
return False
find = False
for data in combinations(spaces, 3):
# ์ฅ์ ๋ฌผ ์ค์น
for x, y in data:
board[x][y] = 'O'
# ํ์์ด ํ ๋ช
๋ ๊ฐ์ง๋์ง ์๋ ๊ฒฝ์ฐ
if not process():
find = True
break
# ์๋๋๋ก ๋๋ ค๋๊ธฐ
for x, y in data:
board[x][y] = 'X'
if find:
print('YES')
else:
print('NO')
| [
"beom.kang17@gmail.com"
] | beom.kang17@gmail.com |
0593f5c98010c322b4fbdaa70491c4256a7d8ec5 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stdlib/@python2/cookielib.pyi | 0f813c128cc4c7423b35f34e65350c4f70f32d70 | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 4,664 | pyi | from typing import Any
class Cookie:
version: Any
name: Any
value: Any
port: Any
port_specified: Any
domain: Any
domain_specified: Any
domain_initial_dot: Any
path: Any
path_specified: Any
secure: Any
expires: Any
discard: Any
comment: Any
comment_url: Any
rfc2109: Any
def __init__(
self,
version,
name,
value,
port,
port_specified,
domain,
domain_specified,
domain_initial_dot,
path,
path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109: bool = ...,
): ...
def has_nonstandard_attr(self, name): ...
def get_nonstandard_attr(self, name, default: Any | None = ...): ...
def set_nonstandard_attr(self, name, value): ...
def is_expired(self, now: Any | None = ...): ...
class CookiePolicy:
def set_ok(self, cookie, request): ...
def return_ok(self, cookie, request): ...
def domain_return_ok(self, domain, request): ...
def path_return_ok(self, path, request): ...
class DefaultCookiePolicy(CookiePolicy):
    # NOTE(review): appears to mirror http.cookiejar.DefaultCookiePolicy —
    # confirm this stub tracks the stdlib signatures.
    # Domain-strictness constants.
    DomainStrictNoDots: Any
    DomainStrictNonDomain: Any
    DomainRFC2965Match: Any
    DomainLiberal: Any
    DomainStrict: Any
    # Instance configuration mirroring the __init__ keyword arguments.
    netscape: Any
    rfc2965: Any
    rfc2109_as_netscape: Any
    hide_cookie2: Any
    strict_domain: Any
    strict_rfc2965_unverifiable: Any
    strict_ns_unverifiable: Any
    strict_ns_domain: Any
    strict_ns_set_initial_dollar: Any
    strict_ns_set_path: Any
    def __init__(
        self,
        blocked_domains: Any | None = ...,
        allowed_domains: Any | None = ...,
        netscape: bool = ...,
        rfc2965: bool = ...,
        rfc2109_as_netscape: Any | None = ...,
        hide_cookie2: bool = ...,
        strict_domain: bool = ...,
        strict_rfc2965_unverifiable: bool = ...,
        strict_ns_unverifiable: bool = ...,
        strict_ns_domain=...,
        strict_ns_set_initial_dollar: bool = ...,
        strict_ns_set_path: bool = ...,
    ): ...
    # Domain block/allow list management.
    def blocked_domains(self): ...
    def set_blocked_domains(self, blocked_domains): ...
    def is_blocked(self, domain): ...
    def allowed_domains(self): ...
    def set_allowed_domains(self, allowed_domains): ...
    def is_not_allowed(self, domain): ...
    # Acceptance checks: should the cookie be stored?
    def set_ok(self, cookie, request): ...
    def set_ok_version(self, cookie, request): ...
    def set_ok_verifiability(self, cookie, request): ...
    def set_ok_name(self, cookie, request): ...
    def set_ok_path(self, cookie, request): ...
    def set_ok_domain(self, cookie, request): ...
    def set_ok_port(self, cookie, request): ...
    # Return checks: should the cookie be sent back to the server?
    def return_ok(self, cookie, request): ...
    def return_ok_version(self, cookie, request): ...
    def return_ok_verifiability(self, cookie, request): ...
    def return_ok_secure(self, cookie, request): ...
    def return_ok_expires(self, cookie, request): ...
    def return_ok_port(self, cookie, request): ...
    def return_ok_domain(self, cookie, request): ...
    def domain_return_ok(self, domain, request): ...
    def path_return_ok(self, path, request): ...
# Sentinel type (empty stub body; no behavior visible here).
class Absent: ...
class CookieJar:
    # In-memory cookie store stub (cf. http.cookiejar.CookieJar — confirm).
    # Class-level regexes used by the cookie parser in the real class.
    non_word_re: Any
    quote_re: Any
    strict_domain_re: Any
    domain_re: Any
    dots_re: Any
    magic_re: Any
    def __init__(self, policy: Any | None = ...): ...
    def set_policy(self, policy): ...
    # Outgoing side: attach stored cookies to a request.
    def add_cookie_header(self, request): ...
    # Incoming side: build/store cookies from a response.
    def make_cookies(self, response, request): ...
    def set_cookie_if_ok(self, cookie, request): ...
    def set_cookie(self, cookie): ...
    def extract_cookies(self, response, request): ...
    # Removal helpers.
    def clear(self, domain: Any | None = ..., path: Any | None = ..., name: Any | None = ...): ...
    def clear_session_cookies(self): ...
    def clear_expired_cookies(self): ...
    def __iter__(self): ...
    def __len__(self): ...
# IOError subclass; presumably raised for malformed cookie files — confirm.
class LoadError(IOError): ...
class FileCookieJar(CookieJar):
    # CookieJar that can persist cookies to a file and restore them later.
    filename: Any  # default path used by save/load/revert
    delayload: Any  # whether loading may be deferred (stub)
    def __init__(self, filename: Any | None = ..., delayload: bool = ..., policy: Any | None = ...): ...
    def save(self, filename: Any | None = ..., ignore_discard: bool = ..., ignore_expires: bool = ...): ...
    def load(self, filename: Any | None = ..., ignore_discard: bool = ..., ignore_expires: bool = ...): ...
    def revert(self, filename: Any | None = ..., ignore_discard: bool = ..., ignore_expires: bool = ...): ...
class LWPCookieJar(FileCookieJar):
    # File jar (presumably the libwww-perl "Set-Cookie3" format — confirm).
    def as_lwp_str(self, ignore_discard: bool = ..., ignore_expires: bool = ...) -> str: ... # undocumented
# NOTE(review): the stdlib defines MozillaCookieJar as its own FileCookieJar
# subclass; here it is a bare alias — confirm this is intended for the stub.
MozillaCookieJar = FileCookieJar

def lwp_cookie_str(cookie: Cookie) -> str: ...
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
b4fde5bc3bf9a74a38f4c7f5e40f5fea26c32763 | cf7025ff7d02604ea146775a35894733d8338593 | /home/helpers.py | 7fa9c814f5825f1c174536ddb898867f2fce9613 | [] | no_license | boxabhi/CodeKeen-starter | 7af6e13ec780df8a571e52d6cf10e16ac4717c3d | ac8be93494cf7013366ba7ad8cbd172d47feb466 | refs/heads/main | 2023-06-18T14:56:30.771286 | 2021-07-25T15:45:05 | 2021-07-25T15:45:05 | 382,294,773 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py |
from django.db.models.query import QuerySet
from django.db.models.query_utils import InvalidQuery
from faker import Faker
fake = Faker()
from .models import *
import random
import datetime
def generate_fake_data():
    """Create 20 fake students, each assigned a uniformly random department
    and a random subset of the available skills.

    Fixes the original selection bias: ``randint(0, len(departments))``
    followed by ``[rand_index - 1]`` made indices -1 and len-1 both map to
    the last department, so it was picked twice as often.
    """
    # Query once instead of on every loop iteration.
    departments = list(Department.objects.all())
    skills = list(Skills.objects.all())
    for _ in range(0, 20):
        student_obj = Student.objects.create(
            student_name=fake.name(),
            student_age=random.randint(18, 54),
            student_dob=datetime.date.today(),
            student_email=fake.email(),
            # Uniform, unbiased choice (replaces randint/shifted indexing).
            department=random.choice(departments),
        )
        print('Student created')
        # Same distribution as the original shuffle + slice: between 0 and
        # len(skills) - 1 distinct skills (never all of them).
        chosen = random.sample(skills, random.randint(0, len(skills) - 1))
        for skill_obj in chosen:
            # The objects already come from the ORM; no need to re-fetch by id.
            student_obj.skills.add(skill_obj)
            print('Student skill added ')
        print(student_obj)
| [
"abhijeetg40@gmail.com"
] | abhijeetg40@gmail.com |
1bd560ba36e7197c81e5416af55f1f2b5f61a34e | 7bc6445331289ea9143e2ba282599389a595386a | /Courses/Pytorch Udacity/intro-to-pytorch/helper.py | 1711e4ad5631d7135b42fb6451d2ebbb0391fd5e | [
"MIT"
] | permissive | develooper1994/DeepLearningCaseStudies | b90ef1290a93876e2eb54b49e5d5ecdd12baab54 | 3bed182790031278bb2721dfe4986e462a2d5d50 | refs/heads/master | 2020-09-25T04:56:26.952956 | 2020-05-03T12:57:46 | 2020-05-03T12:57:46 | 260,541,706 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,874 | py | import matplotlib.pyplot as plt
import numpy as np
from torch import nn, optim
from torch.autograd import Variable
def test_network(net, trainloader):
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
dataiter = iter(trainloader)
images, labels = dataiter.next()
# Create Variables for the inputs and targets
inputs = Variable(images)
targets = Variable(images)
# Clear the gradients from all Variables
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = net.forward(inputs)
loss = criterion(output, targets)
loss.backward()
optimizer.step()
return True
def imshow(image, ax=None, title=None, normalize=True):
    """Display a (C, H, W) tensor as an image on a matplotlib axis.

    When *normalize* is true, the standard ImageNet mean/std normalization
    is undone before display. Returns the axis used.
    """
    if ax is None:
        _, ax = plt.subplots()

    array = image.numpy().transpose((1, 2, 0))

    if normalize:
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        array = np.clip(std * array + mean, 0, 1)

    ax.imshow(array)
    for side in ('top', 'right', 'left', 'bottom'):
        ax.spines[side].set_visible(False)
    ax.tick_params(axis='both', length=0)
    ax.set_xticklabels('')
    ax.set_yticklabels('')

    return ax
def imshow2(img):
    """Un-normalize a tensor image (scaled to [-1, 1]) and display it."""
    restored = img / 2 + 0.5
    plt.imshow(np.transpose(restored.numpy(), (1, 2, 0)))
    plt.show()
def view_recon(img, recon):
    ''' Function for displaying an image (as a PyTorch Tensor) and its
        reconstruction also a PyTorch Tensor
    '''
    fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)
    axes[0].imshow(img.numpy().squeeze())
    axes[1].imshow(recon.data.numpy().squeeze())
    for ax in axes:
        ax.axis('off')
        # 'box-forced' was removed in matplotlib 3.0; 'box' is the supported
        # equivalent for shared-axis images.
        ax.set_adjustable('box')
def view_classify(img, ps, version="MNIST"):
    ''' Function for viewing an image and it's predicted classes.

    img: image tensor; reshaped in place to (1, 28, 28) for display.
    ps: class probabilities, squeezed to a flat length-10 vector.
    version: "MNIST" for digit labels, "Fashion" for Fashion-MNIST labels.
    '''
    ps = ps.data.numpy().squeeze()

    fig, (ax1, ax2) = plt.subplots(figsize=(6, 9), ncols=2)
    # NOTE(review): resize_ mutates the caller's tensor — confirm intended.
    ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze())
    ax1.axis('off')
    # Horizontal bar chart of the 10 class probabilities.
    ax2.barh(np.arange(10), ps)
    ax2.set_aspect(0.1)
    ax2.set_yticks(np.arange(10))
    if version == "MNIST":
        ax2.set_yticklabels(np.arange(10))
    elif version == "Fashion":
        ax2.set_yticklabels(['T-shirt/top',
                            'Trouser',
                            'Pullover',
                            'Dress',
                            'Coat',
                            'Sandal',
                            'Shirt',
                            'Sneaker',
                            'Bag',
                            'Ankle Boot'], size='small');
    ax2.set_title('Class Probability')
    ax2.set_xlim(0, 1.1)

    plt.tight_layout()
| [
"selcukcaglar08@gmail.com"
] | selcukcaglar08@gmail.com |
bd2c19adf442436f6af1e0a7f27a4f0468336b25 | 838fb9231a67c45cef7dfdb326a5d2db9310156c | /Unconstrained_Firstorder_Optimization/homebrew_hybrid2.py | 5819cacb2c5e3f4890b7e4219d5072de7726e7ba | [] | no_license | iluvjava/Serious-Python-Stuff | 24427f44ded0978753f163b8277858dfa2a27caa | 7ad7af07786392764f36ae5d90ad15252b3c5bdc | refs/heads/main | 2023-06-27T19:27:41.132046 | 2021-08-03T07:06:00 | 2021-08-03T07:06:00 | 377,302,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,996 | py | """
First order optimizer.
Use finite diff to approximate Hessian.
Use both newton's or accelerated gradient, which ever is better.
When gradient is small, it start to random sample using local hessian and the sequence of descned parameters.
"""
import numpy as np
import matplotlib.pyplot as plt
class HybridOptimizer:
    """
    A class that binds together:
        * a Newton's method using a Hessian approximated by finite differences
          on the gradient
        * a Nesterov accelerated gradient method.
    Each step computes both candidate updates and keeps whichever yields the
    lower objective value.

    Note: This algorithm is slow and kinda stupid for the following reasons:
        1. It computes the Hessian in a faithful manner using finite difference on gradient.
        2. It switches between Nesterov acc gradient and newton's method, and it constantly checks which one is
        better. Hence it loses competitions against modified newton's method, accelerated gradient, and
        quasi-newton solver.

    Bug fixed relative to the original: ``_UpdateRunningParams`` assigned
    ``_._G`` instead of ``_._Gpre``, so the "previous gradient" used by
    ``__call__`` stayed frozen at its initial value forever.
    """

    ApproxHessian = 1      # Getting the Hessian with BFGS's algorithm
    FiniteDiffHessian = 2  # Getting the Hessian by finite diff on gradient

    def __init__(
            _,
            f: callable,        # objective function
            df: callable,       # derivative of the function
            x0: np.ndarray,     # initial guess for optimization
            eta: float,         # the learning rate
            momentum: float):
        """
        :param f: objective; maps an (n, 1) ndarray to a scalar.
        :param df: gradient of f; maps an (n, 1) ndarray to an (n, 1) ndarray.
        :param x0: initial guess.
        :param eta: learning rate for the accelerated-gradient branch.
        :param momentum: momentum coefficient for the velocity update.
        """
        _.f, _.df = f, df
        _._Momentum = momentum
        _._Eta = eta
        assert isinstance(f(x0), float) or isinstance(f(x0), int)
        # Running parameters for optimization:
        _._Xpre = x0        # previous iterate
        _._Xnex = None      # current iterate
        _._Gpre = df(x0)    # gradient at the previous iterate
        _._Gnex = None      # gradient at the current iterate
        _._Velocity = 0
        _._H = None         # finite-difference Hessian at the current iterate
        # Running parameters for benchmarking and analyzing.
        _.Xs = [_._Xpre]
        _.Report = ""
        _._Initialize()

    @property
    def H(this):
        # Copy of the current Hessian approximation (raises if not yet set).
        return this._H.copy()

    def _Initialize(_):
        """Take damped momentum-gradient steps until the objective decreases
        once, then record the first iterate pair and the initial Hessian."""
        while _._Xnex is None or _.f(_._Xnex) > _.f(_._Xpre):
            _._Velocity = _._Momentum * _._Velocity - _._Eta*_.df(_._Xpre)
            _._Xnex = _._Xpre + _._Velocity
            # Halve the step after every attempt (same damping as original).
            if _._Xnex is not None:
                _._Eta /= 2
        _._Gnex = _.df(_._Xnex)
        _._H = _._UpdateHessian()
        _.Xs.append(_._Xnex)

    def _UpdateHessian(_):
        """
        Use finite differences on the gradient to estimate the Hessian at Xnex.

        :return:
            Hessian at point Xnex, or None when any central difference is
            degenerate (some component of the difference is exactly zero).
        """
        x1, x2 = _._Xpre, _._Xnex
        n = x1.shape[0]
        df = _.df
        # NOTE(review): h is the mean step per coordinate; if the last two
        # iterates coincide, h == 0 and the zero-diff guard below returns None.
        h = np.mean(x2 - x1)
        H = np.zeros((n, n))

        def e(i):
            # i-th standard basis vector, shaped (n, 1).
            x = np.zeros((n, 1))
            x[i, 0] = 1
            return x

        for I in range(n):
            df1 = df(x2 + h*e(I))
            df2 = df(x2 - h*e(I))
            diff = df1 - df2
            if np.min(abs(diff)) == 0:
                return None
            g = diff/(2*h)
            H[:, [I]] = g
        return H

    def _UpdateRunningParams(_, Xnex):
        """
        Shift the iterate/gradient history forward and refresh the Hessian.

        :param Xnex:
            next step to take.
        """
        _._Xpre, _._Xnex = _._Xnex, Xnex
        _.Xs.append(Xnex)
        # FIX: was `_._G, _._Gnex = ...`, which never updated _._Gpre.
        _._Gpre, _._Gnex = _._Gnex, _.df(Xnex)
        _._H = _._UpdateHessian()

    def _TrySecant(_):
        """Newton/secant candidate: x - pinv(H) @ grad(x)."""
        df2 = _._Gnex
        assert df2.ndim == 2, "expected gradient to be 2d np array. "
        return _._Xnex - np.linalg.pinv(_._H)@df2

    def _TryAccGradient(_):
        """Nesterov accelerated-gradient candidate (updates the velocity)."""
        x2 = _._Xnex
        _._Velocity = _._Momentum*_._Velocity - _._Eta*_.df(x2 + _._Velocity)
        return x2 + _._Velocity

    def __call__(_):
        """
        Take one optimization step.

        The Newton candidate is only tried when the gradient norm is shrinking
        (and still above 1e-8) or the Hessian has positive determinant; the
        candidate with the smaller objective value wins.

        :return:
            (chosen point, short string describing which branch won)
        """
        df1, df2 = _._Gpre, _._Gnex
        f = _.f
        det, norm = np.linalg.det, np.linalg.norm
        Xnex = _._TryAccGradient()
        Which = "G;"
        if _._H is not None and ((norm(df2) < norm(df1) and norm(df2) > 1e-8) or det(_._H) > 0):
            XnexNewton = _._TrySecant()
            Which += "N"
            if f(XnexNewton) < f(Xnex):
                _._Velocity = 0                   # reset momentum after a Newton step
                _._Eta = 1/(2*norm(_._H)**2)      # reset learning rate from the Hessian
                Xnex = XnexNewton
                Which += ". N < G"
            else:
                Which += ". G < N"
        _._UpdateRunningParams(Xnex)
        return Xnex, Which

    def Generate(_):
        # Placeholder kept for interface compatibility.
        pass
def main():
    # Two self-contained demos; only the second is actually executed below.
    def TestWithSmoothConvex():
        # Minimize ||A x||^2 for a random 100x100 A, starting far from 0.
        norm = np.linalg.norm
        N = 100
        A = np.random.rand(N,N)
        x0 = np.random.rand(N, 1)*20
        f, df = lambda x: norm(A@x)**2, lambda x: 2*(A.T@A)@x
        # Step size bounded by the Lipschitz constant of the gradient.
        eta = 1/(4*norm(A.T@A))
        optim = HybridOptimizer(f, df, x0, eta, 0.2)
        ObjectivVals = []
        for I in range(10):
            v, w = optim()
            ObjVal = f(v)
            print(f"{v[0, 0]}; {v[1, 0]}, objval: {ObjVal}, {w}")
            ObjectivVals.append(ObjVal)
            # Stop as soon as the objective increases between steps.
            if len(ObjectivVals) > 2 and (ObjectivVals[-1] - ObjectivVals[-2]) > 1e-8:
                break
        plt.plot(ObjectivVals); plt.title("objective value")
        plt.show()
    def TestWithAFuncFromWiki():
        # Beale's function: a classic non-convex 2-D optimization benchmark.
        g = lambda x, y: (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 + (2.625 - x + x*y**3)**2
        # Partial derivatives of Beale's function, written out by hand.
        dfx = lambda x, y: 2*(y - 1)*(1.5 - x + x*y) + \
                            2*(y**2 - 1)*(2.25 - x + x*y**2) + \
                            2*(y**3 - 1)*(2.625 - x + x*y**3)
        dfy = lambda x, y: 2*x*(1.5 -x + x*y) + \
                           4*x*y*(2.25 - x + x*y**2) + \
                           (6*x*y**2)*(2.625 - x + x*y**3)
        # Adapt the scalar derivatives to the (2, 1) ndarray interface.
        df = lambda x: np.array(
            [
                [dfx(x[0, 0], x[1, 0])],
                [dfy(x[0, 0], x[1, 0])]
            ])
        f = lambda x: g(x[0, 0], x[1, 0])
        x0 = np.array([[1e-2],[-1]])
        eta = 0.01
        optim = HybridOptimizer(f, df, x0, eta, 0.5)
        ObjectivVals = []
        x = x0
        for I in range(240):
            v, w = optim()
            ObjVal = f(v)
            print(f"{v[0, 0]}; {v[1, 0]}, objval: {ObjVal}, {w}")
            ObjectivVals.append(ObjVal)
            # Stop once successive iterates are (numerically) identical.
            if np.linalg.norm(v - x) < 1e-10:
                break
            x = v
        plt.plot(ObjectivVals);
        plt.title("objective value")
        plt.show()
    TestWithAFuncFromWiki()
if __name__ == "__main__":
    # Print the working directory (handy when run from an IDE), then run the demo.
    import os
    print(f"{os.curdir}")
    print(f"{os.getcwd()}")
    main()
| [
"victor1301166040@gmail.com"
] | victor1301166040@gmail.com |
59a4d22d545818b6ef46572e11400d0ca3ae0d7a | 8ae0b37c3913fb344c1c2bb161b0a8b346e2f298 | /leetcode/Q050_PowxN.py | 43d1f256bf2f74a808eba5e999b8e7b759dffdf3 | [] | no_license | jiaz/leetpy | 76330a0e4f234bface88b3a6a5476cd24d7776ea | 94e8b0c2ebbf593dbca1b253e08c588f27679bd3 | refs/heads/master | 2021-01-10T09:07:45.549361 | 2015-11-26T06:44:50 | 2015-11-26T06:44:50 | 46,908,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | # -*- coding: utf-8 -*-
# Implement pow(x, n).
#
#
# Link:
# https://leetcode.com/problems/powx-n/
class Solution(object):
    def myPow(self, x, n):
        """
        Compute x raised to the integer power n using iterative
        exponentiation by squaring (O(log |n|) multiplications).

        :type x: float
        :type n: int
        :rtype: float
        """
        # Negative exponent: invert the base and work with |n|.
        if n < 0:
            x = 1.0 / x
            n = -n
        result = 1.0
        while n:
            if n & 1:       # fold in the current square when the bit is set
                result *= x
            x *= x
            n >>= 1
        return result
"jiaji.zh@gmail.com"
] | jiaji.zh@gmail.com |
047d4665fd17ef65ab70bc153876d7438b1b2609 | 1e4d75d2a7267cb4d01d39ea7c6367bfd5c51842 | /coupons/models.py | 01e2ae251d4069e19e1a7e8a770b27ecbf44ac4b | [] | no_license | Grayw0lf/my-shop | 9340e6cc47b1c77ff143d278013c46780767ae4a | ad1ea4d9db27196f263656bb01d52562678f9247 | refs/heads/master | 2022-11-27T23:00:44.726712 | 2020-03-17T18:09:43 | 2020-03-17T18:09:43 | 248,040,250 | 1 | 0 | null | 2022-11-22T04:40:09 | 2020-03-17T18:08:23 | Python | UTF-8 | Python | false | false | 490 | py | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class Coupon(models.Model):
    """A discount coupon valid between *valid_from* and *valid_to*."""
    # Redemption code typed by the customer; enforced unique.
    code = models.CharField(max_length=50, unique=True)
    valid_from = models.DateTimeField()
    valid_to = models.DateTimeField()
    # Percentage discount, constrained to the 0-100 range by validators.
    discount = models.IntegerField(validators=[MinValueValidator(0),
                                       MaxValueValidator(100)])
    # Whether the coupon can currently be applied.
    active = models.BooleanField()

    def __str__(self):
        return self.code
| [
"akosheev@rambler.ru"
] | akosheev@rambler.ru |
25565d51e4f038b1cf663fa91e9c5da4d5b876f6 | 93a9c36e85bd753608516efe581edf96bbdc3580 | /posts/views.py | 5bcfd7c020267c9572588e30dc58d7af29001b1c | [] | no_license | zhouf1234/django_obj | bef6a13fc3d183070725fcb937da7a0c4a688e1c | bb10edc03dfdbe692b6293ffc3e1d33a374604cf | refs/heads/master | 2020-05-05T03:05:24.838804 | 2019-04-05T10:18:27 | 2019-04-05T10:18:27 | 179,660,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,380 | py | from django.shortcuts import render
import json
from django.http import HttpResponse
from django.http import JsonResponse
from school.models import Schools
from .models import Posts,Tag
# Create your views here.
# Post list view
def postsList(request):
    """Render the list of published posts (is_status=1)."""
    published = Posts.objects.filter(is_status=1)
    return render(request, 'posts/posts_list.html', context={'posts': published})
# View post detail
def detailPos(request):
    """Show the detail page for the post whose id arrives in POST data."""
    post = Posts.objects.get(id=request.POST.get('pos_id', None))
    return render(request, 'posts/detailpos.html', context={'posts': post})
# Edit post: fetch the data for the edit form
def editPos(request):
    """Render the edit form for a post, offering every *other* active school
    and every *other* tag as alternative choices."""
    posts = Posts.objects.get(id=request.GET.get('pos_id', None))
    context = {
        'posts': posts,
        'post_school': Schools.objects.filter(is_status=1).exclude(name=posts.post_school.name),
        'tag': Tag.objects.exclude(name=posts.tags.name),
    }
    return render(request, 'posts/edit_pos.html', context=context)
# Edit post: save the submitted data (the body comes from a rich-text editor;
# currently text only — multi-image upload is still a TODO)
def updatePos(request):
    """Persist edits to a post: optionally replace the image, then bulk-update
    the remaining fields from the JSON payload in POST['data']."""
    pos_id = request.POST.get("pos_id", None)
    posts = Posts.objects.get(id=pos_id)
    # The remaining fields arrive as one JSON object.
    data = json.loads(request.POST.get('data', None))
    # A new image is only present when the user uploaded a replacement.
    post_image = request.FILES.get("post_image")
    if post_image is not None:  # PEP 8: identity test instead of != None
        posts.post_image = post_image
        print(posts.post_image)
        posts.save()
    # Apply every other submitted field in a single UPDATE.
    Posts.objects.filter(id=pos_id).update(**data)
    return HttpResponse(123)
# Add post: render the creation form
# (every field except the title is optional)
def addPos(request):
    """Render the post-creation form with all active schools and all tags."""
    context = {
        'post_school': Schools.objects.filter(is_status=1),
        'tag': Tag.objects.all(),
    }
    return render(request, 'posts/add_pos.html', context=context)
# Add post: handle the submitted data
def addssPos(request):
    """Create a new post from a JSON payload plus an optional uploaded image.

    Returns a JSON status object; duplicate active titles are rejected.
    """
    # The form fields arrive as one JSON-encoded 'data' value.
    data = request.POST.get('data', None)
    data = json.loads(data)
    # print(data['post_school'])
    post_image = request.FILES.get("post_image",None)
    # print(post_image)
    # Reject the request when an active post with this title already exists.
    info = Posts.objects.filter(post_title=data['post_title'], is_status=1).exists()
    if info:
        return JsonResponse({
            'status': 'fail',
            'message': '่ฏฅๆ็ซ ๅทฒๅญๅจ๏ผ',
            'info': ''
        })
    Posts.objects.create(
        post_title=data['post_title'],
        source=data['source'],
        source_link=data['source_link'],
        # FK ids; blank submissions fall back to the "none" rows
        # (tag "none" has id 5, school "none" has id 11) — NOTE(review):
        # these magic ids are environment-specific, confirm they exist.
        tags_id='5' if data['tags']==' ' else data['tags'],
        post_school_id='11' if data['post_school']==' ' else data['post_school'],
        post_content=data['post_content'],
        post_image=post_image,
    )
    return JsonResponse({
        'status': 'success',
        'message': 'ๅๅปบๆๅ',
        'info': ''
    })
# Delete post (soft delete: actually just flips is_status to 0)
def delPos(request):
    """Soft-delete a post by setting is_status to 0, then render the list
    template with that post as context."""
    posts = Posts.objects.get(id=request.GET.get('pos_id'))
    context = {'posts': posts}
    posts.is_status = 0
    posts.save()
    return render(request, 'posts/posts_list.html', context=context)
# List soft-deleted posts
def deldPos(request):
    """Show every soft-deleted post (is_status=0)."""
    removed = Posts.objects.filter(is_status=0)
    return render(request, 'posts/deld_pos.html', context={'posts': removed})
# Restore a soft-deleted post (flips is_status back to 1)
def posPos(request):
    """Restore a soft-deleted post by flipping is_status back to 1."""
    posts = Posts.objects.get(id=request.GET.get('pos_id'))
    context = {'posts': posts}
    posts.is_status = 1
    posts.save()
    return render(request, 'posts/deld_pos.html', context=context)
# Permanently delete a post: removes the row from the database
def delesPos(request):
    """Hard-delete a post row from the database, then render the
    deleted-posts template (still passing the fetched instance)."""
    pos_id = request.GET.get('pos_id')
    posts = Posts.objects.get(id=pos_id)
    context = {'posts': posts}
    Posts.objects.get(id=pos_id).delete()
    return render(request, 'posts/deld_pos.html', context=context)
"="
] | = |
8f59c92b70f4ae93805a85a751b28ec73ab940d0 | 2662c2c0a86b9f39c3122209c65015e7ca08ab38 | /tests/conftest.py | b4483890e7c1ce981501a76c90a6674f637bee0c | [
"MIT"
] | permissive | hyanwong/pyslim | c5eb0c2689e36147f2612619bb6ee8e6d57d5d63 | 7203b743e30e330729a73fa9b23f971565095202 | refs/heads/main | 2023-03-09T13:15:55.706829 | 2021-01-09T23:07:38 | 2021-01-09T23:07:38 | 328,613,475 | 0 | 0 | MIT | 2021-01-11T09:34:19 | 2021-01-11T09:34:19 | null | UTF-8 | Python | false | false | 1,129 | py | """
To make pytest run this setup/teardown code it needs to go here, I guess.
"""
import os
import random
import pytest
from . import example_files, run_slim_script
@pytest.fixture(scope="session", autouse=True)
def setup_slim_examples():
    """Regenerate every SLiM example's .trees file before the test session
    and remove the generated files afterwards.

    Consolidates the four copy-pasted try/except blocks (two of which
    contained stray `pass` statements before the handler) into one helper.
    """
    def remove_if_exists(path):
        # Generated files may legitimately be absent.
        try:
            os.remove(path)
        except FileNotFoundError:
            pass

    # Make random tests reproducible.
    random.seed(210)
    # Run SLiM on each example script.
    for f in example_files:
        basename = example_files[f]['basename']
        treefile = basename + ".trees"
        print(treefile)
        remove_if_exists(treefile)
        out = run_slim_script(basename + ".slim")
        assert out == 0
    # The yield makes pytest run the test session before the teardown below.
    yield
    for f in example_files:
        basename = example_files[f]['basename']
        treefile = basename + ".trees"
        remove_if_exists(treefile)
        remove_if_exists(treefile + ".pedigree")
| [
"petrel.harp@gmail.com"
] | petrel.harp@gmail.com |
03498f89afc9c95c9b12379518efabdc209319a9 | 88853b9b6c3ae0c12cbd5bf83be3d48f2fe0159e | /document/eggs_package/deploy_egg/gflux/gcustomer/apps/gcustomer/management/commands/import_storegood_from_fact_trans.py | 69879f4b28f979e6da7060d719b398bad071e016 | [] | no_license | laoyin/nyf | 9151da3368c2e636501bcf1ad52f895fe446c04b | a2b12f54827b6088548f060881b6dafa2d791a3a | refs/heads/master | 2021-01-17T17:11:14.174968 | 2015-09-28T01:12:28 | 2015-09-28T01:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | # coding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from django.core.management.base import BaseCommand
import logging,pdb
from django.conf import settings
import django_gearman_commands
from gcustomer.utils import get_none_fuel_last_10,get_none_fuel_top_10
from gcustomer.models import *
from gflux.apps.station.models import *
from dash.core.backends.sql.models import get_dash_session_maker
import json,pdb
class Command(BaseCommand):
    help = 'ไป็ฎๅไบคๆ็ๆฐๆฎๅฏผๅ
ฅๅๅ็ๆฐๆฎ'
    def handle(self, *args, **options):
        # Import distinct store goods from the simplified-transaction table
        # (trans_type == 1), deduplicated by barcode.
        session = get_dash_session_maker()()
        goods = session.query(Trans).filter_by(trans_type = 1).all()
        store_good_list = []
        temp_good_list = []
        for good in goods :
            if not good.barcode in temp_good_list :
                store_good_list.append(dict(
                    pos_id = good.barcode,
                    name = good.desc,
                    price = good.price
                ))
                temp_good_list.append(good.barcode)
        # Dump every distinct good to a text file for manual inspection.
        # NOTE(review): Python 2 print statements below — this module is
        # Python 2 only.
        f = open("/home/work/store_items.txt","w")
        count = 0
        for good in store_good_list :
            f.write(str(good['pos_id']) + ":"+str(good['name']))
            f.write('\n')
            print str(good['pos_id']) + ":"+str(good['name'])
            count = count + 1
        f.write(str(count))
        f.close()
        print "total:" + str(count)
        # Sync the distinct goods into the back-office StoreItem table,
        # skipping any pos_id that already exists.
        for good in store_good_list :
            try :
                objs = session.query(StoreItem).filter_by(pos_id = str(good['pos_id'])).all()
                if len(objs) > 0 :
                    print objs[0].name + "ๅจ็ณป็ปไธญ."
                else :
                    good = StoreItem(
                        pos_id = good['pos_id'],
                        name = good['name'],
                        price = good['price'],
                        user_source=1,
                        source_id = 3,
                        img_sha1 ="9329cec35570efd186bb60df1d905b31eec66463"
                    )
                    session.add(good)
            except Exception , e:
                # NOTE(review): errors are silently swallowed per item —
                # confirm this best-effort behavior is intended.
                pass
        try:
            session.commit()
        except Exception,e:
            session.rollback()
            print e
        return
| [
"niyoufa@tmlsystem.com"
] | niyoufa@tmlsystem.com |
78619c22c68d4e82f0a99d87ab8cc9d424abcb15 | a59231cdf10a83b9f7a6a670fff24f52cb3b4642 | /Cisco/Stack/valid-parentheses.py | 590b6cc012117668689d5a75acacdd810628e05a | [] | no_license | jadenpadua/ICPC | afae2b3e7c4eb2a5c841bfbadb6c6c8c06599f40 | 6f4edeb36391c4afdde9f8f15e39a25895183f95 | refs/heads/master | 2020-09-12T06:17:12.508460 | 2020-09-11T23:05:11 | 2020-09-11T23:05:11 | 222,338,314 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | class Solution:
def isValid(self, s: str) -> bool:
stack = []
ht = {"(":")", "{":"}", "[":"]"}
opening = "({["
closing = ")}]"
for i in range(len(s)):
if len(stack) == 0 and s[i] in closing:
return False
if s[i] in opening:
stack.append(s[i])
else:
if ht[stack.pop()] != s[i]:
return False
return len(stack) == 0
| [
"noreply@github.com"
] | jadenpadua.noreply@github.com |
d58c0ca64546bc18a40bb3065a14fa7fbe7e8b5a | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/์ ๋ฌ๊ฐ๊ณผ ๋ฐํ๊ฐ_20200705101852.py | 70e2e8a10f17f79a0b03c2e4146b4f82eb3b267b | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | def open_account():
print("์๋ก์ด ๊ณ์ข๊ฐ ๊ฐ์ค๋์์ต๋๋ค.")
def deposit(balance, money): # ์
๊ธ
print("์
๊ธ์ด ์๋ฃ๋์์ต๋๋ค. ์์ก์ {0} ์์
๋๋ค.".format(balance + money))
return balance + money
def withdraw(balance, money) # ์ถ๊ธ
if balance >= money: # ์์ก์ด ์ถ๊ธ๋ณด๋ค ๋ง์ผ๋ฉด
print("์ถ๊ธ์ด ์๋ฃ๋์์ต๋๋ค. ์์ก์ {0} ์์
๋๋ค.".format(balance - money))
return balance - money
else:
print("์ถ๊ธ์ด ์๋ฃ๋์ง ์์์ต๋๋ค ์์ก์ {0} ์์
๋๋ค.".format(balance))
return balance
def withdraw_night(balance, money): # ์ ๋
์ ์ถ๊ธ
commission = 100 # ์์๋ฃ 100์
return commission, balance - money - commission
balance = 0 # ์์ก
balance = deposit(balance, 1000)
# balance = deposit(balance, 2000)
# balance = deposit(balance, 500)
commission, balance = withdraw_night
print(balance)
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
17a9b272ce5a0cbf942565037edd27b5f0b68a1a | 1a937b899af949d23e667782a7360b9de1634456 | /SoftUni/Data Structure Efficiency/performance_test/test.py | 9a3578888c79683efd31e74e5cf7b83efcdbe079 | [] | no_license | stanislavkozlovski/data_structures_feb_2016 | c498df6ea7cb65d135057a300e0d7e6106713722 | adedac3349df249fe056bc10c11b0b51c49e24bb | refs/heads/master | 2021-07-06T17:37:18.117104 | 2017-09-30T19:01:51 | 2017-09-30T19:01:51 | 75,526,414 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | from datetime import datetime
import sys
from performance_test.asizeof import asizeof
from performance_test.ordered_set import OrderedSet
from sortedcontainers import SortedSet
test_count = 10000

# test with different elements, add, remove and search
# Benchmark 1: sortedcontainers.SortedSet — timed add+membership, a memory
# snapshot, then timed remove+absence checks.
sset_start = datetime.now()
sset = SortedSet()
for i in range(test_count):
    sset.add(i)
    assert i in sset
sset_memory = asizeof(sset)
for i in range(test_count):
    sset.remove(i)
    assert i not in sset
sset_end = datetime.now()
#########################
# Benchmark 2: the homegrown OrderedSet, exercised identically.
ordered_set_start = datetime.now()
ordered_set = OrderedSet()
for i in range(test_count):
    ordered_set.add(i)
    assert ordered_set.contains(i)
ordered_set_memory = asizeof(ordered_set)
for i in range(test_count):
    ordered_set.remove(i)
    assert not ordered_set.contains(i)
ordered_set_end = datetime.now()
# Report wall-clock time and deep memory size for both structures.
print("Sorted Set elapsed time: {}".format(sset_end-sset_start))
print("Sorted Set memory: {}".format(sset_memory))
print("Ordered Set elapsed time: {}".format(ordered_set_end-ordered_set_start))
print("Ordered Set memory: {}".format(ordered_set_memory))
"""
Sorted Set elapsed time: 0:00:00.380138
Ordered Set elapsed time: 0:00:06.590211
Hell, mine is quite slower.
"""
"familyguyuser192@windowslive.com"
] | familyguyuser192@windowslive.com |
9409552114456334acb3fd1ddb037067682e418e | 4bc6028ed8ba403b69adfd6f5cbd139baece0f4d | /basic_python/machine_learn/chapter01/matrix4.py | 90101f10603b9a8a7c333433b5071cd1b50eab3e | [] | no_license | xrw560/learn-pyspark | 0ef9ed427ff887ceed1c5e5773bf97ed25ecae04 | 618d16dafd73165e714111670119d9cdecc0bf1f | refs/heads/master | 2020-03-07T00:12:36.885000 | 2019-01-04T09:51:32 | 2019-01-04T09:51:32 | 127,152,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # -*- coding: utf-8 -*-
"""
็ฉ้ต็ๅ
ถไปๆไฝ๏ผ่กๅๆฐใๅ็ใๅคๅถใๆฏ่พ
"""
import sys
import os
import time
from numpy import *
# ้
็ฝฎutf-8่พๅบ็ฏๅข
reload(sys)
sys.setdefaultencoding('utf-8')
mymatrix = mat([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
[m, n] = shape(mymatrix) # ็ฉ้ต็่กๅๆฐ
print "็ฉ้ต็่กๆฐๅๅๆฐ:", m, n
myscl1 = mymatrix[0] # ๆ่กๅ็
print "ๆ่กๅ็:", myscl1
myscl2 = mymatrix.T[0] # ๆๅๅ็
print "ๆๅๅ็:", myscl2
mycpmat = mymatrix.copy() # ็ฉ้ต็ๅคๅถ
print "ๅคๅถ็ฉ้ต:\n", mycpmat
# ๆฏ่พ
print "็ฉ้ตๅ
็ด ็ๆฏ่พ:\n", mymatrix < mymatrix.T
# ็ฉ้ต็็นๅพๅผๅ็นๅพๅ้
A = [[8, 1, 6], [3, 5, 7], [4, 9, 2]]
evals, evecs = linalg.eig(A)
print "็นๅพๅผ:", evals, "\n็นๅพๅ้:", evecs
| [
"ncutits@163.com"
] | ncutits@163.com |
63b8b3f1473b600a71483e331bf765e189922e38 | e89309b66ac7950508aba78247d0776435b7e0f2 | /py/services/revenue/main.py | daa3d1708e8e259ecf8d44d0ffd2f3adb677d8c4 | [] | no_license | adalekin/otusdemo | 2bdcab1191d4985b0da0c5d15f0a2d96149cbc65 | 60b4dbb664c3fe7c2068b9c8691ce99b7c0d1780 | refs/heads/master | 2023-08-05T08:38:17.359421 | 2021-09-15T16:53:51 | 2021-09-15T16:53:51 | 406,182,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | import asyncio
import json
import aiohealthcheck
import httpx
from aiokafka import AIOKafkaConsumer
from aiorun import run
from schemas.event import EventFired
from revenue import settings
def _deserializer(serialized):
return json.loads(serialized)
async def _get_account_id_by_user_id(client, user_id):
    """Resolve the billing account id for *user_id*, or None when the
    billing service does not return HTTP 200."""
    url = f"{settings.BILLING_URL}/accounts/find_by_user_id/{user_id}/"
    response = await client.get(url)
    if response.status_code == 200:
        return response.json()["id"]
    return None
async def _consume_event_fired():
    """Consume `event-fired` Kafka messages and post a charge to the billing
    service for every event whose "ec" field is "revenue"."""
    client = httpx.AsyncClient()
    consumer = AIOKafkaConsumer(
        "event-fired",
        bootstrap_servers=settings.KAFKA_BOOTSTRAP_SERVERS,
        group_id=settings.KAFKA_GROUP_ID,
        value_deserializer=_deserializer,
        auto_offset_reset="earliest",  # new consumer groups start from the oldest message
    )
    await consumer.start()
    try:
        async for msg in consumer:
            event_fired = EventFired(**msg.value)
            # Only revenue events are billed here; everything else is skipped.
            if event_fired.ec != "revenue":
                continue
            account_id = await _get_account_id_by_user_id(client=client, user_id=event_fired.partner_id)
            # NOTE(review): account_id may be None when the lookup fails; the
            # charge below would then post account_id=None — confirm intended.
            await client.post(
                f"{settings.BILLING_URL}/balance_transactions/",
                json={"account_id": account_id, "type": "charge", "amount": event_fired.ev, "currency": "USD"},
            )
    finally:
        # Always release the Kafka consumer and the HTTP client.
        await consumer.stop()
        await client.aclose()
if __name__ == "__main__":
    # Dedicated event loop: expose a TCP health endpoint on :5000 alongside
    # the Kafka consumer; aiorun manages signals and shutdown.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.create_task(aiohealthcheck.tcp_health_endpoint(port=5000))
    loop.create_task(_consume_event_fired())
    run(loop=loop, stop_on_unhandled_errors=True)
| [
"adalekin@gmail.com"
] | adalekin@gmail.com |
ec02f3e0d3f6a1a74fb152b750896a3e5fba551a | cd57ad36685cc188ea42219bd220905e23e61f4c | /chatterbot/response_selection.py | ebb3d6af924aaab68598657be7b7ba02d7607020 | [
"BSD-3-Clause"
] | permissive | gitCommitWiL/ChatterBot | fa404848c7eb8f8ffb07c80c7d3ec47aeb2fe177 | 4f2275ec8a6e3546c4251db9e9938f7b3fd29e68 | refs/heads/master | 2021-04-22T14:52:18.175648 | 2020-03-26T11:22:16 | 2020-03-26T11:22:16 | 249,854,439 | 2 | 0 | BSD-3-Clause | 2020-03-25T01:02:46 | 2020-03-25T01:02:46 | null | UTF-8 | Python | false | false | 2,950 | py | """
Response selection methods determine which response should be used in
the event that multiple responses are generated within a logic adapter.
"""
import logging
def get_most_frequent_response(input_statement, response_list, storage=None):
    """Pick the response that has occurred most often as a reply to
    *input_statement*, as counted through the storage adapter.

    :param input_statement: A statement that closely matches an input to the chat bot.
    :param response_list: Candidate response statements.
    :param storage: Storage adapter used to count prior occurrences.
    :return: The response statement with the greatest number of occurrences.
    """
    logger = logging.getLogger(__name__)
    logger.info('Selecting response with greatest number of occurrences.')

    best_response = None
    best_count = -1
    for candidate in response_list:
        occurrences = len(list(storage.filter(
            text=candidate.text,
            in_response_to=input_statement.text)
        ))
        # Ties go to the later candidate, matching the ">=" comparison.
        if occurrences >= best_count:
            best_count = occurrences
            best_response = candidate

    return best_response
def get_first_response(input_statement, response_list, storage=None):
    """Return the first statement in *response_list*.

    :param input_statement: A statement that closely matches an input to the chat bot.
    :param response_list: Candidate response statements.
    :param storage: Unused; accepted for interface parity with the other
        selection methods.
    """
    logging.getLogger(__name__).info(
        'Selecting first response from list of {} options.'.format(len(response_list))
    )
    return response_list[0]
def get_random_response(input_statement, response_list, storage=None):
    """Return a uniformly random statement from *response_list*.

    :param input_statement: A statement that closely matches an input to the chat bot.
    :param response_list: Candidate response statements.
    :param storage: Unused; accepted for interface parity with the other
        selection methods.
    """
    from random import choice

    logging.getLogger(__name__).info(
        'Selecting a response from list of {} options.'.format(len(response_list))
    )
    return choice(response_list)
| [
"gunthercx@gmail.com"
] | gunthercx@gmail.com |
e303cf1201005115382e02b5679d71751de79e53 | 578db86c51d44ebddd0dc7b1738985b3dc69eb74 | /corehq/form_processor/migrations/0067_auto_20170915_1506.py | 3adde484546ee34e02e1d427359da4231da3bb15 | [
"BSD-3-Clause"
] | permissive | dimagi/commcare-hq | a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b | e7391ddae1af1dbf118211ecb52c83fc508aa656 | refs/heads/master | 2023-08-16T22:38:27.853437 | 2023-08-16T19:07:19 | 2023-08-16T19:07:19 | 247,278 | 499 | 203 | BSD-3-Clause | 2023-09-14T19:03:24 | 2009-07-09T17:00:07 | Python | UTF-8 | Python | false | false | 458 | py | # Generated by Django 1.10.7 on 2017-09-15 15:06
from django.db import migrations
from corehq.util.django_migrations import AlterUniqueTogetherIfNotExists
class Migration(migrations.Migration):
    # Adds a (case, identifier) unique_together constraint on
    # CommCareCaseIndexSQL. AlterUniqueTogetherIfNotExists presumably skips
    # the operation when the constraint already exists — confirm against
    # corehq.util.django_migrations.

    dependencies = [
        ('form_processor', '0066_auto_20170818_2020'),
    ]

    operations = [
        AlterUniqueTogetherIfNotExists(
            name='commcarecaseindexsql',
            unique_together=set([('case', 'identifier')]),
        ),
    ]
| [
"skelly@dimagi.com"
] | skelly@dimagi.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.