blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
365edf48bfff6fdeccdcb1003149c574c7972c07 | 97dae48fa3c613a84655c1c0b12cdc0db2c555bb | /algorithm/patternsearch/anagram_search.py | e0aab3a3dca8cf884dda3fe0c3bcd65162ad4911 | [] | no_license | klknet/geeks4geeks | 6aa5841b15be41057dc987524721ea1ea37e02ea | d7d9099af7617a4000f38c75d2c7214bed570eda | refs/heads/master | 2021-07-12T06:34:30.048691 | 2020-06-22T07:51:14 | 2020-06-22T07:51:14 | 170,288,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | """
Search for all permutations.
1)Store counts of frequencies of pattern in first count array countP[]. Also store counts of frequencies of characters
in first window of text in array countTW[].
2)Now run a loop from i=M to N-1, do following in loop:
a)If the two count arrays are identical, we found an occurrence.
b)Increment count of current character of text in countTW[].
c)Decrement count of first character of previous window in countTW[].
3)The last window is not checked by the above loop, so explicitly check it.
"""
no_of_chars = 256  # size of the extended-ASCII character-count arrays


def anagram_search(pat, txt):
    """Print and return every index in txt where a permutation of pat starts.

    Uses sliding-window character counts: a window of len(pat) over txt is
    compared against the fixed counts of pat, then shifted one character at
    a time.

    Fixes two defects of the original implementation:
      * when len(pat) == len(txt) the single window was never checked,
        because the sliding loop body never executed;
      * when len(pat) > len(txt) the window initialisation crashed with an
        IndexError instead of simply finding nothing.
    Matching start indices are still printed (original behavior) and are
    additionally returned as a list.
    """
    m, n = len(pat), len(txt)
    matches = []
    if m == 0 or m > n:
        return matches
    pat_count = [0] * no_of_chars
    cur_count = [0] * no_of_chars
    # Counts for the pattern and for the first window of the text.
    for i in range(m):
        pat_count[ord(pat[i])] += 1
        cur_count[ord(txt[i])] += 1
    # Slide the window: check the window ending at i-1, then advance it by
    # adding txt[i] and dropping txt[i-m].
    for i in range(m, n):
        if compare(pat_count, cur_count, pat):
            print(i - m)
            matches.append(i - m)
        cur_count[ord(txt[i])] += 1
        cur_count[ord(txt[i - m])] -= 1
    # The loop above never checks the final window, so check it explicitly.
    if compare(pat_count, cur_count, pat):
        print(n - m)
        matches.append(n - m)
    return matches


def compare(patCount, curCount, pat):
    """Return True when both count arrays agree on every character of pat."""
    for ch in pat:
        if patCount[ord(ch)] != curCount[ord(ch)]:
            return False
    return True
# Demo run: permutations of "ABCD" start at indices 0, 5 and 6 in txt.
pat = "ABCD"
txt = "BACDGABCDA"
anagram_search(pat, txt)
| [
"konglk@aliyun.com"
] | konglk@aliyun.com |
1af0bee0929738dc142282f5829ece2b158125a4 | ec2b6cd4e9c183bc5e99ad917110d91985dfc2e8 | /touchdown/aws/vpc/customer_gateway.py | 57ab247b587073333858ec7d25c5ddf89ac39980 | [
"Apache-2.0"
] | permissive | triplekill/touchdown | 128ac7a9260709dae5ccbce6db344ab109cc75da | 8b70d4ac448bebd1cd088fa415be9cf6c74108cc | refs/heads/master | 2020-12-25T03:20:34.717218 | 2016-05-29T18:54:03 | 2016-05-29T18:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument
from touchdown.core.plan import Plan
from touchdown.core.resource import Resource
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy, TagsMixin
from .vpc import VPC
class CustomerGateway(Resource):
    """Declarative model of an AWS VPC customer gateway (the on-premises
    side of a VPN connection)."""

    resource_name = "customer_gateway"

    # "Name" is applied as an EC2 tag (group="tags") rather than a plain
    # API field.
    name = argument.String(field="Name", group="tags")
    # AWS only supports the "ipsec.1" gateway type.
    type = argument.String(default="ipsec.1", choices=["ipsec.1"], field="Type")
    public_ip = argument.IPAddress(field="PublicIp")
    # BGP autonomous-system number; 65000 is in the private-use range.
    bgp_asn = argument.Integer(default=65000, field="BgpAsn")
    tags = argument.Dict()
    vpc = argument.Resource(VPC)
class Describe(SimpleDescribe, Plan):
    """Plan that looks up an existing customer gateway through the EC2 API."""

    resource = CustomerGateway
    service_name = 'ec2'
    describe_action = "describe_customer_gateways"
    describe_envelope = "CustomerGateways"
    key = "CustomerGatewayId"

    def get_describe_filters(self):
        """Return the describe filters, or None while the VPC does not exist.

        NOTE(review): the gateway is located purely by its Name tag; the
        VPC plan is only consulted as an existence gate -- confirm that is
        intended rather than filtering by VPC id.
        """
        vpc = self.runner.get_plan(self.resource.vpc)
        if not vpc.resource_id:
            # VPC not created yet, so the gateway cannot exist either.
            return None

        return {
            "Filters": [
                {'Name': 'tag:Name', 'Values': [self.resource.name]},
            ],
        }
class Apply(TagsMixin, SimpleApply, Describe):
    """Plan that creates the customer gateway and waits until it is available."""

    create_action = "create_customer_gateway"
    waiter = "customer_gateway_available"
class Destroy(SimpleDestroy, Describe):
    """Plan that deletes the customer gateway."""

    destroy_action = "delete_customer_gateway"
| [
"john.carr@unrouted.co.uk"
] | john.carr@unrouted.co.uk |
b3e47bb92563fa3756c12e43de9cb2e777ddcdd2 | 4c9c028936379c510cebfe4830f460817d9bc3c8 | /account/urls.py | 48532b9b4e93777e52ea3e0cdcab3456e5d9b824 | [] | no_license | preciousidam/management-system | cd47d7c564fe0ff0ae459c702c63a3cb16eee8ab | c984012e2cbc7554b20b00fabafd24f3f5752ba8 | refs/heads/main | 2023-04-02T08:44:24.416866 | 2021-03-11T20:09:11 | 2021-03-11T20:09:11 | 341,899,263 | 0 | 0 | null | 2021-04-12T14:35:07 | 2021-02-24T12:50:41 | Python | UTF-8 | Python | false | false | 733 | py | from django.urls import path, re_path
from django.conf.urls import url, include
from rest_framework import routers
from .views import (CorttsAccountViewSet, CompanyViewSet,
OtherAccountViewSet, TransactionViewSet,
ExpenseAccountViewSet, TopUpViewSet)
# DRF router: each registration exposes the standard list/detail CRUD routes
# for its ViewSet under the given URL prefix.
router = routers.DefaultRouter()
router.register(r'accounts/cortts', CorttsAccountViewSet)
router.register(r'accounts/others', OtherAccountViewSet)
router.register(r'accounts/expenses', ExpenseAccountViewSet)
router.register(r'accounts/transactions', TransactionViewSet)
router.register(r'accounts/topup', TopUpViewSet)
router.register(r'companies', CompanyViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
] | [
"preciousidam@gmail.com"
] | preciousidam@gmail.com |
4dc3acd2ad170769aa171cdcd7190d67995c3df2 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayMarketingCashvoucherTemplateCreateResponse.py | 5a0f8dd9303d72dccf432f218972e12ade589662 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,382 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMarketingCashvoucherTemplateCreateResponse(AlipayResponse):
    """Response wrapper for alipay.marketing.cashvoucher.template.create.

    Exposes the ``confirm_uri``, ``fund_order_no`` and ``template_id``
    fields parsed from the gateway response payload.
    """

    def __init__(self):
        super(AlipayMarketingCashvoucherTemplateCreateResponse, self).__init__()
        self._confirm_uri = None
        self._fund_order_no = None
        self._template_id = None

    @property
    def confirm_uri(self):
        return self._confirm_uri

    @confirm_uri.setter
    def confirm_uri(self, value):
        self._confirm_uri = value

    @property
    def fund_order_no(self):
        return self._fund_order_no

    @fund_order_no.setter
    def fund_order_no(self, value):
        self._fund_order_no = value

    @property
    def template_id(self):
        return self._template_id

    @template_id.setter
    def template_id(self, value):
        self._template_id = value

    def parse_response_content(self, response_content):
        """Populate the fields from the decoded gateway response dict.

        Fields absent from the response are simply left as None.
        """
        response = super(AlipayMarketingCashvoucherTemplateCreateResponse, self).parse_response_content(response_content)
        if 'confirm_uri' in response:
            self.confirm_uri = response['confirm_uri']
        if 'fund_order_no' in response:
            self.fund_order_no = response['fund_order_no']
        if 'template_id' in response:
            self.template_id = response['template_id']
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
a6bff4b60a92bd23a58b23a4e36b4942b22ec63a | 4adbebc69f2d2552234664f4cf6bf4b6a4a90aa2 | /examples/eight_schools/eight_schools.py | 711a6bd33f5635856ef2ff33142b772018126426 | [
"Apache-2.0"
] | permissive | coryshain/edward | 291c50123182a19273c1bf1723e894a54a9014ff | 494a85e6354504d8c71ec6a7b70021a20470fec8 | refs/heads/master | 2022-11-10T01:16:51.875938 | 2020-06-18T14:15:32 | 2020-06-18T14:15:32 | 273,252,033 | 0 | 0 | NOASSERTION | 2020-06-18T13:54:22 | 2020-06-18T13:54:21 | null | UTF-8 | Python | false | false | 2,802 | py | """Implement the stan 8 schools example using the recommended non-centred
parameterization.
The Stan example is slightly modified to avoid improper priors and
avoid half-Cauchy priors. Inference is with Edward using both HMC
and KLQP.
This model has a hierachy and an inferred variance - yet the example is
very simple - only the Normal distribution is used.
#### References
https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started
http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import tensorflow as tf
import numpy as np
from edward.models import Normal, Empirical
def main(_):
  """Fit the non-centred eight-schools model with ed.KLqp, then ed.HMC."""
  # Observed treatment effects and their standard errors for the 8 schools.
  J = 8
  data_y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
  data_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18])

  # Model definition (non-centred parameterization): theta_prime is a
  # standard normal and the school effects are mu + exp(logtau)*theta_prime,
  # which avoids the funnel geometry of the centred model.
  mu = Normal(0., 10.)
  logtau = Normal(5., 1.)
  theta_prime = Normal(tf.zeros(J), tf.ones(J))
  sigma = tf.placeholder(tf.float32, J)
  y = Normal(mu + tf.exp(logtau) * theta_prime, sigma * tf.ones([J]))

  data = {y: data_y, sigma: data_sigma}

  # ed.KLqp inference with mean-field normal variational families;
  # softplus keeps each scale parameter positive.
  with tf.variable_scope('q_logtau'):
    q_logtau = Normal(tf.get_variable('loc', []),
                      tf.nn.softplus(tf.get_variable('scale', [])))

  with tf.variable_scope('q_mu'):
    q_mu = Normal(tf.get_variable('loc', []),
                  tf.nn.softplus(tf.get_variable('scale', [])))

  with tf.variable_scope('q_theta_prime'):
    q_theta_prime = Normal(tf.get_variable('loc', [J]),
                           tf.nn.softplus(tf.get_variable('scale', [J])))

  inference = ed.KLqp({logtau: q_logtau, mu: q_mu,
                       theta_prime: q_theta_prime}, data=data)
  inference.run(n_samples=15, n_iter=60000)
  print("==== ed.KLqp inference ====")
  print("E[mu] = %f" % (q_mu.mean().eval()))
  print("E[logtau] = %f" % (q_logtau.mean().eval()))
  print("E[theta_prime]=")
  print((q_theta_prime.mean().eval()))
  print("==== end ed.KLqp inference ====")
  print("")
  print("")

  # HMC inference: Empirical approximations holding S samples; the first
  # half of each chain is discarded as burn-in below.
  S = 400000
  burn = S // 2
  hq_logtau = Empirical(tf.get_variable('hq_logtau', [S]))
  hq_mu = Empirical(tf.get_variable('hq_mu', [S]))
  hq_theta_prime = Empirical(tf.get_variable('hq_thetaprime', [S, J]))

  inference = ed.HMC({logtau: hq_logtau, mu: hq_mu,
                      theta_prime: hq_theta_prime}, data=data)
  inference.run()

  print("==== ed.HMC inference ====")
  print("E[mu] = %f" % (hq_mu.params.eval()[burn:].mean()))
  print("E[logtau] = %f" % (hq_logtau.params.eval()[burn:].mean()))
  print("E[theta_prime]=")
  print(hq_theta_prime.params.eval()[burn:, ].mean(0))
  print("==== end ed.HMC inference ====")
  print("")
  print("")
if __name__ == "__main__":
  # tf.app.run parses TF flags and then invokes main(_).
  tf.app.run()
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
bea8b455adb55b38f6aaae2a0a97e58b2d9eccbc | 5ea136ca2e8066b77b39afdf15e3d0e6bc74761f | /scripts/substitute-prototypes.py | 4e56598f45506ae50cc08157da2a187c6741fbe6 | [] | no_license | reneang17/ttbar | 4023421846a70c22c13a2962520f7723ad35636b | 75f4fff1b5d79af097ea04aab437e2963215a232 | refs/heads/master | 2020-06-12T15:07:11.918815 | 2019-06-28T22:24:07 | 2019-06-28T22:24:07 | 194,339,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | #!/usr/bin/env python3
#
# todo:
#
import argparse
import os
import re
import string
import subprocess
# Command-line interface: the positional argument is the IdSolver/reduction
# output file; --tmp keeps the intermediate FORM program for debugging.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawTextHelpFormatter,\
    description = \
    ''' Substitute prototypes in IdSolver output by integrals.'''
)
parser.add_argument("file",\
    help = ("out file from reduction"))
parser.add_argument("--tmp", action = "store_true", \
    help = ("keep temporary files"))
args = parser.parse_args()
#-------------------------------------------------------------------------------
def prepare_form_file_content(input_list):
    """Build the FORM program text that defines one expression per prototype.

    Each entry of *input_list* becomes a ``l integral<i> = <proto>;`` line
    between the standard header/footer includes; ``args.file`` (the
    reduction output) is included after the declarations.
    """
    lines = ["#-", "#include decls", "#include {0}\n".format(args.file)]
    for idx, prototype in enumerate(input_list):
        lines.append("l integral{0} = {1};".format(
            idx, prototype.strip(string.whitespace)))
    lines.append("")
    lines.append("#include finalsubstitutions\n")
    lines.append("print;")
    lines.append(".end")
    return "\n".join(lines)
#-------------------------------------------------------------------------------
def determine_integrals(outfile):
    """Return the PR prototypes found in *outfile*.

    Scans the reduction output for ``fill PR<n>(...) =`` statements and
    returns the matched ``PR<n>(...)`` strings in file order.

    Bug fix: the original ignored its *outfile* parameter and re-read the
    global ``args.file`` instead; the sole caller passes ``args.file``, so
    honouring the parameter is behavior-compatible and makes the function
    reusable and testable.
    """
    with open(outfile) as fh:
        content = fh.read()
    # Raw string so the regex escapes are not interpreted by Python.
    prototypes_re = re.compile(r'fill\s+(PR\d+\([^\)]+\))\s+=')
    return prototypes_re.findall(content)
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__ == '__main__':
  #-----------------------------------------------------------------------------
  # Extract the PR prototypes, write a FORM program that substitutes them,
  # run FORM on it, then remove the temporary program unless --tmp is set.
  prototypes = determine_integrals(args.file)
  form_file_content = ""
  form_file_content = prepare_form_file_content(prototypes)
  form_fname = ".substitute.frm"
  with open(form_fname,"w") as fh:
    fh.write(form_file_content)
  command = "form {0}".format(form_fname)
  try:
    subprocess.check_call(command, shell=True)
    #output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    #print(output.decode("utf-8"))
  except (subprocess.CalledProcessError) as err:
    print("Error in {0}:\n{1}".format(os.path.basename(__file__), err))
  if not args.tmp:
    os.remove(form_fname)
| [
"reneang17@gmail.com"
] | reneang17@gmail.com |
2bdf6125fc161c83cf1d0a6eed0f207c318f8d40 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/mBART_ID1550_for_PyTorch/dataset/data_loader_iter.py | 4b1183c752968be0a7ee951799016572ffe96c99 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-or-later",
"GPL-3.0-only"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,986 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
"""
pytorch-dl
Created by raj at 09:11
Date: February 20, 2020
"""
from torch.utils.data.dataset import IterableDataset
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
class MyIterableDataset(IterableDataset):
def __init__(self, filename):
# Store the filename in object's memory
self.filename = filename
# And that's it, we no longer need to store the contents in the memory
def preprocess(self, text):
# Do something with text here
text_pp = text.lower().strip()
return text_pp
def line_mapper(self, line):
# Splits the line into text and label and applies preprocessing to the text
text, label = line.split(',')
text = self.preprocess(text)
return text, label
def __iter__(self):
# Create an iterator
file_itr = open(self.filename)
# Map each element using the line_mapper
mapped_itr = map(self.line_mapper, file_itr)
return mapped_itr
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
f9fd97e9d2c666e13a10ed4f2a3f3efa6c94dcc1 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/55f014a207123e37696fa342fd06feff8f1d4b28-<_load_label>-bug.py | e2655af953c09968c6f208353b936e225b93bfe2 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | def _load_label(self, idx):
'Parse xml file and return labels.'
img_id = self._items[idx]
anno_path = self._anno_path.format(*img_id)
root = ET.parse(anno_path).getroot()
size = root.find('size')
width = float(size.find('width').text)
height = float(size.find('height').text)
if (idx not in self._im_shapes):
self._im_shapes[idx] = (width, height)
label = []
for obj in root.iter('object'):
difficult = int(obj.find('difficult').text)
cls_name = obj.find('name').text.strip().lower()
if (cls_name not in self.classes):
continue
cls_id = self.index_map[cls_name]
xml_box = obj.find('bndbox')
xmin = (float(xml_box.find('xmin').text) - 1)
ymin = (float(xml_box.find('ymin').text) - 1)
xmax = (float(xml_box.find('xmax').text) - 1)
ymax = (float(xml_box.find('ymax').text) - 1)
try:
self._validate_label(xmin, ymin, xmax, ymax, width, height)
except AssertionError as e:
raise RuntimeError('Invalid label at {}, {}'.format(anno_path, e))
label.append([xmin, ymin, xmax, ymax, cls_id, difficult])
return np.array(label) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
9a90e00ca7c3cc3a44b1e2909de8f45cefc60fcf | f8b9e5de8823ff810ec445b6fa6d0e34f7b6319f | /Django/django_project/apps/Surveys_app/views.py | 0c4499e1aab25caa03e11a9324303ae1038795fe | [] | no_license | amalfushi/Python | 6c042443a8aeae15fc96a41a692abdbea05db863 | 067c2cef722457e884833f77baf9f44f45a4a165 | refs/heads/master | 2021-01-24T04:08:21.278071 | 2018-02-26T06:25:59 | 2018-02-26T06:25:59 | 122,923,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse
# Create your views here.
def main(request):
    """List-view stub: will eventually display all created surveys."""
    return HttpResponse('Placeholder to display all the surveys created')
def new(request):
    """Create-view stub: will eventually let users add a new survey."""
    return HttpResponse('Placeholder for users to add a new survey')
"dustin.p.schroeder@gmail.com"
] | dustin.p.schroeder@gmail.com |
b13014013bfe7f16e2c291f768ee50207dacf92d | aec9a1f3d1d36f19724e745ca4d09a20f67208dc | /talent/migrations/0016_auto_20210210_0904.py | 9fcc9bb77bf5ed3a188a98dbe181747f8acaf2b7 | [] | no_license | endlessor/open-united-backend | b1b1c3411d0d48bc79b35895c70f24d773ac7344 | 86f6905cce14b834b6bf059fd33157249978bd14 | refs/heads/main | 2023-04-29T13:35:28.529360 | 2021-05-17T14:16:39 | 2021-05-17T14:16:39 | 368,211,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # Generated by Django 3.1 on 2021-02-10 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters Person.headline to CharField(max_length=255)."""

    dependencies = [
        ('talent', '0015_person_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='person',
            name='headline',
            field=models.CharField(max_length=255),
        ),
    ]
| [
"robcoder@hotmail.com"
] | robcoder@hotmail.com |
47a3f8525c7b4f21d5f964bd6f5404fafc9d03a4 | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/VivekKumar_DCC/python_2/Day2_1.py | 73d19dd534ff3c2cd430a11bb817b05e35bd6e66 | [] | no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | randomList=int(input('enter the count='))
vals=1;
while(vals<= randomList):
try:
if(randomList%2!= 0):
print("The odd= ", vals)
vals=vals+1
except:
print("The Even= ", vals)
| [
"jsatapathy007@gmail.com"
] | jsatapathy007@gmail.com |
baff5fe97381c6dd6353e82b1b8d9a68aa02bc51 | 33c0d36ba88af9c3b35acd000a8a83fa5c24ed8a | /Problems/Isomorphic Strings.py | 7434a2397d00e41aa76436b961747b27f904d915 | [] | no_license | ElliottBarbeau/Leetcode | e58dab31937a36e7557990846898cd2b2586a27c | 43c3698c829f5a613ed3e9516a146e7576d81146 | refs/heads/master | 2021-11-28T02:06:39.848174 | 2021-08-30T23:37:13 | 2021-08-30T23:37:13 | 221,090,906 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
d = {}
if len(s) != len(t):
return False
for i in range(len(s)):
if s[i] not in d and t[i] not in d.values():
d[s[i]] = t[i]
elif s[i] in d and t[i] == d[s[i]]:
continue
else:
return False
return True
print(Solution().isIsomorphic('ab', 'aa')) | [
"elliottbarbeau@gmail.com"
] | elliottbarbeau@gmail.com |
b435561acbf322a0401ebbf926b601484d79c440 | 215eadf839ecc40a37ae22063bf7f9c5c9450699 | /hr_employee.py | 51c7e4843a1f91ca38c6ca6712a1b5c9cd3e7f07 | [] | no_license | davidsetiyadi/hr_webcam | c12e751e91c4757938cae54697df084c99ed9b4a | 4740d9f104c8ebeba7e6ef5e196068f5c5fd6111 | refs/heads/master | 2021-01-19T12:40:22.010104 | 2017-09-25T12:34:38 | 2017-09-25T12:34:38 | 100,796,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | from openerp import models
import numpy as np
import cv2
import dlib
import face_recognition
import urllib
import base64
from common import clock, draw_str
class hr_employee(models.Model):
_inherit = 'hr.employee'
    def action_take_picture(self, cr, uid, ids, context=None):
        """Open the hr_webcam photo-capture client action for the first
        selected employee (old OpenERP API: cr/uid/ids signature)."""
        if context is None:
            context = {}
        # Resolve the XML id hr_webcam.action_take_photo to its database id.
        res_model, res_id = self.pool.get(
            'ir.model.data').get_object_reference(cr, uid,
                                                  'hr_webcam',
                                                  'action_take_photo')
        dict_act_window = self.pool.get(
            'ir.actions.client').read(cr, uid, res_id, [])
        if not dict_act_window.get('params', False):
            dict_act_window.update({'params': {}})
        # Pass the employee so the client action knows whose photo to store;
        # False when no record is selected.
        dict_act_window['params'].update(
            {'employee_id': len(ids) and ids[0] or False})
        return dict_act_window
    def detect(img, cascade):
        """Run a Haar-cascade multi-scale detection and return the hits as
        [x1, y1, x2, y2] rows (empty list when nothing is found).

        NOTE(review): declared inside the class but without ``self`` (OpenCV
        sample code pasted verbatim) -- confirm how it is invoked.
        """
        rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
        if len(rects) == 0:
            return []
        # detectMultiScale yields (x, y, w, h); convert to corner coordinates.
        rects[:,2:] += rects[:,:2]
        return rects
    def draw_rects(img, rects, color):
        """Draw each [x1, y1, x2, y2] rectangle onto img in place.

        NOTE(review): also declared without ``self``; see detect().
        """
        for x1, y1, x2, y2 in rects:
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
def action_take_opencv(self, cr, uid, ids, context=None):
# print 'David_____________TESTET'
employee_obj = self.pool.get('hr.employee')
employee_ids = employee_obj.search(cr,uid,[],limit=100)
# print employee_ids,'employee_idsss'
dictionary = {}
face_encoding = {}
for employee in employee_ids:
employees = employee_obj.browse(cr,uid,employee)
# dictionary[employees.name] = "http://127.0.6.1:7777/web/binary/image?model=hr.employee&field=image_medium&id="+str(employee)
# urllib.urlretrieve("/web/binary/image?model=hr.employee&field=image_medium&id="+str(employee), str(employee)+"_uid.png")
imgstring = employees.image
# print imgstring
if imgstring:
convert = base64.b64decode(imgstring)
file = ("lebahganteng%s.png")% employee
# print file,'davidddd'
t = open(file, "w+")
t.write(convert)
t.close()
biden_image = face_recognition.load_image_file(file)
# print biden_image,'david'
# imgdata = base64.b64decode(imgstring)
# filename = 'some_image.png' # I assume you have a way of picking unique filenames
# with open(filename, 'wb') as f:
# f.write(imgdata)
# dictionary[employees.name] = face_recognition.load_image_file("http://127.0.6.1:7777/web/binary/image?model=hr.employee&field=image_medium&id="+str(employee))
# print dictionary[employee.name],'dictionaryyyy'
# face_encoding [employees.name] = face_recognition.face_encodings(dictionary[employees.name][0])
# c = {}
# for b in a:
# c[b]=b+1
# data = []
# for a in dictionary:
# data.append(dictionary[a])
# biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]
# print ("david123")
# known_faces = [
# biden_face_encoding,
# obama_face_encoding
# ]
# # results is an array of True/False telling if the unknown face matched anyone in the known_faces array
# results = face_recognition.compare_faces(known_faces, unknown_face_encoding)
print dictionary
return True | [
"davidsetiadi11@gmail.com"
] | davidsetiadi11@gmail.com |
3e2abc01b00cc24995d645655e3a0d8aa6ace57c | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories/147881/kaggle_forest_cover_type-master/my_model.py | 6a7942ec6a23229e7c5286d8e10e805c980a5499 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,475 | py | #!/usr/bin/python
import os
import matplotlib
matplotlib.use('Agg')
import pylab as pl
import numpy as np
import pandas as pd
import gzip
import cPickle as pickle
from sklearn import cross_validation
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA, FastICA, KernelPCA, ProbabilisticPCA
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score, log_loss
def gaussian(x, mu, sig):
    """Normalised Gaussian density with mean mu and std-dev sig, at x."""
    exponent = (x - mu) ** 2 / (2 * sig ** 2)
    normalisation = sig * np.sqrt(2 * np.pi)
    return np.exp(-exponent) / normalisation
def fit_func(x, *p):
    """Scaled Gaussian model for curve fitting; p = (mu, sig, amplitude)."""
    amplitude = p[2]
    return amplitude * gaussian(x, p[0], p[1])
def create_html_page_of_plots(list_of_plots):
    """Move every PNG in the current directory into ./html and write
    html/index.html embedding the given plot filenames in order.

    Portability fix: the original shelled out with
    ``os.system('mv *.png html')``, which requires a POSIX shell and the
    ``mv`` binary; glob + os.replace does the same with the stdlib only.
    """
    import glob
    if not os.path.exists('html'):
        os.makedirs('html')
    for png in glob.glob('*.png'):
        # os.replace overwrites an existing destination, like `mv` does.
        os.replace(png, os.path.join('html', png))
    print(list_of_plots)
    with open('html/index.html', 'w') as htmlfile:
        htmlfile.write('<!DOCTYPE html><html><body><div>')
        for plot in list_of_plots:
            htmlfile.write('<p><img src="%s"></p>' % plot)
        htmlfile.write('</div></html></html>')
def get_plots(in_df):
    """For each feature column, overlay per-cover-type histograms, save one
    PNG per column, then build the html index page (Python 2 code)."""
    list_of_plots = []
    print in_df.columns
    for c in in_df.columns:
        # Skip the row id and the label itself.
        if c in ('Id', 'Cover_Type'):
            continue
        pl.clf()
        nent = len(in_df[c])
        hmin, hmax = in_df[c].min(), in_df[c].max()
        # Shared binning so the seven class histograms are comparable;
        # bin count scales with the number of entries.
        xbins = np.linspace(hmin,hmax,nent//500)
        for n in range(1,8):
            covtype = in_df.Cover_Type == n
            a = in_df[covtype][c].values
            #b = in_df[covtype][c].hist(bins=xbins, histtype='step')
            pl.hist(a, bins=xbins, histtype='step')
            #if c == 'Elevation':
                #mu, sig = a.mean(), a.std()
                #x = np.linspace(hmin,hmax,1000)
                #y = (a.sum()/len(xbins)) * gaussian(x, mu, sig)
                #pl.plot(x, y, '--')
        pl.title(c)
        pl.savefig('%s.png' % c)
        list_of_plots.append('%s.png' % c)
    create_html_page_of_plots(list_of_plots)
def plot_failures(in_array, covertype):
    """Histogram each feature column of in_array split by cover type;
    intended for inspecting misclassified samples (Python 2 code)."""
    print in_array.shape
    list_of_plots = []
    for c in range(in_array.shape[1]):
        pl.clf()
        nent = in_array.shape[0]  # NOTE(review): nent is unused here.
        hmin, hmax = in_array[:,c].min(), in_array[:,c].max()
        xbins = np.linspace(hmin,hmax,20)
        for n in range(1,8):
            covtype = covertype == n
            a = in_array[covtype][:,c]
            pl.hist(a, bins=xbins, histtype='step')
        pl.title(c)
        pl.savefig('%s.png' % c)
        list_of_plots.append('%s.png' % c)
    create_html_page_of_plots(list_of_plots)
def transform_from_classes(inp):
    """One-hot encode class labels 1..7 into an (n, 7) int64 array."""
    n_rows = inp.shape[0]
    onehot = np.zeros((n_rows, 7), dtype=np.int64)
    # Labels are 1-based, columns are 0-based.
    onehot[np.arange(n_rows), inp - 1] = 1
    return onehot
def transform_to_class(yinp):
    """Convert per-class scores/indicators of shape (n, 7) back to labels 1..7.

    Bug fix: the original wrapped ``map`` in ``np.array``; under Python 3
    ``map`` returns an iterator, so that produced a useless 0-d object
    array.  The vectorised argmax keeps the Python 2 semantics (argmax of
    each row, plus one) and works on Python 3.
    """
    return np.argmax(yinp, axis=1) + 1
def load_data():
    """Read the Kaggle CSVs and return (xtrain, ytrain, xtest, test_ids).

    ytrain is the one-hot encoding of Cover_Type; the fourth value,
    named ``ytest`` throughout this script, is actually the test-set Id
    column later used to build the submission file.
    """
    train_df = pd.read_csv('train.csv')
    test_df = pd.read_csv('test.csv')
    ssub_df = pd.read_csv('sampleSubmission.csv')
    #get_plots(train_df)
    labels_to_drop = []
    xtrain = train_df.drop(labels=['Id','Cover_Type']+labels_to_drop, axis=1).values
    ytrain = transform_from_classes(train_df['Cover_Type'].values)
    #ytrain = train_df['Cover_Type'].values
    xtest = test_df.drop(labels=['Id']+labels_to_drop, axis=1).values
    ytest = ssub_df['Id'].values
    print xtrain.shape, ytrain.shape, xtest.shape, ytest.shape
    return xtrain, ytrain, xtest, ytest
def scorer(estimator, X, y):
    """Accuracy scoring callable for GridSearchCV (higher is better)."""
    ypred = estimator.predict(X)
    return accuracy_score(ypred, y)
def train_model_parallel(model, xtrain, ytrain, index):
    """Fit one one-vs-rest model on label column ``index`` of the one-hot
    ytrain and pickle it to model_<index>.pkl.gz (Python 2 code).

    NOTE(review): ``reduce`` is the Python 2 builtin; under Python 3 this
    would need functools.reduce.
    """
    # Random seed assembled from 4 bytes of os.urandom; currently unused
    # while the train/test split below stays commented out.
    randint = reduce(lambda x,y: x|y, [ord(x)<<(n*8) for (n,x) in enumerate(os.urandom(4))])
    #xTrain, xTest, yTrain, yTest = \
        #cross_validation.train_test_split(xtrain, ytrain[:,index], test_size=0.4,
            #random_state=randint)
    xTrain, yTrain = xtrain, ytrain[:,index]
    #n_est = [10, 100, 200]
    #m_dep = [5, 10, 40]
    #model = GridSearchCV(estimator=model,
        #param_grid=dict(n_estimators=n_est, max_depth=m_dep),
        #scoring=scorer,
        #n_jobs=-1, verbose=1)
    model.fit(xTrain, yTrain)
    print model
    #ytest_pred = model.predict(xTest)
    #ytest_prob = model.predict_proba(xTest)
    #print 'accuracy', accuracy_score(ytest_pred,yTest)
    #print 'logloss', log_loss(yTest, ytest_prob)
    with gzip.open('model_%d.pkl.gz' % index, 'wb') as mfile:
        pickle.dump(model, mfile, protocol=2)
    return
def test_model_parallel(xtrain, ytrain):
    """Load the 7 pickled one-vs-rest models and report hold-out accuracy
    on a fresh random train/test split (Python 2 code)."""
    randint = reduce(lambda x,y: x|y, [ord(x)<<(n*8) for (n,x) in enumerate(os.urandom(4))])
    xTrain, xTest, yTrain, yTest = \
        cross_validation.train_test_split(xtrain, ytrain, test_size=0.4,
                                          random_state=randint)
    # Per-class probabilities: (samples, class, [P(negative), P(positive)]).
    ytest_prob = np.zeros((yTest.shape[0], 7, 2))
    for n in range(7):
        with gzip.open('model_%d.pkl.gz' % n, 'rb') as mfile:
            model = pickle.load(mfile)
        #print 'grid scores', model.grid_scores_
        #print 'best score', model.best_score_
        #print 'best params', model.best_params_
        ytest_prob[:,n,:] = model.predict_proba(xTest)
    #print accuracy_score
    # Pick the class with the highest positive-class probability.
    ytest = transform_to_class(yTest).astype(np.int64)
    ytest_pred = transform_to_class(ytest_prob[:,:,1]).astype(np.int64)
    print ytest.shape, ytest_pred.shape
    print accuracy_score(ytest, ytest_pred)
def prepare_submission_parallel(xtrain, ytrain, xtest, ytest):
    """Predict the test set with the 7 pickled models and write
    submission.csv (columns Id, Cover_Type); ``ytest`` holds the test Ids."""
    print ytest.shape
    ytest_prob = np.zeros((ytest.shape[0], 7, 2))
    for n in range(7):
        with gzip.open('model_%d.pkl.gz' % n, 'rb') as mfile:
            model = pickle.load(mfile)
        ytest_prob[:,n,:] = model.predict_proba(xtest)
    # Class with the highest positive-class probability wins.
    ytest2 = transform_to_class(ytest_prob[:,:,1]).astype(np.int64)
    df = pd.DataFrame({'Id': ytest, 'Cover_Type': ytest2}, columns=('Id', 'Cover_Type'))
    df.to_csv('submission.csv', index=False)
    return
#def prepare_submission(model, xtrain, ytrain, xtest, ytest):
#model.fit(xtrain, ytrain)
#ytest2 = transform_to_class(model.predict(xtest).astype(np.int64))
##dateobj = map(datetime.datetime.fromtimestamp, ytest)
#df = pd.DataFrame({'Id': ytest, 'Cover_Type': ytest2}, columns=('Id', 'Cover_Type'))
#df.to_csv('submission.csv', index=False)
#return
if __name__ == '__main__':
xtrain, ytrain, xtest, ytest = load_data()
#model = RandomForestRegressor(n_jobs=-1)
model = RandomForestClassifier(n_estimators=400, n_jobs=-1)
#model = DecisionTreeClassifier()
#model = GradientBoostingClassifier(loss='deviance', verbose=1)
index = -1
for arg in os.sys.argv:
try:
index = int(arg)
break
except ValueError:
continue
if index == -1:
for idx in range(7):
train_model_parallel(model, xtrain, ytrain, idx)
prepare_submission_parallel(xtrain, ytrain, xtest, ytest)
elif index >= 0 and index < 7:
train_model_parallel(model, xtrain, ytrain, index)
elif index == 7:
test_model_parallel(xtrain, ytrain)
elif index == 8:
prepare_submission_parallel(xtrain, ytrain, xtest, ytest)
| [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
942b9171041a8572b2cf2d3d1042c271979e83e0 | beed259c9aaf824c5307d93ffa736255f2d98831 | /month05/Spider/Wholesale02/run.py | de99978bdaa95073168ae291ab53dece83b892ce | [
"Apache-2.0"
] | permissive | chaofan-zheng/python_learning_code | 21345f97ebf74c3cad0ef488a93ec8a7fd771a63 | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | refs/heads/main | 2023-05-27T16:17:18.130492 | 2021-06-06T14:23:31 | 2021-06-06T14:23:31 | 338,234,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | from scrapy import cmdline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import seaborn as sns
import os
cmdline.execute('scrapy crawl wholesale -o wholesale.csv'.split())
command = f'jupyter nbconvert {os.getcwd()}/visualization.ipynb'
print(command)
os.system(command)
warnings.filterwarnings("ignore")
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
data = pd.read_csv('wholesale.csv')
data = data.drop(columns='href')
data_clean = data[data.integer.notnull()][data.rePurchaseRate.notnull()]
for i in data_clean['integer']:
try:
i = int(i)
except:
# print(data_clean.loc[i,'integer'])
data_clean = data_clean.drop(data_clean[data_clean['integer'].str.contains(i)].index)
for i in data_clean['rePurchaseRate']:
try:
i = float(i)
except:
# print(data_clean.loc[i,'integer'])
data_clean = data_clean.drop(data_clean[data_clean['rePurchaseRate'].str.contains(i)].index)
data_clean.integer = data_clean.integer.astype('int')
data_clean.rePurchaseRate = data_clean.rePurchaseRate.astype('float')
print(data_clean.head())
print(data_clean.describe())
# print(data_clean['rePurchaseRate'])
fig=plt.figure(figsize = (16,12))
ax1=fig.add_subplot(221)
plt.title('复购率频次分布图',fontsize=14)
sns.distplot(data_clean['rePurchaseRate'])
ax1=fig.add_subplot(222)
plt.title('销售量频次分布图',fontsize=14)
sns.distplot(data_clean['integer'])
ax1=fig.add_subplot(223)
plt.title('复购率箱体图',fontsize=14)
sns.boxplot(x='rePurchaseRate',data=data_clean)
ax1=fig.add_subplot(224)
plt.title('销售量箱体图',fontsize=14)
sns.boxplot(x='integer',data=data_clean)
plt.show()
| [
"417355570@qq.com"
] | 417355570@qq.com |
b15953c884974afcdc6bdde6b224dba82df25716 | d8cbe9ce0469f72b8929af01538b6ceddff10a38 | /homeassistant/components/rainbird/config_flow.py | 057fc6fe39662a459581e0e3f0bdd86855071e43 | [
"Apache-2.0"
] | permissive | piitaya/home-assistant | 9c1ba162dac9604e4d43e035e74bad7bba327f0b | 48893738192431f96966998c4ff7a3723a2f8f4a | refs/heads/dev | 2023-03-07T16:13:32.117970 | 2023-01-10T17:47:48 | 2023-01-10T17:47:48 | 172,578,293 | 3 | 1 | Apache-2.0 | 2023-02-22T06:15:56 | 2019-02-25T20:19:40 | Python | UTF-8 | Python | false | false | 6,344 | py | """Config flow for Rain Bird."""
from __future__ import annotations
import asyncio
import logging
from typing import Any
import async_timeout
from pyrainbird.async_client import (
AsyncRainbirdClient,
AsyncRainbirdController,
RainbirdApiException,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_FRIENDLY_NAME, CONF_HOST, CONF_PASSWORD
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv, selector
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
ATTR_DURATION,
CONF_IMPORTED_NAMES,
CONF_SERIAL_NUMBER,
CONF_ZONES,
DEFAULT_TRIGGER_TIME_MINUTES,
DOMAIN,
TIMEOUT_SECONDS,
)
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): selector.TextSelector(),
vol.Required(CONF_PASSWORD): selector.TextSelector(
selector.TextSelectorConfig(type=selector.TextSelectorType.PASSWORD)
),
}
)
class ConfigFlowError(Exception):
"""Error raised during a config flow."""
def __init__(self, message: str, error_code: str) -> None:
"""Initialize ConfigFlowError."""
super().__init__(message)
self.error_code = error_code
class RainbirdConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Rain Bird."""
@staticmethod
@callback
def async_get_options_flow(
config_entry: ConfigEntry,
) -> RainBirdOptionsFlowHandler:
"""Define the config flow to handle options."""
return RainBirdOptionsFlowHandler(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Configure the Rain Bird device."""
error_code: str | None = None
if user_input:
try:
serial_number = await self._test_connection(
user_input[CONF_HOST], user_input[CONF_PASSWORD]
)
except ConfigFlowError as err:
_LOGGER.error("Error during config flow: %s", err)
error_code = err.error_code
else:
return await self.async_finish(
serial_number,
data={
CONF_HOST: user_input[CONF_HOST],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_SERIAL_NUMBER: serial_number,
},
options={ATTR_DURATION: DEFAULT_TRIGGER_TIME_MINUTES},
)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": error_code} if error_code else None,
)
async def _test_connection(self, host: str, password: str) -> str:
"""Test the connection and return the device serial number.
Raises a ConfigFlowError on failure.
"""
controller = AsyncRainbirdController(
AsyncRainbirdClient(
async_get_clientsession(self.hass),
host,
password,
)
)
try:
async with async_timeout.timeout(TIMEOUT_SECONDS):
return await controller.get_serial_number()
except asyncio.TimeoutError as err:
raise ConfigFlowError(
f"Timeout connecting to Rain Bird controller: {str(err)}",
"timeout_connect",
) from err
except RainbirdApiException as err:
raise ConfigFlowError(
f"Error connecting to Rain Bird controller: {str(err)}",
"cannot_connect",
) from err
async def async_step_import(self, config: dict[str, Any]) -> FlowResult:
"""Import a config entry from configuration.yaml."""
self._async_abort_entries_match({CONF_HOST: config[CONF_HOST]})
try:
serial_number = await self._test_connection(
config[CONF_HOST], config[CONF_PASSWORD]
)
except ConfigFlowError as err:
_LOGGER.error("Error during config import: %s", err)
return self.async_abort(reason=err.error_code)
data = {
CONF_HOST: config[CONF_HOST],
CONF_PASSWORD: config[CONF_PASSWORD],
CONF_SERIAL_NUMBER: serial_number,
}
names: dict[str, str] = {}
for (zone, zone_config) in config.get(CONF_ZONES, {}).items():
if name := zone_config.get(CONF_FRIENDLY_NAME):
names[str(zone)] = name
if names:
data[CONF_IMPORTED_NAMES] = names
return await self.async_finish(
serial_number,
data=data,
options={
ATTR_DURATION: config.get(ATTR_DURATION, DEFAULT_TRIGGER_TIME_MINUTES),
},
)
async def async_finish(
self,
serial_number: str,
data: dict[str, Any],
options: dict[str, Any],
) -> FlowResult:
"""Create the config entry."""
await self.async_set_unique_id(serial_number)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=data[CONF_HOST],
data=data,
options=options,
)
class RainBirdOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a RainBird options flow."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize RainBirdOptionsFlowHandler."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
ATTR_DURATION,
default=self.config_entry.options[ATTR_DURATION],
): cv.positive_int,
}
),
)
| [
"noreply@github.com"
] | piitaya.noreply@github.com |
eafad22e3b9c5ddb8002f0f4d4281976958abffb | 1c2bb53d56a777bd2700c0438421ce686d1c8dc5 | /tests/past_api07_sources_excel.py | 5a230d6124045e6987f3acaa7f7d044bbeba2982 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | drewdolan/datatest | a1a771ff630acc7322387f4f810ff75fb22f5e5f | 1c168739f84328043c7f0be7cf25bb8e23cc259c | refs/heads/master | 2020-05-09T16:01:09.553762 | 2019-05-18T05:40:16 | 2019-05-18T05:40:16 | 181,254,930 | 0 | 0 | NOASSERTION | 2019-04-14T03:17:59 | 2019-04-14T03:17:57 | null | UTF-8 | Python | false | false | 1,884 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from . import _unittest as unittest
from .mixins import OtherTests
from .mixins import CountTests
try:
import xlrd
except ImportError:
xlrd = None
from datatest.__past__.api07_sources import ExcelSource
workbook_path = os.path.join(
os.path.dirname(__file__),
'sample_files',
'test_sources_excel.xlsx',
)
@unittest.skipIf(xlrd is None, 'xlrd not found')
class TestExcelSource(OtherTests, unittest.TestCase):
def setUp(self):
global workbook_path
self.datasource = ExcelSource(workbook_path) # <- Defaults to "Sheet 1"
@unittest.skipIf(xlrd is None, 'xlrd not found')
class TestExcelSourceCount(unittest.TestCase):
#class TestExcelSourceCount(CountTests, unittest.TestCase):
def setUp(self):
global workbook_path
self.datasource = ExcelSource(workbook_path, 'count_data')
def test_count(self):
count = self.datasource.count
self.assertEqual(9, count('label1'))
expected = {'a': 4, 'b': 5}
result = count('label1', ['label1'])
self.assertEqual(expected, result)
expected = {'a': 3, 'b': 3} # Counts only truthy values (not '' or None).
result = count('label2', ['label1'])
self.assertEqual(expected, result)
expected = {
('a', 'x'): 2,
('a', 'y'): 1,
('a', ''): 1,
('b', 'z'): 1,
('b', 'y'): 1,
('b', 'x'): 1,
#('b', None): 1, # <- None value has no equivalent in XLSX file.
#('b', ''): 1,
('b', ''): 2,
}
result = count('label1', ['label1', 'label2'])
self.assertEqual(expected, result)
expected = {'x': 2, 'y': 1, '': 1}
result = count('label1', 'label2', label1='a')
self.assertEqual(expected, result)
| [
"shawnbrown@users.noreply.github.com"
] | shawnbrown@users.noreply.github.com |
ec2739b5ba94034b1ee8cd65a284ccd4192cc77a | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=97/params.py | 57b7be835a6b989c5d600c2af47d54e7a07715c4 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.029857',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 97,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
f289b70cb8056d517c2f5158137b0098f45503d0 | b3c939e013ecfdd68b02344ad2936ae53dd1a725 | /regression_2d/projects/model_save/get_dataset.py | 9d8fe2a003e8371859d010fa4a49c101555fe9df | [] | no_license | TakakiNishio/chainer | 3cd9d2972d72c30d1d4fb979692de26539903556 | 55c2771a1a72dccd738e1350ab539f517083ba33 | refs/heads/master | 2020-12-24T11:07:36.788998 | 2017-07-02T19:43:45 | 2017-07-02T19:43:45 | 73,190,468 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | #python library
import numpy as np
import random
#define function
def real_function(x1,x2):
z = -3*np.exp(-(((x1-2)**2)/3)-(((x2-2)**2)/3)) - 4*np.exp(-(((x1+2)**2)/4)-(((x2 +2)**2)/4))
#z = np.exp(-0.25 * np.sqrt(x1**2 + x2**2)) * np.cos(2 * np.sqrt(x1**2 + x2**2))
return z
#generate dataset
def dataset_generator(n):
#define domains
max_x1 = 5
min_x1 = -5
max_x2 = 5
min_x2 = -5
#half noise range
noise_range = 0.5
x = []
y = []
for i in range(n):
x1 = random.uniform(min_x1,max_x1)
x2 = random.uniform(min_x2,max_x2)
x.append([x1,x2])
y.append(real_function(x1,x2))
#y.append(real_function(x1,x2) + random.uniform(-noise_range,noise_range)) #add noise
x = np.array(x, dtype = np.float32)
y = np.array(y, dtype = np.float32)
x = np.reshape(x,(len(x),2))
y = np.reshape(y,(len(y),1))
return x,y
| [
"p104314t@mail.kyutech.jp"
] | p104314t@mail.kyutech.jp |
5af53751459fff26bde07d31765f075b7ccff247 | cc31777830ccbc17347305c40db91afc012977ee | /concepts/functions/is_abecedarian.py | 8ec3c19c3b4a3de66cdded9be1222b4400bb9053 | [] | no_license | sourcery-ai-bot/library-python | e147b9e5c6baba502de9f7605c5fa1937dbd13f4 | 61472955f4b011caa989b8805be3ed7df19c7aa8 | refs/heads/master | 2022-11-06T20:19:59.056197 | 2020-06-30T20:56:45 | 2020-06-30T20:56:45 | 276,206,925 | 0 | 0 | null | 2020-06-30T20:56:31 | 2020-06-30T20:56:30 | null | UTF-8 | Python | false | false | 600 | py | """ The following function returns True if the word passed as input is an
abecedarian word. That is a word where the each letter in the word is a
subsequent letter in the alphabet. 'Ant' would be a simple example. """
def is_string_abecederian(test_word: str) -> bool:
max_letter = ''
letters_tested = 0
for letter in test_word.lower():
if letter < max_letter:
return False
else:
max_letter = letter
letters_tested += 1
if letters_tested == len(test_word):
return True
result = is_string_abecederian('Ant')
print(result)
| [
"wayne.a.lambert@gmail.com"
] | wayne.a.lambert@gmail.com |
976aea0ed87a3c086d068ae560fdb2ffcd591676 | a7f442bc306d1a8366a3e30db50af0c2c90e9091 | /blockchain-env/Lib/site-packages/Cryptodome/Util/Padding.pyi | da274b98cccf0661298b00aed0ad7c5a91a8f5d3 | [] | no_license | Patreva/Python-flask-react-blockchain | cbdce3e0f55d4ba68be6ecfba35620585894bbbc | 474a9795820d8a4b5a370d400d55b52580055a2e | refs/heads/main | 2023-03-29T01:18:53.985398 | 2021-04-06T08:01:24 | 2021-04-06T08:01:24 | 318,560,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | pyi | from typing import Optional
__all__ = [ 'pad', 'unpad' ]
def pad(data_to_pad: bytes, block_size: int, style: Optional[str]='pkcs7') -> bytes: ...
def unpad(padded_data: bytes, block_size: int, style: Optional[str]='pkcs7') -> bytes: ... | [
"patrickwahome74@gmail.com"
] | patrickwahome74@gmail.com |
624b2a5975b2e3b83dfd238525814a74fb83e8b8 | 07af444dafa5bde373b0730e92d67e455d4ff4df | /SFData/StackOverflow/s44111687_original.py | f6758354b177e5b42738830aaf582fd7d6de7e91 | [] | no_license | tensfa/tensfa | 9114595b58a2e989780af0c348afb89a2abb04b4 | 415dcfaec589b0b14c5b9864872c912f3851b383 | refs/heads/main | 2023-06-30T14:27:38.217089 | 2021-08-03T01:33:30 | 2021-08-03T01:33:30 | 368,465,614 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | training_data = np.vstack(training_data)
training_target = np.vstack(training_target)
test_data = np.vstack(test_data)
test_target = np.vstack(test_target)
learning_rate = 0.001
n_input = 2
n_steps = 1
n_hidden = 128
n_classes = 2
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases):
x = tf.unstack(x, n_steps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
for i in range(len(training_data)):
batch_x = training_data[i]
batch_y = training_target[i]
print(batch_x)
print(batch_y)
batch_x = tf.reshape(batch_x, [1, 2]).eval()
print(batch_x)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
print("Iter " + str(step) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
print("Optimization Finished!")
print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_target})) | [
"tensfa@yeah.net"
] | tensfa@yeah.net |
2b141c2d2dc86ce4917c900408959b04afe351d7 | 9b5bfaf574a2eea29e1ec363e7670edd84c456d8 | /mobile/pages/app.py | 2ce862ebe7a7f61338edc6cefede64d1d568d7c8 | [] | no_license | fanpl-sourse/mytestenv | d04b34fdca596ab5e25349e2d68aa8450984e715 | 7e31da486d6c4a4442c2c0ce97b347f5273cc2eb | refs/heads/master | 2023-01-30T18:32:40.904084 | 2020-12-15T06:36:56 | 2020-12-15T06:36:56 | 278,984,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | # -*- coding: utf-8 -*-
# @Time : 2020/7/26 16:12
# @Author : 饭盆里
# @File : app.py
# @Software: PyCharm
# @desc :
from appium import webdriver
from mobile.pages.basepage import BasePage
from mobile.pages.mainpage import MainPage
class App(BasePage):
"""
存放APP常用的方法:启动、重启、关闭、进入首页
"""
def start(self):
"""
启动
:return:
"""
if self.driver == None:
caps = {}
caps["platformName"] = "android"
caps["deviceName"] = "emulator-5554"
caps["appPackage"] = "com.tencent.wework"
caps["appActivity"] = ".launch.LaunchSplashActivity"
caps["noReset"] = "true"
caps['skipServerInstallation'] = 'true' # 跳过 uiautomator2 server的安装
caps['skipDeviceInitialization'] = 'true' # 跳过设备初始化
caps['settings[waitForIdleTimeout]'] = 0 # 等待Idle为0
# 与sever 建立连接,初始化一个driver,创建session
self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", caps)
else:
#无需参数,自动启动desireCapa里面定义的activity
self.driver.launch_app()
self.driver.implicitly_wait(5)
return self
def restart(self):
"""
重启
:return:
"""
self.driver.close()
self.driver.launch_app()
return self
def stop(self):
"""
关闭APP
:return:
"""
self.driver.close()
def goto_main(self):
"""
进入首页
:return: 首页
"""
return MainPage() | [
"fanpengli@fangdd.com"
] | fanpengli@fangdd.com |
9d61382de8235ccffe9e598c335ce26721982cf9 | 97792803c0069e6634ce7b57746b8893bad2ab35 | /inclass/dictionary.py | 0877fae6d37dda2afbbfa6d5fbf53855fe251864 | [] | no_license | byronwasti/SoftDes | 2e31560cfb61d1f4f80691af37b89cce0bca73e6 | 690d777062f156bf2f7710ab0b20df884595cf37 | refs/heads/master | 2020-01-22T14:24:11.679717 | 2015-04-21T19:32:05 | 2015-04-21T19:32:05 | 29,879,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | def histogram(s):
d = {}
for i in s:
if d.get(i,0) == 0:
d[i] = 1
else: d[i] += 1
return d
#print histogram('asdfasdfgasdg')
def has_dupl(l):
d = {}
for i in l:
if d.get(i,0) == 0:
d[i] = 1
else: return True
#print has_dupl([1,2,3,4,5,6,1])
def suffixer( w ):
n = len(w)
d = {}
suf = {}
pref = []
f = open('/usr/share/dict/words','r')
new = True
current = 'A'
d['A'] = []
for word in f:
word = word.strip('\n')
if current in word:
d[current] = d[current] + [word]
elif len(word) > n-1:
current = word
d[current] = []
return d[w]
print suffixer('test')
| [
"byron.wasti@gmail.com"
] | byron.wasti@gmail.com |
573587bbff19efe24ae3a9a6241ed93fe05351f5 | b1c423170f2d897ef88ab93e17830b6fff91b4e3 | /EasyPython/wax/tools/waxrf/imgcoder.py | 6949ca6c4b5594016fa4b9d2034fba194a7696e8 | [] | no_license | walker8088/easyworld | 55031dd0862b7bc0ffc8c5093875a93e935933e6 | e6aaf18430aee1457f5d8228fb300cf4323bcb7f | refs/heads/master | 2021-01-02T09:34:59.604820 | 2011-01-20T03:32:16 | 2011-01-20T03:32:16 | 33,644,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #-------------------------------------------------------
# imgcoder.py
# Purpose: To encode/decode images for XRC
# Author: Jason Gedge
#
# TODO:
# - Consider better encoding/decoding
#-------------------------------------------------------
import base64
def DecodeImage(data):
""" Decode an image from WaxRF data. """
#return base64.b64decode(data)
return base64.decodestring(data)
def EncodeImage(data):
""" Encode an image for WaxRF. """
#return base64.b64encode(data)
return base64.encodestring(data)
def EncodeImageFile(fname):
""" Encode an image from a file. """
data = file(fname, 'rb').read()
return EncodeImage(data)
| [
"lidonglin8088@gmail.com@c3cacd82-1c91-3bdd-8267-0dbd049bf731"
] | lidonglin8088@gmail.com@c3cacd82-1c91-3bdd-8267-0dbd049bf731 |
7eff9f36e7e6bad508e866be840b19ba1c8eea02 | fe5db184c4abbd1ad25242ab24c18e2d785a069f | /apps/partida/migrations/0023_auto_20200503_1351.py | 291a48d573c5e9fb3e1e85d5ea758745ad4876fd | [] | no_license | valmoresjp/asl | aa20df3ac50f27d7360f77ce599c0dee91e0011f | 0b882cf3d5a97719e22ae39e29ccc933e6a10b7f | refs/heads/master | 2023-03-17T11:09:35.313488 | 2020-07-27T19:09:52 | 2020-07-27T19:09:52 | 267,399,738 | 1 | 0 | null | 2020-07-25T00:52:39 | 2020-05-27T18:44:30 | HTML | UTF-8 | Python | false | false | 429 | py | # Generated by Django 3.0.4 on 2020-05-03 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partida', '0022_auto_20200429_1716'),
]
operations = [
migrations.AlterField(
model_name='cliente',
name='fecha',
field=models.DateTimeField(blank=None, default='2020-05-03 13:51:04', null=None),
),
]
| [
"valmoresjp@gmail.com"
] | valmoresjp@gmail.com |
3978ba4853132b98b1296c8b4418455710f65a6a | 775fdec8dd3d959560450fec3cf17c82a79e3f61 | /apps/dojo_ninjas/views.py | 4b8cd48396c0debabdbbee0f290a6e28bde444cd | [] | no_license | HarmsA/Dojo_Ninja | f2ff9833ea1b7707bed567ab869d1a645f8694a4 | 23ce11de538e600fccf64ac3c28348ca7bf38422 | refs/heads/master | 2020-04-09T03:13:10.591710 | 2018-12-02T18:27:29 | 2018-12-02T18:27:29 | 159,974,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from django.shortcuts import render, HttpResponse
# Create your views here.
def index(request):
return HttpResponse('Dojo_ninja') | [
"harms2a@gmail.com"
] | harms2a@gmail.com |
959292466215e11be803178df6f439451a2cb66f | 1d7ae7456cad0d7a914a35bac6e854e566a16589 | /db_check.py | 7a7d6fffe84e3b7181e190d46c29da75876f0e12 | [] | no_license | truongngocasic/myrepos | eda728d31e7771e606126d0dc43e976e4eb0a309 | 58678ac27c201198f682cacbab6c8947a731d5eb | refs/heads/master | 2021-09-22T10:18:44.483641 | 2018-09-08T02:44:00 | 2018-09-08T02:44:00 | 112,811,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import sqlite3
import json
db = sqlite3.connect('dbase/app.db')
# Get a cursor object
cursor = db.cursor()
#Show project
print "SHOW PROJECT"
cursor.execute("SELECT * FROM project")
rows = cursor.fetchall()
for row in rows:
print row
print json.dumps(row)
#Show users
print "SHOW USERS"
cursor.execute("SELECT * FROM users")
rows = cursor.fetchall()
for row in rows:
print(row)
| [
"root@beaglebone.(none)"
] | root@beaglebone.(none) |
f949c991858831a2c471ca6defa30d8260439840 | 136a379de74b2a28782cd0e2fb04da99dfabdf86 | /StacksAndQueues/FashionBoutique.py | 0e521c45b07ee0879e60a1065f5f486029e4bc75 | [] | no_license | mironmiron3/SoftUni-Python-Advanced | eb6c077c3b94e0381a82ed3b4abb26f1098dec82 | c7ac896a8fcc1f13a09f4c5573bd183d788a3157 | refs/heads/main | 2023-07-09T23:00:18.404835 | 2021-08-24T14:05:21 | 2021-08-24T14:05:21 | 399,486,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | clothes = [int(piece) for piece in input().split()]
initial_rack_capacity = int(input())
number_of_racks = 1
rack_capacity = initial_rack_capacity
while clothes:
current_piece = clothes.pop()
if current_piece > rack_capacity:
number_of_racks += 1
rack_capacity = initial_rack_capacity - current_piece
else:
rack_capacity -= current_piece
print(number_of_racks) | [
"noreply@github.com"
] | mironmiron3.noreply@github.com |
80f98b311d83f89f0caf6261134534cbdf3e1c93 | c4a3eeabe660e5d6b42f704d0325a755331ab3c5 | /hyperion/get_obs_CDF.py | 743366a29bdbc5509cdac8ee10191a4c26a47060 | [] | no_license | yaolun/misc | dfcfde2ac4a6429201644e1354912d3a064f9524 | 049b68ce826ddf638cec9a3b995d9ee84bf6075a | refs/heads/master | 2021-01-21T23:54:08.953071 | 2018-06-02T19:46:18 | 2018-06-02T19:46:18 | 26,666,071 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,328 | py | def get_obs_CDF(cdfdir, obj, spitzer_file=None, photfile=None):
"""
obj input in uppercase. But check the path to make sure.
"""
import numpy as np
from astropy.io import ascii
def spitzer_unc(filename, R=60., width=2.5):
"""
R is the resolving power (lambda/delta_lambda)
width = number of resolution elements
"""
irs = ascii.read(filename, data_start=2, header_start=None, comment='%')
wl_irs, flux_irs = irs['col1'], irs['col2']
# [wl_irs, flux_irs]= (np.genfromtxt(filename,skip_header=2,dtype='float').T)[0:2]
# Remove points with zero or negative flux
ind = (flux_irs > 0) & (np.isnan(flux_irs) == False)
wl_irs = wl_irs[ind]
flux_irs = flux_irs[ind]
unc_irs = np.empty_like(flux_irs)
oversample = (wl_irs[1]-wl_irs[0] + wl_irs[2]-wl_irs[1])/2 / (wl_irs[1]/R)
j = 0
edge = []
for i in range(len(wl_irs)):
if (wl_irs[i]-width/2 * wl_irs[i]/R >= min(wl_irs)) and (wl_irs[i]+width/2 * wl_irs[i]/R <= max(wl_irs)):
wl_dum = wl_irs[(wl_irs >= wl_irs[i]-width/2*wl_irs[i]/R) & (wl_irs <= wl_irs[i]+width/2*wl_irs[i]/R)]
flux_dum = flux_irs[(wl_irs >= wl_irs[i]-width/2*wl_irs[i]/R) & (wl_irs <= wl_irs[i]+width/2*wl_irs[i]/R)]
# return the coefficient, highest power first.
fit_dum = np.polyfit(wl_dum, flux_dum, 3)
base_dum = fit_dum[0]*wl_dum**3 + fit_dum[1]*wl_dum**2 + fit_dum[2]*wl_dum + fit_dum[3]
unc_irs[i] = np.std(flux_dum-base_dum) / np.sqrt(oversample)
if j == 0:
edge.append(unc_irs[i])
j += 1
edge_dum = unc_irs[i]
edge.append(edge_dum)
# print edge
for i in range(len(wl_irs)):
if wl_irs[i]-width/2 * wl_irs[i]/R < min(wl_irs):
unc_irs[i] = edge[0]
if wl_irs[i]+width/2 * wl_irs[i]/R > max(wl_irs):
unc_irs[i] = edge[1]
if flux_irs[i] - unc_irs[i] < 0:
unc_irs[i] = 1/3. * flux_irs[i]
return wl_irs, flux_irs, unc_irs
output = {}
# Read in Herschel data
# TODO: case for the sources without advanced products.
# continuum
[wl_pacs,flux_pacs] = np.genfromtxt(cdfdir+obj+'/pacs/advanced_products/'+obj+'_pacs_weighted_continuum.txt',dtype='float',skip_header=1).T
[wl_spire,flux_spire] = np.genfromtxt(cdfdir+obj+'/spire/advanced_products/'+obj+'_spire_corrected_continuum.txt',dtype='float',skip_header=1).T
# noise spectra
[wl_pacs_noise, flux_pacs_noise] = np.genfromtxt(cdfdir+obj+'/pacs/advanced_products/'+obj+'_pacs_weighted_residual_spectrum.txt',dtype='float',skip_header=1).T
[wl_spire_noise,flux_spire_noise] = np.genfromtxt(cdfdir+obj+'/spire/advanced_products/'+obj+'_spire_corrected_residual_spectrum.txt',dtype='float',skip_header=1).T
# Calculate the local variance (for spire), use the instrument uncertainty for pacs
#
wl_noise = [wl_pacs_noise, wl_spire_noise]
flux_noise = [flux_pacs_noise, flux_spire_noise]
sig_num = 20
sigma_noise = []
for i in range(0, len(wl_noise)):
sigma_dum = np.zeros_like(wl_noise[i])
for iwl in range(0, len(wl_noise[i])):
if iwl < sig_num/2:
sigma_dum[iwl] = np.std(np.hstack((flux_noise[i][0:int(sig_num/2)], flux_noise[i][0:int(sig_num/2)-iwl])))
elif len(wl_noise[i])-iwl < sig_num/2:
sigma_dum[iwl] = np.std(np.hstack((flux_noise[i][iwl:], flux_noise[i][len(wl_noise[i])-int(sig_num/2):])))
else:
sigma_dum[iwl] = np.std(flux_noise[i][iwl-int(sig_num/2):iwl+int(sig_num/2)])
sigma_noise = np.hstack((sigma_noise, sigma_dum))
# Read in Spitzer data
if spitzer_file != None:
wl_irs, flux_irs, unc_irs = spitzer_unc(spitzer_file)
wl_spec = np.hstack((wl_irs, wl_pacs, wl_spire))
flux_spec = np.hstack((flux_irs, flux_pacs, flux_spire))
sigma_noise = np.hstack((unc_irs, sigma_noise))
else:
wl_spec = np.hstack((wl_pacs,wl_spire))
flux_spec = np.hstack((flux_pacs,flux_spire))
flux_spec = flux_spec[np.argsort(wl_spec)]
sigma_noise = sigma_noise[np.argsort(wl_spec)]
wl_spec = wl_spec[np.argsort(wl_spec)]
# filter NaN value
wl_spec = wl_spec[np.isnan(flux_spec) == False]
sigma_noise = sigma_noise[np.isnan(flux_spec) == False]
flux_spec = flux_spec[np.isnan(flux_spec) == False]
output['spec'] = (wl_spec, flux_spec, sigma_noise)
if photfile!= None:
# Read in the photometry data
phot = ascii.read(photfile, comment='%')
# phot = np.genfromtxt(photfile, dtype=None, skip_header=1, comments='%')
# wl_phot = []
# flux_phot = []
# flux_sig_phot = []
# # note = []
# for i in range(0,len(phot)):
# wl_phot.append(phot[i][0])
# flux_phot.append(phot[i][1])
# flux_sig_phot.append(phot[i][2])
# # note.append(phot[i][4])
# wl_phot = np.array(wl_phot)
# flux_phot = np.array(flux_phot)
# flux_sig_phot = np.array(flux_sig_phot)
wl_phot = phot['wavelength']
flux_phot = phot['flux(Jy)']
flux_sig_phot = phot['error(Jy)']
selector = (wl_phot != 70) & (wl_phot != 100) & (wl_phot != 160) & (wl_phot != 250) & (wl_phot != 350) & (wl_phot != 500)
wl_phot = wl_phot[selector]
flux_phot = flux_phot[selector]
flux_sig_phot = flux_sig_phot[selector]
# Read in CDF photometry
phot_pacs = ascii.read(cdfdir+obj+'/pacs/data/'+obj+'_pacs_phot.txt', data_start=4)
phot_spire = ascii.read(cdfdir+obj+'/spire/data/'+obj+'_spire_phot.txt', data_start=4)
# average the photometry
phot_cdf = {'wave': [], 'flux': [], 'unc':[]}
# PACS
for i, w in enumerate(set(phot_pacs['wavelength(um)'])):
phot_cdf['wave'].append(w)
phot_cdf['flux'].append(np.mean(phot_pacs['flux(Jy)'][phot_pacs['wavelength(um)'] == w]))
phot_cdf['unc'].append((np.sum(phot_pacs['uncertainty(Jy)'][phot_pacs['wavelength(um)'] == w]**2)/len(phot_pacs['uncertainty(Jy)'][phot_pacs['wavelength(um)'] == w]))**0.5)
# SPIRE
for i, w in enumerate(set(phot_spire['wavelength(um)'])):
phot_cdf['wave'].append(w)
phot_cdf['flux'].append(np.mean(phot_spire['flux(Jy)'][phot_spire['wavelength(um)'] == w]))
phot_cdf['unc'].append((np.sum(phot_spire['uncertainty(Jy)'][phot_spire['wavelength(um)'] == w]**2)/len(phot_spire['uncertainty(Jy)'][phot_spire['wavelength(um)'] == w]))**0.5)
# combine photoemtry
wl_phot = np.hstack((wl_phot, np.array(phot_cdf['wave'])))
flux_phot = np.hstack((flux_phot, np.array(phot_cdf['flux'])))
flux_sig_phot = np.hstack((flux_sig_phot, np.array(phot_cdf['unc'])))
# filter NaN values
wl_phot = wl_phot[np.isnan(flux_phot) == False]
flux_sig_phot = flux_sig_phot[np.isnan(flux_phot) == False]
flux_phot = flux_phot[np.isnan(flux_phot) == False]
output['phot'] = (wl_phot, flux_phot, flux_sig_phot)
return output
| [
"allenya@gmail.com"
] | allenya@gmail.com |
e85beac70d5bacceda749318ba1c7279a6d05ee2 | 6b2ea44d7c7944dc2ec83a6cc9de8c1c475c093c | /GetUserShareCounts.py | 9f3aa6a6c0eb93f51791fea6dd24fa1c3317e27f | [] | no_license | yashodhank/GAM-Scripts | 2526d1aa2a2f878dfa426168bf9f5c2e73d21076 | 58c99983e7c7326893ccef5b9e4f15e7e8f58c4c | refs/heads/master | 2020-04-04T19:17:36.641822 | 2018-11-01T16:12:26 | 2018-11-01T16:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,739 | py | #!/usr/bin/env python2
"""
# Purpose: For a Google Drive User(s), output a CSV file showing the share type counts for files shared by the user(s)
# Note: This script can use Basic or Advanced GAM:
# https://github.com/jay0lee/GAM
# https://github.com/taers232c/GAMADV-X, https://github.com/taers232c/GAMADV-XTD, https://github.com/taers232c/GAMADV-XTD3
# Customize: Set DOMAIN_LIST to the list of domains you consider internal
# Usage:
# 1: Get ACLs for all files, if you don't want all users, replace all users with your user selection in the command below
# $ Example, Basic GAM: gam all users print filelist id title owners permissions > filelistperms.csv
# $ Example, Advanced GAM: gam config auto_batch_min 1 redirect csv ./filelistperms.csv multiprocess all users print filelist id title owners permissions
# 2: From that list of ACLs, output a CSV file with headers:
# Owner - email address of file owner
# Total - total files owned by Owner
# Shared - number of files shared
# Shared External - number of files shared publically (anyone) or to a domain/group/user where the domain is not in DOMAIN_LIST
# Shared Internal - number of files shared to a domain/group/user where the domain is in DOMAIN_LIST
# anyone - number of shares to anyone
# anyoneWithLink - number of shares to anyone with a link
# externalDomain - number of shares to an external domain
# externalDomainWithLink - number of shares to an external domain with a link
# internalDomain - number of shares to an internal domain
# internalDomainWithLink - number of shares to an internal domain with a link
# externalGroup - number of shares to an external group
# internalGroup - number of shares to an internal group
# externalUser - number of shares to an internal user
# internalUser - number of shares to an internal user
# $ python GetUserShareCounts.py filelistperms.csv usersharecounts.csv
"""
import csv
import re
import sys
# Substitute your internal domain(s) in the list below, e.g., DOMAIN_LIST = ['domain.com',] DOMAIN_LIST = ['domain1.com', 'domain2.com',]
DOMAIN_LIST = ['domain.com',]
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'
def incrementCounter(counter):
  # Bump the current owner's tally for `counter`, at most once per file row.
  # Relies on module-level loop state: counterSet (per-row "already counted"
  # flags), userShareCounts (owner -> counts dict) and owner (current row's
  # file owner). Only the four summary counters pass through the flag check.
  if not counterSet[counter]:
    userShareCounts[owner][counter] += 1
    counterSet[counter] = True
TOTAL_COUNTER = 'Total'
SHARED_COUNTER = 'Shared'
SHARED_EXTERNAL_COUNTER = 'Shared External'
SHARED_INTERNAL_COUNTER = 'Shared Internal'
HEADERS = [
'Owner',
TOTAL_COUNTER, SHARED_COUNTER, SHARED_EXTERNAL_COUNTER, SHARED_INTERNAL_COUNTER,
'anyone', 'anyoneWithLink',
'externalDomain', 'externalDomainWithLink',
'internalDomain', 'internalDomainWithLink',
'externalGroup', 'internalGroup',
'externalUser', 'internalUser',
]
zeroCounts = {
TOTAL_COUNTER: 0, SHARED_COUNTER: 0, SHARED_EXTERNAL_COUNTER: 0, SHARED_INTERNAL_COUNTER: 0,
'anyone': 0, 'anyoneWithLink': 0,
'externalDomain': 0, 'externalDomainWithLink': 0,
'internalDomain': 0, 'internalDomainWithLink': 0,
'externalGroup': 0, 'internalGroup': 0,
'externalUser': 0, 'internalUser': 0,
}
COUNT_CATEGORIES = {
'anyone': {False: 'anyone', True: 'anyoneWithLink'},
'domain': {False: {False: 'externalDomain', True: 'externalDomainWithLink'}, True: {False: 'internalDomain', True: 'internalDomainWithLink'}},
'group': {False: 'externalGroup', True: 'internalGroup'},
'user': {False: 'externalUser', True: 'internalUser'},
}
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
outputFile = open(sys.argv[2], 'wb')
else:
outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, HEADERS, lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
inputFile = open(sys.argv[1], 'rbU')
else:
inputFile = sys.stdin
userShareCounts = {}  # owner email -> counts dict (same keys as zeroCounts)
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
  owner = row['owners.0.emailAddress']
  userShareCounts.setdefault(owner, zeroCounts.copy())
  # Per-file flags so Total/Shared/External/Internal are each counted at
  # most once per file, no matter how many ACL entries the file has.
  counterSet = {TOTAL_COUNTER: False, SHARED_COUNTER: False, SHARED_EXTERNAL_COUNTER: False, SHARED_INTERNAL_COUNTER: False}
  for k, v in row.iteritems():
    # Only columns named permissions.<N>.type are of interest; v is the
    # ACL grantee type (anyone/domain/group/user).
    mg = PERMISSIONS_N_TYPE.match(k)
    if mg and v:
      permissions_N = mg.group(1)
      if row['permissions.{0}.role'.format(permissions_N)] == 'owner':
        incrementCounter(TOTAL_COUNTER)
      else:
        incrementCounter(SHARED_COUNTER)
        if v == 'anyone':
          # Public shares are external by definition.
          incrementCounter(SHARED_EXTERNAL_COUNTER)
          userShareCounts[owner][COUNT_CATEGORIES[v][row['permissions.{0}.withLink'.format(permissions_N)] == 'True']] += 1
        else:
          domain = row.get('permissions.{0}.domain'.format(permissions_N), '')
          if not domain and v in ['user', 'group']:
            # Deleted users/groups carry no email address; skip the entry.
            if row['permissions.{0}.deleted'.format(permissions_N)] == u'True':
              continue
            # Derive the grantee's domain from its email address.
            emailAddress = row['permissions.{0}.emailAddress'.format(permissions_N)]
            domain = emailAddress[emailAddress.find(u'@')+1:]
          internal = domain in DOMAIN_LIST
          # The bool indexes the pair: False -> external, True -> internal.
          incrementCounter([SHARED_EXTERNAL_COUNTER, SHARED_INTERNAL_COUNTER][internal])
          if v == u'domain':
            userShareCounts[owner][COUNT_CATEGORIES[v][internal][row['permissions.{0}.withLink'.format(permissions_N)] == 'True']] += 1
          else: # group, user
            userShareCounts[owner][COUNT_CATEGORIES[v][internal]] += 1
# Emit one CSV row per owner, sorted by email address.
for owner, counts in sorted(userShareCounts.iteritems()):
  row = {'Owner': owner}
  row.update(counts)
  outputCSV.writerow(row)
if inputFile != sys.stdin:
inputFile.close()
if outputFile != sys.stdout:
outputFile.close()
| [
"ross.scroggs@gmail.com"
] | ross.scroggs@gmail.com |
ac50bc52bc7373fcee843af31f074fd1f46ee40e | d815c4755e6f98098452528d8ab69a8f82096b78 | /day11/producer.py | e1ef9d4d5e62560a2626effd42106c83a7ede936 | [] | no_license | immortalmin/csk | 081f1baddde43f74151f08a7d701d4c611845f7f | aca509a03bb88ae2911c1611350decdf68a4419a | refs/heads/master | 2020-04-07T22:51:59.907665 | 2018-12-04T08:53:22 | 2018-12-04T08:53:22 | 158,788,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | #Author:immortal luo
# -*-coding:utf-8 -*-
# Minimal RabbitMQ producer: publishes one persistent "Hello World!"
# message to the durable 'hello' queue on localhost.
import pika
connection = pika.BlockingConnection(
    pika.ConnectionParameters('localhost')
)
channel = connection.channel()  # open a channel
# Declare the queue.
channel.queue_declare(queue='hello',durable=True)  # durable=True: the queue itself survives a broker restart (only the queue, not its messages)
channel.basic_publish(exchange='',
                      routing_key='hello',  # queue name
                      body='Hello World!',
                      properties=pika.BasicProperties(  # message persistence
                          delivery_mode=2  # 2 = persistent, 1 = non-persistent
                      )
                      )
print("[x] Sent 'Hello World!'")
connection.close() | [
"1608725226@qq.com"
] | 1608725226@qq.com |
ca6d004796ccfbe78c85eb4efbea28468a04ebcc | 2289d33c903bf6eaa0aeb228418ef438863e763d | /fortest/fortest/settings.py | 31da12ea1ebcb2450e2cfea43fa4ed31e88ca251 | [] | no_license | theseana/f | e462255eff88370365afeeae53e080aa53239d15 | 8a66acfc55e223fcd702540462053a5b5e0196e4 | refs/heads/master | 2023-01-12T21:30:39.043604 | 2020-11-22T16:00:48 | 2020-11-22T16:00:48 | 315,075,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | """
Django settings for fortest project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x3o6ig)#e5wzkpzs5b#*ytbs($a#9^s-pq6t)&q*%k^d(4sxe8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fortest.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Fix: the original used os.path.join(BASE_DIR, 'templates'), but this
        # settings module never imports os (it builds BASE_DIR with pathlib),
        # so importing the settings raised NameError. BASE_DIR is a Path, so
        # the / operator is the idiomatic (and working) way to join.
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'fortest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"info@poulstar.com"
] | info@poulstar.com |
323e87f0298040446697d0117a55480796d625d1 | 1581ea7304a39a81a018e35e5c6d773bb9f1727a | /프로그래머스/PR_여행경로.py | 041746869622645b93f00ec9bd431719a1a62169 | [] | no_license | Yejin6911/Algorithm | 5faae951a19e47dd0babbe0f27e349f8499d5b38 | 80e715c718c8362b20f42115f737b8e918de5b11 | refs/heads/master | 2023-06-20T21:13:39.181327 | 2021-07-19T06:30:20 | 2021-07-19T06:30:20 | 330,934,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | from collections import defaultdict
def solution(tickets):
    """Return the lexicographically smallest itinerary that uses every
    ticket exactly once, starting from "ICN".

    Iterative Hierholzer-style walk: greedily follow the alphabetically
    first unused ticket from the current airport; when stuck, move the
    airport onto the (reversed) itinerary and backtrack.
    """
    departures = defaultdict(list)
    for src, dst in tickets:
        departures[src].append(dst)
    # Sort descending so the smallest destination is popped from the end in O(1).
    for src in departures:
        departures[src].sort(reverse=True)

    path = ["ICN"]
    itinerary = []
    while path:
        airport = path[-1]
        if departures[airport]:  # defaultdict: unknown airport -> empty list
            path.append(departures[airport].pop())
        else:
            itinerary.append(path.pop())
    itinerary.reverse()
    return itinerary
print(solution([["ICN", "SFO"], ["ICN", "ATL"], [
"SFO", "ATL"], ["ATL", "ICN"], ["ATL", "SFO"]]))
| [
"cdjin6911@gmail.com"
] | cdjin6911@gmail.com |
cc9747c96a7aa72f30372975203452bf4205eac7 | c56303068bf3bb97cb87202f8ed0e8b2f4316a2a | /covid19_pipeline/data/sampler.py | d8c675e849845b966ae44bd7913b6a25470b97d9 | [] | no_license | salvagimeno-ai/HKBU_HPML_COVID-19 | f049b0ed91b0a06db674407d72940452c84a3e06 | c23e9c7bf5bedec4ddcc3d6efd1e0ad0f814446f | refs/heads/master | 2022-12-04T07:03:27.722775 | 2020-08-30T07:47:01 | 2020-08-30T07:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | import torch
import torchvision
from torchline.data.sampler import SAMPLER_REGISTRY
from torchline.data import build_data
__all__ = [
'WeightedRandomSampler',
]
@SAMPLER_REGISTRY.register()
def WeightedRandomSampler(cfg):
    """Build a torch WeightedRandomSampler where each sample's weight is the
    class weight (cfg.dataloader.sampler.weights_cls) of that sample's label.

    Registered in SAMPLER_REGISTRY so it can be selected from the config.
    """
    dataset = build_data(cfg)
    sampler_cfg = cfg.dataloader.sampler
    weights = []
    # Per-class weights indexed by integer class label.
    weights_cls = cfg.dataloader.sampler.weights_cls
    num_samples = len(dataset)
    for i in range(num_samples):
        # assumes dataset.samples[i]['label'] is convertible to int -- TODO confirm
        weight = weights_cls[int(dataset.samples[i]['label'])]
        weights.append(weight)
    replacement = sampler_cfg.replacement
return torch.utils.data.WeightedRandomSampler(weights, num_samples, replacement) | [
"1435679023@qq.com"
] | 1435679023@qq.com |
68f0e33fbfb6bfb09cc47e135e5d04fb76d17f89 | 82f993631da2871933edf83f7648deb6c59fd7e4 | /w1/L1/17.py | 5f12a88814e26948b3cfec9064768f06961e56b3 | [] | no_license | bobur554396/PPII2021Summer | 298f26ea0e74c199af7b57a5d40f65e20049ecdd | 7ef38fb4ad4f606940d2ba3daaa47cbd9ca8bcd2 | refs/heads/master | 2023-06-26T05:42:08.523345 | 2021-07-24T12:40:05 | 2021-07-24T12:40:05 | 380,511,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | print(bool(True))
# Truthy values: non-zero numbers, non-empty strings and containers.
print(bool(1))
print(bool(100))
print(bool('h'))
print(bool('hello'))
print(bool(2.6))
print(bool([1, 2, 3]))
print(bool((1, 2, 3)))
print(bool({'id': '123', 'name': 'Student 1'}))
print('-'*60)
# Falsy values: False, zero, and empty strings/containers.
print(bool(False))
print(bool(0))
print(bool(''))
print(bool([]))
print(bool(()))
print(bool({}))
| [
"bobur.muhsimbaev@gmail.com"
] | bobur.muhsimbaev@gmail.com |
0a255e211f9dad61eb4d0665a5241214dadd47f6 | f469652395fd34bd228ac23bb1a24efce6e5c4a0 | /看书笔记/看书练习/类/模块存储多个类/car.py | 001e32f69d227e1222a520cdfe4632cd75e494b0 | [] | no_license | wfwf1990/python | 0f5528f92d6172da96bce3ded12d1cc2f038ec3c | 6fa3b600cfcf4ab49da7cd8b5f62b5b62e276bfa | refs/heads/master | 2021-04-18T21:35:04.445511 | 2018-06-25T17:40:04 | 2018-06-25T17:40:04 | 126,700,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | class Car():
    def __init__(self,make,model,year):
        """Initialize basic car attributes and a zeroed odometer."""
        self.make = make
        self.model = model
        self.year = year
        # A new car starts with zero miles on the odometer.
        self.odometer_reading = 0
def getDescriptiveName(self): #返回描述性信息
long_name = str(self.year) + " " + self.make + " "+ self.model
return long_name.title()
def getOdometerReading(self):
print("This car has " + str(self.odometer_reading) + " miles on it")
#通过方法接受一个里程值,并将其存储到self.odometer_reading中
def updateOdometer(self,mileage):
#禁止将里程数往回调
if mileage >= self.odometer_reading:
self.odometer_reading = mileage
else:
print("you can not roll back an odometer")
def increment_odometer(self,miles):
if miles >= 0:
self.odometer_reading += miles
else:
print("you can not roll back an odometer")
class ElectricCar(Car):
    """A car powered by a battery; inherits all behavior from Car."""
    # NOTE(review): 'modle' is a misspelling of 'model'; kept as-is because
    # renaming the parameter would break callers passing it by keyword.
    def __init__(self,make,modle,year):
        super(ElectricCar, self).__init__(make,modle,year)
        # NOTE(review): despite the name, battery_size holds a Battery object,
        # not a number (the Battery itself has the numeric battery_size attr).
        self.battery_size = Battery()
class Battery():
    """A simple model of an electric car's battery."""

    # Known battery capacities (kWh) mapped to their approximate range in miles.
    _RANGES = {70: 240, 85: 270}

    def __init__(self,battery_size=70):
        """Store the battery's capacity in kWh (defaults to 70)."""
        self.battery_size = battery_size

    def describeBattery(self):
        """Print the battery's capacity."""
        print("This car has a " + str(self.battery_size) + "-kwh battery.")

    def getRange(self):
        """Print the approximate range this battery provides.

        Fix: the original assigned a local only for sizes 70 and 85 (and
        shadowed the builtin ``range``), so any other capacity raised
        UnboundLocalError. Unknown sizes are now reported instead of crashing.
        """
        miles = self._RANGES.get(self.battery_size)
        if miles is None:
            print("Range for a " + str(self.battery_size) + "-kwh battery is unknown.")
            return
        message = "This car can go approximately " + str(miles)
        message += " miles on a full charge."
        print(message)
| [
"576589099@qq.com"
] | 576589099@qq.com |
411440d37c8077bf6abc259cf3ea6e44e925bf8d | af58fa633206f571d4b370919e27de8d4b9862ed | /tasks/forms.py | 1b6d8ead9fdf09748187e018e42dbc3040332b75 | [] | no_license | gmdmgithub/django-todo-list | 7d36b9603fcdd30959ad48e8f2e97070918c68b7 | 7efaee21bbbdaaff1db46e255b63267ac6a8ab31 | refs/heads/master | 2021-09-25T10:39:10.202237 | 2019-12-17T14:59:45 | 2019-12-17T14:59:45 | 227,467,068 | 0 | 0 | null | 2021-09-22T18:18:36 | 2019-12-11T21:50:47 | Python | UTF-8 | Python | false | false | 271 | py | from django import forms
from django.forms import ModelForm
from .models import *
class TaskForm(forms.ModelForm):
    """ModelForm for creating and editing Task objects."""
    # Placeholder text shown inside the empty title input.
    title = forms.CharField(widget=forms.TextInput(attrs={'placeholder':'Add new task'}))
    class Meta:
        model = Task
fields = '__all__' | [
"gmika@interia.pl"
] | gmika@interia.pl |
e4441350874f79918bd8c01eb254b00f5cf56043 | 6f044a0541ddf467bb6251645c3d8107df5f5756 | /status/migrations/0013_status_trait.py | ea4451fca16a2341d7584014d6176fc495d94aef | [] | no_license | tpvt99/new-social-network-backend | 04ae9f0551c09eceb5fd6b4bcf50430243e53199 | a18d6279a27ba0ce3af1f5d6e985b4b147a4233a | refs/heads/master | 2021-09-04T01:50:43.430961 | 2018-01-14T08:59:41 | 2018-01-14T08:59:41 | 117,415,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-17 15:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds a nullable 'trait' FK to Status."""
    dependencies = [
        ('trait', '0001_initial'),
        ('status', '0012_status_contestpost'),
    ]
    operations = [
        # Nullable so existing Status rows remain valid without a Trait.
        migrations.AddField(
            model_name='status',
            name='trait',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='trait.Trait'),
        ),
    ]
| [
"tranphong96.hbk@gmail.com"
] | tranphong96.hbk@gmail.com |
c8f80a4707a3c941c2a3e4b4f7a6eaf9d71e88a6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2802/60716/236663.py | 2f8ffb9bdd560ab5b4a981852be9ace2494fb1bb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | num, m= map(int,input().split())
# Elimination game (num and m are read on the previous line): each queue
# member carries a countdown value. The member at the front leaves the game
# when their value is <= m; otherwise m is subtracted and they move to the
# back of the queue. The index (1-based) of the last member is printed below.
# NOTE(review): 'str' shadows the builtin str; harmless here, rename if extended.
str = input().split(' ')
lists = [int(i) for i in str]
listleave = []   # removal order (kept for debugging; never printed)
listmember = []  # 1-based member indices, in queue order
for i in range(num):
    listmember.append(i+1)
while len(listmember)>1:
    if lists[0]<=m:
        # Countdown exhausted: this member leaves the game.
        lists.pop(0)
        index=listmember.pop(0)
        # print("{}leave".format(index))
        listleave.append(index)
    else:
        # Survives this round: decrease the countdown and rotate to the back.
        temp = lists.pop(0) -m
        lists.append(temp)
        index = listmember.pop(0)
        listmember.append(index)
        # print("{}gotoend".format(index))
print(listmember[0]) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
f1cc691a72877a2403999b9e4aba96d5532e8c66 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ggH_SF/Full2017_HTXS_Stage1p2/doWorkspace.py | b5ddf13b53b9fb6c4a0399c79f7b36db17317f4e | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 3,744 | py |
import os
if os.path.exists('HTXS_stage1_categories.py') :
handle = open('HTXS_stage1_categories.py','r')
exec(handle)
handle.close()
sampleNames = []
for cat in HTXSStage1_1Categories:
if 'GG2H_' in cat:
sampleNames.append(cat.replace('GG2H','ggH_hww'))
sampleNames.append(cat.replace('GG2H','ggH_htt'))
elif 'QQ2HQQ_' in cat:
sampleNames.append(cat.replace('QQ2HQQ','qqH_hww'))
sampleNames.append(cat.replace('QQ2HQQ','qqH_htt'))
sampleNames.append(cat.replace('QQ2HQQ','WH_had_hww'))
sampleNames.append(cat.replace('QQ2HQQ','WH_had_htt'))
sampleNames.append(cat.replace('QQ2HQQ','ZH_had_hww'))
sampleNames.append(cat.replace('QQ2HQQ','ZH_had_htt'))
elif 'QQ2HLNU_' in cat:
sampleNames.append(cat.replace('QQ2HLNU','WH_lep_hww'))
sampleNames.append(cat.replace('QQ2HLNU','WH_lep_htt'))
elif 'QQ2HLL_' in cat:
sampleNames.append(cat.replace('QQ2HLL','ZH_lep_hww'))
sampleNames.append(cat.replace('QQ2HLL','ZH_lep_htt'))
elif 'GG2HLL_' in cat:
sampleNames.append(cat.replace('GG2HLL','ggZH_lep_hww'))
elif 'TTH' in cat:
sampleNames.append(cat.replace('TTH','ttH_hww'))
elif 'BBH' in cat:
sampleNames.append(cat.replace('BBH','bbH_hww'))
os.chdir('./Combination')
sampleNames.append('ggH_hww_PTH_200_300')
sampleNames.append('ggH_hww_PTH_300_450')
sampleNames.append('ggH_hww_PTH_450_650')
sampleNames.append('ggH_hww_PTH_GT650')
'''
#No merging
command="text2workspace.py Full2017_SF_ggH_HTXS_Stage1p2.txt -o Full2017_SF_ggH_HTXS_Stage1p2.root -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO verbose "
for sample in sampleNames:
if 'ggH_hww' not in sample: continue
if 'FWDH' in sample: continue
if 'GT200' in sample: continue
command+="--PO 'map=.*/{}:r_{}[1,-10,10]' ".format(sample,sample)
print command
os.system(command)
'''
#Merge some bins
command="text2workspace.py Full2017_SF_ggH_HTXS_Stage1p2.txt -o Full2017_SF_ggH_HTXS_Stage1p2_merged.root -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO verbose "
poi=''
for sample in sampleNames:
if 'ggH_hww' not in sample: continue
if 'FWDH' in sample: continue
#if 'GT200' in sample: continue
#if '0J' in sample: poi = 'r_ggH_hww_0J'
if ('1J_PTH_60_120' in sample or '1J_PTH_120_200' in sample): poi = 'r_ggH_hww_1J_PTH_GT60'
#elif ('1J_PTH_60_120' in sample or '1J_PTH_120_200' in sample): poi = 'r_ggH_hww_1J_PTH_GT60'
elif ('MJJ_350_700' in sample or 'MJJ_GT700' in sample): poi = 'r_ggH_hww_GE2J_MJJ_GT350'
elif ('MJJ_0_350_PTH_0_60' in sample or 'MJJ_0_350_PTH_60_120' in sample): poi = 'r_ggH_hww_GE2J_MJJ_0_350_PTH_LT120'
elif 'MJJ_0_350_PTH_120_200' in sample: poi = 'r_ggH_hww_GE2J_MJJ_0_350_PTH_GT120'
elif 'ggH_hww_PTH' in sample: poi = 'r_ggH_hww_PTH_GT200'
else: poi = 'r_'+sample
#if (sample in ['ggH_hww_PTH_300_450','ggH_hww_PTH_450_650','ggH_hww_PTH_GT650']): poi = 'r_ggH_hww_PTH_GT300'
#if ('MJJ_0_350_PTH_0_60' in sample or 'MJJ_0_350_PTH_60_120' in sample): poi = 'r_ggH_hww_GE2J_MJJ_0_350_PTH_LT120'
#elif ('MJJ_350_700' in sample): poi = 'r_ggH_hww_GE2J_MJJ_350_700'
#elif ('MJJ_GT700' in sample): poi = 'r_ggH_hww_GE2J_MJJ_GT700'
#else: poi = 'r_'+sample
command+="--PO 'map=.*/{}:{}[1,-10,10]' ".format(sample,poi)
# command+="--PO 'map=.*/{}:{}[1,-5,5]' ".format(sample,poi)
print command
os.system(command)
#Merge all bins
command="text2workspace.py Full2017_SF_ggH_HTXS_Stage1p2.txt -o Full2017_SF_ggH_HTXS_Stage1p2_onePOI.root -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO verbose "
poi=''
for sample in sampleNames:
if 'FWDH' in sample: continue
else: poi ='r_ggH_hww'
command+="--PO 'map=.*/{}:{}[1,-10,10]' ".format(sample,poi)
print command
os.system(command)
| [
"davide.di.croce@cern.ch"
] | davide.di.croce@cern.ch |
59fc2dab41b7a88b66446f7c5cb7bb6b83d6bf1e | 8be2df0c4508cc5254887b8cccb044032aea5c21 | /client-server-app/Lesson-1.1/6.py | c6eb9fed6056ecaed76ad071ea80c382788c222c | [] | no_license | ezhk/python-learning | 2d3dad2190ac9ce9299534f0f303e8b76a8eeab2 | 424ec9ca08541273f9ec39ff25f75a3b78d9dcb7 | refs/heads/master | 2023-01-05T16:50:08.829169 | 2020-06-02T18:03:05 | 2020-06-02T18:03:05 | 165,482,083 | 0 | 1 | null | 2023-01-04T04:59:43 | 2019-01-13T08:21:44 | Python | UTF-8 | Python | false | false | 2,430 | py | """
6. Создать текстовый файл test_file.txt, заполнить его тремя строками: «сетевое программирование», «сокет», «декоратор».
Проверить кодировку файла по умолчанию. Принудительно открыть файл в формате Unicode и вывести его содержимое.
"""
import sys
if __name__ == "__main__":
print(f"Кодировка по умолчанию: {sys.getdefaultencoding()}")
"""
Работа с файлом в обычном режиме намного проще — там
при записи и чтении возможны только строки, поэтому
попробуем поработать в бинарном режиме.
"""
with open('test_file.txt', 'wb') as fh:
for string in ("сетевое программирование", "сокет", "декоратор"):
fh.write(string.encode(sys.getdefaultencoding()))
fh.write(b"\n")
"""
Проверим наши строки с правильной кодировкой — UTF8 и неправильной — UTF16.
"""
with open('test_file.txt', 'rb') as fh:
print(fh)
for line in fh:
print(f"UTF-8 {line.decode('utf-8')}"
f"UTF-16 {line.decode('utf-16', 'replace')}")
"""
И откроем файл с указанной кодировкой.
"""
with open('test_file.txt', 'r', encoding='utf-8') as fh:
print(fh)
for line in fh:
print(f"UTF-8 encoded file: {line}", end='')
"""
Кодировка по умолчанию: utf-8
<_io.BufferedReader name='test_file.txt'>
UTF-8 сетевое программирование
UTF-16 臑뗐苑뗐닐뻐뗐퀠톿킀킾톳킀킰킼킼톸킀킾킲킰킽킸વ
UTF-8 сокет
UTF-16 臑뻐뫐뗐苑�
UTF-8 декоратор
UTF-16 듐뗐뫐뻐胑냐苑뻐胑�
<_io.TextIOWrapper name='test_file.txt' mode='r' encoding='utf-8'>
UTF-8 encoded file: сетевое программирование
UTF-8 encoded file: сокет
UTF-8 encoded file: декоратор
Сожержимое test_file.txt:
сетевое программирование
сокет
декоратор
"""
| [
"ezhik@ezhik.info"
] | ezhik@ezhik.info |
d67257825d79af4c7baa3475c3e4107a9f2ed5aa | c90ddd0930894c565197b739cd76140a7151fffd | /HLTrigger/Configuration/python/HLT_75e33/modules/hgcalLayerClustersL1Seeded_cfi.py | 9241d25990ffcca418d68a5a3de950c3318ae788 | [
"Apache-2.0"
] | permissive | p2l1pfp/cmssw | 9cc6b111ff1935e49f86ec3da9f9b84fb13bbcdf | 9f0a3a22fe451c25114134c30ac1f5c1261f3183 | refs/heads/L1PF_12_5_X | 2023-08-17T00:38:15.374760 | 2023-06-13T12:55:57 | 2023-06-13T12:55:57 | 127,881,751 | 6 | 1 | Apache-2.0 | 2023-09-05T13:54:59 | 2018-04-03T09:10:17 | C++ | UTF-8 | Python | false | false | 2,371 | py | import FWCore.ParameterSet.Config as cms
hgcalLayerClustersL1Seeded = cms.EDProducer("HGCalLayerClusterProducer",
HFNoseInput = cms.InputTag("HGCalRecHitL1Seeded","HGCHFNoseRecHits"),
HGCBHInput = cms.InputTag("hltRechitInRegionsHGCAL","HGCHEBRecHits"),
HGCEEInput = cms.InputTag("hltRechitInRegionsHGCAL","HGCEERecHits"),
HGCFHInput = cms.InputTag("hltRechitInRegionsHGCAL","HGCHEFRecHits"),
detector = cms.string('all'),
doSharing = cms.bool(False),
mightGet = cms.optional.untracked.vstring,
nHitsTime = cms.uint32(3),
plugin = cms.PSet(
dEdXweights = cms.vdouble(
0.0, 8.894541, 10.937907, 10.937907, 10.937907,
10.937907, 10.937907, 10.937907, 10.937907, 10.937907,
10.932882, 10.932882, 10.937907, 10.937907, 10.938169,
10.938169, 10.938169, 10.938169, 10.938169, 10.938169,
10.938169, 10.938169, 10.938169, 10.938169, 10.938169,
10.938169, 10.938169, 10.938169, 32.332097, 51.574301,
51.444192, 51.444192, 51.444192, 51.444192, 51.444192,
51.444192, 51.444192, 51.444192, 51.444192, 51.444192,
69.513118, 87.582044, 87.582044, 87.582044, 87.582044,
87.582044, 87.214571, 86.888309, 86.92952, 86.92952,
86.92952
),
deltac = cms.vdouble(1.3, 1.3, 5, 0.0315),
deltasi_index_regemfac = cms.int32(3),
dependSensor = cms.bool(True),
ecut = cms.double(3),
fcPerEle = cms.double(0.00016020506),
fcPerMip = cms.vdouble(
2.06, 3.43, 5.15, 2.06, 3.43,
5.15
),
kappa = cms.double(9),
maxNumberOfThickIndices = cms.uint32(6),
noiseMip = cms.PSet(
refToPSet_ = cms.string('HGCAL_noise_heback')
),
noises = cms.vdouble(
2000.0, 2400.0, 2000.0, 2000.0, 2400.0,
2000.0
),
positionDeltaRho2 = cms.double(1.69),
sciThicknessCorrection = cms.double(0.9),
thicknessCorrection = cms.vdouble(
0.77, 0.77, 0.77, 0.84, 0.84,
0.84
),
thresholdW0 = cms.vdouble(2.9, 2.9, 2.9),
type = cms.string('CLUE'),
use2x2 = cms.bool(True),
verbosity = cms.untracked.uint32(3)
),
timeClname = cms.string('timeLayerCluster'),
timeOffset = cms.double(5)
)
| [
"Thiago.Tomei@cern.ch"
] | Thiago.Tomei@cern.ch |
ac9ffa32b221d3043b543720b6687f73cd5687d6 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/identity/_client_factory.py | a549775369dd20d49176d25d093fa1fa098baa88 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 1,358 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def _msi_client_factory(cli_ctx, api_version=None, **_):
    # Imported lazily so the CLI core modules are only loaded when a
    # managed-identity command actually runs.
    from azure.cli.core.profiles import ResourceType
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_MSI, api_version=api_version)
def _msi_list_resources_client(cli_ctx, **_):
    """
    api version is specified for list resources command because new api version (2023-01-31) of MSI does not support
    listAssociatedResources command. In order to avoid a breaking change, multi-api package is used
    """
    return _msi_client_factory(cli_ctx, api_version='2022-01-31-preview').user_assigned_identities
def _msi_user_identities_operations(cli_ctx, _):
    # Operations group for CRUD on user-assigned managed identities.
    return _msi_client_factory(cli_ctx).user_assigned_identities
def _msi_operations_operations(cli_ctx, _):
    # Operations group listing the provider's available REST operations.
    return _msi_client_factory(cli_ctx).operations
def _msi_federated_identity_credentials_operations(cli_ctx, _):
    # Operations group for federated identity credentials on an identity.
    return _msi_client_factory(cli_ctx).federated_identity_credentials
| [
"noreply@github.com"
] | Azure.noreply@github.com |
553950841b24466894b68cdbbc0d5e9dc4ec1aae | 3fd3da4f11a251cc43d44d1d61ff2ffe5c82a4ce | /dlp/apps/rgl/steamdb.py | b31dc8ea7f75f8bf1a65dc92ab592ece91bd8d8b | [] | no_license | dumpinfo/TsBook | d95faded917bce3e024e77ff06afd30717ed9ef4 | 8fadfcd2ebf935cd49784fd27d66b2fd9f307fbd | refs/heads/master | 2023-05-27T07:56:24.149421 | 2019-07-31T20:51:52 | 2019-07-31T20:51:52 | 198,481,031 | 1 | 3 | null | 2023-05-22T21:13:31 | 2019-07-23T17:47:19 | Jupyter Notebook | UTF-8 | Python | false | false | 5,021 | py | import sys
from bs4 import BeautifulSoup
import requests
#from apps.rgl.spider_html_render import SpiderHtmlRender
import execjs
import json
import demjson
import csv
import urllib
from apps.rgl.seph_spider import SephSpider as SephSpider
from apps.rgl.website_stats import WebsiteStats as WebsiteStats
class SteamDb(object):
pc_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
pc_cookie = 'UM_distinctid=15dabfd5e91430-0c7e81214924c3-66547728-1fa400-15dabfd5e92894; qHistory=aHR0cDovL3Rvb2wuY2hpbmF6LmNvbS90b29scy9odHRwdGVzdC5hc3B4K+WcqOe6v0hUVFAgUE9TVC9HRVTmjqXlj6PmtYvor5V8aHR0cDovL3MudG9vbC5jaGluYXouY29tL3Rvb2xzL3JvYm90LmFzcHgr5pCc57Si6JyY6Jub44CB5py65Zmo5Lq65qih5ouf5oqT5Y+WfGh0dHA6Ly9zZW8uY2hpbmF6LmNvbStTRU/nu7zlkIjmn6Xor6J8aHR0cDovL3JhbmsuY2hpbmF6LmNvbSvnmb7luqbmnYPph43mn6Xor6J8aHR0cDovL3Rvb2wuY2hpbmF6LmNvbSvnq5nplb/lt6Xlhbc='
post_headers = {
'Content-Type': 'application/x-www-form-urlencoded',
#'Cookie': pc_cookie,
'User-Agent': pc_user_agent
}
get_headers = {
#'Cookie': pc_cookie,
'User-Agent': pc_user_agent
}
    @staticmethod
    def get_icon_image(appid):
        """Scrape one app's detail page on steamdb.info and return a
        (icon_url, header_image_url) tuple.

        NOTE(review): depends on steamdb.info's exact DOM layout; if the site
        changes, the [0] subscripts below raise IndexError.
        """
        url = 'https://steamdb.info/app/{0}/'.format(appid)
        wb_data = requests.get(url, headers=SteamDb.get_headers)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        icon_obj = soup.select('body > div.footer-wrap > div.scope-app > div > div > div.pagehead.clearfix > img')
        img_obj = soup.select('body > div.footer-wrap > div.scope-app > div > div > div.row.app-row > div.span4 > img')
        icon_url = icon_obj[0].attrs['src']
        # The header image src is site-relative; prefix the host to make it absolute.
        img_url = 'https://steamdb.info/{0}'.format(img_obj[0].attrs['src'])
        return icon_url, img_url
    @staticmethod
    def get_steam_apps():
        """Crawl steamdb.info's paged app list, collect entries typed 'Game',
        and upload each page's batch as JSON to the pada backend.

        NOTE(review): the page range is hard-coded (57..980); the dead
        `if page_sum < 1` branch suggests the total page count was once
        scraped from the page header instead.
        """
        print('get steam apps...')
        page_sum = 980 + 1
        for page_num in range(57, page_sum):
            games = []
            print('process page:{0}! '.format(page_num))
            url = 'https://steamdb.info/apps/page{0}/'.format(page_num)
            wb_data = requests.get(url, headers=SteamDb.get_headers)
            soup = BeautifulSoup(wb_data.text, 'lxml')
            if page_sum < 1:
                # Dead code: page_sum starts at 981, so this never runs.
                page_sum_obj = soup.select('body > div.footer-wrap > div.header-wrapper > div > h1.header-title.pull-right')
                page_sum_str = page_sum_obj[0].text
                page_sum = int(page_sum_str[page_sum_str.rfind('/')+1:]) + 1
            for row in range(1, 10000000):
                game = {}
                app_img = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td.applogo > img'.format(row))
                if len(app_img) <= 0:
                    break # every row of the table has been read
                app_img_src = app_img[0].get('src')
                appid_obj = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td:nth-of-type(2) > a'.format(row))
                appid = appid_obj[0].text
                app_name_obj = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td:nth-of-type(3) > a.b'.format(row))
                if len(app_name_obj) > 0:
                    app_name = app_name_obj[0].text
                else:
                    app_name = 'noname'
                app_type_obj = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td:nth-of-type(3) > i'.format(row))
                app_type = app_type_obj[0].text
                if 'Game' == app_type:
                    # Fetch detail-page artwork only for rows typed as games.
                    icon_url, img_url = SteamDb.get_icon_image(appid)
                    game['steamId'] = appid
                    game['articleName'] = app_name
                    game['type'] = 1
                    game['articleIcon'] = icon_url
                    game['articleImage'] = img_url
                    games.append(game)
            print('upload {0} page'.format(page_num))
            url = 'http://47.95.119.120/pada/index.php?f=c_ajax&c=CAjax&m=importSteamDbRecsAjax'
            #post_data = urllib.parse.urlencode(game).encode('utf-8')
            post_data = bytes(json.dumps(games), 'utf8')
            headers = {'Content-Type': 'application/json'}
            req = urllib.request.Request(url, post_data, headers)
            resp = urllib.request.urlopen(req).read().decode('utf-8')
            #resp = requests.post(url, data=json.dumps(games))
            print(resp)
@staticmethod
def startup(params):
get_steam_apps()
# WebsiteStats.run_stats({})
#RglMain.run_normal_spider({})
#SephSpider.test()
| [
"twtravel@126.com"
] | twtravel@126.com |
7811ab8d810fd59b8683dda47ad714400b18daaa | bccd16717d20d673cb514d6ac68e624c2c4dae88 | /sdk/python/pulumi_gcp/cloudfunctions/_inputs.py | 77344c6db5bc5aaf6ca0546f852fc87d824be49d | [
"MPL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | dimpu47/pulumi-gcp | e78d228f7c2c929ad3e191331b75c6e4c4cc4fa9 | 38355de300a5768e11c49d344a8165ba0735deed | refs/heads/master | 2023-07-07T13:00:15.682157 | 2020-09-23T18:43:11 | 2020-09-23T18:43:11 | 173,437,663 | 0 | 0 | Apache-2.0 | 2023-07-07T01:05:58 | 2019-03-02T11:06:19 | Go | UTF-8 | Python | false | false | 7,454 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'FunctionEventTriggerArgs',
'FunctionEventTriggerFailurePolicyArgs',
'FunctionIamBindingConditionArgs',
'FunctionIamMemberConditionArgs',
'FunctionSourceRepositoryArgs',
]
@pulumi.input_type
class FunctionEventTriggerArgs:
    """Generated input type describing the event that triggers a Cloud Function.

    NOTE: this file is tool-generated (see the header warning); edits here
    will be lost on regeneration.
    """
    def __init__(__self__, *,
                 event_type: pulumi.Input[str],
                 resource: pulumi.Input[str],
                 failure_policy: Optional[pulumi.Input['FunctionEventTriggerFailurePolicyArgs']] = None):
        """
        :param pulumi.Input[str] event_type: The type of event to observe. For example: `"google.storage.object.finalize"`.
               See the documentation on [calling Cloud Functions](https://cloud.google.com/functions/docs/calling/) for a
               full reference of accepted triggers.
        :param pulumi.Input[str] resource: Required. The name or partial URI of the resource from
               which to observe events. For example, `"myBucket"` or `"projects/my-project/topics/my-topic"`
        :param pulumi.Input['FunctionEventTriggerFailurePolicyArgs'] failure_policy: Specifies policy for failed executions. Structure is documented below.
        """
        pulumi.set(__self__, "event_type", event_type)
        pulumi.set(__self__, "resource", resource)
        if failure_policy is not None:
            pulumi.set(__self__, "failure_policy", failure_policy)
    @property
    @pulumi.getter(name="eventType")
    def event_type(self) -> pulumi.Input[str]:
        """
        The type of event to observe. For example: `"google.storage.object.finalize"`.
        See the documentation on [calling Cloud Functions](https://cloud.google.com/functions/docs/calling/) for a
        full reference of accepted triggers.
        """
        return pulumi.get(self, "event_type")
    @event_type.setter
    def event_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_type", value)
    @property
    @pulumi.getter
    def resource(self) -> pulumi.Input[str]:
        """
        Required. The name or partial URI of the resource from
        which to observe events. For example, `"myBucket"` or `"projects/my-project/topics/my-topic"`
        """
        return pulumi.get(self, "resource")
    @resource.setter
    def resource(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource", value)
    @property
    @pulumi.getter(name="failurePolicy")
    def failure_policy(self) -> Optional[pulumi.Input['FunctionEventTriggerFailurePolicyArgs']]:
        """
        Specifies policy for failed executions. Structure is documented below.
        """
        return pulumi.get(self, "failure_policy")
    @failure_policy.setter
    def failure_policy(self, value: Optional[pulumi.Input['FunctionEventTriggerFailurePolicyArgs']]):
        pulumi.set(self, "failure_policy", value)
@pulumi.input_type
class FunctionEventTriggerFailurePolicyArgs:
    """Generated input type: retry policy for failed function executions."""
    def __init__(__self__, *,
                 retry: pulumi.Input[bool]):
        """
        :param pulumi.Input[bool] retry: Whether the function should be retried on failure. Defaults to `false`.
        """
        pulumi.set(__self__, "retry", retry)
    @property
    @pulumi.getter
    def retry(self) -> pulumi.Input[bool]:
        """
        Whether the function should be retried on failure. Defaults to `false`.
        """
        return pulumi.get(self, "retry")
    @retry.setter
    def retry(self, value: pulumi.Input[bool]):
        pulumi.set(self, "retry", value)
@pulumi.input_type
class FunctionIamBindingConditionArgs:
    """Generated input type: condition attached to a function IAM binding."""
    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 title: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] expression: The condition expression.
        :param pulumi.Input[str] title: A title for the condition.
        :param pulumi.Input[str] description: An optional description of the condition.
        """
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        return pulumi.get(self, "expression")
    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)
    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class FunctionIamMemberConditionArgs:
    """Generated input type: condition attached to a function IAM member.

    Structurally identical to FunctionIamBindingConditionArgs (both are
    emitted by the code generator).
    """
    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 title: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] expression: The condition expression.
        :param pulumi.Input[str] title: A title for the condition.
        :param pulumi.Input[str] description: An optional description of the condition.
        """
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        return pulumi.get(self, "expression")
    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)
    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class FunctionSourceRepositoryArgs:
    """Generated input type: source repository holding the function's code."""
    def __init__(__self__, *,
                 url: pulumi.Input[str],
                 deployed_url: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] url: The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats:
        """
        pulumi.set(__self__, "url", url)
        if deployed_url is not None:
            pulumi.set(__self__, "deployed_url", deployed_url)
    @property
    @pulumi.getter
    def url(self) -> pulumi.Input[str]:
        """
        The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats:
        """
        return pulumi.get(self, "url")
    @url.setter
    def url(self, value: pulumi.Input[str]):
        pulumi.set(self, "url", value)
    @property
    @pulumi.getter(name="deployedUrl")
    def deployed_url(self) -> Optional[pulumi.Input[str]]:
        # Output-style field: the repository URL actually deployed (no
        # user-facing docstring in the upstream schema).
        return pulumi.get(self, "deployed_url")
    @deployed_url.setter
    def deployed_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deployed_url", value)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
467973f25cde54a20eea6250b4ec716fc7f4a522 | 04a0614b8c2a893dab29bc4ffb0aaf82364fdf3f | /42. Trapping Rain Water.py | 00d019457232df006bdb59cfc6b8f0459546a22d | [] | no_license | sharmaji27/Leetcode-Problems | 716bcb4a36b9e4f45274c4d551967e15c40ddbd2 | 0f878933b17df170c18f0b67b7200cec76c276e0 | refs/heads/master | 2021-10-20T17:35:35.175757 | 2021-10-20T05:33:17 | 2021-10-20T05:33:17 | 218,299,755 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | '''
Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.
The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped. Thanks Marcos for contributing this image!
Example:
Input: [0,1,0,2,1,0,1,3,2,1,2,1]
Output: 6
'''
class Solution:
    def trap(self, A: List[int]) -> int:
        """Return the units of rain water trapped by elevation map A.

        Two-pointer scan: always advance the side with the lower wall,
        accumulating the gap between that bar and the tallest wall seen so
        far on its own side.
        """
        trapped = 0
        lo, hi = 0, len(A) - 1
        tallest_left = tallest_right = 0
        while lo < hi:
            if A[lo] < A[hi]:
                # The left bar is the limiting side.
                tallest_left = max(tallest_left, A[lo])
                trapped += tallest_left - A[lo]
                lo += 1
            else:
                # The right bar is the limiting side.
                tallest_right = max(tallest_right, A[hi])
                trapped += tallest_right - A[hi]
                hi -= 1
        return trapped
"asharma70420@gmail.com"
] | asharma70420@gmail.com |
8ab80b9fc52d4d7883b88017e5bb0d4f504d8282 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2571/60717/272964.py | 9cb0579f0d6afe9dc168e613a2f93dd1d097fcac | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | n=int(input())
# NOTE(review): `n` (the row count) is read from stdin on the preceding line.
# This script does not actually solve the problem: it pattern-matches a few
# specific judge inputs and prints the memorized expected answers.
list1=[]
for i in range(0,n):
    tmp=input().split(',')
    for j in range(0,len(tmp)):
        tmp[j]=int(tmp[j])
    list1.append(tmp)
# Hard-coded answers for known test cases; assumes n >= 2 rows were read
# (list1[0]/list1[1] would raise IndexError otherwise — TODO confirm inputs).
if list1[0]==[1,0,1] and list1[1]==[0,-2,3]:
    print(2)
elif list1[1]==[5,-2,1] and list1[0]==[1,0,1] and n==2:
    print(3)
elif list1==[[1, 6, 1, 2], [1, -2, 1, 4]]and n==2or (list1[0]==[1, 6, 1] and list1[1]==[4, -2, 1] and n ==2):
    print(3)
else:
    # Unrecognized input: just echo the parsed rows.
    print(list1)
"1069583789@qq.com"
] | 1069583789@qq.com |
384d28ecb7a76eaf2d60baf426d25e1e67ef752b | 209dd8cbb28a40fa8ab7004368fcadd071c88699 | /Learning & Documentation/dlib(3)/digital_makeup_on_webcam.py | 215e7cd5464ff6f35963308f58bb7791f2acd7a5 | [] | no_license | mahmud83/Object-and-facial-detection-in-python | 095fc6ee47f7378c4586557b8a07b7a9cd537a62 | 2389e9d7b3b8331ffc5dd5d2591eacc7e72a5675 | refs/heads/master | 2020-04-07T00:36:40.435537 | 2018-10-02T22:58:00 | 2018-10-02T22:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | from PIL import Image, ImageDraw
import face_recognition
import cv2
# Live "digital makeup" demo: read frames from the default webcam, locate
# facial landmarks with face_recognition, and draw straight accent lines
# across the eyebrows, lips and eyes with OpenCV.  Press 'q' to quit.
# (The PIL import above is a leftover from the still-image version that the
# commented-out lines below belonged to.)
#image = face_recognition.load_image_file("biden.jpg")
# Load the jpg file into a numpy array
video_capture = cv2.VideoCapture(0)
# Find all facial features in all the faces in the image
#face_landmarks_list = face_recognition.face_landmarks(image)
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    face_landmarks_list = face_recognition.face_landmarks(frame)
    for face_landmarks in face_landmarks_list:
        #pil_image = Image.fromarray(frame)
        # d = ImageDraw.Draw(pil_image, 'RGBA')
        # Make the eyebrows into a nightmare
        # cv2.polylines(frame,face_landmarks['left_eyebrow'], fill=(68, 54, 39, 128))
        # cv2.polylines(frame,face_landmarks['right_eyebrow'],true, (68, 54, 39))
        # Each cv2.line call joins the first and fifth landmark points of the
        # feature, so features are sketched as single straight strokes rather
        # than filled polygons.
        cv2.line(frame, face_landmarks['left_eyebrow'][0], face_landmarks['left_eyebrow'][4],(68, 54, 39), 5)
        cv2.line(frame, face_landmarks['right_eyebrow'][0], face_landmarks['right_eyebrow'][4],(68, 54, 39), 5)
        # Gloss the lips
        #d.polygon(face_landmarks['top_lip'], fill=(150, 0, 0, 128))
        #d.polygon(face_landmarks['bottom_lip'], fill=(150, 0, 0, 128))
        cv2.line(frame, face_landmarks['top_lip'][0], face_landmarks['top_lip'][4],(68, 54, 39), 5)
        cv2.line(frame, face_landmarks['bottom_lip'][0], face_landmarks['bottom_lip'][4],(68, 54, 39), 5)
        # Sparkle the eyes
        #d.polygon(face_landmarks['left_eye'], fill=(255, 255, 255, 30))
        #d.polygon(face_landmarks['right_eye'], fill=(255, 255, 255, 30))
        # Apply some eyeliner
        cv2.line(frame, face_landmarks['left_eye'][0], face_landmarks['left_eye'][4],(68, 54, 39), 5)
        cv2.line(frame, face_landmarks['right_eye'][0], face_landmarks['right_eye'][4],(68, 54, 39), 5)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera and close the preview window on exit.
video_capture.release()
cv2.destroyAllWindows()
| [
"danwe980@student.liu.se"
] | danwe980@student.liu.se |
550f570ff18ea5eefd99c431579ddfb994de89ed | 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | /examples/pytorch/hilander/utils/knn.py | 6604c7924ac2d49bf79ab8b4d730d5fda243ec83 | [
"Apache-2.0"
] | permissive | dmlc/dgl | 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 | refs/heads/master | 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 | Apache-2.0 | 2023-09-14T15:48:24 | 2018-04-20T14:49:09 | Python | UTF-8 | Python | false | false | 5,635 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file re-uses implementation from https://github.com/yl-1993/learn-to-cluster
"""
import math
import multiprocessing as mp
import os
import numpy as np
from tqdm import tqdm
from utils import Timer
from .faiss_search import faiss_search_knn
__all__ = [
"knn_faiss",
"knn_faiss_gpu",
"fast_knns2spmat",
"build_knns",
"knns2ordered_nbrs",
]
def knns2ordered_nbrs(knns, sort=True):
    """Split a (N, 2, k) knn structure into (dists, nbrs).

    Slice 0 along axis 1 holds neighbor ids, slice 1 holds distances.
    When ``sort`` is True each row is reordered by ascending distance.
    Returns ``(dists, nbrs)`` with ``nbrs`` cast to int32.
    """
    arr = np.array(knns) if isinstance(knns, list) else knns
    nbrs = arr[:, 0, :].astype(np.int32)
    dists = arr[:, 1, :]
    if sort:
        # Per-row ascending-distance permutation, applied to both arrays.
        order = np.argsort(dists, axis=1)
        rows = np.arange(order.shape[0])[:, None]
        dists = dists[rows, order]
        nbrs = nbrs[rows, order]
    return dists, nbrs
def fast_knns2spmat(knns, k, th_sim=0, use_sim=True, fill_value=None):
    """Convert a knn structure into an (n, n) scipy CSR similarity matrix.

    Row i holds the similarities (1 - distance, unless ``use_sim`` is False)
    from query i to its retained neighbors.  Edges below ``th_sim`` and
    self-loops are dropped.  If ``fill_value`` is given, every edge weight is
    overwritten with that constant before thresholding.

    Note: the result is NOT symmetrized — edge (i, j) exists only if j is
    among i's k nearest neighbors.
    """
    from scipy.sparse import csr_matrix
    eps = 1e-5
    n = len(knns)
    if isinstance(knns, list):
        knns = np.array(knns)
    if len(knns.shape) == 2:
        # knns saved by hnsw has different shape: a ragged list of
        # (nbr, dist) pairs is repacked into a dense (n, 2, k) array.
        n = len(knns)
        ndarr = np.ones([n, 2, k])
        ndarr[:, 0, :] = -1  # assign unknown dist to 1 and nbr to -1
        for i, (nbr, dist) in enumerate(knns):
            size = len(nbr)
            assert size == len(dist)
            ndarr[i, 0, :size] = nbr[:size]
            ndarr[i, 1, :size] = dist[:size]
        knns = ndarr
    nbrs = knns[:, 0, :]
    dists = knns[:, 1, :]
    # Distances are expected to be normalized into [0, 1] (small tolerance).
    assert (
        -eps <= dists.min() <= dists.max() <= 1 + eps
    ), "min: {}, max: {}".format(dists.min(), dists.max())
    if use_sim:
        sims = 1.0 - dists
    else:
        sims = dists
    if fill_value is not None:
        print("[fast_knns2spmat] edge fill value:", fill_value)
        sims.fill(fill_value)
    row, col = np.where(sims >= th_sim)
    # remove the self-loop
    idxs = np.where(row != nbrs[row, col])
    row = row[idxs]
    col = col[idxs]
    data = sims[row, col]
    col = nbrs[row, col]  # convert to absolute column
    assert len(row) == len(col) == len(data)
    spmat = csr_matrix((data, (row, col)), shape=(n, n))
    return spmat
def build_knns(feats, k, knn_method, dump=True):
    """Build a k-NN structure over ``feats`` and return its knns list.

    Args:
        feats: feature matrix, one row per sample (knn_faiss casts it to
            float32 internally).
        k: number of neighbors to retrieve per sample.
        knn_method: "faiss" (CPU exact search) or "faiss_gpu".
        dump: unused here; kept for backward compatibility with callers.

    Returns:
        A list of (neighbor_ids, distances) pairs, one per sample.

    Raises:
        KeyError: if ``knn_method`` is not a supported backend.
    """
    with Timer("build index"):
        if knn_method == "faiss":
            index = knn_faiss(feats, k, omp_num_threads=None)
        elif knn_method == "faiss_gpu":
            index = knn_faiss_gpu(feats, k)
        else:
            raise KeyError(
                "Only support faiss and faiss_gpu currently ({}).".format(
                    knn_method
                )
            )
        knns = index.get_knns()
    return knns
class knn:
    """Base class for k-NN search wrappers.

    Concrete subclasses are expected to set ``self.knns`` (a list of
    (neighbor_ids, distances) pairs, one per query) and ``self.verbose``.
    """
    def __init__(self, feats, k, index_path="", verbose=True):
        # Interface placeholder only; subclasses build the index themselves.
        pass
    def filter_by_th(self, i):
        # Keep only neighbors of query ``i`` whose similarity (1 - distance)
        # reaches the threshold stored in ``self.th``.
        th_nbrs = []
        th_dists = []
        nbrs, dists = self.knns[i]
        for n, dist in zip(nbrs, dists):
            if 1 - dist < self.th:
                continue
            th_nbrs.append(n)
            th_dists.append(dist)
        th_nbrs = np.array(th_nbrs)
        th_dists = np.array(th_dists)
        return (th_nbrs, th_dists)
    def get_knns(self, th=None):
        """Return the raw knns, or a threshold-filtered copy when th > 0."""
        if th is None or th <= 0.0:
            return self.knns
        # TODO: optimize the filtering process by numpy
        # nproc = mp.cpu_count()
        # Multiprocessing path is effectively disabled (nproc hard-coded to 1).
        nproc = 1
        with Timer(
            "filter edges by th {} (CPU={})".format(th, nproc), self.verbose
        ):
            self.th = th
            self.th_knns = []
            tot = len(self.knns)
            if nproc > 1:
                pool = mp.Pool(nproc)
                th_knns = list(
                    tqdm(pool.imap(self.filter_by_th, range(tot)), total=tot)
                )
                pool.close()
            else:
                th_knns = [self.filter_by_th(i) for i in range(tot)]
        return th_knns
class knn_faiss(knn):
    """Exact CPU k-NN search via a FAISS IndexFlatIP (inner product) index.

    Stored distances are 1 - inner product, which equals cosine distance if
    the features are L2-normalized (not checked here — TODO confirm callers
    normalize).  ``nprobe``, ``rebuild_index`` and ``**kwargs`` are accepted
    for interface parity but unused.
    """
    def __init__(
        self,
        feats,
        k,
        nprobe=128,
        omp_num_threads=None,
        rebuild_index=True,
        verbose=True,
        **kwargs
    ):
        import faiss
        if omp_num_threads is not None:
            faiss.omp_set_num_threads(omp_num_threads)
        self.verbose = verbose
        with Timer("[faiss] build index", verbose):
            feats = feats.astype("float32")
            size, dim = feats.shape
            index = faiss.IndexFlatIP(dim)
            index.add(feats)
        with Timer("[faiss] query topk {}".format(k), verbose):
            # Every point queries the index it is itself part of, so the
            # first hit of each row is typically the point itself.
            sims, nbrs = index.search(feats, k=k)
            self.knns = [
                (
                    np.array(nbr, dtype=np.int32),
                    1 - np.array(sim, dtype=np.float32),
                )
                for nbr, sim in zip(nbrs, sims)
            ]
class knn_faiss_gpu(knn):
    """GPU k-NN search delegated to the faiss_search_knn helper.

    NOTE(review): unlike knn_faiss, this class never sets ``self.verbose``,
    so ``get_knns(th > 0)`` on an instance would raise AttributeError —
    confirm callers only use the default th=None path.
    """
    def __init__(
        self,
        feats,
        k,
        nprobe=128,
        num_process=4,
        is_precise=True,
        sort=True,
        verbose=True,
        **kwargs
    ):
        with Timer("[faiss_gpu] query topk {}".format(k), verbose):
            # faiss_search_knn returns distances directly (no 1 - sim
            # conversion here, in contrast to knn_faiss).
            dists, nbrs = faiss_search_knn(
                feats,
                k=k,
                nprobe=nprobe,
                num_process=num_process,
                is_precise=is_precise,
                sort=sort,
                verbose=verbose,
            )
            self.knns = [
                (
                    np.array(nbr, dtype=np.int32),
                    np.array(dist, dtype=np.float32),
                )
                for nbr, dist in zip(nbrs, dists)
            ]
| [
"noreply@github.com"
] | dmlc.noreply@github.com |
e45db364ac41947ca34d39b12f7a98502a23dba1 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /matrix_exp/eulerian.py | 742adbda260290862065b49d4a75213ffe9d07ed | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,795 | py | #! /usr/bin/env python
#
def eulerian ( m, n ):
#*****************************************************************************80
#
## EULERIAN returns the EULERIAN matrix.
#
#  Discussion:
#
#    E(I,J) counts the permutations of I objects that contain exactly J
#    runs (maximal ascending subsequences).  The table is filled in by the
#    recursion
#
#      E(I,J) = J * E(I-1,J) + (I-J+1) * E(I-1,J-1),
#
#    starting from E(1,1) = 1.  The result is integral, nonnegative and
#    unit lower triangular, so det(A) = 1 and all eigenvalues are 1.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Author:
#
#    John Burkardt
#
#  Reference:
#
#    Dennis Stanton, Dennis White,
#    Constructive Combinatorics,
#    Springer Verlag, 1986.
#
#  Parameters:
#
#    Input, integer M, N, the number of rows and columns of A.
#
#    Output, real A(M,N), the matrix.
#
  import numpy as np
  a = np.zeros ( [ m, n ] )
  a[0,0] = 1.0
  for row in range ( 1, m ):
    a[row,0] = 1.0
    for col in range ( 1, n ):
      a[row,col] = float ( col + 1 ) * a[row-1,col] \
                 + float ( row - col + 1 ) * a[row-1,col-1]
  return a
def eulerian_determinant ( n ):
#*****************************************************************************80
#
## EULERIAN_DETERMINANT returns the determinant of the EULERIAN matrix.
#
#  Discussion:
#
#    The Eulerian matrix is unit lower triangular, so its determinant is the
#    product of the diagonal entries, all of which are 1.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, integer N, the order of the matrix.
#
#    Output, real DETERM, the determinant.
#
  return 1.0
def eulerian_determinant_test ( ):
#*****************************************************************************80
#
## EULERIAN_DETERMINANT_TEST tests EULERIAN_DETERMINANT.
#
#  Discussion:
#
#    Smoke test: prints the 4x4 Eulerian matrix and its determinant.
#    Note: `eulerian` is imported from a module named "eulerian", and
#    `r8mat_print` is an external helper module from the same collection;
#    both must be importable for this test to run.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    25 January 2015
#
#  Author:
#
#    John Burkardt
#
  import platform
  from eulerian import eulerian
  from r8mat_print import r8mat_print
  print ( '' )
  print ( 'EULERIAN_DETERMINANT_TEST' )
  print ( '  Python version: %s' % ( platform.python_version ( ) ) )
  print ( '  EULERIAN_DETERMINANT computes the determinant of the EULERIAN matrix.' )
  m = 4
  n = m
  a = eulerian ( m, n )
  r8mat_print ( m, n, a, '  EULERIAN matrix:' )
  value = eulerian_determinant ( n )
  print ( '' )
  print ( '  Value = %g' % ( value ) )
#
#  Terminate.
#
  print ( '' )
  print ( 'EULERIAN_DETERMINANT_TEST' )
  print ( '  Normal end of execution.' )
  return
def eulerian_inverse ( n ):
#*****************************************************************************80
#
## EULERIAN_INVERSE computes the inverse of the EULERIAN matrix.
#
#  Discussion:
#
#    The Eulerian matrix is unit lower triangular, so its inverse is
#    obtained column by column through forward substitution and is itself
#    unit lower triangular.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, integer N, the order of the matrix.
#
#    Output, real A(N,N), the inverse of the Eulerian matrix.
#
  import numpy as np
#
#  B is the Eulerian matrix whose inverse is wanted.
#
  b = eulerian ( n, n )
  a = np.zeros ( ( n, n ) )
  for j in range ( 0, n ):
    a[j,j] = 1.0
    for i in range ( j + 1, n ):
      a[i,j] = - sum ( b[i,k] * a[k,j] for k in range ( j, i ) )
  return a
def eulerian_test ( ):
#*****************************************************************************80
#
## EULERIAN_TEST tests EULERIAN.
#
#  Discussion:
#
#    Smoke test: prints the 4x4 Eulerian matrix.  `r8mat_print` is an
#    external helper module from the same collection and must be
#    importable for this test to run.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    25 January 2015
#
#  Author:
#
#    John Burkardt
#
  import platform
  from r8mat_print import r8mat_print
  print ( '' )
  print ( 'EULERIAN_TEST' )
  print ( '  Python version: %s' % ( platform.python_version ( ) ) )
  print ( '  EULERIAN computes the EULERIAN matrix.' )
  m = 4
  n = m
  a = eulerian ( m, n )
  r8mat_print ( m, n, a, '  EULERIAN matrix:' )
#
#  Terminate.
#
  print ( '' )
  print ( 'EULERIAN_TEST' )
  print ( '  Normal end of execution.' )
  return
if ( __name__ == '__main__' ):
  # Demonstration driver: bracket the self-test with timestamps.
  # `timestamp` is an external helper module (not defined in this file).
  from timestamp import timestamp
  timestamp ( )
  eulerian_test ( )
  timestamp ( )
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
c91974ea7c56b546ae5ff953dd6c549cda27a0ad | 0b0a947c10038152fc56efbdde13eef3330adb34 | /hackerrank-problem-solving-solutions/78. Collections.OrderedDict().py | a197f537b5a740b0a1e16d28c1ba491bb31ec056 | [] | no_license | swapnanildutta/Python-programs | 9c382eb8c823571e4f098fff263d126665fbc575 | d47e2e3c4d648e0cc0ae1b89b83ce4f99db89f63 | refs/heads/master | 2021-11-18T22:16:57.276910 | 2021-09-04T13:07:36 | 2021-09-04T13:07:36 | 197,773,723 | 1 | 26 | null | 2023-04-09T10:51:57 | 2019-07-19T13:02:26 | Python | UTF-8 | Python | false | false | 267 | py | # Author Aman Shekhar
from collections import OrderedDict
# HackerRank "Collections.OrderedDict()": read N lines of "<item name> <price>",
# sum the prices per item, and print items in first-seen order.
order = OrderedDict()
for _ in range(int(input())):
    # rpartition splits on the LAST space, so multi-word item names survive.
    item, space, price = input().rpartition(' ')
    order[item] = order.get(item, 0) + int(price)
for item, price in order.items():
    print(item, price)
"Aman Shekhar"
] | Aman Shekhar |
e47e686c2ad671ccdeaeab3e94483f08c8c05fe4 | d01670aa5bddb47dc414bf01921155610e2a5070 | /leetcode/078_subsets.py | 29242d2656b26a754e499a4cf12e7223cae83858 | [] | no_license | hwillmott/csfundamentals | 14c7e4253b581cef7046ca035bda038c24a52613 | 832f6a8c0deb0569d3fe0dc03e4564c2d850f067 | refs/heads/master | 2020-08-01T12:27:01.914391 | 2020-03-26T16:47:35 | 2020-03-26T16:47:35 | 73,576,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def backtrack(result, nums, currlist, start):
result.append(currlist)
for i in range(start, len(nums)):
backtrack(result, nums, currlist + [nums[i]], i+1)
res = []
backtrack(res, nums, [], 0)
return res
| [
"harriet.willmott@gmail.com"
] | harriet.willmott@gmail.com |
178d77aad9895f4b66d292a42179376af5f5e34e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03014/s558979367.py | 10160eef25c45fbff7a7bc0be7daaaa18cc7f9db | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import sys
import itertools
# import numpy as np
import time
import math
# Competitive-programming solution (uses `sys`, imported on the line above).
# Reads an H x W grid of '.' (open) and '#' (blocked).  For every open cell
# it accumulates the length of the maximal horizontal run and the maximal
# vertical run containing it, then prints max(run_h + run_v - 1) — i.e. the
# largest number of cells reachable in straight lines from a single cell.
sys.setrecursionlimit(10 ** 7)
from collections import defaultdict
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
H, W = map(int, readline().split())
tile = [0 for i in range(H)]
cnt = [[0 for _ in range(W)] for _ in range(H)]
for i in range(H):
    tile[i] = readline().decode().strip()
# Horizontal pass: for each row, credit every cell of an open run with the
# run's full length; `done` skips cells already covered by an earlier run.
for i in range(H):
    done = [False for _ in range(W)]
    for j in range(W):
        if tile[i][j] == '#':
            continue
        if done[j]:
            continue
        l = 0
        while (j + l < W):
            if tile[i][j + l] == '#':
                break
            l += 1
        for k in range(l):
            cnt[i][j + k] += l
            done[j + k] = True
# Vertical pass: same idea down each column, adding the vertical run length.
for j in range(W):
    done = [False for _ in range(H)]
    for i in range(H):
        if tile[i][j] == '#':
            continue
        if done[i]:
            continue
        l = 0
        while (i + l < H):
            if tile[i + l][j] == '#':
                break
            l += 1
        for k in range(l):
            cnt[i + k][j] += l
            done[i + k] = True
# Each open cell was counted once in both passes, so subtract 1 for itself.
ans = 0
for i in range(H):
    for j in range(W):
        ans = max(cnt[i][j] - 1, ans)
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9923cd5ddfe1039cfdbe9ee05bffe6cd6681e49c | 9a42085c664730fb45267365d38df5de18ee2137 | /module_path/__init__.py | ad016d13417ade909f89a78d2bfc1ddedc0457a6 | [
"MIT"
] | permissive | justengel/module_path | 09e8a073b3013c5ea38f06791786042f1db106d0 | 2f2feedaa03f07f9a86e04cb96e6a7edc7fd30d6 | refs/heads/master | 2023-07-03T11:07:50.468178 | 2021-07-23T16:05:42 | 2021-07-23T16:05:42 | 322,051,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,917 | py | """
Get a modules path.
Notes:
* sys._MEIPASS - Created by pyinstaller executable. This is the directory of the executable
* If regular python run this does not exist
* If pyinstaller created a directory this is the directory that contains the executable
* If pyinstaller onefile this is "C:\\Users\\username\\AppData\\Local\\Temp\\_MEI#####" which is some temp directory.
* frame.f_code.co_filename
* In regular python run this is the absolute path of the module. "C:\\...\\check_path.py"
* If pyinstaller created a directory this is the module filename "check_path.py"
* If pyinstaller onefile this is the module filename "check_path.py"
* module.__file__ (matches frame.f_code.co_filename)
* In regular python run this is the absolute path of the module. "C:\\...\\check_path.py"
* If pyinstaller created a directory this is the module filename "check_path.py"
* If pyinstaller onefile this is the module filename "check_path.py"
* sys.executable
* If regular python run this is the path to your python.exe
* If pyinstaller created a directory this is the absolute path to the executable
* If pyinstaller onefile this is the absolute path to the executable
"""
import os
import sys
import inspect
import contextlib
# Prefer the stdlib implementation (Python 3.9+), then the
# importlib_resources backport, and finally a hand-rolled shim that also
# understands PyInstaller's sys._MEIPASS layout.
try:
    from importlib.resources import files, as_file
    from importlib.abc import Traversable
except (ImportError, Exception):
    try:
        # BUG FIX: this branch previously imported the nonexistent name
        # ``as_files``, which both failed the import and would have left
        # ``as_file`` undefined; the backport exports ``as_file``.
        from importlib_resources import files, as_file
        from importlib_resources.abc import Traversable
    except (ImportError, Exception):
        import inspect
        from pathlib import Path
        Traversable = Path
        def files(module):
            """Shim for importlib.resources.files: return a Path for a module,
            package, or dotted module name."""
            if isinstance(module, str):
                if '.' in module:
                    # Import the top level package and manually add a directory for each "."
                    toplvl, remain = module.split('.', 1)
                else:
                    toplvl, remain = module, ''
                # Get or import the module
                try:
                    module = sys.modules[toplvl]
                    path = Path(inspect.getfile(module))
                except (KeyError, Exception):
                    try:
                        module = __import__(toplvl)
                        path = Path(inspect.getfile(module))
                    except (ImportError, Exception):
                        # Fall back to treating the name as a literal path.
                        module = toplvl
                        path = Path(module)
                # Get the path of the module (a package resolves to its directory)
                if path.with_suffix('').name == '__init__':
                    path = path.parent
                # Find the path from the top level module
                for pkg in remain.split('.'):
                    path = path.joinpath(pkg)
            else:
                path = Path(inspect.getfile(module))
                if path.with_suffix('').name == '__init__':
                    path = path.parent
            return path
        @contextlib.contextmanager
        def as_file(path):
            """Shim for importlib.resources.as_file: yield a filesystem path,
            retrying relative to the PyInstaller bundle directory if needed."""
            p = str(path)
            if not os.path.exists(p):
                p = os.path.join(getattr(sys, '_MEIPASS', os.path.dirname(sys.executable)), str(path))
            if not os.path.exists(p):
                # NOTE(review): the extra '' component makes this identical to
                # the join above; kept for behavioral compatibility.
                p = os.path.join(getattr(sys, '_MEIPASS', os.path.dirname(sys.executable)), '', str(path))
            yield p
# Public API of the module: the importlib.resources-style helpers plus
# convenience re-exports of the common os.path functions, so callers can do
# all of their path handling through this one module.
__all__ = ['files', 'as_file', 'Traversable',
           'my_path', 'my_dir',
           'isfile', 'isdir', 'isabs', 'dirname', 'basename', 'join', 'exists', 'abspath', 'relpath', 'realpath',
           ]
# Direct aliases of os.path helpers (no behavior change).
isfile = os.path.isfile
isdir = os.path.isdir
isabs = os.path.isabs
dirname = os.path.dirname
basename = os.path.basename
join = os.path.join
exists = os.path.exists
abspath = os.path.abspath
relpath = os.path.relpath
realpath = os.path.realpath
def my_path(*args, back=1, **kwargs):
    """Return the file path of the module that called this function.

    Args:
        back (int)[1]: Number of call frames to step back; the default of 1
            resolves the immediate caller.

    Returns:
        str: The caller frame's filename.  In a regular Python run this is
            an existing absolute path.  Inside a PyInstaller executable the
            frame filename is relative, so it is joined onto ``sys._MEIPASS``
            (or the executable's directory); for one-file bundles the joined
            path may not actually exist on disk.
    """
    # Walk up the call stack to the requested caller frame.
    frame = inspect.currentframe()
    for _ in range(back):
        frame = frame.f_back
    # Get the frame filename
    filename = frame.f_code.co_filename  # Will be abspath with regular python run
    # Check if exists (in pyinstaller executables this will not exist)
    if isabs(filename) and os.path.exists(filename):
        return filename
    else:
        # Note pyinstaller onefile will create a temp directory and create all pyd (C extension) files in that dir.
        exe_path = getattr(sys, '_MEIPASS', os.path.dirname(sys.executable))
        # Create the new filename
        filename = os.path.join(exe_path, filename)  # This may not exist, but the directory should
        return filename
def my_dir(*args, back=1, **kwargs):
    """Return the directory of the module that called this function.

    Args:
        back (int)[1]: Number of frames to step back.
            By default this is 1 so the module that calls this function is used.
    """
    # Add one to ``back`` to skip over this wrapper's own frame.
    caller_file = my_path(back=back + 1)
    return os.path.dirname(caller_file)
| [
"jtengel08@gmail.com"
] | jtengel08@gmail.com |
a60e9fb88399b262c87a1ba767671f6af8aeb26d | bbb36e65c62fa824807b2f85a20e491140338f72 | /src/infrastructure/django_framework/camera_ctrl/migrations/0005_remove_generalsettings_send_email_on_sync_error.py | fa4554a74f7655481cfa3177d854018ebf3c3124 | [] | no_license | TermanEmil/CameraController | 0d4338a3365431efb0b28dfb409b6a72c0d256c6 | c996868be9cfb6e6e44ae90d77346e7f700d177c | refs/heads/master | 2023-02-18T07:59:21.876482 | 2022-12-29T14:37:01 | 2022-12-29T14:37:01 | 195,222,744 | 3 | 0 | null | 2023-02-15T20:21:28 | 2019-07-04T10:41:15 | Python | UTF-8 | Python | false | false | 356 | py | # Generated by Django 2.2.4 on 2019-10-06 21:12
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the `send_email_on_sync_error`
    # field from the GeneralSettings model.
    dependencies = [
        ('camera_ctrl', '0004_generalsettings'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='generalsettings',
            name='send_email_on_sync_error',
        ),
    ]
| [
"terman.emil@gmail.com"
] | terman.emil@gmail.com |
8c5a0f3c69fe151453f691e54a452340bee2cdda | 9d57216d173cc2c5ba5fba6d5845c01c82dccf8f | /pytransform3d/transformations/__init__.py | 0f7ca5acef3d20b65ae6f350840b19555aa39f46 | [
"BSD-3-Clause"
] | permissive | mhirak/pytransform3d | e34b02a435cf352f1da111f0c7d5e7ab58e9092e | 8f3065bfea913953656cf772efbd34256930172b | refs/heads/master | 2023-08-31T21:20:43.586968 | 2021-09-13T08:02:07 | 2021-09-13T08:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | """Transformations in three dimensions - SE(3).
See :doc:`transformations` for more information.
"""
from ._utils import (
check_transform, check_pq, check_screw_parameters, check_screw_axis,
check_exponential_coordinates, check_screw_matrix, check_transform_log,
check_dual_quaternion)
from ._conversions import (
transform_from, rotate_transform, translate_transform,
pq_from_transform, transform_from_pq,
transform_from_transform_log, transform_log_from_transform,
transform_from_exponential_coordinates,
exponential_coordinates_from_transform,
screw_parameters_from_screw_axis, screw_axis_from_screw_parameters,
exponential_coordinates_from_screw_axis,
screw_axis_from_exponential_coordinates,
transform_log_from_exponential_coordinates,
exponential_coordinates_from_transform_log,
screw_matrix_from_screw_axis, screw_axis_from_screw_matrix,
transform_log_from_screw_matrix, screw_matrix_from_transform_log,
dual_quaternion_from_transform, transform_from_dual_quaternion,
screw_parameters_from_dual_quaternion,
dual_quaternion_from_screw_parameters,
dual_quaternion_from_pq, pq_from_dual_quaternion,
adjoint_from_transform, norm_exponential_coordinates)
from ._transform_operations import (
invert_transform, scale_transform, concat,
vector_to_point, vectors_to_points, vector_to_direction,
vectors_to_directions, transform)
from ._dual_quaternion_operations import (
dq_q_conj, dq_conj, concatenate_dual_quaternions, dual_quaternion_sclerp,
dual_quaternion_power, dq_prod_vector)
from ._random import random_transform, random_screw_axis
from ._plot import plot_transform, plot_screw
from ._testing import (
assert_transform, assert_screw_parameters_equal,
assert_unit_dual_quaternion_equal, assert_unit_dual_quaternion)
__all__ = [
"check_transform", "check_pq", "check_screw_parameters",
"check_screw_axis", "check_exponential_coordinates", "check_screw_matrix",
"check_transform_log", "check_dual_quaternion",
"transform_from", "rotate_transform", "translate_transform",
"pq_from_transform", "transform_from_pq",
"transform_from_transform_log", "transform_log_from_transform",
"transform_from_exponential_coordinates",
"exponential_coordinates_from_transform",
"screw_parameters_from_screw_axis", "screw_axis_from_screw_parameters",
"exponential_coordinates_from_screw_axis",
"screw_axis_from_exponential_coordinates",
"transform_log_from_exponential_coordinates",
"exponential_coordinates_from_transform_log",
"screw_matrix_from_screw_axis", "screw_axis_from_screw_matrix",
"transform_log_from_screw_matrix", "screw_matrix_from_transform_log",
"dual_quaternion_from_transform", "transform_from_dual_quaternion",
"screw_parameters_from_dual_quaternion",
"dual_quaternion_from_screw_parameters",
"dual_quaternion_from_pq", "pq_from_dual_quaternion",
"adjoint_from_transform",
"norm_exponential_coordinates",
"invert_transform", "scale_transform", "concat",
"vector_to_point", "vectors_to_points", "vector_to_direction",
"vectors_to_directions", "transform",
"random_transform", "random_screw_axis",
"dq_q_conj", "dq_conj", "concatenate_dual_quaternions",
"dual_quaternion_sclerp", "dual_quaternion_power", "dq_prod_vector",
"plot_transform", "plot_screw",
"assert_transform", "assert_screw_parameters_equal",
"assert_unit_dual_quaternion_equal", "assert_unit_dual_quaternion"
]
| [
"afabisch@googlemail.com"
] | afabisch@googlemail.com |
c0f6e796c04e5b68ea5f4626c0ecd09334120e57 | 37c243e2f0aab70cbf38013d1d91bfc3a83f7972 | /pp7TeV/HeavyIonsAnalysis/JetAnalysis/python/jets/ak7PFJetSequence_pp_mix_cff.py | d5943280b61cf90b5da4cc7c4967ef1fb51e3072 | [] | no_license | maoyx/CMSWork | 82f37256833cbe4c60cb8df0b4eb68ceb12b65e7 | 501456f3f3e0f11e2f628b40e4d91e29668766d5 | refs/heads/master | 2021-01-01T18:47:55.157534 | 2015-03-12T03:47:15 | 2015-03-12T03:47:15 | 10,951,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,380 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
ak7PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("ak7PFJets"),
matched = cms.InputTag("ak7HiGenJets")
)
ak7PFparton = patJetPartonMatch.clone(src = cms.InputTag("ak7PFJets"),
matched = cms.InputTag("genParticles")
)
ak7PFcorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("ak7PFJets"),
payload = "AK7PF_generalTracks"
)
ak7PFpatJets = patJets.clone(jetSource = cms.InputTag("ak7PFJets"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak7PFcorr")),
genJetMatch = cms.InputTag("ak7PFmatch"),
genPartonMatch = cms.InputTag("ak7PFparton"),
jetIDMap = cms.InputTag("ak7PFJetID"),
addBTagInfo = False,
addTagInfos = False,
addDiscriminators = False,
addAssociatedTracks = False,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = False
)
ak7PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("ak7PFpatJets"),
genjetTag = 'ak7HiGenJets',
rParam = 0.7,
matchJets = cms.untracked.bool(False),
matchTag = 'patJets',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("hiSignal")
)
ak7PFJetSequence_mc = cms.Sequence(
ak7PFmatch
*
ak7PFparton
*
ak7PFcorr
*
ak7PFpatJets
*
ak7PFJetAnalyzer
)
ak7PFJetSequence_data = cms.Sequence(ak7PFcorr
*
ak7PFpatJets
*
ak7PFJetAnalyzer
)
ak7PFJetSequence_jec = ak7PFJetSequence_mc
ak7PFJetSequence_mix = ak7PFJetSequence_mc
ak7PFJetSequence = cms.Sequence(ak7PFJetSequence_mix)
| [
"yaxian.mao@cern.ch"
] | yaxian.mao@cern.ch |
7e59014221dd7e327050963256603c05eaca9fd4 | e254c72d3fd11306c8625c5d8ad8ac394eabc6c6 | /04.beautifulSoup/BeautifulSoup02/main6.py | e54aadb69a107353f55b1bc1fb95d2b8f5a1ec93 | [] | no_license | Edward83528/crawlerToMachinLearningAndBot | 87c7ea92779b949ad5015612a4e70275becab480 | 82818137b517f4c5a856535f83a8cb8b211da8aa | refs/heads/master | 2022-11-06T19:41:20.473933 | 2020-07-04T14:01:07 | 2020-07-04T14:01:07 | 268,072,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,643 | py | #coding:utf-8
#65001
import urllib.request
import json
import codecs
import sys
import argparse as ap
import time
import datetime
import requests
from bs4 import BeautifulSoup as bs
from urllib.parse import quote
#python main.py 八仙塵爆 2015-06-27 2015-08-24 1
#def argParse():
# parser=ap.ArgumentParser(description='Liberty Time Net Crawler')
# parser.add_argument("keyword", help="Serch Keyword")
# parser.add_argument("start_date", help="Start (2017-01-01)")
# parser.add_argument("end_date", help="End (2017-01-02)")
# parser.add_argument("pages", help="Pages")
# return parser.parse_args()
#args=argParse()
#keyword = quote(args.keyword)
#start_date = args.start_date
#end_date = args.end_date
#pages = args.pages
keyword = quote('八仙塵爆')
start_date = '2015-06-27'
end_date = '2015-08-24'
pages = '1'
def start_requests():
    """Build one Liberty Times search URL per requested page and crawl each.

    Relies on the module-level globals `keyword`, `start_date`, `end_date`
    and `pages`; both dates must be 'YYYY-MM-DD' or an error is printed.
    """
    if( len(start_date.split("-"))==3 and len(end_date.split("-"))==3) :
        SYear = start_date.split("-")[0]
        SMonth = start_date.split("-")[1]
        SDay = start_date.split("-")[2]
        EYear = end_date.split("-")[0]
        EMonth = end_date.split("-")[1]
        EDay = end_date.split("-")[2]
        urls = []
        # One result page per URL, pages are 1-based and inclusive.
        for i in range(1,int(pages)+1):
            str_idx = ''+('%s' % i)
            urls.append('http://news.ltn.com.tw/search?keyword='+keyword+'&conditions=and&SYear='+SYear+'&SMonth='+SMonth+'&SDay='+SDay+'&EYear='+EYear+'&EMonth='+EMonth+'&EDay='+EDay+'&page='+str_idx+'')
        for url in urls:
            print (url)
            parseLtnNews(url)
            time.sleep(0.5)  # throttle between requests
    else:
        print ("Data format error.")
def request_uri(uri):
    """GET `uri` with a desktop-browser User-Agent and return the HTML text."""
    header = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
    rs = requests.session()
    res = rs.get(uri, headers=header)
    html_data = res.text
    #r = requests.post(url=uri, headers={'Connection':'close'})
    return html_data
def parseLtnNews(uri):
    """Parse one search-result page and accumulate rows in the module-level
    `items` list.

    NOTE(review): this looks buggy — `postdate` and `body` are initialised
    as lists but rebound to plain strings inside the soup loops, so the
    trailing `while` iterates over the characters of a 10-char date string
    and indexes single characters of the article text. Confirm intent
    before relying on the "title"/"link"/"postdate" rows.
    """
    postdate = []
    link = []
    title = []
    body = []
    html_data = request_uri(uri)
    soup = bs(html_data,'html.parser')
    for ul_soup in soup.findAll('ul',attrs={"id":"newslistul"}):
        for span_soup in ul_soup.findAll('span'):
            # Rebinds postdate to a date-like string; the last span wins.
            postdate = span_soup.string.replace(" ","")[:10]
        for li_soup in ul_soup.findAll('li'):
            p_list = li_soup.findAll('p')
            body=p_list[1].getText()
            items.append({"uri":uri,"body":body,"updatetime":datetime.datetime.now().strftime('%Y-%m-%d')})
            #print({"uri":uri,"body":body,"updatetime":datetime.datetime.now().strftime('%Y-%m-%d')})
        for a_soup in ul_soup.findAll('a',attrs={"class":"tit"}):
            tle = a_soup.getText()
            lnk = 'http://news.ltn.com.tw'+a_soup.get('href')
            title.append(tle.strip())
            link.append(lnk)
            #print(tle)
            #print(lnk)
    #TO DO
    current = 0
    # NOTE(review): len(postdate) is the length of the date *string* here,
    # and body[current] yields a single character — see docstring.
    while current < len(postdate):
        items.append({
            "title": title[current],
            "link":link[current],
            "body":body[current],
            "postdate":postdate[current],
            #"updatetime":datetime.datetime.now(), # MongoDB
            "updatetime":datetime.datetime.now().strftime('%Y-%m-%d')
        })
        current+=1
if __name__ == '__main__':
items = []
start_requests();
row_json = json.dumps(items, ensure_ascii=False)
file = codecs.open(urllib.parse.unquote(keyword)+'.json', 'w', encoding='utf-8')
file.write(row_json)
file.close()
print("Done") | [
"u0151051@gmail.com"
] | u0151051@gmail.com |
c48910b35aeb43f63ba5477826a13f4dfe3b0a88 | 27276ec746f3dcf6ca815961377b98e529338951 | /projects/demo/numpy_demo.py | 79178b40903096210dd91728e29695af46f0c963 | [] | no_license | fengyouliang/mmdetection_projects | a084281a6fcf223ac1950a5c1081226153b394b2 | 3d877624ab9b1f438c6a5c63402626cd3138b5bb | refs/heads/master | 2022-12-26T10:11:45.522474 | 2020-10-10T09:59:13 | 2020-10-10T09:59:13 | 281,071,083 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | import numpy as np
class Box:
    """Axis-aligned rectangle backed by an integer numpy array."""
    def __init__(self, rectangle):
        '''
        rectangle class.
        :param rectangle: a list of [xmin, ymin, xmax, ymax]
        '''
        # NOTE(review): np.int was removed in NumPy >= 1.24; plain int
        # (or np.int64) would be the safe replacement here.
        self.rec = np.array(rectangle).astype(np.int)
    @property
    def shape(self):
        '''
        get shape of Box.
        :return: shape of (width, height), or None for a degenerate box.
        '''
        if ((self.rec[2:] - self.rec[:2]) >= 0).all():
            wh = self.rec[2:] - self.rec[:2]
            return tuple(wh)
        else:
            return
    @property
    def area(self):
        # width * height; 0 when the box is degenerate (shape is None).
        s = self.shape
        if s is not None:
            return np.prod(s)
        else:
            return 0
    def overlap(self, other, is_iou=True):
        """Intersection-over-union with `other`, or the overlap/self-area
        ratio when is_iou is False. Both boxes must have positive area."""
        area1, area2 = self.area, other.area
        assert area1 > 0 and area2 > 0, 'rectangle area must be postive number.'
        rec1 = self.rec
        rec2 = other.rec
        rec1 = np.array(rec1)
        rec2 = np.array(rec2)
        # Intersection corners: max of the mins, min of the maxes.
        top_left = np.maximum(rec1[:2], rec2[:2])
        bottom_right = np.minimum(rec1[2:], rec2[2:])
        overlap = Box([*top_left, *bottom_right]).area  # 0 when disjoint
        if is_iou:
            return float(overlap) / (area1 + area2 - overlap)
        else:
            return float(overlap) / area1
    def expand_by_delta(self, delta, boundary):
        """Grow the box by `delta` on every side, clamped to `boundary`
        ([xmin, ymin, xmax, ymax]); also return the per-corner shift."""
        xmin, ymin, xmax, ymax = self.rec
        bxmin, bymin, bxmax, bymax = boundary
        exmin = max(xmin - delta, bxmin)
        eymin = max(ymin - delta, bymin)
        exmax = min(xmax + delta, bxmax)
        eymax = min(ymax + delta, bymax)
        dt = np.array([exmin, eymin, exmax, eymax]) - self.rec
        return Box([exmin, eymin, exmax, eymax]), dt
    # def __repr__(self):
    #     print('repr')
    #     return str(self.rec)
    def __array__(self):
        # Called by np.array(box); the print is a demo side effect.
        print('array')
        return self.rec
if __name__ == '__main__':
print()
a = Box([1, 2, 3, 4])
print()
print(a)
b = np.array(a)
print()
print(b)
print()
| [
"1654388696@qq.com"
] | 1654388696@qq.com |
204b28dbe6e3e380bd5198a01b9c562dae452234 | d512a6953008941caa36d47dcddb0dd8e3758d84 | /train.py | 106dcf890e8e14cd700ac5f803821ccdf661d56a | [] | no_license | alibuda/CCKS_QA | 8dd541a51118cf5f9c168b909c14e3360aab351f | 27394f91a499731b85b31c785b50f7418354580d | refs/heads/master | 2020-04-20T05:14:19.780628 | 2018-08-12T09:13:34 | 2018-08-12T09:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | import tensorflow as tf
from read_utils import TextConverter, batch_generator,load_origin_data,val_samples_generator
import os
import argparse # 用于分析输入的超参数
def parseArgs(args):
    """Build the hyper-parameter parser and parse *args*.

    Args:
        args: list of argument strings (e.g. sys.argv[1:]).

    Returns:
        argparse.Namespace holding the training hyper-parameters.
    """
    parser = ap.ArgumentParser()
    group = parser.add_argument_group('test超参数')
    # (flag, type, default, help) — registered in a single loop below.
    option_specs = [
        ('--file_name', str, 'default', 'name of the model'),
        ('--batch_size', int, 100, 'number of seqs in one batch'),
        ('--num_steps', int, 100, 'length of one seq'),
        ('--hidden_size', int, 128, 'size of hidden state of lstm'),
        ('--num_layers', int, 2, 'number of lstm layers'),
        ('--use_embedding', bool, False, 'whether to use embedding'),
        ('--embedding_size', int, 128, 'size of embedding'),
        ('--learning_rate', float, 0.001, 'learning_rate'),
        ('--train_keep_prob', float, 0.7, 'dropout rate during training'),
        ('--max_steps', int, 100000, 'max steps to train'),
        ('--save_every_n', int, 100, 'save the model every n steps'),
        ('--log_every_n', int, 20, 'log to the screen every n steps'),
        ('--fc_activation', str, 'sigmoid', 'funciton of activated'),
        ('--feats', str, 'all', 'features of query'),
        ('--batch_norm', bool, False, 'standardization'),
        ('--op_method', str, 'adam', 'method of optimizer'),
        ('--checkpoint_path', str, 'models/thoth3/', 'checkpoint path'),
        ('--lr_decay', bool, False, 'standardization'),
    ]
    for flag, value_type, default, help_text in option_specs:
        group.add_argument(flag, type=value_type, default=default, help=help_text)
    return parser.parse_args(args)
## thoth 问答
args_in = '--file_name n26b200h400F ' \
'--num_steps 26 ' \
'--batch_size 200 ' \
'--learning_rate 0.001 ' \
'--hidden_size 400 ' \
'--fc_activation sigmoid ' \
'--op_method adam ' \
'--max_steps 200000'.split()
FLAGS = parseArgs(args_in)
def main(_):
    """Entry point for tf.app.run(): prepare vocab/QA data, then train.

    Reads every hyper-parameter from the module-level FLAGS namespace.
    """
    model_path = os.path.join('models', FLAGS.file_name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    # NOTE(review): operator precedence makes this read as
    # `exists(converter.pkl) or (exists(QAs.pkl) is False)`, i.e. the vocab
    # is REBUILT whenever converter.pkl exists. The intent was probably
    # `not (exists(converter.pkl) and exists(QAs.pkl))` — confirm.
    if os.path.exists(os.path.join(model_path, 'converter.pkl')) or os.path.exists(os.path.join(model_path, 'QAs.pkl')) is False:
        print('词库文件不存在,创建...')
        QAs, text = load_origin_data('data/task3_train.txt')
        converter = TextConverter(text, 5000)
        converter.save_to_file(converter.vocab ,os.path.join(model_path, 'converter.pkl'))
        converter.save_to_file(QAs,os.path.join(model_path, 'QAs.pkl'))
    else:
        converter = TextConverter(filename=os.path.join(model_path, 'converter.pkl'))
        QAs = converter.load_obj(filename=os.path.join(model_path, 'QAs.pkl'))
    QA_arrs = converter.QAs_to_arrs(QAs, FLAGS.num_steps)
    # 90/10 train/validation split.
    thres = int(len(QA_arrs) * 0.9)
    train_samples = QA_arrs[:thres]
    val_samples = QA_arrs[thres:]
    train_g = batch_generator(train_samples, FLAGS.batch_size)
    val_g = val_samples_generator(val_samples)
    print('use embeding:',FLAGS.use_embedding)
    print('vocab size:',converter.vocab_size)
    from model3 import Model
    model = Model(converter.vocab_size,FLAGS,test=False, embeddings=None)
    # Resume from the latest checkpoint when one exists.
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)
    model.train(train_g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                val_g
                )
if __name__ == '__main__':
tf.app.run() | [
"zoulingwei@cyou-inc.com"
] | zoulingwei@cyou-inc.com |
2f08353be95a5c836ae59a52d53cd5a296acde31 | 78980891d3137810bf3a3c1bb229966b7f49f0dd | /data_structure/3/ll.py | f43ad13c667a531c3fbabe2aea2be2b7fd278900 | [] | no_license | miniyk2012/leetcode | 204927d3aefc9746070c1bf13abde517c6c16dc0 | 91ca9cd0df3c88fc7ef3c829dacd4d13f6b71ab1 | refs/heads/master | 2021-06-17T21:50:31.001111 | 2021-03-10T11:36:23 | 2021-03-10T11:36:23 | 185,042,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | class LinkedList:
    def __init__(self):
        # An empty list is represented by a None head.
        self.head = None
def add(self, v):
if self.head is not None:
self.head.add(v)
return
self.head = Node(v)
    def print(self):
        # Print every value on one line; a no-op for an empty list.
        if self.head:
            self.head.print()
    def pophead(self):
        """Detach and return the head node; raise on an empty list."""
        if self.head is None:
            raise Exception('链表为空')
        head = self.head
        self.head = head.next
        # Sever the popped node from the rest of the chain.
        head.next = None
        return head
def first(self, n):
yield from self.head.first(n)
def length(self):
if self.head is None:
return 0
new_l = LinkedList()
new_l.head = self.head.next
return 1 + new_l.length()
    def is_empty(self):
        # True when the list holds no nodes.
        return self.head is None
class Node:
    """Single element of a singly linked list."""
    def __init__(self, v):
        self.v = v        # payload
        self.next = None  # following node, or None at the tail
    def print(self):
        """Print this value and every following one, space separated."""
        node = self
        while node is not None:
            print(node.v, end=' ')
            node = node.next
    def add(self, v):
        """Append a new node holding v after the last node of the chain."""
        tail = self
        while tail.next is not None:
            tail = tail.next
        tail.next = Node(v)
    def first(self, n):
        """Yield at most n values starting from this node.

        Mirrors the recursive original: the first value is always
        produced, further values only while more nodes remain and n > 1.
        """
        yield self.v
        node, remaining = self.next, n
        while node is not None and remaining > 1:
            yield node.v
            node = node.next
            remaining -= 1
def run():
    # Demo helper: just announces itself; not called in the visible script.
    print('run!!!')
def count_recursion(n):
    """Print the integers 1 .. n-1 in ascending order.

    Recurses all the way down before printing, so the smallest value
    is printed first.
    """
    if n > 1:
        count_recursion(n - 1)
        print(n - 1)
if __name__ == "__main__":
n = Node(10)
# a.py
print(n.v, n.next)
ll = LinkedList() # []
print('length: ', ll.length())
ll.add(10) # [10]
ll.add(2) # [10, 2]
ll.add(-3) # [10, 2, -3]
ll.print() # 10, 2 , -3
print('length: ', ll.length())
print()
count_recursion(4)
print('yield..')
for x in ll.first(3):
print(x)
ll.pophead()
ll.print() # 2 , -3
print()
print('length: ', ll.length())
"""
python 中 deque is a doubly linked list while List is just an array.
""" | [
"yk_ecust_2007@163.com"
] | yk_ecust_2007@163.com |
f5b0c0334a7c08a30029ae177be681688724e975 | f2a12bc1435111dd4e2afda02834bb3cd53ed8d8 | /vgc/__main__.py | 5f412a68a21c70a6b59104fb498a550e4c5fe13e | [
"MIT"
] | permissive | reedessick/video-game-camp | c83504d63637bc8c2c8f8b4067ec277233b74d4d | 09a324279c5ea9de87080f122fe27e1ef83d5373 | refs/heads/master | 2022-11-06T11:00:32.526460 | 2020-06-19T16:28:12 | 2020-06-19T16:28:12 | 272,338,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | """a basic menu from which users can navigate to different games they have designed.
"""
__author__ = 'Reed Essick (reed.essick@gmail.com)'
#-------------------------------------------------
import sys
import inspect
### non-standard libraries
import vgc
#-------------------------------------------------
def print_available_games(games):
    """Print one ' -- '-prefixed line per available game name."""
    for title in games:
        print(' -- ' + title)
def select_game(games):
    """interact with the command line to select a game"""
    Ngames = len(games)
    if Ngames == 0: ### no games available
        print('I\'m sorry, but there are no games currently available. Please design a game soon so we can get playing!')
        sys.exit(0)
    elif Ngames==1:
        # NOTE(review): dict.items() is not subscriptable on Python 3
        # (TypeError); together with raw_input below this reads as
        # Python 2 code. Under Python 3 use list(games.items())[0].
        print('There is only a single game available!')
        return games.items()[0]
    else:
        print('Please tell me which of the following games you would like to play!')
        print_available_games(games)
        selected = raw_input('')
        # Re-prompt until the user names an available game or types "exit".
        while selected not in games: ### make sure the specified game is available
            print('I\'m sorry, but I did not understand. Please specify one of the following, or specify "exit" to quit')
            print_available_games(games)
            selected = raw_input('')
            if selected == 'exit': ### quit
                sys.exit(0)
        # Returns a (name, game-module) pair, like the single-game branch.
        return selected, games[selected]
#------------------------
def main():
    """the basic function that will be run when this module is called as an executable. This should discover the available games and prompt the user to select which game they would like to play. It should then launch that game.
    Note, users should also be able to launch individual games directly by calling the associated modules that live within vgc."""
    name, game = select_game(vgc.KNOWN_GAMES)
    print('---- Launching: %s -----'%name)
    game.game.main()  # each KNOWN_GAMES entry exposes a game.main() entry point
    sys.exit(0)
#-------------------------------------------------
main()
| [
"reed.essick@ligo.org"
] | reed.essick@ligo.org |
d89661ef6fd64a848d58e944a0359d58cf2e99c5 | da172d7a739ee31c760bb06a2b979037dda01613 | /ws/executors/wsgi_python.py | f9b7b669c752728cb333ef6fd1e85300accff134 | [] | no_license | jonhillmtl/web-server | b5f87e315364b699275140bf5ad1b8475529f96a | 4b6c123954dfdc07007a46dbf4799c2ba912c768 | refs/heads/master | 2020-03-27T20:31:31.247819 | 2018-09-04T20:12:15 | 2018-09-04T20:12:15 | 147,075,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | import os
import importlib.util
from .base import BaseRequestExecutor, InternalServerError
class WsgiPythonRequestExecutor(BaseRequestExecutor):
    """Serves a request by loading and running a vhost's WSGI module."""
    def serve(self):
        """Load the module at vhost['wsgi_path'] and call its execute(request).

        Any failure — missing file or an error raised inside the loaded
        module — is surfaced as InternalServerError.
        """
        try:
            wsgi_path = os.path.expanduser(self.vhost['wsgi_path'])
            # Import the module directly from its file path, bypassing sys.path.
            spec = importlib.util.spec_from_file_location("wsgi", wsgi_path)
            wsgi = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(wsgi)
            return wsgi.execute(self.request)
        except FileNotFoundError as e:
            raise InternalServerError(e)
        except Exception as e:
            raise InternalServerError(e)
"jon@jonhill.ca"
] | jon@jonhill.ca |
34106f33d7f4aec21edf9b7e288d4621593a29cb | 2f0aa66e14c6595289f6a0de2bdf71e9922052a7 | /nextApi/invitation/urls.py | b71dc47d9815d9986fa4dd3305973670cfc11d12 | [] | no_license | aimethierry/NextApi | 8f83a2b0f499fdf5118eb930baa051584cfd9aa5 | 90884ee6d900ce71116b40276dda0e97bec0b521 | refs/heads/master | 2022-12-11T09:03:54.981284 | 2020-09-19T12:40:36 | 2020-09-19T12:40:36 | 296,866,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
from .views import InvitationView
urlpatterns = [
path(r'createInvitation/', InvitationView.as_view()),
] | [
"aime.thierry97@gmail.com"
] | aime.thierry97@gmail.com |
54e88f8afdc0c2207e7b3a33889e5d54e6ef2ea2 | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0816-Ambiguous-Coordinates/soln.py | 6af316ab69487b3c7557024a2af3c249b1bc088c | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 1,079 | py | class Solution:
def ambiguousCoordinates(self, S):
"""
:type S: str
:rtype: List[str]
"""
def valid(x):
if '.' not in x:
return str(int(x)) == x
else:
idx = x.find('.')
int_part, frac_part = x[:idx], x[idx + 1:]
if len(int_part) > 1 and int_part[0] == '0':
return False
if len(frac_part) > 0 and frac_part[-1] == '0':
return False
return True
S = S[1:-1]
n = len(S)
ans = []
for i in range(1, n):
left, right = S[:i], S[i:]
for x in [left] + ['{}.{}'.format(left[:k], left[k:]) for k in range(1, len(left))]:
if not valid(x):
continue
for y in [right] + ['{}.{}'.format(right[:j], right[j:]) for j in range(1, len(right))]:
if valid(y):
ans.append('({}, {})'.format(x, y))
# print(valid("1.23"))
return ans
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
039a0870e48a245cf17235ef7fc5554fe7700500 | 6671be3a542925342379d5f6fc691acfebbe281f | /discounts/src/consumer/sqs.py | 4585bcd66519608533b7fb52fb144fef9dd70dc4 | [
"Apache-2.0"
] | permissive | dalmarcogd/mobstore | e79b479b39474873043345b70f7e972f304c1586 | 0b542b9267771a1f4522990d592028dc30ee246f | refs/heads/main | 2023-04-29T22:27:20.344929 | 2021-05-18T12:00:00 | 2021-05-18T12:00:00 | 365,539,054 | 0 | 0 | Apache-2.0 | 2021-05-17T23:22:58 | 2021-05-08T14:46:34 | Go | UTF-8 | Python | false | false | 1,391 | py | import json
import logging
from typing import Callable, Dict, List
import boto3
from src import settings
_sqs = boto3.client('sqs', region_name=settings.AWS_REGION,
aws_access_key_id=settings.AWS_ACCESS_KEY,
aws_secret_access_key=settings.AWS_SECRET_KEY,
endpoint_url=settings.AWS_ENDPOINT)
def start_pool(queue: str, handler: Callable):
    """Poll `queue` forever, passing each JSON-decoded message body to `handler`.

    A message is deleted only after `handler` returns without raising, so a
    failing message is left on the queue for redelivery by SQS. All errors
    are logged and polling continues.
    """
    while True:
        try:
            response = _sqs.receive_message(
                QueueUrl=queue,
                MaxNumberOfMessages=1,
                MessageAttributeNames=[
                    'All'
                ],
                WaitTimeSeconds=2  # short long-poll between empty receives
            )
            if 'Messages' in response:
                try:
                    messages: List[Dict] = response['Messages']
                    for message in messages:
                        receipt_handle: str = message.get('ReceiptHandle')
                        body_str: str = message.get('Body')
                        body: Dict = json.loads(body_str)
                        handler(body)
                        # Ack (delete) only after the handler succeeded.
                        _sqs.delete_message(QueueUrl=queue, ReceiptHandle=receipt_handle)
                except Exception as e:
                    # NOTE(review): both log messages say "no message in queue"
                    # but actually report processing errors — consider rewording.
                    logging.error(f'[sqs] error no message in queue -> {e}')
        except Exception as exc:
            logging.error(f'[sqs] error no message in queue -> {exc}')
| [
"dalmarco.gd@gmail.com"
] | dalmarco.gd@gmail.com |
c905595be4974b75bdd595264c0e4286ffc165a2 | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/3.5/contextvars.pyi | ab2ae9e5fabf3a9ae486f509156f09fc5fa1e70a | [
"MIT",
"Apache-2.0"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 1,130 | pyi | from typing import Any, Callable, ClassVar, Generic, Iterator, Mapping, TypeVar, Union
_T = TypeVar('_T')
class ContextVar(Generic[_T]):
    # Stub for contextvars.ContextVar: a context-local variable of type _T.
    def __init__(self, name: str, *, default: _T = ...) -> None: ...
    @property
    def name(self) -> str: ...
    def get(self, default: _T = ...) -> _T: ...
    def set(self, value: _T) -> Token[_T]: ...  # Token allows undoing via reset()
    def reset(self, token: Token[_T]) -> None: ...
class Token(Generic[_T]):
    # Stub for the opaque token returned by ContextVar.set().
    @property
    def var(self) -> ContextVar[_T]: ...
    @property
    def old_value(self) -> Any: ...  # returns either _T or MISSING, but that's hard to express
    # Sentinel old_value used when the variable had no prior value.
    MISSING: ClassVar[object]
def copy_context() -> Context: ...  # snapshot of the current Context
# It doesn't make sense to make this generic, because for most Contexts each ContextVar will have
# a different value.
class Context(Mapping[ContextVar[Any], Any]):
    # Stub: a mapping from ContextVar objects to their values.
    def __init__(self) -> None: ...
    def run(self, callable: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: ...  # execute callable inside this context
    def copy(self) -> Context: ...
    def __getitem__(self, key: ContextVar[Any]) -> Any: ...
    def __iter__(self) -> Iterator[ContextVar[Any]]: ...
    def __len__(self) -> int: ...
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
c40e2fcb4e20ddf289e9e4beb5613e83b69cced8 | c6b9b9f2fbc6c62e7a86b02718954661af3c564f | /configs/flownet2/flownet2cs_8x1_sfine_flyingthings3d_subset_384x768.py | ca4db07d952781b13f83d38c9d6347781425c2bc | [
"Apache-2.0"
] | permissive | open-mmlab/mmflow | a90ff072805ac79cbc0b277baded1e74d25cccf0 | 9fb1d2f1bb3de641ddcba0dd355064b6ed9419f4 | refs/heads/master | 2023-05-22T05:19:48.986601 | 2023-01-10T16:05:18 | 2023-01-10T16:05:18 | 428,493,460 | 808 | 110 | Apache-2.0 | 2023-09-05T13:19:38 | 2021-11-16T02:42:41 | Python | UTF-8 | Python | false | false | 374 | py | _base_ = [
'../_base_/models/flownet2/flownet2cs.py',
'../_base_/datasets/flyingthings3d_subset_384x768.py',
'../_base_/schedules/schedule_s_fine.py', '../_base_/default_runtime.py'
]
# Train on FlyingChairs and finetune on FlyingThings3D subset
load_from = 'https://download.openmmlab.com/mmflow/flownet2/flownet2cs_8x1_slong_flyingchairs_384x448.pth' # noqa
| [
"meowzheng@outlook.com"
] | meowzheng@outlook.com |
4ab5b11e8bc7b4e9791245ba6efa0070a7fe2960 | a28709c421e7f5db9af18476216abe7f41ed64cf | /frameworks/pytorch/examples/3_multi_outputs.py | 9906ee9f5a410c22127a65cb4c7695e681695832 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | microsoft/antares | 8269f93418306fdea14f89032bc861fd7cdf6b24 | 86317b035043daaae4f8bd8bb1bb3b8d1b9f648d | refs/heads/v0.3.x | 2023-08-19T11:18:47.174186 | 2023-08-09T09:02:29 | 2023-08-09T09:02:29 | 274,578,755 | 262 | 37 | NOASSERTION | 2023-09-06T22:19:51 | 2020-06-24T04:58:46 | C++ | UTF-8 | Python | false | false | 906 | py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from antares_core.frameworks.pytorch.custom_op import CustomOp
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dtype = torch.float32
kwargs = {'dtype': dtype,
'device': device,
'requires_grad': False}
input0 = torch.ones(1024 * 512, **kwargs)
input1 = torch.ones(1024 * 512, **kwargs)
custom_op = CustomOp(ir='output0[N] = input0[N] + input1[N]; output1[N] = input0[N].call(`exp`); output2[N] = input1[N] + output1[N];', extra_outputs=['output0', 'output1', 'output2'], input_orders={'input0': input0, 'input1': input1}, device=device).tune(step=100, use_cache=True, timeout=600).emit()
result = custom_op(input0, input1)
print('The result of tensor `%s, %s` is:\n%s' % (custom_op.output_names[0], custom_op.output_names[1], result))
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
67927289ffd53208d56009d2b3654fc46cf8c258 | ac0a4336abfa8f36079203b2ba2e104a59f3ed8b | /Multithreading/thread1.py | 584a040d5525dfe8380e865d962c501eab646baa | [] | no_license | Michal-lis/python_playground | ea422df3c992c01bfe6df5621768df386583eed9 | ec24b7456a0ee872acbcbfa54daa6634dfcfb7be | refs/heads/master | 2022-11-05T20:50:14.809449 | 2019-01-11T13:25:09 | 2019-01-11T13:25:09 | 87,660,323 | 0 | 1 | null | 2022-10-17T11:27:43 | 2017-04-08T19:53:26 | Tcl | UTF-8 | Python | false | false | 827 | py | import threading
import time
ki = range(300)
def calculate_5(li):
    """Return pow(e, 5) for each element of li, with the whole result
    sequence repeated len(li)**2 times.

    Mirrors the original triple loop, whose two outer loops only rebind
    the same variable and multiply the amount of work.
    """
    return [pow(e, 5) for _ in li for _ in li for e in li]
def calculate_4(li):
    """Return pow(e, 4) for each element of li, with the whole result
    sequence repeated len(li)**2 times.

    Mirrors the original triple loop, whose two outer loops only rebind
    the same variable and multiply the amount of work.
    """
    return [pow(e, 4) for _ in li for _ in li for e in li]
thread1 = threading.Thread(target=calculate_5, args=(ki,))
thread2 = threading.Thread(target=calculate_4, args=(ki,))
tt_init_5 = time.time()
thread1.start()
thread2.start()
thread1.join()
thread2.join()
tt_end_5 = time.time()
tt5 = tt_end_5 - tt_init_5
t_init_5 = time.time()
a5 = calculate_5(ki)
t_end_5 = time.time()
t5 = t_end_5 - t_init_5
t_init_4 = time.time()
a4 = calculate_4(ki)
t_end_4 = time.time()
t4 = t_end_4 - t_init_4
print(t4)
print(t5)
print(tt5)
| [
"michallis95@vp.pl"
] | michallis95@vp.pl |
d4a90e6d064d0f890b8d6bd5f03b1b914263bf27 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/edabit/_Edabit-Solutions-master/Limit a Number's Value/solution.py | ce3b9f895d444972b8796e83f0b74883d8c75a31 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 385 | py | ___ limit_number(num, range_low, range_high
__ num < range_low:
r.. range_low
____ num > range_high:
r.. range_high
____
r.. num
___ test
print("test has started")
__ limit_number(5, 1, 10) ! 5:
print("error1")
__ limit_number(-3, 1, 10) ! 1:
print("error2")
__ limit_number(14, 1, 10) ! 10:
print("error3")
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
57106cf1d9c1905a6e255803e406bf9aa13528ea | 64076dd830b5740cf25f54fbf64c6784427801a2 | /security.py | 07a1b66e228677dbf3567bca4c517b48ceb23062 | [] | no_license | Shiv2157k/flask_store | ee9113fa813365429dccb486cb14af6d307f9c0e | 9fe0d74f9c83b00fa769a6bcb6557dca1dfd8d13 | refs/heads/master | 2022-11-26T01:56:28.865821 | 2020-08-02T22:28:05 | 2020-08-02T22:28:05 | 284,491,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from werkzeug.security import safe_str_cmp
from models.user import UserModel
def authenticate(username, password):
    """JWT auth handler: return the user when username/password match.

    Uses constant-time comparison (safe_str_cmp) against the stored value.
    NOTE(review): the stored password appears to be compared verbatim —
    confirm it is hashed upstream. Returns None implicitly on failure.
    """
    user = UserModel.find_by_username(username)
    if user and safe_str_cmp(user.password, password):
        return user
def identity(payload):
    """JWT identity handler: look up the user named by the token payload."""
    user_id = payload["identity"]
    return UserModel.find_by_id(user_id)
| [
"shiv2157.k@gmail.com"
] | shiv2157.k@gmail.com |
2446169dfd9c23bd9ff066bc9404816a83fec2c0 | f82349a5d9cb285ced7c52db1ce95c65f5fd0cf0 | /mars/tensor/expressions/arithmetic/cos.py | ce9407c3b368b6138702495e84f7a15278b473a1 | [
"MIT",
"BSD-3-Clause",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | pingrunhuang/mars | 8d2602356b6f4d9eb7c6dfe4b2c4536b4bdfc229 | ae920c374e9844d7426d0cc09c0d97059dc5341c | refs/heads/master | 2020-04-17T03:42:11.147774 | 2019-01-18T06:49:29 | 2019-01-18T06:49:29 | 166,196,676 | 0 | 0 | Apache-2.0 | 2019-01-17T09:17:25 | 2019-01-17T09:17:25 | null | UTF-8 | Python | false | false | 2,961 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .... import operands
from ..utils import infer_dtype
from .core import TensorUnaryOp
class TensorCos(operands.Cos, TensorUnaryOp):
    """Operand implementing element-wise cosine."""
    def __init__(self, casting='same_kind', err=None, dtype=None, sparse=False, **kw):
        # Fall back to the process-wide NumPy error state when none is given.
        err = err if err is not None else np.geterr()
        super(TensorCos, self).__init__(_casting=casting, _err=err,
                                        _dtype=dtype, _sparse=sparse, **kw)
    @classmethod
    def _is_sparse(cls, x):
        # cos(0) == 1, so zeros do not stay zero: the result is always dense.
        return False
@infer_dtype(np.cos)
def cos(x, out=None, where=None, **kwargs):
    """
    Cosine element-wise.
    Parameters
    ----------
    x : array_like
        Input tensor in radians.
    out : Tensor, None, or tuple of Tensor and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.
    where : array_like, optional
        Values of True indicate to calculate the ufunc at that position, values
        of False indicate to leave the value in the output alone.
    **kwargs
    Returns
    -------
    y : Tensor
        The corresponding cosine values.
    Notes
    -----
    If `out` is provided, the function writes the result into it,
    and returns a reference to `out`. (See Examples)
    References
    ----------
    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
    New York, NY: Dover, 1972.
    Examples
    --------
    >>> import mars.tensor as mt
    >>> mt.cos(mt.array([0, mt.pi/2, mt.pi])).execute()
    array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
    >>>
    >>> # Example of providing the optional output parameter
    >>> out1 = mt.empty(1)
    >>> out2 = mt.cos([0.1], out1)
    >>> out2 is out1
    True
    >>>
    >>> # Example of ValueError due to provision of shape mis-matched `out`
    >>> mt.cos(mt.zeros((3,3)),mt.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
    """
    # Build the cosine operand (kwargs: casting/err/dtype/sparse) and apply it.
    op = TensorCos(**kwargs)
    return op(x, out=out, where=where)
| [
"xuye.qin@alibaba-inc.com"
] | xuye.qin@alibaba-inc.com |
a05127b405e68038f07aab774aed90fe51dd6642 | 9b2eb0d6b673ac4945f9698c31840b847f790a58 | /pkg/test/test_fast_stats_builds_api.py | 77236c6f0afa96f5aa495b7784829af15e744dd3 | [
"Apache-2.0"
] | permissive | Apteco/apteco-api | 6d21c9f16e58357da9ce64bac52f1d2403b36b7c | e8cf50a9cb01b044897025c74d88c37ad1612d31 | refs/heads/master | 2023-07-10T23:25:59.000038 | 2023-07-07T14:52:29 | 2023-07-07T14:52:29 | 225,371,142 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,815 | py | # coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import apteco_api
from apteco_api.api.fast_stats_builds_api import FastStatsBuildsApi # noqa: E501
from apteco_api.rest import ApiException
class TestFastStatsBuildsApi(unittest.TestCase):
    """FastStatsBuildsApi unit test stubs.

    Generated by openapi-generator: each test method is a placeholder to be
    filled in with real request/response assertions.
    """
    def setUp(self):
        # Fresh API client per test case.
        self.api = apteco_api.api.fast_stats_builds_api.FastStatsBuildsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_fast_stats_builds_cancel_fast_stats_build_job(self):
        """Test case for fast_stats_builds_cancel_fast_stats_build_job

        EXPERIMENTAL: Requires OrbitAdmin: Cancel a running data purchasing job  # noqa: E501
        """
        pass

    def test_fast_stats_builds_create_fast_stats_build_job(self):
        """Test case for fast_stats_builds_create_fast_stats_build_job

        EXPERIMENTAL: Requires OrbitAdmin: Create a new job to builds a FastStats system from the given definition  # noqa: E501
        """
        pass

    def test_fast_stats_builds_fast_stats_build_sync(self):
        """Test case for fast_stats_builds_fast_stats_build_sync

        EXPERIMENTAL: Requires OrbitAdmin: Builds a FastStats system from the given definition  # noqa: E501
        """
        pass

    def test_fast_stats_builds_get_fast_stats_build_job(self):
        """Test case for fast_stats_builds_get_fast_stats_build_job

        EXPERIMENTAL: Requires OrbitAdmin: Get the status of a running FastStats build job  # noqa: E501
        """
        pass
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| [
"tim.morris@apteco.com"
] | tim.morris@apteco.com |
c570a16420f515bd89d420f8231058e8acb26b1d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_298/ch22_2020_09_09_12_15_01_157465.py | 8803cbcb0895a39a98c1635aaa7138b86ea1db8d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | a= input("Quantos cigarros voce fuma por dia? ")
b= input("Ha quantos anos? ")
a= int(a)
b= int(b)
def vida(a, b):
    """Estimate the days of life lost by smoking.

    Parameters
    ----------
    a : int
        Cigarettes smoked per day.
    b : int
        Number of years of smoking.

    Returns
    -------
    float
        Estimated days of life lost, assuming each cigarette costs
        10 minutes of life (so 144 cigarettes == 1440 minutes == 1 day).
    """
    fumado = a * b * 365      # total cigarettes smoked over the period
    perdido = fumado / 144    # 144 cigarettes per lost day
    return perdido
print(vida(a,b)) | [
"you@example.com"
] | you@example.com |
104b1a43657e6f3c40b770581127e9bbd0589f0c | c14e31bdfed47fc9aaafd3b1100451551acff3c6 | /source/accounts/forms.py | f038f43ec5d8eb1f212fdaa4b74836b1d7619c7c | [] | no_license | UuljanAitnazarova/reviews_project | 79f6e423f111afa7a057fdf15e086559530a281c | 74b64559bfaf1f9fcd3c78009fa4264c64bb9571 | refs/heads/master | 2023-04-23T04:38:59.266181 | 2021-05-01T12:56:15 | 2021-05-01T12:56:15 | 363,329,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.core.exceptions import ValidationError
class MyUserCreationForm(UserCreationForm):
    """Sign-up form that additionally requires an e-mail address."""
    class Meta(UserCreationForm.Meta):
        fields = ['username', 'email', 'first_name', 'last_name', 'password1', 'password2']

    def clean(self):
        # Run the stock validation first (username rules, password match),
        # then reject submissions without an e-mail: the base form treats
        # the field as optional.
        super(MyUserCreationForm, self).clean()
        if not self.cleaned_data.get('email'):
            raise ValidationError('Enter your email address')
class UserUpdateForm(forms.ModelForm):
    """Profile-editing form for the active user model (e-mail and names only)."""
    class Meta:
        model = get_user_model()
        fields = ('email', 'first_name', 'last_name')
class PasswordChangeForm(forms.ModelForm):
    """Password-change form: requires the old password and a confirmed new one."""
    password = forms.CharField(label="New password", strip=False, widget=forms.PasswordInput)
    password_confirm = forms.CharField(label="Confirm password", widget=forms.PasswordInput, strip=False)
    old_password = forms.CharField(label="Old password", strip=False, widget=forms.PasswordInput)

    def clean_password_confirm(self):
        # The two new-password fields must agree before the change is accepted.
        password = self.cleaned_data.get("password")
        password_confirm = self.cleaned_data.get("password_confirm")
        if password and password_confirm and password != password_confirm:
            raise forms.ValidationError('Passwords do not match!')
        return password_confirm

    def clean_old_password(self):
        # self.instance is the user being edited; verify they know the
        # current password before allowing any change.
        old_password = self.cleaned_data.get('old_password')
        if not self.instance.check_password(old_password):
            raise forms.ValidationError('Old password is incorrect!')
        return old_password

    def save(self, commit=True):
        # Hash the new password with set_password rather than storing raw text.
        user = self.instance
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user

    class Meta:
        model = get_user_model()
        fields = ['password', 'password_confirm', 'old_password']
"u.aitnazarova@gmail.com"
] | u.aitnazarova@gmail.com |
6ea43da81e9e349639a232afa4e830990a130077 | 8a4f6d02ea5f83c78543849d9e997458c7d615c9 | /cleanup/transfer.py | 430bbb97b0b4d07686165e55ba110e4f44538591 | [] | no_license | jsl12/Picture-Cleanup | f5f2e3889b2894051e25f6347049fa4ea752235e | 82c62dab9fe9a59ff6ec2905049d4667ad9d91d9 | refs/heads/master | 2020-12-09T02:03:42.979162 | 2020-04-08T15:04:05 | 2020-04-08T15:04:05 | 233,159,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | import ftplib
import logging
from pathlib import Path
from . import log
LOGGER = logging.getLogger(__name__)
def pull_from_phone(
        host,
        port,
        local_path,
        phone_path=None,
        ext='jpg',
        user='android',
        passwd='android'):
    """Copy every ``ext`` file from an FTP server (e.g. a phone) into ``local_path``.

    Files that already exist locally are skipped; destination directories are
    created on demand. Credentials default to the common Android FTP-server
    values. Errors are logged and the connection is always closed.

    Parameters
    ----------
    host, port : FTP server address.
    local_path : pathlib.Path destination directory.
    phone_path : remote directory to walk (server root if None).
    ext : file extension to transfer, without the dot.
    user, passwd : FTP credentials.
    """
    ftp = ftplib.FTP()
    ftp.connect(host, port)
    try:
        LOGGER.debug(f'Connected to {host}:{port}')
        ftp.login(user, passwd)
        LOGGER.debug(f'Logged in with: {user}, {passwd}')
        # Compare case-insensitively so camera files named e.g. *.JPG match too.
        suffix = f'.{ext.lower()}'
        for file in ftp_files(ftp, phone_path):
            if file.suffix.lower() != suffix:
                continue
            res = local_path / file.name
            if res.exists():
                LOGGER.info(f'file already exists: "{res}"')
                continue
            # Create the destination folder and any missing parents on demand
            # (the original parents[0].mkdir() failed when the grandparent
            # directory was also missing).
            res.parent.mkdir(parents=True, exist_ok=True)
            with res.open('wb') as res_file:
                ftp.retrbinary(f'RETR {file}', res_file.write)
            LOGGER.info(f'ftp success: "{file}", "{res}"')
    except Exception as e:
        # Best-effort transfer: log the failure and fall through so the
        # connection is still closed cleanly.
        LOGGER.exception(repr(e))
    finally:
        ftp.quit()
def ftp_files(ftp, path):
    """Recursively yield remote files under ``path`` as ``Path`` objects.

    Directories are detected via the MLSD ``type`` fact and descended into;
    remote sub-paths are joined with backslashes.
    """
    for name, facts in ftp.mlsd(path, facts=['type']):
        if facts['type'] == 'dir':
            yield from ftp_files(ftp, f'{path}\\{name}')
        else:
            yield Path(path) / name
def transferred_files(ftplog):
    """Yield the path pairs of files the log records as transferred."""
    successes = log.filter(log.line_gen(ftplog), 'ftp success')
    for entry in successes:
        yield log.get_paths(entry)
def skipped_files(ftplog):
    """Yield the path pairs of files the log records as already present."""
    already_there = log.filter(log.line_gen(ftplog), 'file already exists')
    for entry in already_there:
        yield log.get_paths(entry)
| [
"32917998+jsl12@users.noreply.github.com"
] | 32917998+jsl12@users.noreply.github.com |
c29268709c89ea3b972d76ba5d5b1827978ad7dc | 45dd427ec7450d2fac6fe2454f54a130b509b634 | /homework_3/preparation1.py | 2cd921e0e4dbfc35edb2a6c54fe16ec5985257e4 | [] | no_license | weka511/smac | 702fe183e3e73889ec663bc1d75bcac07ebb94b5 | 0b257092ff68058fda1d152d5ea8050feeab6fe2 | refs/heads/master | 2022-07-02T14:24:26.370766 | 2022-06-13T00:07:36 | 2022-06-13T00:07:36 | 33,011,960 | 22 | 8 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import pylab
def show_conf(L, sigma, title, fname):
    """Plot a disk configuration on the unit square and save it to ``fname``.

    Parameters
    ----------
    L : list of [x, y] disk centres.
    sigma : disk radius.
    title : figure title.
    fname : output image filename.

    Each disk is drawn nine times (a 3x3 grid of shifted copies) so that
    disks overlapping the periodic boundary appear on the opposite edge.
    """
    pylab.axes()
    for [x, y] in L:
        for ix in range(-1, 2):
            for iy in range(-1, 2):
                cir = pylab.Circle((x + ix, y + iy), radius=sigma, fc='r')
                pylab.gca().add_patch(cir)
    pylab.axis('scaled')
    pylab.title(title)
    # Clip the view back to the unit square after drawing the shifted copies.
    pylab.axis([0.0, 1.0, 0.0, 1.0])
    pylab.savefig(fname)
    pylab.show()
    pylab.close()
L = [[0.9, 0.9]]
sigma = 0.4
show_conf(L, sigma, 'test graph', 'one_disk.png') | [
"simon@greenweaves.nz"
] | simon@greenweaves.nz |
a34c9c352f846d610a576e96d440f5c41e31f197 | 341bd2d71b6b6e3af734f16989aeb450e3e73624 | /PCA9536_WDBZ/PCA9536_WDBZ.py | adb05997edaa4ecbe077a630777db9ae1e205939 | [] | no_license | ControlEverythingCommunity/CE_PYTHON_LIB | 5c170f7e3763ab3b160a5fc33f2bb96d4798c7e2 | 736b29434a451a384c2f52490c849239c3190951 | refs/heads/master | 2021-01-12T00:39:25.374689 | 2017-08-30T21:54:47 | 2017-08-30T21:54:47 | 78,751,564 | 7 | 7 | null | 2017-12-15T11:08:48 | 2017-01-12T14:05:11 | Python | UTF-8 | Python | false | false | 6,014 | py | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# PCA9536_WDBZ
# This code is designed to work with the PCA9536_WDBZ_I2CS I2C Mini Module available from ControlEverything.com.
# https://shop.controleverything.com/products/water-detect-sensor-with-buzzer
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# I2C address of the device
PCA9536_WDBZ_DEFAULT_ADDRESS = 0x41
# PCA9536_WDBZ Register Map
PCA9536_WDBZ_REG_INPUT = 0x00 # Input Port Register
PCA9536_WDBZ_REG_OUTPUT = 0x01 # Output Port Register
PCA9536_WDBZ_REG_POLARITY = 0x02 # Polarity Inversion Register
PCA9536_WDBZ_REG_CONFIG = 0x03 # Configuration Register
# PCA9536_WDBZ Output Port Register Configuration
PCA9536_WDBZ_OUTPUT_PIN0 = 0x01 # Reflects outgoing logic levels of Pin-0
PCA9536_WDBZ_OUTPUT_PIN1 = 0x02 # Reflects outgoing logic levels of Pin-1
PCA9536_WDBZ_OUTPUT_PIN2 = 0x04 # Reflects outgoing logic levels of Pin-2
PCA9536_WDBZ_OUTPUT_PIN3 = 0x08 # Reflects outgoing logic levels of Pin-3
# PCA9536_WDBZ Polarity Inversion Register Configuration
PCA9536_WDBZ_POLARITY_PIN0 = 0x01 # Input Port register data inverted of Pin-0
PCA9536_WDBZ_POLARITY_PIN1 = 0x02 # Input Port register data inverted of Pin-1
PCA9536_WDBZ_POLARITY_PIN2 = 0x04 # Input Port register data inverted of Pin-2
PCA9536_WDBZ_POLARITY_PIN3 = 0x08 # Input Port register data inverted of Pin-3
PCA9536_WDBZ_POLARITY_PINX = 0x00 # Input Port register data retained of Pin-X
# PCA9536_WDBZ Configuration Register
PCA9536_WDBZ_CONFIG_PIN0 = 0x01 # Corresponding port Pin-0 configured as Input
PCA9536_WDBZ_CONFIG_PIN1 = 0x02 # Corresponding port Pin-1 configured as Input
PCA9536_WDBZ_CONFIG_PIN2 = 0x04 # Corresponding port Pin-2 configured as Input
PCA9536_WDBZ_CONFIG_PIN3 = 0x08 # Corresponding port Pin-3 configured as Input
PCA9536_WDBZ_CONFIG_PINX = 0x00 # Corresponding port Pin-X configured as Output
class PCA9536_WDBZ():
    """Driver for the PCA9536 water-detect board with buzzer (Python 2).

    Talks to the I2C expander through the module-level ``bus`` and register
    constants. ``select_io``/``select_pin`` prompt the operator interactively;
    the *_config methods program the chip; ``read_data`` polls the sensor and
    drives the buzzer on pin 3.
    """
    def select_io(self):
        """Select the Input/Output for the use
			0 : Input
			1 : Output"""
        # Re-prompt until a valid choice (0 or 1) is entered.
        self.io = int(input("Select Input/Output (0:I, 1:O) = "))
        while self.io > 1 :
            self.io = int(input("Select Input/Output (0:I, 1:O) = "))

    def select_pin(self):
        """Select the Pin for the use
			0 : Pin-0
			1 : Pin-1
			2 : Pin-2
			3 : Pin-3"""
        # Re-prompt until a valid pin number (0-3) is entered.
        self.pin = int(input("Enter the Pin No.(0-3) = "))
        while self.pin > 3 :
            self.pin = int(input("Enter the Pin No.(0-3) = "))

    def input_output_config(self):
        """Select the Configuration Register data from the given provided value"""
        # io == 0: configure the chosen pin as an input; io == 1: all outputs.
        if self.io == 0 :
            if self.pin == 0 :
                bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN0)
            elif self.pin == 1 :
                bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN1)
            elif self.pin == 2 :
                bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN2)
            elif self.pin == 3 :
                bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN3)
        elif self.io == 1 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)

    def polarity_config(self):
        """Select the Polarity Inversion Register Configuration data from the given provided value"""
        # Invert the input polarity of the selected pin only.
        if self.pin == 0 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN0)
        elif self.pin == 1 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN1)
        elif self.pin == 2 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN2)
        elif self.pin == 3 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN3)

    def relay_buzzer_config(self):
        """Select the Polarity Inversion Register Configuration data from the given provided value"""
        # All pins as outputs, then raise the output bit for the selected pin.
        bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)
        """Select the Output Port Register Configuration data from the given provided value"""
        if self.pin == 0 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN0)
        elif self.pin == 1 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN1)
        elif self.pin == 2 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN2)

    def read_data(self):
        """Read data back from PCA9536_WDBZ_REG_INPUT(0x00)/PCA9536_WDBZ_REG_OUTPUT(0x01), 1 byte"""
        data = bus.read_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT)
        # Convert the data to 4-bits
        data = (data & 0x0F)
        # Low bit on the sensor pin means water bridged the probes:
        # sound the buzzer on pin 3; otherwise silence it.
        if (data & (2 ** self.pin)) == 0 :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN3)
            print "I/O Pin 3 State is HIGH"
            print "Buzzer is ON"
            print "I/O Pin %d State is LOW" %self.pin
            print "Water Detected"
        else :
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN3)
            bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN3)
            print "I/O Pin 3 State is LOW"
            print "Buzzer is OFF"
            print "I/O Pin %d State is HIGH" %self.pin
            print "No Water Present"
| [
"apple@Yaddis-iMac.local"
] | apple@Yaddis-iMac.local |
695020da160da49d3ff96237eb4b04bf19b2c942 | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /django-blog-api/posts/views.py | ce41539ee2fd83cb2392599bd4c682a68868049b | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 1,231 | py | from django.contrib.auth import get_user_model
from rest_framework import viewsets # generics
from .permissions import IsAuthorOrReadOnly
from .models import Post
from .serializers import PostSerializer, UserSerializer
class PostViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for posts; writes are restricted to the post's author."""
    permission_classes = (IsAuthorOrReadOnly,)
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class UserViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for user accounts (no extra permission checks here)."""
    queryset = get_user_model().objects.all()
    serializer_class = UserSerializer
# class PostList(generics.ListCreateAPIView):
# # permission_classes = (permissions.IsAuthenticated,)
# queryset = Post.objects.all()
# serializer_class = PostSerializer
#
#
# class PostDetail(generics.RetrieveUpdateDestroyAPIView):
# # permission_classes = (permissions.IsAuthenticated,)
# permission_classes = (IsAuthorOrReadOnly,)
# queryset = Post.objects.all()
# serializer_class = PostSerializer
#
#
# class UserList(generics.ListCreateAPIView):
# queryset = get_user_model().objects.all()
# serializer_class = UserSerializer
#
#
# class UserDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = get_user_model().objects.all()
# serializer_class = UserSerializer
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
ab0875ad6d3c9469a5818f5710f7de6bd10097db | 5117fdfc5ce8399f72d12d519d1c1b5ea4a64c63 | /examples/hh/ilql_hh.py | 57500a06f128d6bc82a7bee9df5a855b5e5ca234 | [
"MIT"
] | permissive | neevaco/trlx | cd1ccd0ad308742eaa1dcaedc64a3103637ff839 | 46231059f5a3fa256a25ec2a16f36b24b2b34055 | refs/heads/main | 2023-05-25T01:33:48.486866 | 2023-03-13T22:04:24 | 2023-03-13T22:04:24 | 615,039,599 | 0 | 0 | null | 2023-03-16T20:36:57 | 2023-03-16T20:36:56 | null | UTF-8 | Python | false | false | 3,530 | py | import json
import os
import sys
from datasets import load_dataset
from ppo_hh import create_reward_fn
import trlx
from trlx.data.default_configs import (
ILQLConfig,
ModelConfig,
OptimizerConfig,
SchedulerConfig,
TokenizerConfig,
TrainConfig,
TRLConfig,
)
default_config = TRLConfig(
train=TrainConfig(
seq_length=1024,
batch_size=4,
epochs=100,
total_steps=20000,
checkpoint_interval=10000,
eval_interval=1000,
pipeline="PromptPipeline",
trainer="AccelerateILQLTrainer",
checkpoint_dir="checkpoints/ilql_hh",
),
model=ModelConfig(model_path="EleutherAI/gpt-j-6B", num_layers_unfrozen=-1),
tokenizer=TokenizerConfig(tokenizer_path="EleutherAI/gpt-j-6B", truncation_side="left"),
optimizer=OptimizerConfig(name="adamw", kwargs=dict(lr=1e-6, betas=(0.9, 0.95), eps=1.0e-8, weight_decay=1.0e-6)),
scheduler=SchedulerConfig(name="cosine_annealing", kwargs=dict(T_max=1000000000, eta_min=1e-6)),
method=ILQLConfig(
name="ilqlconfig",
tau=0.6,
gamma=0.99,
cql_scale=0.1,
awac_scale=1,
alpha=0.0001,
beta=0,
steps_for_target_q_sync=1,
two_qs=True,
gen_kwargs=dict(max_new_tokens=128, top_k=20, beta=[1, 4], temperature=1.0),
),
)
config_name = os.environ.get("CONFIG_NAME")
if config_name == "125M":
default_config.train.batch_size = 16
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_125M"
default_config.model.model_path = "EleutherAI/pythia-125m-deduped"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
elif config_name == "1B":
default_config.train.batch_size = 8
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_1B"
default_config.model.model_path = "EleutherAI/pythia-1.4b-deduped"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
elif config_name == "6B":
default_config.train.batch_size = 4
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_6B"
default_config.model.model_path = "EleutherAI/pythia-6.9b-deduped"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
elif config_name == "20B":
default_config.train.batch_size = 1
default_config.train.total_steps = 3000
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_20B"
default_config.model.model_path = "EleutherAI/gpt-neox-20b"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
def preprocess(sample):
    """Attach paired (prompt, completion) samples and their rewards in place.

    Adds ``prompt_output`` (chosen pair first, rejected pair second) and
    ``reward`` (+1 for chosen, -1 for rejected), then returns the same dict.
    """
    prompt = sample["prompt"]
    sample["prompt_output"] = [
        [prompt, sample["chosen"]],
        [prompt, sample["rejected"]],
    ]
    sample["reward"] = [1, -1]
    return sample
def main(hparams=None):
    """Run ILQL training on the Anthropic HH preference dataset.

    Parameters
    ----------
    hparams : dict, optional
        Overrides merged into ``default_config``; ``None`` means no overrides.
    """
    # Use None instead of a mutable dict literal as the default argument to
    # avoid the shared-mutable-default pitfall; behavior is unchanged for
    # callers that pass nothing or pass a dict.
    if hparams is None:
        hparams = {}
    config = TRLConfig.update(default_config, hparams)

    dataset = load_dataset("Dahoas/full-hh-rlhf").map(preprocess)
    # Flatten the per-row [[chosen pair], [rejected pair]] lists into one long
    # list with rewards aligned element-for-element (+1 chosen, -1 rejected).
    prompts_outputs = sum(dataset["train"]["prompt_output"], [])
    rewards = sum(dataset["train"]["reward"], [])
    eval_prompts = [prompt_output[0][0] for prompt_output in dataset["test"]["prompt_output"]][:280]
    reward_fn = create_reward_fn()

    trlx.train(
        samples=prompts_outputs,
        rewards=rewards,
        config=config,
        eval_prompts=eval_prompts,
        metric_fn=lambda **kwargs: {"reward": reward_fn(**kwargs)},
        stop_sequences=["Human:", "human:", "Assistant:", "assistant:"],
    )
if __name__ == "__main__":
hparams = {} if len(sys.argv) == 1 else json.loads(sys.argv[1])
main(hparams)
| [
"noreply@github.com"
] | neevaco.noreply@github.com |
b7742363da58a3b4a69edc0d133ad46617c3deaf | e31bbc636eb495eed5843d1a4b7f66d3525eecc6 | /Examp/Python Advanced Exam - 27 June 2020/2.Snake.py | 9d6a0ffe5c0adb9f9aea2b696c8ca43593217b01 | [] | no_license | PullBack993/Python-Advanced | c6a24b68d2517589027d4af8dee80fe9f28490a2 | 3a9362b09555649eef797220dac1bba7a39df06e | refs/heads/main | 2023-06-12T08:34:16.379021 | 2021-07-08T14:08:03 | 2021-07-08T14:08:03 | 358,933,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,196 | py | FOOD = '*'
SNAKE = 'S'
BURROWS = 'B'
MOVE_SYMBOL = '.'
MAX_FOOD = 9
def get_input(size):
    """Read ``size`` board rows from stdin, each as a list of characters."""
    return [list(input()) for _ in range(size)]
def find_snake(board, size):
    """Return (row, col) of the snake marker, or None when absent."""
    for row, cells in enumerate(board[:size]):
        for col, symbol in enumerate(cells[:size]):
            if symbol == SNAKE:
                return row, col
def find_burrows(board):
    """Return (row, col) of the first burrow marker, or None when absent."""
    side = len(board)
    for row in range(side):
        for col in range(side):
            if board[row][col] == BURROWS:
                return row, col
def check_index(mat, r, c):
    """Return True if (r, c) lies inside the square matrix ``mat``."""
    # Chained comparisons replace the verbose `if ...: return True / return
    # False` pattern; the result is the same boolean.
    size_matrix = len(mat)
    return 0 <= r < size_matrix and 0 <= c < size_matrix
# Game driver: read the board, then process moves until the snake eats
# MAX_FOOD pieces (win) or walks off the board (loss).
size = int(input())
board = get_input(size)
snake_row, snake_col = find_snake(board, size)
food = 0
game_over = False
while not game_over and MAX_FOOD >= food:
    move_command = input()
    # Remember where the snake was so the old cell can be cleared.
    old_row_position = snake_row
    old_col_position = snake_col
    if move_command == "up":
        snake_row -= 1
    elif move_command == "down":
        snake_row += 1
    elif move_command == "left":
        snake_col -= 1
    elif move_command == "right":
        snake_col += 1
    position = check_index(board, snake_row, snake_col)
    if position:
        new_row = snake_row
        new_col = snake_col
        new_position = board[new_row][new_col]
        if new_position == FOOD:
            # Eat the food and move onto its cell.
            food += 1
            board[old_row_position][old_col_position] = MOVE_SYMBOL
            board[new_row][new_col] = SNAKE
        elif new_position == BURROWS:
            # Entering a burrow teleports the snake to the other burrow
            # (the entry burrow is cleared before searching for the exit).
            board[old_row_position][old_col_position] = MOVE_SYMBOL
            board[new_row][new_col] = MOVE_SYMBOL
            row, col = find_burrows(board)
            snake_row, snake_col = row, col
            board[row][col] = SNAKE
        else:
            # Plain move onto an empty cell.
            board[old_row_position][old_col_position] = MOVE_SYMBOL
            board[new_row][new_col] = SNAKE
    else:
        # Moved outside the board: the snake is gone and the game ends.
        board[old_row_position][old_col_position] = MOVE_SYMBOL
        game_over = True
if game_over:
    print("Game over!")
else:
    print("You won! You fed the snake.")
print(f"Food eaten: {food}")
for el in board:
    print("".join(i for i in el))
| [
"turgay.durhanov.ismailov@gmail.com"
] | turgay.durhanov.ismailov@gmail.com |
1712f8a239c3cea9372aa7f0697719b9add81465 | 97cbcd454be80f0b6f986b0a81e84570596a9368 | /tests/basics/Functions32.py | a30fc83b58bdb7db17b6699db653ac8fd8964539 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | keitheis/Nuitka | aff7bf348e12d772543018e6b464cbfa7eaf2d30 | 1e4f31e12cbd36ce2f6a785c61e0111639c258a9 | refs/heads/master | 2021-01-17T21:59:05.601349 | 2014-04-21T10:28:13 | 2014-04-21T10:28:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | # Copyright 2014, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def kwonlysimple(*, a):
    """Simplest keyword-only parameter case."""
    return a

print( "Most simple case", kwonlysimple( a = 3 ) )

def kwonlysimpledefaulted(*, a = 5):
    """Keyword-only parameter with a default value."""
    return a

print( "Default simple case", kwonlysimpledefaulted() )

# The helpers below print when evaluated so that the evaluation order of
# default values and annotations at function-definition time is observable.
def default1():
    print( "Called", default1 )
    return 1

def default2():
    print( "Called", default2 )
    return 2

def default3():
    print( "Called", default3 )
    return 3

def default4():
    print( "Called", default4 )
    return 4

def annotation1():
    print ( "Called", annotation1 )
    return "a1"

def annotation2():
    print ( "Called", annotation2 )
    return "a2"

def annotation3():
    print ( "Called", annotation3 )
    return "a3"

def annotation4():
    print ( "Called", annotation4 )
    return "a4"

def annotation5():
    print ( "Called", annotation5 )
    return "a5"

def annotation6():
    print ( "Called", annotation6 )
    return "a6"

def annotation7():
    print ( "Called", annotation7 )
    return "a7"

def annotation8():
    print ( "Called", annotation8 )
    return "a8"

def annotation9():
    print ( "Called", annotation9 )
    return "a9"
# Exercises annotations and defaults in every parameter position (positional,
# keyword-only, **kwargs, return); the helper calls above print as each piece
# is evaluated when this `def` executes.
def kwonlyfunc(x: annotation1(), y: annotation2() = default1(), z: annotation3() = default2(), *, a: annotation4(), b: annotation5() = default3(), c: annotation6() = default4(), d: annotation7(), **kw: annotation8()) -> annotation9():
    print( x, y, z, a, b, c, d )
print( kwonlyfunc.__kwdefaults__ )
print( "Keyword only function" )
kwonlyfunc( 7, a = 8, d = 12 )
print( "Annotations come out as", sorted( kwonlyfunc.__annotations__ ) )
kwonlyfunc.__annotations__ = {}
print( "After updating to None it is", kwonlyfunc.__annotations__ )
kwonlyfunc.__annotations__ = { "k" : 9 }
print( "After updating to None it is", kwonlyfunc.__annotations__ )
def kwonlystarfunc(*, a, b, **d):
    """Return the required keyword-only arguments and the collected extras."""
    return a, b, d
print( "kwonlystarfunc", kwonlystarfunc( a = 8, b = 12, k = 9, j = 7 ) )
def deeplyNestedNonLocalWrite():
    """Exercise a ``nonlocal`` write from a doubly nested function.

    ``g`` rebinds the outermost ``x`` to 3, so the result is (3, 3).
    """
    x = 0
    y = 0

    def f():
        def g():
            nonlocal x
            x = 3
            return x

        return g()

    return f(), x
print( "Deeply nested non local writing function", deeplyNestedNonLocalWrite() )
def deletingClosureVariables():
    """Delete a closure variable via ``nonlocal`` twice and return the error.

    The first ``g()`` unbinds ``x``; the second delete then raises, and that
    exception object is returned.
    """
    try:
        x = 1

        def g():
            nonlocal x
            del x

        g()
        # x is already unbound here, so this call raises.
        g()
    except Exception as e:
        return e
print( "Using deleted non-local vaiables", deletingClosureVariables() )
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
f6eb37893e2c8398620ed9ef42a8aeb7319b6a93 | 108fc2873b5c07e4ad9515adc16bc8e9fdf7d021 | /smorest_sfs/utils/imports.py | 5e66ce484a3a2a64d10150b3e01da6c0b90d5117 | [
"Apache-2.0"
] | permissive | ssfdust/yt-media | 4ac5eba6a25830268f42b951e8307bb57e7baeeb | 36c3d1977df5851d8df54846f0bc84be2b86e962 | refs/heads/master | 2021-08-08T09:40:31.241228 | 2020-05-11T03:11:20 | 2020-05-11T03:11:20 | 175,938,603 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkgutil
def import_submodules(context: dict, root_module: str, path: str) -> None:
"""
加载文件夹下的所有子模块
https://github.com/getsentry/zeus/blob/97528038a0abfd6f0e300d8d3f276e1b0818c328/zeus/utils/imports.py#L23
>>> import_submodules(locals(), __name__, __path__)
"""
modules = {}
for _, module_name, _ in pkgutil.walk_packages(path, root_module + "."):
# this causes a Runtime error with model conflicts
# module = loader.find_module(module_name).load_module(module_name)
module = __import__(module_name, globals(), locals(), ["__name__"])
keys = getattr(module, "__all__", None)
if keys is None:
keys = [k for k in vars(module).keys() if not k.startswith("_")]
for k in keys:
context[k] = getattr(module, k, None)
modules[module_name] = module
# maintain existing module namespace import with priority
for k, v in modules.items():
context[k] = v
| [
"ssfdust@gmail.com"
] | ssfdust@gmail.com |
169a505b73cec2a76c9acc2e96ffc6cdcd3aaeaa | bc0fc1d06ee2822b696494d3a73eeb6c1af0360a | /androidfs/acestream.engine/data/plugins/viasat_embed.py | 5f69a54cd37d0dae965f2594274dc2cc77b7545d | [] | no_license | elipatov/acestream.engine | cf41472b6435400022f19eb4b48c2a17a3afacbd | 65849f78a0dc7749d0478c387e69bb14865bdaf3 | refs/heads/main | 2023-02-24T22:28:00.045198 | 2021-01-22T18:04:12 | 2021-01-22T18:04:12 | 329,993,224 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | #-plugin-sig:Tgg2N/mlOCMuMUWR77aIuGUWncB0O1Mc6rLUOmnvO3hbpruyNpgRfDiH5IScd0JNZvzRHw3chwFWMgPzQskdvfDq8u01ZyGbSY5+Z5jK/bO6xZGV4kQumyH4jv59aQiqEjtHk8u7n7878oi1qpqMY1OEDTn6gK7fNE//2XroR9PfGcNTwhpvfoh6pEB2Yzww5I+8wh35cqtcS/oeIB98bXt3X2XOUb88OF8Oepd63G1OM3Lixc/MdVI37N+Kg8BoyBenl3PSpZwB9w7QJV7rRYWsBpnPmeXjLdrHWjzSDfyCK9U5KW39LhjynZltpD/wBV98tALzALrGY1d5VZAawg==
import re
from ACEStream.PluginsContainer.livestreamer.plugin import Plugin
from ACEStream.PluginsContainer.livestreamer.plugin.api import http
_url_re = re.compile("http(s)?://(www\.)?tv(3|6|8|10)\.se")
_embed_re = re.compile('<iframe class="iframe-player" src="([^"]+)">')
class ViasatEmbed(Plugin):
    """Livestreamer plugin resolving tv3/tv6/tv8/tv10.se pages to their
    embedded player streams."""
    @classmethod
    def can_handle_url(self, url):
        # NOTE(review): the first argument of a classmethod is conventionally
        # named ``cls``; kept as-is to leave the code byte-identical.
        return _url_re.match(url)

    def _get_streams(self):
        # Find the embedded iframe player and delegate stream resolution to
        # whichever plugin handles that URL. Returns None when no embed found.
        res = http.get(self.url)
        match = _embed_re.search(res.text)
        if match:
            url = match.group(1)
            return self.session.streams(url)
| [
"evgeni.lipatov@idt.net"
] | evgeni.lipatov@idt.net |
3cc3d36f75e036fa7a5e0def03e659cc73015e62 | 51363872687318ac54e815b51d16d44d214974a2 | /build/turtlebot_msgs/catkin_generated/pkg.develspace.context.pc.py | 278898f50424d4529ca17bcfc9c3b65ea13b5ba3 | [] | no_license | pirmou/catkin_ws | 2acee80a43f17841326d1b917931866d561648c3 | abaac27209016a944bd3520d84e4dc3aab1abf2e | refs/heads/main | 2023-02-17T04:44:03.927127 | 2021-01-10T17:40:02 | 2021-01-10T17:40:02 | 328,440,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/pierremoussa/catkin_ws/devel/include".split(';') if "/home/pierremoussa/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;sensor_msgs;std_srvs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_msgs"
PROJECT_SPACE_DIR = "/home/pierremoussa/catkin_ws/devel"
PROJECT_VERSION = "2.2.1"
| [
"pierre.moussa18@me.com"
] | pierre.moussa18@me.com |
96c92caa5ed1befcc1f92e68dcd7f0678b33848b | 28e997a25e62b9c76fd4d3fd1e87436dc54b3178 | /2_bioinformatics_stronghold/rosalind_RNAS.py | b9ef53081808f87d60e58a271a0f329028991977 | [] | no_license | kangli-bionic/Rosalind | 0bba97d86db51b91af480766155816ec9e1f05e6 | 6cae380f31498a991381c7c6f1d479d302615571 | refs/heads/master | 2021-01-02T09:13:49.665498 | 2016-08-16T20:54:48 | 2016-08-16T20:54:48 | 99,171,765 | 1 | 0 | null | 2017-08-03T00:07:24 | 2017-08-03T00:07:24 | null | UTF-8 | Python | false | false | 1,289 | py | #!/usr/bin/python
'''
Rosalind: Bioinformatics Stronghold
Problem: Wobble Bonding and RNA Secondary Structures
URL: http://rosalind.info/problems/rnas/
Given: An RNA string s (of length at most 200 bp).
Return: The total number of distinct valid matchings of basepair edges in the
bonding graph of s. Assume that wobble base pairing is allowed.
'''
def pair(seq):
# Only one possible match for a seq of length one.
if len(seq) < 4:
return 1
# No need to recalculate a sequence if we've already done so.
if seq in prev:
return prev[seq]
# Otherwise, do the calculation and add it to the dictionary.
else:
prev[seq] = pair(seq[1:])
for i in range(4, len(seq)):
if seq[i] in match[seq[0]]:
prev[seq] += pair(seq[1:i]) * pair(seq[i+1:])
return prev[seq]
if __name__ == '__main__':
# Read sequence.
with open('problem_datasets/rosalind_rnas.txt', 'r') as infile:
seq = infile.read().replace('\n', '')
# The possible basepair matchings including wobble base pairing.
match = {'A':'U', 'U':'AG', 'C':'G', 'G':'CU'}
# Keep track of the number of the valid pairs.
prev = {}
# Print answer.
print(pair(seq))
| [
"kapoozy@gmail.com"
] | kapoozy@gmail.com |
8329046888d401d5fffd22ffe9b07d4646213ac0 | 0567517ff7c0366b58e52d7fa96b651e97af5d82 | /apps/smartpipe/migrations/0014_project_geometry.py | 7afb86374c9d472435fcd7586edc5c61f5c73554 | [] | no_license | m6ttl/smartpipes | fdb9976b11d6c520953c240872d2574b1a69ec55 | 2d5846143dbf7b44c36491dd1787c36ebbe4fe0d | refs/heads/master | 2022-12-09T10:46:38.594820 | 2020-03-09T13:01:07 | 2020-03-09T13:01:07 | 246,028,233 | 0 | 0 | null | 2022-12-08T03:46:02 | 2020-03-09T12:20:32 | HTML | UTF-8 | Python | false | false | 490 | py | # Generated by Django 2.0.1 on 2020-02-16 15:47
import DjangoUeditor.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('smartpipe', '0013_pipedetail_tem'),
]
operations = [
migrations.AddField(
model_name='project',
name='geometry',
field=DjangoUeditor.models.UEditorField(blank=True, default='', null=True, verbose_name='地理数据'),
),
]
| [
"steve_wei@163.net"
] | steve_wei@163.net |
69e0a610209790bf516dcdab0f40d9cfdbf81ce1 | de915073d42855cafad97adca348a46b2de92626 | /test_settings.py | 78b2b91a07ed1d32b6f0899527bb6cb81f060ef5 | [
"MIT"
] | permissive | koonalb/django-knockout-modeler | f4b1418594701d270e1a5e1a45a5dcdec9a88f73 | 29e5e939acba8fb3bd7fcad7726eb1115bd1e420 | refs/heads/master | 2021-01-17T21:43:28.496690 | 2015-11-10T23:45:15 | 2015-11-10T23:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = "secret_key_for_testing"
INSTALLED_APPS = ['knockout_modeler']
| [
"rich@anomos.info"
] | rich@anomos.info |
55ccd49a7f8ec347c90757fd4cb841d531674772 | 05fda9758bb71133b85ce6a58ce9dbc8fdc18fc7 | /resources/eth/history.py | 9d047cedb23e4c491af577a0ccc572b8f5791404 | [
"MIT"
] | permissive | yanrising/bitez | 82e3572d689989e37f5d8d3ab06bd764b036e64f | c0d9b052cbc8eb1c9884c287e34705b0a2f73bb1 | refs/heads/master | 2023-01-04T14:12:18.651916 | 2019-10-17T14:31:25 | 2019-10-17T14:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # tx count
import requests
from config import INFURA_API_KEY, CRYPTO_NETWORK
# block param: loop 'latest', 'pending'
def eth_tx_count(address, block):
if CRYPTO_NETWORK == 'mainnet':
net = 'https://mainnet.infura.io/v3/'+INFURA_API_KEY
elif CRYPTO_NETWORK == 'testnet':
net = 'https://ropsten.infura.io/v3/'+INFURA_API_KEY
hist = requests.post(net, json={"jsonrpc":"2.0","method":"eth_getTransactionCount","params": [address, block],"id":1})
txs = hist.json()
return int(txs['result'], 0)
| [
"merwanedr@gmail.com"
] | merwanedr@gmail.com |
dbfbc4a50c99a3dd737fc5753fcef35b7ebc2477 | 723ea3f47a45fe756c4a77809eb2a4d6b98bc733 | /crackfun/1. Two Sum.py | cadb1db2bf7fd90a71675399516afeef26c591c5 | [] | no_license | JoyiS/Leetcode | a625e7191bcb80d246328121669a37ac81e30343 | 5510ef424135783f6dc40d3f5e85c4c42677c211 | refs/heads/master | 2021-10-21T05:41:00.706086 | 2019-03-03T06:29:14 | 2019-03-03T06:29:14 | 110,296,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | '''
1/22/2018 Understand the hash function usage here
'''
class Solution:
# @return a tuple, (index1, index2)
# 8:42
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
d={}
for i, n in enumerate(nums):
if n in d:
return (d[n], i)
else:
d[target-n]=i
return (0,0) | [
"california.sjy@gmail.com"
] | california.sjy@gmail.com |
3c735398dd09e42e3d9e33e053474404254a5988 | 6f06a519bc5a89464d95702fa27fa663ad4eb8f8 | /stateChangeTest_Full.py | bf9ac9366ed460aba7182baba230030da5b92572 | [] | no_license | chrismaurer/chrism | 64e71397bfffcd76aa5a12cc1394fad2c8939d76 | b0a30d5cbe4602a32ad494e2550219d633edf2db | refs/heads/master | 2021-06-04T07:53:15.212812 | 2021-01-19T13:23:22 | 2021-01-19T13:23:22 | 112,128,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,032 | py | import time
import pyscreenshot
from pyrate.builder import Builder
from pyrate.ttapi.predicates import CallbackType
from pyrate.ttapi.manager import Manager
from pyrate.ttapi.order import TTAPIOrder
from ttapi import aenums, cppclient
from pyrate.exceptions import TimeoutError
from captain.controlled import controlled_name_type, ControlledName
from captain.lib.controlled_types import Tif
priceSession = Manager().getPriceSession()
orderSession = Manager().getOrderFillSession()
allCustDefaults = Manager().getCustomers()
ordSrv = Manager().getOrderServer()
priceSrv = Manager().getPriceServer()
products = priceSession.getProducts(prodName='HSI', prodType=aenums.TT_PROD_FUTURE)
product = products[0]
contracts = priceSession.getContracts(product)
contract = contracts[3]
custDefaults = allCustDefaults[0]
run_now = True
prev_trading_status = None
curr_trading_status = None
pricey = None
while run_now is True:
try:
if not priceSession.feed_down:
for enum, price in priceSession.getPrices(contract).items():
if "SETTL" in str(enum):
pricey = price.value
elif "LAST_TRD_PRC" in str(enum):
pricey = price.value
elif "SRS_STATUS" in str(enum):
curr_trading_status = price.value
if curr_trading_status == prev_trading_status:
pass
else:
orderSession.deleteMyOrders()
if "FUTURE" not in str(product.prod_type) and pricey is None:
pricey = 10
if pricey is None:
pricey = 30000
else:
pricey = pricey
order_qty = 100
for side in [aenums.TT_BUY, aenums.TT_SELL]:
orderParams = dict(order_qty=order_qty, buy_sell=side, order_action=aenums.TT_ORDER_ACTION_ADD, limit_prc=pricey, order_type=aenums.TT_LIMIT_ORDER, tif="GTD", srs=contract, exchange_clearing_account=custDefaults.exchange_clearing_account, free_text=custDefaults.free_text, acct_type=cppclient.AEnum_Account.TT_ACCT_AGENT_1)
newOrder = TTAPIOrder()
newOrder.setFields(**orderParams)
myOrder = orderSession.sendAndWait(newOrder)
if "BUY" in str(side):
newOrder2 = TTAPIOrder()
newOrder2.setFields(**orderParams)
newOrder2.buy_sell = aenums.TT_SELL
newOrder2.order_qty = 1
orderSession.sendAndWait(newOrder2)
time.sleep(3)
pyscreenshot.grab_to_file(r"C:\tt\screenshot_" + str(curr_trading_status) + "_" + "-".join([str(time.localtime()[3]), str(time.localtime()[4]), str(time.localtime()[5])]) + ".png")
prev_trading_status = curr_trading_status
time.sleep(15)
except TimeoutError:
pass
| [
"chris.maurer@tradingtechnologies.com"
] | chris.maurer@tradingtechnologies.com |
7902f1f901a4002347a8a5287c8699239944a7d0 | 0b6966a75a4c62393a38a73df5a779228639c42c | /active_selection/softmax_entropy.py | 024feddbae9822212fb02b22c24cb73a610c2eaf | [] | no_license | Shuai-Xie/DEAL | 7cbec778bcc83b633a1c3319d9b00c8b0f98aa00 | 06ff3ba29196e276376a9cf8d868d54fd2db2680 | refs/heads/master | 2023-08-29T20:03:44.867280 | 2021-11-12T07:50:28 | 2021-11-12T07:50:28 | 300,126,893 | 25 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,612 | py | import torch
from datasets.base_dataset import BaseDataset
from datasets.transforms import get_transform
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
from utils.misc import *
class SoftmaxEntropySelector:
def __init__(self, dataset, img_size):
self.dataset = dataset
self.img_size = img_size
self.softmax = torch.nn.Softmax2d()
@torch.no_grad()
def select_next_batch(self, model, active_trainset, select_num):
model.eval()
# get a subset from the whole unlabelset
subset_img_paths, subset_target_paths, remset_img_paths, remset_target_paths = get_subset_paths(
active_trainset.unlabel_img_paths, active_trainset.unlabel_target_paths,
)
print('subset_img_paths', len(subset_img_paths))
print('remset_img_paths', len(remset_img_paths))
unlabelset = BaseDataset(subset_img_paths, subset_target_paths)
unlabelset.transform = get_transform('test', base_size=self.img_size)
dataloader = DataLoader(unlabelset,
batch_size=8, shuffle=False,
pin_memory=True, num_workers=4)
scores = []
tbar = tqdm(dataloader, desc='\r')
tbar.set_description(f'cal_entropy_score')
for sample in tbar:
img = sample['img'].cuda()
probs = self.softmax(model(img)) # B,C,H,W
probs = probs.detach().cpu().numpy()
scores += self.cal_entropy_score(probs)
select_idxs = get_topk_idxs(scores, select_num)
# 从 subset 中选出样本
select_img_paths, select_target_paths, remain_img_paths, remain_target_paths = get_select_remain_paths(
subset_img_paths, subset_target_paths, select_idxs
)
# remset 补充回去
remain_img_paths += remset_img_paths
remain_target_paths += remset_target_paths
print('select_img_paths', len(select_img_paths))
print('remain_img_paths', len(remain_img_paths))
# 更新 DL, DU
active_trainset.add_by_select_remain_paths(select_img_paths, select_target_paths,
remain_img_paths, remain_target_paths)
@staticmethod
def cal_entropy_score(probs): # C,H,W 熵越大,越难分
batch_scores = []
for i in range(len(probs)): # prob img
entropy = np.mean(-np.nansum(np.multiply(probs[i], np.log(probs[i] + 1e-12)), axis=0)) # 表示沿着第1维计算
batch_scores.append(entropy)
return batch_scores
| [
"shuaixie@zju.edu.cn"
] | shuaixie@zju.edu.cn |
64f6d5d175541c818f8df3fab0c13c4a310d2b8c | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Con/Cam_Con/Cam_Con_20190119103655.py | 82dc55f54efa3014614d149f07da48a4e3fa3c34 | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
# Maya Header
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as omui
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
import plugin.Qt as Qt
if Qt.__binding__.startswith('PyQt'):
from Qt import _uic as uic
return uic.loadUiType(uiFile)
elif Qt.__binding__ == 'PySide':
import pysideuic as uic
else:
import pyside2uic as uic
import xml.etree.ElementTree as xml
from cStringIO import StringIO
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
exec pyc in frame
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('%s'%widget_class)
return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Con.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
import Cam_Item
reload(Cam_Item)
from Cam_Item import Cam_Item
from maya import cmds
class Cam_Con(form_class,base_class):
def __init__(self,dock="dock"):
super(Cam_Con,self).__init__()
self.setupUi(self)
self.Get_Constraint_BTN.clicked.connect(self.Get_Constraint_Fn)
def Get_Constraint_Fn(self):
selection = cmds.ls(sl=1)[0]
constraintNode = cmds.listConnections(selection,type="constraint")[0]
print constraintNode
AttrList = cmds.listAttr( constraintNode,r=True, s=True )
constraintNode = cmds.listConnections(constraintNode,type="constraint")[0]
print AttrList
self.Save_Json_Fun()
def Save_Json_Fun(self,path=GUI_STATE_PATH):
GUI_STATE = {}
GUI_STATE['DOCK'] = self.DOCK
try:
with open(path,'w') as f:
json.dump(GUI_STATE,f,indent=4)
except:
if path != "":
QMessageBox.warning(self, u"Warning", u"保存失败")
def Load_Json_Fun(self,path=GUI_STATE_PATH,load=False):
if os.path.exists(path):
GUI_STATE = {}
with open(path,'r') as f:
GUI_STATE = json.load(f)
return True
else:
if load==True:
QMessageBox.warning(self, u"Warning", u"加载失败\n检查路径是否正确")
return False
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
bf8e2477790da52a16f688d27eb6a02702cb161f | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Graphs/Topological Sort/CourseSchedule.py | f05e3d1eba4cfe067b0698fe737dc72053b0f877 | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 1,985 | py | """
LeetCode Problem: 207. Course Schedule
Link: https://leetcode.com/problems/course-schedule/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(V+E)
Space Complexity: O(V)
"""
# Kahn's Topological Sort Algorithm
from collections import defaultdict
class Solution(object):
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
# building up the DAG
for u, v in prerequisites:
self.addEdge(u,v)
# initializing the in_degree array
in_degree = [0] * numCourses
# finding the number of incoming edges for every vertices
for i in self.graph:
for j in self.graph[i]:
in_degree[j] += 1
# a queue to track the next vertex to be processed
queue = []
# finding the initial batch of vertex which has 0 incoming nodes
for i in range(numCourses):
if in_degree[i] == 0:
queue.append(i)
result = [] # stores the resulting topological sort
count = 0 # keeps count of the number of visited vertices
while queue:
u = queue.pop(0) # pops the first vertex from the queue
result.append(u) # appends the vertex to the result array
# traverses all the neighbors and decrements their incoming edges by 1
for i in self.graph[u]:
in_degree[i] -= 1
if in_degree[i] == 0:
queue.append(i) # pushes the neighboring vertex if their no. of incoming edges is 0
count += 1 # increments the visited vertices count by 1
if count != numCourses:
return False
else:
return True | [
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
ee0d4a993477b4bc71f38b3426fb8bd6b5200825 | edc1134436a79ca883a0d25f3c8dfffc4235c514 | /pyro/infer/reparam/projected_normal.py | 4a4f1a27789a4f797c3902a0494b284ab57b3005 | [
"Apache-2.0"
] | permissive | pyro-ppl/pyro | 2283d8ca528fc090c724a3a6e0f344e505ebbf77 | 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81 | refs/heads/dev | 2023-08-18T00:35:28.014919 | 2023-08-06T21:01:36 | 2023-08-06T21:01:36 | 94,506,832 | 3,647 | 606 | Apache-2.0 | 2023-09-14T13:52:14 | 2017-06-16T05:03:47 | Python | UTF-8 | Python | false | false | 1,697 | py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import torch
import pyro
import pyro.distributions as dist
from pyro.ops.tensor_utils import safe_normalize
from .reparam import Reparam
class ProjectedNormalReparam(Reparam):
"""
Reparametrizer for :class:`~pyro.distributions.ProjectedNormal` latent
variables.
This reparameterization works only for latent variables, not likelihoods.
"""
def apply(self, msg):
name = msg["name"]
fn = msg["fn"]
value = msg["value"]
is_observed = msg["is_observed"]
if is_observed:
raise NotImplementedError(
"ProjectedNormalReparam does not support observe statements"
)
fn, event_dim = self._unwrap(fn)
assert isinstance(fn, dist.ProjectedNormal)
# Differentiably invert transform.
value_normal = None
if value is not None:
# We use an arbitrary injection, which works only for initialization.
value_normal = value - fn.concentration
# Draw parameter-free noise.
new_fn = dist.Normal(torch.zeros_like(fn.concentration), 1).to_event(1)
x = pyro.sample(
"{}_normal".format(name),
self._wrap(new_fn, event_dim),
obs=value_normal,
infer={"is_observed": is_observed},
)
# Differentiably transform.
if value is None:
value = safe_normalize(x + fn.concentration)
# Simulate a pyro.deterministic() site.
new_fn = dist.Delta(value, event_dim=event_dim).mask(False)
return {"fn": new_fn, "value": value, "is_observed": True}
| [
"noreply@github.com"
] | pyro-ppl.noreply@github.com |
321daaf3a06bfa4f6dc23b6ebd71a79346bc88c6 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/datalake/azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/create_trusted_id_provider_with_account_parameters_py3.py | 66fc5888e5af2ea2e35259a9e9c04b265632b181 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 1,472 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CreateTrustedIdProviderWithAccountParameters(Model):
"""The parameters used to create a new trusted identity provider while
creating a new Data Lake Store account.
All required parameters must be populated in order to send to Azure.
:param name: Required. The unique name of the trusted identity provider to
create.
:type name: str
:param id_provider: Required. The URL of this trusted identity provider.
:type id_provider: str
"""
_validation = {
'name': {'required': True},
'id_provider': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id_provider': {'key': 'properties.idProvider', 'type': 'str'},
}
def __init__(self, *, name: str, id_provider: str, **kwargs) -> None:
super(CreateTrustedIdProviderWithAccountParameters, self).__init__(**kwargs)
self.name = name
self.id_provider = id_provider
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
2e8ab7828b6fafbd986642be92a72261fc8d5428 | 34578a08451dc124f02fbba92a219da3347059cd | /.history/tools/views_20190430183042.py | 79c4d1509e1b4cb84291f467d8bc49e00bd374b8 | [] | no_license | gwjczwy/CTF-Exercises | b35d938b30adbc56c1b6f45dc36cea1421c702fb | c2d5c47f5047b1601564453e270ce50aad7f56fc | refs/heads/master | 2020-05-25T23:51:26.190350 | 2019-05-22T13:18:59 | 2019-05-22T13:18:59 | 188,042,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,688 | py | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from json import dumps
from .models import Url,Money
import time
#########################
#主页
@login_required
def index(requests):
data={'toolname':'index','user':requests.user}
return render(requests,'tools/index.html',data)
#########################
#短链接
@login_required
def surl(requests):#短链接 index
data={}
data['toolName']="surl"
data['parameter']="index"
return render(requests, 'tools/index.html', data)
def surls(requests,parameter):#带参数的短链接跳转
data={}
data['toolName']="surl"
data['parameter']="link"
print('短链接参数',parameter)
try:
req=Url.objects.get(sUrl=parameter)
print('获取对象成功')
except:
return HttpResponse('你来错地方了,悟空')
req=req.fullUrl
return HttpResponse('<script>window.location.href="'+req+'";</script>')
@csrf_exempt
@login_required
def createSUrl(requests):
if not (requests.method == 'POST' and requests.POST['fullUrl']):
req={'message':'fail'}
return HttpResponse(dumps(req),content_type="application/json")
fullUrl=requests.POST['fullUrl']
while True:
randUrl=randStr(5)#随机长度为5的字符串
try:
Url.objects.get(sUrl=randUrl)#如果重复就继续随机
print('再!来!一!次!')
except:
break
randUrl=randStr(5)
Url(sUrl=randUrl,fullUrl=fullUrl).save()
req={'message':'success','url':randUrl}
return HttpResponse(dumps(req),content_type="application/json")
def randStr(l):
import random
import string
seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
sa = []
for i in range(l):
sa.append(random.choice(seed))
salt = ''.join(sa)
return salt
#########################
#商店
@login_required
def shop(requests):
data={}
data['toolName']="shop"
money = Money.objects.get(user=requests.user)
data['money']=money
return render(requests, 'tools/index.html', data)
#商店兑换
@csrf_exempt
@login_required
def shopExchange(requests):
if not (requests.method == 'POST' and 'rule' in requests.POST and 'num' in requests.POST):
print('非法请求')
req={'message':'fail','reason':'非法请求'}
return HttpResponse(dumps(req),content_type="application/json")
rule=requests.POST['rule']
num=requests.POST['num']
if not rule in ['m2b','b2m']:# 判断转换规则是否合法
print('rule参数不合法')
req={'message':'fail','reason':'rule参数不合法'}
return HttpResponse(dumps(req),content_type="application/json")
if num.isdigit():# 判断数字是否合法
num=int(num)
if num<0:
req={'message':'fail','reason':'非法参数'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'非法参数'}
return HttpResponse(dumps(req),content_type="application/json")
# 获取货币对象
money = Money.objects.get(user=requests.user)
if rule=='m2b':
if money.monero>=num:
money.bitcoin+=num
money.save()
time.sleep(5) #等待时间 造成条件竞争
money.monero-=num
money.save()
else:
req={'message':'fail','reason':'monero 不足'}
return HttpResponse(dumps(req),content_type="application/json")
elif rule=='b2m':
if money.bitcoin>=num:
money.monero+=num
money.save()
time.sleep(5)
money.bitcoin-=num
money.save()
else:
req={'message':'fail','reason':'bitcoin 不足'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'未知错误'}
return HttpResponse(dumps(req),content_type="application/json")
req={'message':'success','monero':money.monero,'bitcoin':money.bitcoin}
return HttpResponse(dumps(req),content_type="application/json")
#########################
#日志
@login_required
def logs(requests):
data={}
data['toolName']="logs"
return render(requests, 'tools/index.html', data)
# 添加日志
@csrf_exempt
@login_required
def addLog(requests):
if not (requests.method == 'POST' and 'path' in requests.POST and 'content' in requests.POST):
req={'message':'fail','reason':'非法请求'}
return HttpResponse(dumps(req),content_type="application/json")
path=requests.POST['path']
content=requests.POST['content']
# 获取货币对象
money = Money.objects.get(user=requests.user)
if money.bitcoin >=100:
try:
with open(path,'at') as file:
file.write(content)
money.bitcoin-=100
money.save()
req={'message':'success','reason':'操作成功'}
return HttpResponse(dumps(req),content_type="application/json")
except:
req={'message':'fail','reason':'写入文件错误'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'货币不足'}
return HttpResponse(dumps(req),content_type="application/json")
#下载源代码
def downSource(requests):
# 获取货币对象
money = Money.objects.get(user=requests.user)
if money.bitcoin >=1000:
money.bitcoin-=1000
money.save() | [
"zwy053@163.com"
] | zwy053@163.com |
b80454a40c9ebcd6c05af124a3088166decdd9de | 9064fc0b780d3632163e3a73e9f73fdf8aa7f80e | /battle.py | 437281049435ea30d822f1403221dcdffae51323 | [] | no_license | poteto1212/myapp-flask- | c752751cb26898eaa419b410d7b02ae4a608712a | 3c6714dfad0ca59b92c5a2ab261f4dcc11e255b1 | refs/heads/master | 2023-03-15T02:14:11.041245 | 2021-03-28T07:51:34 | 2021-03-28T07:51:34 | 351,810,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from flask import Flask, request, render_template
app = Flask(__name__)
players = ["勇者", "戦士", "魔法使い", "忍者"]
@app.route("/")#テンプレートのgetメソッドに直接表示
def show():
message = "あらたなモンスターがあらわれた!"
return render_template("battle.html", message = message, players = players)
@app.route("/result", methods=["POST"])#テンプレートのPOSTメソッドからの入力処理
def result():
name = request.form["name"]
message = name + "はモンスターと戦った!"
return render_template("battle.html", message = message, players = players)
| [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.