blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6b334de835c54ff274cbcae4e9c5b5691a10e1e | 45f93a9d47204d76b8bf25a71dfb79403e75c33c | /Threading/long-running-thread.py | 3222c862dc2c304516b29971b5c97a2bdb95d5e9 | [] | no_license | tahmid-tanzim/problem-solving | 0173bce1973ac3e95441a76c10324c0e1b0a57c3 | 6ddb51de6772130f209474e76f39ca2938f444f0 | refs/heads/master | 2023-06-25T02:18:03.690263 | 2023-06-20T06:58:46 | 2023-06-20T06:58:46 | 137,173,850 | 4 | 1 | null | 2022-03-30T08:28:41 | 2018-06-13T06:44:25 | Python | UTF-8 | Python | false | false | 756 | py | from threading import Timer
import time
def heartbeat_tick():
current_time = time.strftime("%H:%M:%S", time.localtime())
print('im ok ' + current_time)
def heartbeat_tick2():
current_time = time.strftime("%H:%M:%S", time.localtime())
print('im very good ' + current_time)
class RepeatingTimer(Timer):
def run(self):
while not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.wait(self.interval)
if __name__ == '__main__':
t1 = RepeatingTimer(20, heartbeat_tick)
t1.start() # every 30 seconds, call heartbeat_tick
t2 = RepeatingTimer(5, heartbeat_tick2)
t2.start() # every 30 seconds, call heartbeat_tick
# later
# t.cancel() # cancels execution | [
"tahmid.tanzim@gmail.com"
] | tahmid.tanzim@gmail.com |
d1659d658ee81928f513e875f7d9f2e78a75d540 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v7/services/services/ad_group_service/transports/base.py | ce6b4b5bb906932a8cc9e5393ecafc79ed352104 | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 3,978 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v7.resources.types import ad_group
from google.ads.googleads.v7.services.types import ad_group_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AdGroupService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_ad_group: gapic_v1.method.wrap_method(
self.get_ad_group,
default_timeout=None,
client_info=client_info,
),
self.mutate_ad_groups: gapic_v1.method.wrap_method(
self.mutate_ad_groups,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_ad_group(
self,
) -> typing.Callable[
[ad_group_service.GetAdGroupRequest], ad_group.AdGroup
]:
raise NotImplementedError
@property
def mutate_ad_groups(
self,
) -> typing.Callable[
[ad_group_service.MutateAdGroupsRequest],
ad_group_service.MutateAdGroupsResponse,
]:
raise NotImplementedError
__all__ = ("AdGroupServiceTransport",)
| [
"noreply@github.com"
] | Z2Xsoft.noreply@github.com |
62d51938566b1760ee0a7773969750876072c711 | a03b30ee77b49e19a72b647e984b98f878c2847a | /Anaconda-files/Program_1d.py | b629bba7ac959034d527e2162c17ccbb88a23184 | [
"BSD-2-Clause"
] | permissive | SSalaPla/dynamical-systems-with-applications-using-python | d47f46dfbe7195d2446cdee7f874cc3e4a5ab90a | c80582ae3559230d12e2aee15f94c465e367fdda | refs/heads/master | 2021-05-03T16:00:31.561907 | 2018-02-05T15:16:13 | 2018-02-05T15:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | # Program 1d: Subplots.
# See Figure 1.15.
import matplotlib.pyplot as plt
import numpy as np
def f(t):
return np.exp(-t) * np.cos(2*np.pi*t)
t1=np.arange(0.0,5.0,0.1)
t2=np.arange(0.0,5.0,0.02)
plt.figure(1)
plt.subplot(211) #subplot(num rows,num cols,fig num)
plt.plot(t1,f(t1),'bo',t2,f(t2),'k',label='damping')
plt.xlabel('time (s)')
plt.ylabel('amplitude (m)')
plt.title('Damped pendulum')
legend = plt.legend(loc='upper center',shadow=True)
plt.subplot(212)
plt.plot(t2, np.cos(2*np.pi*t2),'g--',linewidth=4)
plt.xlabel('time (s)')
plt.ylabel('amplitude (m)')
plt.title('Undamped pendulum')
plt.subplots_adjust(hspace=0.8)
plt.show() | [
"samuel.dibella@gmail.com"
] | samuel.dibella@gmail.com |
1e0810823638b12185d64ebe70744a50b7bdcd48 | 717ae7ee216675ba0fb31358000dde3d2fc11c5c | /chart_of_accounts_builder/config/desktop.py | 598a493fd9079f73d008fdb162af58221aa05684 | [
"MIT"
] | permissive | sihaysistema/chart_of_accounts_builder | 7cf2bfb23eeb254d89b083dccd146fc60736eb9b | 23a94ddbdae4a36c6d318e148c47e68a36eb177b | refs/heads/master | 2020-05-31T21:16:47.958250 | 2019-06-06T15:31:48 | 2019-06-06T15:31:48 | 190,494,216 | 1 | 0 | null | 2019-06-06T01:35:37 | 2019-06-06T01:35:37 | null | UTF-8 | Python | false | false | 281 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return {
"Chart Of Accounts Builder": {
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Chart Of Accounts Builder")
}
}
| [
"rmehta@gmail.com"
] | rmehta@gmail.com |
6fd9f40b3a240f53ce47870b44e87e64f7bffccf | c9273bbb39b2f9bade816ae0d4d57ba664f599c7 | /setup.py | 1858551b34353e1bcc19f247f13c75e79207cc58 | [] | no_license | alunduil/singularity | 0eaefdbee20880146cd07fae7445387c16ab861b | 600f864628743472226755ad0fe7a4c7a0d2ef28 | refs/heads/master | 2021-01-21T11:45:16.242749 | 2014-06-22T17:01:03 | 2014-06-22T17:01:03 | 5,211,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | # Copyright (C) 2012 by Alex Brandt <alunduil@alunduil.com>
#
# singularity is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
from distutils.core import setup
try:
from singularity import information
from singularity import helpers
except ImportError:
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from singularity import information
from singularity import helpers
PARAMS = {}
PARAMS["name"] = information.NAME
PARAMS["version"] = information.VERSION
PARAMS["description"] = information.DESCRIPTION
PARAMS["long_description"] = information.LONG_DESCRIPTION
PARAMS["author"] = information.AUTHOR
PARAMS["author_email"] = information.AUTHOR_EMAIL
PARAMS["url"] = information.URL
PARAMS["license"] = information.LICENSE
PARAMS["scripts"] = [
"bin/singularity",
]
PARAMS["packages"] = [
"singularity",
"singularity.configurators",
"singularity.configurators.gentoo",
"singularity.communicators",
"singularity.helpers",
"singularity.parameters",
]
PARAMS["data_files"] = [
("share/doc/{P[name]}-{P[version]}".format(P = PARAMS), [
"README.md",
]),
("share/doc/{P[name]}-{P[version]}/config".format(P = PARAMS), [
"config/singularity.conf",
"config/init.gentoo",
]),
("share/man/man8", [
"doc/man/man8/singularity.8",
"doc/man/man8/singularity-apply.8",
"doc/man/man8/singularity-daemon.8",
]),
("share/man/man5", [
"doc/man/man5/singularity.conf.5",
]),
]
PARAMS["requires"] = [
"daemon",
"Crypto",
]
if helpers.VIRTUAL == "xenU":
PARAMS["requires"].append("xen")
setup(**PARAMS)
| [
"alunduil@alunduil.com"
] | alunduil@alunduil.com |
36fe5a2469a8db223fcc5567527737d6653e366c | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/surface/container/clusters/get_iam_policy.py | 6e672f702942bf58fceb409a2804549e7be639d2 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for getting IAM policies for clusters."""
from googlecloudsdk.calliope import base
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class GetIAMPolicy(base.Command):
"""Get the IAM policy for a cluster."""
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
parser.add_argument('name', help='The name of this cluster.')
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
adapter = self.context['api_adapter']
location_get = self.context['location_get']
location = location_get(args)
return adapter.GetIamPolicy(adapter.ParseCluster(args.name, location))
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
6161a504a113f3319583c244962a422646113b54 | 5bd1381e5515061b4fdd7284f80f89d0aad3c4e6 | /www/unicooo/views.py | 06970d5519d28f80bee6c5af73a4a63d6bec5113 | [] | no_license | Windsooon/Unicooo-django | 1c1a7643151dffc15cea6ff94b9a80453d1fcfb2 | 7db3f2807bc4802b686f1a4d6bd6fd5b7436611b | refs/heads/master | 2022-12-21T11:16:22.855365 | 2017-12-09T09:24:01 | 2017-12-09T09:24:01 | 47,067,358 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | from django.shortcuts import render
def front_page(request):
return render(request, "frontpage.html")
def public_activities(request):
return render(request, "public_activities.html")
| [
"wiwindson@outlook.com"
] | wiwindson@outlook.com |
705a4ea02f2b08df2528ae1bb2b39cb9b998c9bd | 28ca060efe83304c6174c3411cd9105537adf6bd | /fs_image/rpm/tests/test_rpm_metadata.py | dba5130b5b6b441078f54df13cb8562ac0f85bf7 | [
"MIT"
] | permissive | singhaditya28/fs_image | 6eee93a3663f36894f2e26efef9f2f961f11d76b | 3d122da48eab8b26e5add6754cc1f91296139c58 | refs/heads/master | 2022-09-25T04:52:58.206356 | 2020-06-05T18:27:39 | 2020-06-05T18:29:57 | 269,931,605 | 0 | 0 | MIT | 2020-06-06T09:24:29 | 2020-06-06T09:24:28 | null | UTF-8 | Python | false | false | 5,792 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib.resources
import os
import re
import shutil
import unittest
from fs_image.fs_utils import temp_dir
from .temp_repos import get_test_signing_key, temp_repos_steps, Repo, Rpm
from ..rpm_metadata import compare_rpm_versions, RpmMetadata, _compare_values
from fs_image.find_built_subvol import find_built_subvol
class RpmMetadataTestCase(unittest.TestCase):
def _load_canonical_tests(self):
STMT = re.compile(
r"(.*)RPMVERCMP\(([^, ]*) *, *([^, ]*) *, *([^\)]*)\).*")
for line in importlib.resources.open_text(
'fs_image.rpm', 'version-compare-tests').readlines():
m = STMT.match(line)
if m:
yield m.group(2), m.group(3), int(m.group(4))
def test_rpm_metadata_from_subvol(self):
layer_path = os.path.join(os.path.dirname(__file__), 'child-layer')
child_subvol = find_built_subvol(layer_path)
a = RpmMetadata.from_subvol(child_subvol, 'rpm-test-mice')
self.assertEqual(a.name, 'rpm-test-mice')
self.assertEqual(a.epoch, 0)
self.assertEqual(a.version, '0.1')
self.assertEqual(a.release, 'a')
# not installed
with self.assertRaises(RuntimeError):
a = RpmMetadata.from_subvol(child_subvol, 'rpm-test-carrot')
# subvol with no RPM DB
layer_path = os.path.join(os.path.dirname(__file__), 'hello-layer')
hello_subvol = find_built_subvol(layer_path)
with self.assertRaisesRegex(ValueError, ' does not exist$'):
a = RpmMetadata.from_subvol(hello_subvol, 'rpm-test-mice')
def test_rpm_metadata_from_file(self):
with temp_repos_steps(
gpg_signing_key=get_test_signing_key(),
repo_change_steps=[{
'repo': Repo([Rpm('sheep', '0.3.5.beta', 'l33t.deadbeef.777')]),
}],
) as repos_root, temp_dir() as td:
src_rpm_path = repos_root / ('0/repo/repo-pkgs/' +
'rpm-test-sheep-0.3.5.beta-l33t.deadbeef.777.x86_64.rpm')
dst_rpm_path = td / 'arbitrary_unused_name.rpm'
shutil.copy(src_rpm_path, dst_rpm_path)
a = RpmMetadata.from_file(dst_rpm_path)
self.assertEqual(a.name, 'rpm-test-sheep')
self.assertEqual(a.epoch, 0)
self.assertEqual(a.version, '0.3.5.beta')
self.assertEqual(a.release, 'l33t.deadbeef.777')
# non-existent file
with self.assertRaisesRegex(RuntimeError, '^Error querying RPM:'):
a = RpmMetadata.from_file(b'idontexist.rpm')
# missing extension
with self.assertRaisesRegex(ValueError, ' needs to end with .rpm$'):
a = RpmMetadata.from_file(b'idontendwithdotrpm')
def test_rpm_query_arg_check(self):
with self.assertRaisesRegex(ValueError, '^Must pass only '):
RpmMetadata._repo_query(RpmMetadata, b"dbpath", None, b"path")
def test_rpm_compare_versions(self):
# name mismatch
a = RpmMetadata('test-name1', 1, '2', '3')
b = RpmMetadata('test-name2', 1, '2', '3')
with self.assertRaises(ValueError):
compare_rpm_versions(a, b)
# Taste data was generated with:
# rpmdev-vercmp <epoch1> <ver1> <release1> <epoch2> <ver2> <release2>
# which also uses the same Python rpm lib.
#
# This number of test cases is excessive but does show how interesting
# RPM version comparisons can be.
test_evr_data = [
# Non-alphanumeric (except ~) are ignored for equality
((1, '2', '3'), (1, '2', '3'), 0), # 1:2-3 == 1:2-3
((1, ':2>', '3'), (1, '-2-', '3'), 0), # 1::2>-3 == 1:-2--3
((1, '2', '3?'), (1, '2', '?3'), 0), # 1:2-?3 == 1:2-3?
# epoch takes precedence no matter what
((0, '2', '3'), (1, '2', '3'), -1), # 0:2-3 < 1:2-3
((1, '1', '3'), (0, '2', '3'), 1), # 1:1-3 > 0:2-3
# version and release trigger the real comparison rules
((0, '1', '3'), (0, '2', '3'), -1), # 0:1-3 < 0:2-3
((0, '~2', '3'), (0, '1', '3'), -1), # 0:~2-3 < 0:1-3
((0, '~', '3'), (0, '1', '3'), -1), # 0:~-3 < 0:1-3
((0, '1', '3'), (0, '~', '3'), 1), # 0:1-3 > 0:~-3
((0, '^1', '3'), (0, '^', '3'), 1), # 0:^1-3 > 0:^-3
((0, '^', '3'), (0, '^1', '3'), -1), # 0:^-3 < 0:^1-3
((0, '0333', 'b'), (0, '0033', 'b'), 1), # 0:0333-b > 0:0033-b
((0, '0033', 'b'), (0, '0333', 'b'), -1), # 0:0033-b < 0:0333-b
((0, '3', '~~'), (0, '3', '~~~'), 1), # 0:3-~~ > 0:3-~~~
((0, '3', '~~~'), (0, '3', '~~'), -1), # 0:3-~~~ < 0:3-~~
((0, '3', '~~~'), (0, '3', '~~~'), 0), # 0:3-~~~ == 0:3-~~~
((0, 'a2aa', 'b'), (0, 'a2a', 'b'), 1), # 0:a2aa-b > 0:a2a-b
((0, '33', 'b'), (0, 'aaa', 'b'), 1), # 0:33-b > 0:aaa-b
]
for evr1, evr2, expected in test_evr_data:
a = RpmMetadata('test-name', *evr1)
b = RpmMetadata('test-name', *evr2)
self.assertEqual(compare_rpm_versions(a, b),
expected, f'failed: {evr1}, {evr2}, {expected}')
# Test against some more canonical tests. These are derived from
# actual tests used for rpm itself.
for ver1, ver2, expected in self._load_canonical_tests():
self.assertEqual(_compare_values(ver1, ver2),
expected, f'failed: {ver1}, {ver2}, {expected}')
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
0f7f483f687b8b4897064f7ac52ff924a951cd9d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_321/ch14_2020_03_02_17_36_10_914758.py | e0c3dc8afc80fcd598ed32def9202927e6ac0a30 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import math
def calcula_distancia_do_projetil (v,o,y):
return (math.pow(v,2)/2*9.8)*(1+math.sqrt(1+(2*9.8*y)/math.pow(v,2)*math.sin(o)**2))*(math.sin(2*o)) | [
"you@example.com"
] | you@example.com |
2af32f7a4d9ae29e4db70a69549fc1bbab5cd4ac | b59f66a9c4b5492b95c767b7ca76cd026f6f572a | /aac/metrics/rouge_l.py | 33b0b9c2183a7a51bf1d84b3ce3d73b265416c3c | [] | no_license | Labbeti/dcase2021task6 | b50f51370af15c241bd9f257920e2df4bc925669 | 2e792749bd9b2a495fa4b870f6190f6fb389fc56 | refs/heads/main | 2023-06-11T07:10:50.179348 | 2021-07-05T09:28:11 | 2021-07-05T09:28:11 | 377,414,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py |
from rouge_metric import PyRouge
from torch.nn import Module
class RougeL(Module):
def __init__(self):
"""
Recall Oriented Understudy of Gisting Evaluation.
Output values are in range [0, 1]. Higher is better.
Use 'rouge-metric' package as backend.
Original paper: https://www.aclweb.org/anthology/W04-1013.pdf
"""
super().__init__()
self.rouge = PyRouge(rouge_l=True)
def forward(self, hypothesis: list[list[str]], references: list[list[list[str]]]) -> float:
if len(hypothesis) != len(references):
raise ValueError(f'Number of hypothesis and references are different ({len(hypothesis)} != {len(references)}).')
hypothesis_join = [' '.join(hyp) for hyp in hypothesis]
references_join = [[' '.join(ref) for ref in refs] for refs in references]
scores = self.rouge.evaluate(hypotheses=hypothesis_join, multi_references=references_join)
rouge_l_scores = scores['rouge-l']
# 3 scores = Recall r, Precision p, FScore f
# {'r': ..., 'p': ..., 'f': ...}
f_score = rouge_l_scores['f']
return f_score
| [
"etienne.labbe31@gmail.com"
] | etienne.labbe31@gmail.com |
fdd57a47de5f55ea470232da56fddf9705ce85ae | d442044fca0cb5c2102845c93194246273b3778b | /event_participant_labels/event_participant.py | fa3118b33c26f27f76ace611464af7b6d91e8720 | [] | no_license | vertelab/odoo-event-extra | f9d0ee7ac5140ee2cbcb15fbb7af61d1772aaa2e | 63dd0b981b23941bae18d2d968b34a7977bc7699 | refs/heads/master | 2022-07-09T00:26:50.416313 | 2020-07-01T15:28:58 | 2020-07-01T15:28:58 | 47,540,528 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution, third party addon
# Copyright (C) 2004-2016 Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
import logging
_logger = logging.getLogger(__name__)
class event_participant(models.Model):
_inherit = 'event.participant'
parent_name = fields.Char(related="partner_id.parent_id.name")
participant_name = fields.Char(related="partner_id.name")
event_name = fields.Char(related="event_id.name")
#~ event_type = fields.Char(related="event_id.event_type.name")
course_leader = fields.Char(related="event_id.course_leader.name")
#~ event_date = fields.Datetime(related="event_id.date_start")
| [
"anders.wallenquist@vertel.se"
] | anders.wallenquist@vertel.se |
2c2813d3ae5577ac8185b8e3feff5f018541b05e | 35b59ea3b2800b170f82ccec229c88e102e1f4bd | /calculator/codes/solution/pythonprogs/calc_mul.py | 30e609967c7df02a1c063aeb0c570826c1787858 | [] | no_license | gadepall/LA1400 | efcc47c708efee9bebe8db415cd80997db64e834 | bbdccf26b2f1c6a40325c139e8e406f923eb023f | refs/heads/master | 2020-03-24T16:32:50.508068 | 2018-07-30T05:14:02 | 2018-07-30T05:14:02 | 142,827,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | #-*-coding: utf-8-*-
#Don't remove the above line
#This program uses a C routine for multiplication
#in the calculator. Other arithmetic operations are not included.
from Tkinter import *
from ctypes import *
import math
class calc:
def getandreplace(self):
"""replace x with * and ÷ with /"""
self.expression = self.e.get()
self.newtext=self.expression.replace(self.newdiv,'/')
self.newtext=self.newtext.replace('x','*')
def equals(self):
"""when the equal button is pressed"""
self.getandreplace()
try:
for i in self.newtext:
if(i=='*'):
multi=CDLL('./mul.so')
y=self.newtext.split('*')
a=c_float(float(y[0]))
b=c_float(float(y[1]))
mul=multi.mul
mul.restype=c_float
self.value=mul(a,b)
except SyntaxError or NameErrror:
self.e.delete(0,END)
self.e.insert(0,'Invalid Input!')
else:
self.e.delete(0,END)
self.e.insert(0,self.value)
def clearall(self):
"""when clear button is pressed,clears the text input area"""
self.e.delete(0,END)
def clear1(self):
self.txt=self.e.get()[:-1]
self.e.delete(0,END)
self.e.insert(0,self.txt)
def action(self,argi):
"""pressed button's value is inserted into the end of the text area"""
self.e.insert(END,argi)
def __init__(self,master):
"""Constructor method"""
master.title('Calulator')
master.geometry()
self.e = Entry(master)
self.e.grid(row=0,column=0,columnspan=6,pady=3)
self.e.focus_set() #Sets focus on the input text area
self.div='÷'
self.newdiv=self.div.decode('utf-8')
#Generating Buttons
Button(master,text="=",width=10,command=lambda:self.equals()).grid(row=4, column=4,columnspan=2)
Button(master,text='AC',width=3,command=lambda:self.clearall()).grid(row=1, column=4)
Button(master,text='C',width=3,command=lambda:self.clear1()).grid(row=1, column=5)
Button(master,text="+",width=3,command=lambda:self.action('+')).grid(row=4, column=3)
Button(master,text="x",width=3,command=lambda:self.action('x')).grid(row=2, column=3)
Button(master,text="-",width=3,command=lambda:self.action('-')).grid(row=3, column=3)
Button(master,text="÷",width=3,command=lambda:self.action(self.newdiv)).grid(row=1, column=3)
Button(master,text="%",width=3,command=lambda:self.action('%')).grid(row=4, column=2)
Button(master,text="7",width=3,command=lambda:self.action('7')).grid(row=1, column=0)
Button(master,text="8",width=3,command=lambda:self.action(8)).grid(row=1, column=1)
Button(master,text="9",width=3,command=lambda:self.action(9)).grid(row=1, column=2)
Button(master,text="4",width=3,command=lambda:self.action(4)).grid(row=2, column=0)
Button(master,text="5",width=3,command=lambda:self.action(5)).grid(row=2, column=1)
Button(master,text="6",width=3,command=lambda:self.action(6)).grid(row=2, column=2)
Button(master,text="1",width=3,command=lambda:self.action(1)).grid(row=3, column=0)
Button(master,text="2",width=3,command=lambda:self.action(2)).grid(row=3, column=1)
Button(master,text="3",width=3,command=lambda:self.action(3)).grid(row=3, column=2)
Button(master,text="0",width=3,command=lambda:self.action(0)).grid(row=4, column=0)
Button(master,text=".",width=3,command=lambda:self.action('.')).grid(row=4, column=1)
Button(master,text="(",width=3,command=lambda:self.action('(')).grid(row=2, column=4)
Button(master,text=")",width=3,command=lambda:self.action(')')).grid(row=2, column=5)
Button(master,text="√",width=3,command=lambda:self.squareroot()).grid(row=3, column=4)
Button(master,text="x²",width=3,command=lambda:self.square()).grid(row=3, column=5)
#Main
root = Tk()
obj=calc(root) #object instantiated
root.mainloop()
| [
"gadepall@gmail.com"
] | gadepall@gmail.com |
484d13dc81d16a486d91bb29c9c89c4680416a38 | f8933a29319d9062b3f0070d133b9e533efbbc50 | /trilpy/ldpnr.py | 75544f25ee6722c4f87c617177f690c39f7b0b18 | [] | no_license | zimeon/trilpy | 13c999eb164c0a935abfdd4c94a7ab842c53cf67 | 825bd803ed5e5d7b6c906067a4c406a4db18c9c6 | refs/heads/master | 2021-01-16T23:24:39.374726 | 2018-07-24T03:28:09 | 2018-07-24T03:28:09 | 95,748,971 | 1 | 2 | null | 2018-02-07T20:39:00 | 2017-06-29T07:07:14 | Python | UTF-8 | Python | false | false | 771 | py | """An LDPNR - LDP Non-RDF Source."""
from .ldpr import LDPR
from .namespace import LDP
class LDPNR(LDPR):
"""LDPNR - A binary object.
An LDPR whose state is not represented in RDF. For example,
these can be binary or text documents that do not have useful
RDF representations.
See <https://www.w3.org/TR/ldp/#ldpnr>.
"""
type_label = 'LDPNR'
def __init__(self, uri=None, content=None, content_type=None, describedby=None):
"""Initialize LDPNR."""
super(LDPNR, self).__init__(uri)
self.content = content
self.content_type = content_type
self.describedby = describedby
@property
def rdf_types(self):
"""List of RDF types for this resource."""
return([LDP.NonRDFSource])
| [
"simeon.warner@cornell.edu"
] | simeon.warner@cornell.edu |
8968aa043a78587dbecbfc6589d1382d05f98626 | 814992618962991b1b6dd6f1cdf2853687cbfcd0 | /quantarhei/qm/propagators/svpropagator.py | e235da8d523f349c30ad53ed9290882148e6a24d | [
"MIT"
] | permissive | MichalPt/quantarhei | a5db7916405236dc78778e4ef378141a19a28ff2 | 536d4f39bb7f7d6893664520351d93eac2bc90f1 | refs/heads/master | 2022-12-15T09:36:53.108896 | 2022-07-28T09:44:12 | 2022-07-28T09:44:12 | 226,359,238 | 1 | 0 | MIT | 2019-12-06T15:37:24 | 2019-12-06T15:37:23 | null | UTF-8 | Python | false | false | 2,624 | py | # -*- coding: utf-8 -*-
"""
StateVector propagator
"""
import numpy
from .statevectorevolution import StateVectorEvolution
from ..hilbertspace.evolutionoperator import EvolutionOperator
from ... import REAL
class StateVectorPropagator:
def __init__(self, timeaxis, ham):
self.timeaxis = timeaxis
self.ham = ham
self.Odt = self.timeaxis.data[1]-self.timeaxis.data[0]
self.dt = self.Odt
self.Nref = 1
self.Nt = self.timeaxis.length
N = self.ham.data.shape[0]
self.N = N
self.data = numpy.zeros((self.Nt,N),dtype=numpy.complex64)
def setDtRefinement(self, Nref):
"""
The TimeAxis object specifies at what times the propagation
should be stored. We can tell the propagator to use finer
time step for the calculation by setting the refinement. The
refinement is an integer by which the TimeAxis time step should
be devided to get the finer time step. In the code below, we
have dt = 10 in the TimeAxis, but we want to calculate with
dt = 1
>>> HH = numpy.array([[0.0, 0.0],[0.0,1.0]])
>>> times = numpy.linspace(0,1000,10)
>>> pr = StateVectorPropagator(HH,times)
>>> pr.setDtRefinement(10)
"""
self.Nref = Nref
self.dt = self.Odt/self.Nref
def propagate(self, psii):
return self._propagate_short_exp(psii,L=4)
def get_evolution_operator(self):
eop = 0.0
return EvolutionOperator(self.timeaxis, data=eop)
def _propagate_short_exp(self, psii, L=4):
"""
Short exp integration
"""
pr = StateVectorEvolution(self.timeaxis, psii)
psi1 = psii.data
psi2 = psii.data
#
# RWA is applied here
#
if self.ham.has_rwa:
HH = self.ham.get_RWA_data()
else:
HH = self.ham.data
indx = 1
for ii in range(1,self.Nt):
for jj in range(0,self.Nref):
for ll in range(1,L+1):
pref = (self.dt/ll)
psi1 = -1j*pref*numpy.dot(HH,psi1)
psi2 = psi2 + psi1
psi1 = psi2
pr.data[indx,:] = psi2
indx += 1
if self.ham.has_rwa:
pr.is_in_rwa = True
return pr
| [
"tmancal74@gmail.com"
] | tmancal74@gmail.com |
f7c48c6551b9cc700832df0bd94985a171d283a6 | 8c643886e810f09a2c596066911300ceec64605b | /exercises/chapter03/data/simple2_solution.py | ad2ae388f43b368e5a606e792400b47e8732cef7 | [
"MIT"
] | permissive | matt-gardner/test-allennlp-course | fea62c18c983e5a76927a2e8b97ba30081f6838a | 15b18144c30cfcdbe9acf5ad9bb30e24f6522d11 | refs/heads/master | 2023-01-28T05:10:03.787036 | 2019-06-13T20:16:12 | 2019-06-13T20:16:12 | 191,823,476 | 6 | 1 | MIT | 2023-01-11T20:28:39 | 2019-06-13T19:44:21 | CSS | UTF-8 | Python | false | false | 1,214 | py | from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
from allennlp.data.tokenizers import WordTokenizer, CharacterTokenizer
from allennlp.data import Vocabulary

# Demo: build a character-level TextField and turn it into tensors.

# Splits text into characters (instead of words or wordpieces).
tokenizer = CharacterTokenizer()

# Represents each token (which is a _character_) with a single id from a vocabulary.
token_indexer = SingleIdTokenIndexer(namespace='character_vocab')

# Vocabulary pre-populated with every character that appears in the demo text.
vocab = Vocabulary()
vocab.add_tokens_to_namespace(['T', 'h', 'i', 's', ' ', 'o', 'm', 'e', 't', 'x', '.'],
                              namespace='character_vocab')

text = "This is some text."
tokens = tokenizer.tokenize(text)
print(tokens)

text_field = TextField(tokens, {'token_characters': token_indexer})

# In order to convert the token strings into integer ids, we need to tell the
# TextField what Vocabulary to use.
text_field.index(vocab)

# We typically batch things together when making tensors, which requires some
# padding computation. Don't worry too much about the padding for now.
padding_lengths = text_field.get_padding_lengths()

tensor_dict = text_field.as_tensor(padding_lengths)
print(tensor_dict)
| [
"mattg@allenai.org"
] | mattg@allenai.org |
4e618625235505db5cacd9c840d94bebff946ac7 | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/scheduler/driver.py | da8568c970018cf13ea07a0bcacab608bf2c9ac6 | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 4,264 | py | begin_unit
comment|'# Copyright (c) 2010 OpenStack Foundation'
nl|'\n'
comment|'# Copyright 2010 United States Government as represented by the'
nl|'\n'
comment|'# Administrator of the National Aeronautics and Space Administration.'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\nScheduler base class that all Schedulers should inherit from\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'abc'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
name|'from'
name|'stevedore'
name|'import'
name|'driver'
newline|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'servicegroup'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
op|'@'
name|'six'
op|'.'
name|'add_metaclass'
op|'('
name|'abc'
op|'.'
name|'ABCMeta'
op|')'
newline|'\n'
DECL|class|Scheduler
name|'class'
name|'Scheduler'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""The base class that all Scheduler classes should inherit from."""'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'host_manager'
op|'='
name|'driver'
op|'.'
name|'DriverManager'
op|'('
nl|'\n'
string|'"nova.scheduler.host_manager"'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'scheduler_host_manager'
op|','
nl|'\n'
name|'invoke_on_load'
op|'='
name|'True'
op|')'
op|'.'
name|'driver'
newline|'\n'
name|'self'
op|'.'
name|'servicegroup_api'
op|'='
name|'servicegroup'
op|'.'
name|'API'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|run_periodic_tasks
dedent|''
name|'def'
name|'run_periodic_tasks'
op|'('
name|'self'
op|','
name|'context'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Manager calls this so drivers can perform periodic tasks."""'
newline|'\n'
name|'pass'
newline|'\n'
nl|'\n'
DECL|member|hosts_up
dedent|''
name|'def'
name|'hosts_up'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'topic'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return the list of hosts that have a running service for topic."""'
newline|'\n'
nl|'\n'
name|'services'
op|'='
name|'objects'
op|'.'
name|'ServiceList'
op|'.'
name|'get_by_topic'
op|'('
name|'context'
op|','
name|'topic'
op|')'
newline|'\n'
name|'return'
op|'['
name|'service'
op|'.'
name|'host'
nl|'\n'
name|'for'
name|'service'
name|'in'
name|'services'
nl|'\n'
name|'if'
name|'self'
op|'.'
name|'servicegroup_api'
op|'.'
name|'service_is_up'
op|'('
name|'service'
op|')'
op|']'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'abc'
op|'.'
name|'abstractmethod'
newline|'\n'
DECL|member|select_destinations
name|'def'
name|'select_destinations'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'spec_obj'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Must override select_destinations method.\n\n :return: A list of dicts with \'host\', \'nodename\' and \'limits\' as keys\n that satisfies the request_spec and filter_properties.\n """'
newline|'\n'
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| [
"dmg@uvic.ca"
] | dmg@uvic.ca |
3aa77a9c0f183d217f06a630f4191d7fb841a73d | ca3ff0bc4f7e9e8fcf677afa3a1a18dd2129a5d4 | /daily_problems/problem_0_to_100/problem_86.py | 42defc172f8b8a0ea3af081435c7fda161acd383 | [
"MIT"
] | permissive | rrwt/daily-coding-challenge | d9b23a82a1a3c4824b8f1aeacf6584afc5189ce7 | 4dcd59eaff021be0b9b1aba1dda73248c81454b7 | refs/heads/master | 2022-05-29T04:32:44.406196 | 2022-05-25T01:12:01 | 2022-05-25T01:12:01 | 181,972,357 | 1 | 0 | MIT | 2021-04-20T19:58:43 | 2019-04-17T21:41:25 | Python | UTF-8 | Python | false | false | 1,125 | py | """
Given a string of parentheses, write a function to compute the minimum number of parentheses
to be removed to make the string valid (i.e. each open parenthesis is eventually closed).
For example,
given the string "()())()", you should return 1.
Given the string ")(", you should return 2, since we must remove all of them.
"""
def count_remove_parenthesis(text: str) -> int:
    """Return the minimum number of parentheses to remove to make `text` valid.

    A string is valid when every '(' is eventually closed by a matching ')'.
    Runs in O(n) time and O(1) space: an unmatched-'(' counter replaces the
    original list-based stack, which could only ever contain '(' characters
    (so the old ``stack[-1] == ")"`` branch was dead code).

    Raises:
        AssertionError: if `text` contains a character other than '(' or ')'.
    """
    unmatched_open = 0  # '(' characters seen so far with no matching ')'
    removals = 0        # ')' characters seen with no '(' left to match
    for char in text:
        if char == "(":
            unmatched_open += 1
        elif char == ")":
            if unmatched_open:
                unmatched_open -= 1
            else:
                removals += 1
        else:
            raise AssertionError(f"{char} is unacceptable as a parenthesis")
    # Every leftover unmatched '(' must also be removed.
    return removals + unmatched_open


if __name__ == "__main__":
    assert count_remove_parenthesis("()())()") == 1
    assert count_remove_parenthesis(")(") == 2
    assert count_remove_parenthesis("") == 0
    assert count_remove_parenthesis("((()))") == 0
    assert count_remove_parenthesis("()(") == 1
    assert count_remove_parenthesis("((()())())()()()(())") == 0
| [
"rohitrawat2000@gmail.com"
] | rohitrawat2000@gmail.com |
42ea4f4a66ce8ed2ede113d9666e461fe30a5244 | af43615e07f2bfaa908d6d96b4c90f98ce3ad47b | /rdr_service/lib_fhir/fhirclient_1_0_6/models/deviceusestatement.py | 642e4f165a8b132be219bba1db9b4de5dc7ab412 | [
"BSD-3-Clause"
] | permissive | all-of-us/raw-data-repository | 11aa25385543f5f8ef706663b79ce181775c1c9a | 461ae46aeda21d54de8a91aa5ef677676d5db541 | refs/heads/devel | 2023-09-01T06:47:25.710651 | 2023-09-01T01:18:56 | 2023-09-01T01:18:56 | 66,000,771 | 46 | 22 | BSD-3-Clause | 2023-09-14T21:06:38 | 2016-08-18T13:47:08 | Python | UTF-8 | Python | false | false | 3,846 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/DeviceUseStatement) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class DeviceUseStatement(domainresource.DomainResource):
    """ None.

    A record of a device being used by a patient where the record is the result
    of a report from the patient or another clinician.
    """

    # FHIR resource type name used when (de)serializing this model.
    resource_name = "DeviceUseStatement"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.bodySiteCodeableConcept = None
        """ Target body site.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.bodySiteReference = None
        """ Target body site.
        Type `FHIRReference` referencing `BodySite` (represented as `dict` in JSON). """

        self.device = None
        """ None.
        Type `FHIRReference` referencing `Device` (represented as `dict` in JSON). """

        self.identifier = None
        """ None.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.indication = None
        """ None.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.notes = None
        """ None.
        List of `str` items. """

        self.recordedOn = None
        """ None.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.subject = None
        """ None.
        Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """

        self.timingDateTime = None
        """ None.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.timingPeriod = None
        """ None.
        Type `Period` (represented as `dict` in JSON). """

        self.timingTiming = None
        """ None.
        Type `Timing` (represented as `dict` in JSON). """

        self.whenUsed = None
        """ None.
        Type `Period` (represented as `dict` in JSON). """

        super(DeviceUseStatement, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout appears to be (attribute, JSON key, type, is_list,
        # one-of-many group, required) -- TODO confirm against the base class.
        js = super(DeviceUseStatement, self).elementProperties()
        js.extend([
            ("bodySiteCodeableConcept", "bodySiteCodeableConcept", codeableconcept.CodeableConcept, False, "bodySite", False),
            ("bodySiteReference", "bodySiteReference", fhirreference.FHIRReference, False, "bodySite", False),
            ("device", "device", fhirreference.FHIRReference, False, None, True),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("indication", "indication", codeableconcept.CodeableConcept, True, None, False),
            ("notes", "notes", str, True, None, False),
            ("recordedOn", "recordedOn", fhirdate.FHIRDate, False, None, False),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
            ("timingDateTime", "timingDateTime", fhirdate.FHIRDate, False, "timing", False),
            ("timingPeriod", "timingPeriod", period.Period, False, "timing", False),
            ("timingTiming", "timingTiming", timing.Timing, False, "timing", False),
            ("whenUsed", "whenUsed", period.Period, False, None, False),
        ])
        return js
from . import codeableconcept
from . import fhirdate
from . import fhirreference
from . import identifier
from . import period
from . import timing
| [
"noreply@github.com"
] | all-of-us.noreply@github.com |
316f82491c8f735a152f72a405f04e15d34d706e | 0cf054b1740339b22d3211695e44e11e68c81328 | /suggestion/analysis_util.py | d03d3e3988a20faeabe5cc43261d257a8d83659e | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | kcarnold/sentiment-slant-gi18 | 076aaf557c8e33c84349f78a883c0fa3210e9ada | 6028b42627e3eec14a1f27986f8925d8b1e6ad9c | refs/heads/master | 2022-07-01T10:20:50.314847 | 2017-09-13T23:03:14 | 2017-09-13T23:03:14 | 125,105,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,623 | py | import os
try:
import ujson as json
except ImportError:
import json
import re
import numpy as np
from suggestion.util import mem
from suggestion.paths import paths
import subprocess
#
# Data for decoding surveys.
#
# Survey (Qualtrics) export columns that carry no analysis value:
# metadata, timing, boilerplate, and internal bookkeeping columns.
skip_col_re = re.compile(
    r'Great.job|Q_\w+|nextURL|clientId|Timing.*|Browser.*|Location.*|Recipient.*|Response.+|ExternalDataReference|Finished|Status|IPAddress|StartDate|EndDate|Welcome.+|Display Order|Demographic Questions|Closing Survey.+|revisionDesc|prewrite')

# Maps verbose survey question prefixes to short column-name prefixes.
prefix_subs = {
    "How much do you agree with the following statements about the suggestions that the system gave?-They ": "suggs-",
    "How much do you agree with the following statements?-The suggestions ": "suggs-",
    "How much do you agree with the following statements about the words or phrases that the keyboard...-They ": "suggs-",
    "Now think about the brainstorming you did before the final writing. How much do you agree with th...-": "brainstorm-",
    "Think about when you were typing out your ${e://Field/revisionDesc}. How much do you agree with t...-": "final-",
    "How Accurately Can You Describe Yourself? Describe yourself as you generally are now, not as you...-": "pers-",
    "Describe yourself as you generally are now, not as you wish to be in the future. Describe yoursel...-": "pers-",
}

# Maps Likert-scale answer text to numeric codes:
# a 7-point agreement scale and a 5-point accuracy scale.
decode_scales = {
    "Strongly disagree": 1,
    "Disagree": 2,
    "Somewhat disagree": 3,
    "Neither agree nor disagree": 4,
    "Somewhat agree": 5,
    "Agree": 6,
    "Strongly agree": 7,

    "Very Inaccurate": 1,
    "Moderately Inaccurate": 2,
    "Neither Accurate Nor Inaccurate": 3,
    "Moderately Accurate": 4,
    "Very Accurate": 5}
def get_rev(logpath):
    """Return the 'rev' value from the first log entry that records one.

    Scans the JSON-lines file at `logpath` top to bottom; returns None when
    no entry carries a 'rev' key.
    """
    with open(logpath) as log_file:
        for raw_line in log_file:
            entry = json.loads(raw_line)
            if 'rev' in entry:
                return entry['rev']
    return None
def checkout_old_code(git_rev):
    """Clone this repository at `git_rev` into old-code/<git_rev> and install deps.

    Idempotent: if the checkout directory already exists, nothing is done.
    Runs `git clone`, `git checkout`, and `yarn` (for the frontend) via
    subprocess; raises CalledProcessError if any of them fails.

    Fix: removed an unused function-local `import shutil`.
    """
    by_rev = paths.parent / 'old-code'
    rev_root = by_rev / git_rev
    if not os.path.isdir(rev_root):
        print("Checking out repository at", git_rev)
        subprocess.check_call(['git', 'clone', '..', git_rev], cwd=by_rev)
        subprocess.check_call(['git', 'checkout', git_rev], cwd=rev_root)
        print("Installing npm packages")
        subprocess.check_call(['yarn'], cwd=os.path.join(rev_root, 'frontend'))
@mem.cache
def get_log_analysis_raw(logpath, logfile_size, git_rev=None, analysis_files=None):
    """Run the revision-matched frontend analyzer on a log and return raw bytes.

    `logfile_size` and `analysis_files` are never read in the body: they are
    part of the signature only so that mem.cache's key changes -- and the
    cached result is invalidated -- when the log grows or the analyzer
    sources change.
    """
    # Ignore analysis_files; just use them to know when to invalidate the cache.
    checkout_old_code(git_rev)
    analyzer_path = os.path.join(paths.parent, 'frontend', 'analysis')
    with open(logpath) as logfile:
        result = subprocess.check_output([analyzer_path], stdin=logfile)
    assert len(result) > 0
    return result
def get_log_analysis(participant, git_rev=None):
    """Analyze a participant's log file and return the parsed analysis dict.

    When `git_rev` is not supplied it is read from the log itself, so the
    analysis runs against the same code revision that produced the log.
    The returned dict is the analyzer's JSON output plus a 'git_rev' key.
    """
    # Analyzer source contents; passed to get_log_analysis_raw purely as
    # cache-busting keys (see that function's docstring).
    analysis_files = {
        name: open(paths.parent / 'frontend' / name).read()
        for name in ['analyze.js', 'analysis', 'src/Analyzer.js']
    }
    logpath = paths.parent / 'logs' / (participant+'.jsonl')
    if git_rev is None:
        git_rev = get_rev(logpath)
    logfile_size = os.path.getsize(logpath)
    result = get_log_analysis_raw(logpath, logfile_size, git_rev=git_rev, analysis_files=analysis_files)
    analyzed = json.loads(result)
    analyzed['git_rev'] = git_rev
    return analyzed
def classify_annotated_event(evt):
    """Collapse a raw annotated log event into a coarse action label.

    Returns None for event types that should be dropped from analysis,
    otherwise one of 'tapKey', 'tapBackspace', or 'tapSugg_{bos,full,part}'
    depending on where in the text the suggestion was tapped.  Any other
    event type is a logic error and trips an assertion.
    """
    event_type = evt['type']
    if event_type in {'externalAction', 'next', 'resized', 'tapText'}:
        return None
    cur_text = evt['curText']
    # Word boundary: nothing typed yet, or the last character is a space.
    at_word_boundary = (not cur_text) or cur_text.endswith(' ')
    stripped = cur_text.strip()
    # Sentence start: empty document, or the previous sentence just ended.
    at_sentence_start = (not stripped) or stripped[-1] in '.?!'
    if event_type == 'tapKey':
        return 'tapKey'
    if event_type == 'tapBackspace':
        return 'tapBackspace'
    if event_type == 'tapSuggestion':
        if at_sentence_start:
            suffix = 'bos'
        elif at_word_boundary:
            suffix = 'full'
        else:
            suffix = 'part'
        return 'tapSugg_' + suffix
    assert False, event_type
def get_content_stats_single_suggestion(sugg, word_freq_analyzer):
    """Compute language-model and sentiment statistics for one suggestion event.

    Returns a list with one dict per suggestion slot (contextual LM
    log-likelihood, mean unigram log-frequency, mean sentiment score), or
    None when the event is skipped: non-Yelp domain, mid-word suggestion,
    or a context that fails to tokenize.
    """
    from suggestion import suggestion_generator
    sugg = sugg.copy()
    meta = sugg.pop('flags')
    if not meta['domain'].startswith('yelp'):
        return
    if sugg['cur_word']:
        # Skip partial words.
        return
    model = suggestion_generator.Model.get_or_load_model(meta['domain'])
    try:
        toks = suggestion_generator.tokenize_sofar(sugg['sofar'])
    except:
        # Tokenization failed.
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # consider narrowing if the failure mode is known.
        return
    # Optimization: trim context to the n-gram level, plus some padding.
    toks = toks[-10:]
    state = model.get_state(toks)[0]
    clf_startstate = suggestion_generator.sentiment_classifier.get_state(toks)
    res = []
    for sugg_slot, rec in enumerate(sugg['recs']['predictions']):
        phrase = rec['words']
        if phrase:
            # Mean per-token sentiment posterior, projected onto the
            # classifier's sentiment weights to get a scalar score.
            sentiment_posteriors = suggestion_generator.sentiment_classifier.classify_seq_by_tok(clf_startstate, phrase)
            sentiment = np.mean(sentiment_posteriors, axis=0) @ suggestion_generator.sentiment_classifier.sentiment_weights
        else:
            sentiment = None
        # Vocabulary indices for each token; None for out-of-vocabulary words,
        # which become NaN in the unigram log-frequency mean below.
        analyzer_indices = [word_freq_analyzer.word2idx.get(tok) for tok in phrase]
        res.append(dict(
            request_id=sugg['request_id'],
            sugg_slot=sugg_slot,
            sugg_contextual_llk=model.score_seq(state, phrase)[0],
            sugg_unigram_llk=np.nanmean(np.array([word_freq_analyzer.log_freqs[idx] if idx is not None else np.nan for idx in analyzer_indices])),
            sugg_sentiment=sentiment))
    return res
| [
"kcarnold@alum.mit.edu"
] | kcarnold@alum.mit.edu |
04aa06f4bac7b7bbb2b7030e09e1f27f1ed8fde4 | 6206ad73052b5ff1b6690c225f000f9c31aa4ff7 | /Code/Reshape the Matrix.py | 360911bc17af54a593ab369ee1c177df5d68cbb7 | [] | no_license | mws19901118/Leetcode | 7f9e3694cb8f0937d82b6e1e12127ce5073f4df0 | 752ac00bea40be1e3794d80aa7b2be58c0a548f6 | refs/heads/master | 2023-09-01T10:35:52.389899 | 2023-09-01T03:37:22 | 2023-09-01T03:37:22 | 21,467,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | class Solution:
def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:
m, n = len(mat), len(mat[0]) #Get dimensions.
if m * n != r * c: #If total number of elements do not match, cannot reshape so return mat.
return mat
reshape = [[0 for _ in range(c)] for _ in range(r)] #Initialize the reshape matrix.
row, col = 0, 0 #Initialize the pointer to current row and column of pointer traversing reshape matrix.
for i in range(m): #Traverse mat.
for j in range(n):
reshape[row][col] = mat[i][j] #Set the current element of reshape to current element of mat.
row += (col + 1) // c #Update row.
col = (col + 1) % c #Update col.
return reshape
| [
"noreply@github.com"
] | mws19901118.noreply@github.com |
778b02ce1304dfc64f45b5c82dc3ad7d820143a9 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/aio/operations/_job_operation_results_operations.py | 669fb507126bbbd85b124e09a62c97d867745408 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 4,956 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._job_operation_results_operations import build_get_request
from .._vendor import MixinABC
# Generic return type and the optional per-response callback ("cls") type
# threaded through the generated operation methods below.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# Auto-generated by AutoRest (see the file header); manual edits will be
# lost on regeneration.
class JobOperationResultsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicesbackup.activestamp.aio.RecoveryServicesBackupClient`'s
        :attr:`job_operation_results` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Client plumbing is injected positionally by the service client, or
        # by keyword when constructed through the mixin machinery.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace_async
    async def get(  # pylint: disable=inconsistent-return-statements
        self,
        vault_name: str,
        resource_group_name: str,
        job_name: str,
        operation_id: str,
        **kwargs: Any
    ) -> None:
        """Fetches the result of any operation.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param job_name: Job name whose operation result has to be fetched.
        :type job_name: str
        :param operation_id: OperationID which represents the operation whose result has to be fetched.
        :type operation_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01-preview"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[None]

        request = build_get_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            job_name=job_name,
            operation_id=operation_id,
            api_version=api_version,
            template_url=self.get.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobs/{jobName}/operationResults/{operationId}"}  # type: ignore
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
aa9712bce0f3ef3690bb9a42fab492d7fae1aaf2 | 5332fef91e044555e605bb37cbef7c4afeaaadb0 | /hy-data-analysis-with-python-spring-2019-OLD/part01-e04_multiplication_table/src/multiplication_table.py | aa3628c78721753935ebdfc0a18b3e1524ce97a2 | [] | no_license | nopomi/hy-data-analysis-python-2019 | f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8 | 464685cb377cfdeee890a008fbfbd9ed6e3bcfd0 | refs/heads/master | 2021-07-10T16:16:56.592448 | 2020-08-16T18:27:38 | 2020-08-16T18:27:38 | 185,044,621 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #!/usr/bin/env python3
def main():
for i in range(1,11):
for j in range(1,11):
print(str(i*j)+'\t', end="")
print('')
if __name__ == "__main__":
main()
| [
"miska.noponen@gmail.com"
] | miska.noponen@gmail.com |
24a21563e8b0f54b4f865519a0221f033b7f345c | 2ab759b4796e36cee89df3718b7042eb527e4b26 | /args.py | 2ad011c5c0ff5f89e7e555c02e1b52b469c6fbe4 | [
"Apache-2.0"
] | permissive | zhangjunjieGit/bert-utils | b78d289effedfa64716219b771a370c410f3d066 | 7142632ea6b2e6656a2873a60971dbf7330f9550 | refs/heads/master | 2020-08-22T06:47:33.103393 | 2019-10-20T09:58:54 | 2019-10-20T09:59:05 | 216,340,794 | 1 | 0 | Apache-2.0 | 2019-10-20T10:00:53 | 2019-10-20T10:00:53 | null | UTF-8 | Python | false | false | 748 | py | import os
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

# Paths to the pretrained Chinese BERT checkpoint and derived artifacts.
file_path = os.path.dirname(__file__)
model_dir = os.path.join(file_path, 'chinese_L-12_H-768_A-12/')
config_name = os.path.join(model_dir, 'bert_config.json')
ckpt_name = os.path.join(model_dir, 'bert_model.ckpt')
output_dir = os.path.join(model_dir, '../tmp/result/')
vocab_file = os.path.join(model_dir, 'vocab.txt')
data_dir = os.path.join(model_dir, '../data/')

# Training hyperparameters.
num_train_epochs = 10
batch_size = 128
learning_rate = 0.00005

# Fraction of GPU memory the process is allowed to use.
gpu_memory_fraction = 0.8

# By default, take the second-to-last layer's output as the sentence vector.
layer_indexes = [-2]

# Maximum sequence length; for single short texts it is recommended to lower this.
max_seq_len = 5

# Name of the serialized graph file.
graph_file = 'tmp/result/graph'
"zjiuzhou@gmail.com"
] | zjiuzhou@gmail.com |
399eeab0b7aac90ad1fbae0cecd4ca49be31ec81 | 7437b9482592119dd6b4fc78706fed1c7c7df89f | /modules/neighbour_covariance_op.py | 520f8756b84359cebb69699864b5c50b00d2df88 | [
"BSD-3-Clause"
] | permissive | cms-pepr/HGCalML | 163c5f2b0ca1079003628b5a53a4aee2305fb3e9 | d28477501d93992d25c22b7d5c355a3da3bffa5c | refs/heads/master | 2023-08-31T03:33:31.294369 | 2023-05-10T15:51:59 | 2023-05-10T15:51:59 | 236,987,707 | 11 | 13 | BSD-3-Clause | 2023-08-08T12:43:50 | 2020-01-29T13:25:18 | Python | UTF-8 | Python | false | false | 1,973 | py |
import tensorflow as tf
from tensorflow.python.framework import ops
from accknn_op import AccumulateKnn
def NeighbourCovariance(coordinates, distsq, features, n_idxs):
    '''
    expands to V x F x C**2, but not in the neighbour dimension
    Feed features without activation!

    Computes, per vertex and per feature channel, the feature-weighted
    covariance of the neighbours' coordinates, plus the feature-weighted
    coordinate means.  Returns (cov: V x F x C**2, mean_C: V x F x C).

    Shape conventions (assumed; TODO confirm against AccumulateKnn):
      coordinates: V x C, distsq: V x K, features: V x F (pre-activation),
      n_idxs: V x K neighbour indices.  AccumulateKnn with
      mean_and_max=False appears to return neighbour means, hence the
      "* nKf" corrections to turn means back into sums.
    '''
    features = tf.nn.sigmoid(features) + 1e-3 #make sure they're in a good range
    nF = features.shape[1]
    nC = coordinates.shape[1]
    nKf = tf.cast(distsq.shape[1],dtype='float32')

    #calc mean of features over all neighbours (1/K factor too much)
    sum_F = AccumulateKnn(distsq, features, n_idxs, mean_and_max=False)[0] * nKf

    #not gonna work like this
    #build feature-weighted coordinates: V x 1 x C * V x F x 1
    FC = tf.expand_dims(coordinates,axis=1) * tf.expand_dims(features,axis=2)

    #reshape to V x F*C
    FC = tf.reshape(FC, [-1, nF*nC])
    #sum over neighbours (factor 1/K too much)
    sum_FC = AccumulateKnn(distsq, FC, n_idxs, mean_and_max=False)[0] * nKf
    #reshape back to V x F x C
    mean_C = tf.reshape(sum_FC, [-1, nF, nC])
    # Normalize weighted coordinate sums by the feature weights (+eps for safety).
    mean_C = tf.math.divide_no_nan(mean_C, tf.expand_dims(sum_F, axis=2)+1e-3)

    #now we have centred coordinates: V x F x C
    centered_C = tf.expand_dims(coordinates,axis=1) - mean_C

    #build covariance input: V x F x C x 1 * V x F x 1 x C
    cov = tf.expand_dims(centered_C, axis=3) * tf.expand_dims(centered_C, axis=2)

    # reshape to something useful
    cov = tf.reshape(cov, [-1, nF,nC**2])
    cov *= tf.expand_dims(features, axis=2) #add feature weights
    cov = tf.reshape(cov, [-1, nF*nC**2])

    #sum over neighbours
    cov = AccumulateKnn(distsq, cov, n_idxs, mean_and_max=False)[0] * nKf
    #reshape back
    cov = tf.reshape(cov, [-1, nF, nC**2])
    # Normalize by the summed feature weights, mirroring mean_C above.
    cov = tf.math.divide_no_nan(cov, tf.expand_dims(sum_F, axis=2)+1e-3)
    cov = tf.reshape(cov, [-1, nF, nC**2])#just for keras

    return cov, mean_C
| [
"jkiesele@cern.ch"
] | jkiesele@cern.ch |
d7552e3b106bb7b596621fa2810ed985b0175dd9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03013/s787334180.py | 28e033a57e8473172944172d8c1031ab7c555e8e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | N, M = map(int, input().split())
if N == 1:
    # Standing on the only step: exactly one way.
    print(1)
else:
    MOD = 1000000007
    # ways[i]: number of ways to reach step i using moves of 1 or 2 steps
    # without landing on a broken step; -1 marks "not yet computed".
    # Fix: reduce modulo MOD at every step so intermediates stay bounded
    # instead of growing into huge Python bignums.  The final printed value
    # is unchanged, and the `== 0` broken-step sentinel is only tested
    # *before* a step's value is assigned, so a computed value that happens
    # to be 0 mod MOD can never be mistaken for a broken step.
    ways = [-1] * N
    ways[0] = 1
    ways[1] = 1
    for _ in range(M):
        broken = int(input())
        ways[broken] = 0
    for step in range(2, N):
        if ways[step] == 0:
            continue  # broken step: stays unreachable
        ways[step] = (ways[step - 1] + ways[step - 2]) % MOD
    print((ways[N - 1] + ways[N - 2]) % MOD)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ab08ca6833912ff2321dd2234faded534afab1ab | c13ccf912360f02010f3185dc29f3e72205984dd | /Hauz/migrations/0015_auto_20180119_0824.py | a94915c8159be33de7285c6226946eca01f3f871 | [
"MIT"
] | permissive | VirginiaNdungu1/HauzBox | e3dd238b8ef302e69dd4cefa2036a50500b6f3bc | c586d221a903f2be681b895eec20dd01664ce141 | refs/heads/master | 2021-05-15T05:41:37.189912 | 2018-01-31T14:45:43 | 2018-01-31T14:46:18 | 116,761,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.0.1 on 2018-01-19 05:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes House.tenant_id optional
    # (blank/null allowed) with reverse accessor 'house_tenant'.

    dependencies = [
        ('Hauz', '0014_auto_20180119_0821'),
    ]

    operations = [
        migrations.AlterField(
            model_name='house',
            name='tenant_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='house_tenant', to='Hauz.Tenant'),
        ),
    ]
| [
"ndungu.wairimu22@gmail.com"
] | ndungu.wairimu22@gmail.com |
79dcbfe3d5544e48a61ca3c29afc26988d642d42 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/chocolatey/chocolatey/plugins/modules/win_chocolatey_source.py | cae7637ea3d821e51ef6c10dfcf1d8769d184a97 | [
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 3,734 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2020, Chocolatey Software
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_chocolatey_source
version_added: '2.7'
short_description: Manages Chocolatey sources
description:
- Used to managed Chocolatey sources configured on the client.
- Requires Chocolatey to be already installed on the remote host.
options:
admin_only:
description:
- Makes the source visible to Administrators only.
- Requires Chocolatey >= 0.10.8.
- When creating a new source, this defaults to C(no).
type: bool
allow_self_service:
description:
- Allow the source to be used with self-service
- Requires Chocolatey >= 0.10.4.
- When creating a new source, this defaults to C(no).
type: bool
bypass_proxy:
description:
- Bypass the proxy when using this source.
- Requires Chocolatey >= 0.10.4.
- When creating a new source, this defaults to C(no).
type: bool
certificate:
description:
- The path to a .pfx file to use for X509 authenticated feeds.
- Requires Chocolatey >= 0.9.10.
type: str
certificate_password:
description:
- The password for I(certificate) if required.
- Requires Chocolatey >= 0.9.10.
name:
description:
- The name of the source to configure.
required: yes
priority:
description:
- The priority order of this source compared to other sources, lower is
better.
- All priorities above C(0) will be evaluated first, then zero-based values
will be evaluated in config file order.
- Requires Chocolatey >= 0.9.9.9.
- When creating a new source, this defaults to C(0).
type: int
source:
description:
- The file/folder/url of the source.
- Required when I(state) is C(present) or C(disabled) and the source does
not already exist.
source_username:
description:
- The username used to access I(source).
source_password:
description:
- The password for I(source_username).
- Required if I(source_username) is set.
state:
description:
- When C(absent), will remove the source.
- When C(disabled), will ensure the source exists but is disabled.
- When C(present), will ensure the source exists and is enabled.
choices:
- absent
- disabled
- present
default: present
update_password:
description:
- When C(always), the module will always set the password and report a
change if I(certificate_password) or I(source_password) is set.
- When C(on_create), the module will only set the password if the source
is being created.
choices:
- always
- on_create
default: always
seealso:
- module: win_chocolatey
- module: win_chocolatey_config
- module: win_chocolatey_facts
- module: win_chocolatey_feature
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Remove the default public source
win_chocolatey_source:
name: chocolatey
state: absent
- name: Add new internal source
win_chocolatey_source:
name: internal repo
state: present
source: http://chocolatey-server/chocolatey
- name: Create HTTP source with credentials
win_chocolatey_source:
name: internal repo
state: present
source: https://chocolatey-server/chocolatey
source_username: username
source_password: password
- name: Disable Chocolatey source
win_chocolatey_source:
name: chocolatey
state: disabled
'''
RETURN = r'''
'''
| [
"sifang@cisco.com"
] | sifang@cisco.com |
a9daa25aa586b9c519f86b630fdd29e6a115d159 | c81ea73e93df307d35191ab184a85d6c67c57112 | /dockers/rotnet/prepare_nets.py | 63510dd367617d93fe4ec8309755cc686e82c50c | [] | no_license | BlenderCN-Org/diplomka | 8d0503fc5902dfede8317aed84f5a17f691f687f | 575fe3f2436b9c511496c1dc019d9cc3423ba5f0 | refs/heads/master | 2020-05-22T15:42:00.143738 | 2019-05-07T07:37:46 | 2019-05-07T07:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | import os
def replace(file, what, for_what):
# Read in the file
with open(file, 'r') as f:
filedata = f.read()
# Replace the target string
filedata = filedata.replace(what, for_what)
#print(filedata)
# Write the file out again
with open(file, 'w') as f:
f.write(filedata)
def set_num_cats(file, num_cats, views):
replace(file, '$NUMCATS', str(num_cats+1))
replace(file, "$INNER", str((num_cats+1) * views))
def set_batch_size(file, batch_size):
replace(file, "$BATCHSIZE", str(batch_size))
| [
"miroslavkrabec@seznam.cz"
] | miroslavkrabec@seznam.cz |
cc9e9573db428adfe20c7a930d036f9c4a3eb3ba | 29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68 | /examples/pytorch_lightning_distributed/dcl.py | ecf6e7bcd014282101b6c04ec204b23362d2d964 | [
"MIT"
] | permissive | lightly-ai/lightly | 5b655fe283b7cc2ddf1d7f5bd098603fc1cce627 | 5650ee8d4057139acf8aa10c884d5d5cdc2ccb17 | refs/heads/master | 2023-08-17T11:08:00.135920 | 2023-08-16T12:43:02 | 2023-08-16T12:43:02 | 303,705,119 | 2,473 | 229 | MIT | 2023-09-14T14:47:16 | 2020-10-13T13:02:56 | Python | UTF-8 | Python | false | false | 2,288 | py | # Note: The model and training settings do not follow the reference settings
# from the paper. The settings are chosen such that the example can easily be
# run on a small dataset with a single GPU.
import pytorch_lightning as pl
import torch
import torchvision
from torch import nn
from lightly.loss import DCLLoss
from lightly.models.modules import SimCLRProjectionHead
from lightly.transforms.simclr_transform import SimCLRTransform
class DCL(pl.LightningModule):
def __init__(self):
super().__init__()
resnet = torchvision.models.resnet18()
self.backbone = nn.Sequential(*list(resnet.children())[:-1])
self.projection_head = SimCLRProjectionHead(512, 2048, 2048)
# enable gather_distributed to gather features from all gpus
# before calculating the loss
self.criterion = DCLLoss(gather_distributed=True)
# or use the weighted DCLW loss:
# self.criterion = DCLWLoss(gather_distributed=True)
def forward(self, x):
x = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(x)
return z
def training_step(self, batch, batch_index):
(x0, x1) = batch[0]
z0 = self.forward(x0)
z1 = self.forward(x1)
loss = self.criterion(z0, z1)
return loss
def configure_optimizers(self):
optim = torch.optim.SGD(self.parameters(), lr=0.06)
return optim
model = DCL()
transform = SimCLRTransform(input_size=32)
dataset = torchvision.datasets.CIFAR10(
"datasets/cifar10", download=True, transform=transform
)
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder", transform=transform)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=256,
shuffle=True,
drop_last=True,
num_workers=8,
)
# Train with DDP and use Synchronized Batch Norm for a more accurate batch norm
# calculation. Distributed sampling is also enabled with replace_sampler_ddp=True.
trainer = pl.Trainer(
max_epochs=10,
devices="auto",
accelerator="gpu",
strategy="ddp",
sync_batchnorm=True,
use_distributed_sampler=True, # or replace_sampler_ddp=True for PyTorch Lightning <2.0
)
trainer.fit(model=model, train_dataloaders=dataloader)
| [
"noreply@github.com"
] | lightly-ai.noreply@github.com |
0ccaa945ee61fe6e02bfa5331ec1bacd14f0ee07 | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/sonukumarsaw/fork-of-titanic-survivors-dataset/fork-of-titanic-survivors-dataset.py | e15a01032f6d55138434ecd5ca6cd6a3f757da58 | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,475 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[ ]:
train = pd.read_csv('../input/train.csv')
gender_sub = pd.read_csv('../input/gender_submission.csv')
test = pd.read_csv('../input/test.csv')
# **Training Dataset**
# In[ ]:
#showing the sample of train data
train.head()
# In[ ]:
# describe the train dataset
train.describe()
# In[ ]:
#checking for null values in data
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# We can see here in the heat map Age and Cabin columns have lots of null data.
# What we can do here is either drop the column or fill the null values with average age.
# We cant fill cabin values becouse there isn't any relation between cabin and other columns so we will drop it from the table.
# In[ ]:
# Count of survived and those who don't
sns.set_style('whitegrid')
sns.countplot(x='Survived',data=train,palette='RdBu_r')
# In[ ]:
# Those who survived (male /female)
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
# In[ ]:
# survived on basis of class
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Pclass',data=train,palette='rainbow')
# In[ ]:
# column has so much null values
train=train.drop('Cabin',axis=1)
# In[ ]:
train.head()
# In[ ]:
sns.countplot(x='SibSp',data=train)
# Below Graph shows the relation between the age of passanger and there class
# In[ ]:
# Average age and passanger class
plt.figure(figsize=(16, 10))
sns.boxplot(x='Pclass',y='Age',data=train,palette='winter')
# Above graph shows that Passangers having class 1 have average age of 37 similarly class 2 average age is 29 and class 3 have age of 24 years.
# In[ ]:
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age
train['Age']=train[['Age','Pclass']].apply(impute_age,axis=1)
# In[ ]:
train.head()
# We Just filled all the null values with the average age of passangers.
# In[ ]:
sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)
# In[ ]:
train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
# In[ ]:
train = pd.concat([train,sex,embark],axis=1)
# Regression model cant predict on strings therefore we converted the string here to binaries
# In[ ]:
train.head()
# In[ ]:
plt.figure(figsize=(16, 10))
# this graph is showing that there is no null value in dataset
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# Now dataset is ready for fitting in algorithm
#
# **Testing dataset**
#
# In[ ]:
sns.heatmap(test.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# Here, in testing set also we have null values.
# What we have done with training set we will repeat the same with testing set.
# In[ ]:
# droping the cabin
test = test.drop('Cabin',axis=1)
#here axis 1 specifies that we are searching for columns if it is 0 then rows.
# In[ ]:
test.head()
# * Now we have to convert Sex and Embarked columns from string to binaries.
# * Fill the age with average values
# In[ ]:
sex = pd.get_dummies(test['Sex'],drop_first=True)
embark = pd.get_dummies(test['Embarked'],drop_first=True)
test.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
test= pd.concat([test,sex,embark],axis=1)
# In[ ]:
test.head()
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot(x='Pclass',y='Age',data=test,palette='winter')
# We can see here that there is slight deference in average age between training set and testing dataset. We will now impute age on the basis of this new graph.
# In[ ]:
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 42
elif Pclass == 2:
return 28
else:
return 24
else:
return Age
test['Age']=test[['Age','Pclass']].apply(impute_age,axis=1)
# In[ ]:
test.head()
# In[ ]:
plt.figure(figsize=(16, 10))
sns.heatmap(test.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# Fare column is having a null value. Better we should fill it with average value rather than droping it.
#
# For this we have to check is there is any relation between pclass.
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot(x='Pclass',y='Fare',data=test,palette='winter')
plt.ylim(0,100)
# In[ ]:
def impute_fare(cols):
Fare = cols[0]
Pclass = cols[1]
if pd.isnull(Fare):
if Pclass == 1:
return 60
elif Pclass == 2:
return 16
else:
return 10
else:
return Fare
test['Fare']=test[['Fare','Pclass']].apply(impute_fare,axis=1)
# Now our test set is also ready for fitting in algorithm.
# **MACHINE LEARNING**
# In[ ]:
X_train=train.drop('Survived',axis=1)
X_train.head()
# In[ ]:
y_train=train['Survived']
y_train.head()
# In[ ]:
y_test=gender_sub['Survived']
# In[ ]:
from sklearn.linear_model import LogisticRegression
# In[ ]:
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
# In[ ]:
X_test=test
# In[ ]:
predictions = logmodel.predict(X_test)
# In[ ]:
from sklearn.metrics import classification_report
# In[ ]:
print(classification_report(y_test,predictions))
# In[ ]:
from sklearn.metrics import jaccard_similarity_score
jaccard_similarity_score(y_test, predictions)
# In[ ]:
passid=np.array(list(range(892,1310)))
df = pd.DataFrame({'PassengerId':passid,'Survived':predictions})
df.to_csv('submission.csv',index=False)
# In[ ]:
| [
"bitsorific@gmail.com"
] | bitsorific@gmail.com |
bc7efdd131ecce9958da72f2a08feb76ecb8da2f | b24c7086c8174023177f67a88980cb2b4a92522d | /src/robots/naoqi/res.py | 22647370c74cb65b99651d261179ecfc88cc6c6b | [
"ISC"
] | permissive | chili-epfl/pyrobots-nao | f67da21112dcb3cb33b5c5336a4d0c1abb090673 | 981addf10beda75466dc3e0a7a4be223b39c260c | refs/heads/master | 2021-01-20T11:09:57.440700 | 2015-02-28T11:42:54 | 2015-02-28T11:42:54 | 29,869,379 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from robots.resources import Resource, CompoundResource
# hardware resource that need to be shared
LEYE = Resource("left eye")
REYE = Resource("right eye")
EYES = CompoundResource(LEYE, REYE, name = "eyes")
AUDIO = Resource("audio")
HEAD = Resource("head")
| [
"severin.lemaignan@epfl.ch"
] | severin.lemaignan@epfl.ch |
f2056e40108d671f9b0355deab9f4e12a257eebc | 6564b596ec27e67ee1b48377da1e7cee59cdcfe9 | /shenfun/optimization/__init__.py | d1a6e8057a3b55601f99b532c29a35d270f271d9 | [
"BSD-2-Clause"
] | permissive | GeraintPratten/shenfun | 077b13d904fd6bf6880c412f74300d78494bee11 | d92eb058c9969175da19b23926fb80148cf92ace | refs/heads/master | 2023-07-04T13:46:27.969149 | 2021-08-10T11:48:32 | 2021-08-10T11:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | """Module for optimized functions
Some methods performed in Python may be slowing down solvers. In this optimization
module we place optimized functions that are to be used instead of default
Python methods. Some methods are implemented solely in Cython and only called
from within the regular Python modules.
"""
import os
import importlib
from functools import wraps
from . import cython
try:
from . import numba
except ModuleNotFoundError:
numba = None
def optimizer(func):
"""Decorator used to wrap calls to optimized versions of functions."""
mod = os.environ.get('SHENFUN_OPTIMIZATION', 'cython')
if mod.lower() not in ('cython', 'numba'):
# Use python function
#print(func.__name__ + ' not optimized')
return func
mod = importlib.import_module('shenfun.optimization.'+mod.lower())
fun = getattr(mod, func.__name__, func)
#if fun is func:
# print(fun.__name__ + ' not optimized')
@wraps(func)
def wrapped_function(*args, **kwargs):
u0 = fun(*args, **kwargs)
return u0
return wrapped_function
| [
"mikaem@math.uio.no"
] | mikaem@math.uio.no |
b2c253c3b2b826b2d4c91a55667970a087f6c604 | 1786dad5941d4b50561e04104d11d1412433d1f3 | /core/admin.py | 10d5f471b467447c2538cf468cbb6db779b18fd1 | [] | no_license | daryabsb/imdb | 5dc4704cb589d97815d98cfa97866a50055ab690 | 649f9dcc673a3b56c28329af15d1d1bae5f3c370 | refs/heads/master | 2022-12-25T06:59:26.738297 | 2020-09-19T13:55:05 | 2020-09-19T13:55:05 | 290,747,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.contrib import admin
from .models import Movie, MovieLink
# Register your models here.
admin.site.register(Movie)
admin.site.register(MovieLink)
| [
"daryabsb@gmail.com"
] | daryabsb@gmail.com |
22c3b4bba12d8a2089c01805105ab6950a3845eb | 23db23583a49dd42002f3815bcfb26249096cb99 | /input/channelConfig_dielectron_Legacy2018_EBEE.py | b14170031da10b7294d93389ddde0313092551aa | [] | no_license | JanFSchulte/BiasTests | 34560adb1bc747aa3b594dd2c1014dee6093a25e | ff9ad12a2391fe0f409bac2945d21576f4a0cb3d | refs/heads/master | 2021-04-22T01:00:35.124191 | 2020-03-24T23:23:07 | 2020-03-24T23:23:07 | 249,836,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,672 | py | import ROOT,sys
ROOT.gROOT.SetBatch(True)
ROOT.gErrorIgnoreLevel = 1
from ROOT import *
from math import sqrt
from resolution_cfg_2018 import DCB_para
nBkg = -1
dataFile = "input/eventList_ele_2018_BE.txt"
def addBkgUncertPrior(ws,label,channel,uncert):
beta_bkg = RooRealVar('beta_%s_%s'%(label,channel),'beta_%s_%s'%(label,channel),0,-5,5)
getattr(ws,'import')(beta_bkg,ROOT.RooCmdArg())
uncert = 1. + uncert
bkg_kappa = RooRealVar('%s_%s_kappa'%(label,channel),'%s_%s_kappa'%(label,channel),uncert)
bkg_kappa.setConstant()
getattr(ws,'import')(bkg_kappa,ROOT.RooCmdArg())
ws.factory("PowFunc::%s_%s_nuis(%s_%s_kappa, beta_%s_%s)"%(label,channel,label,channel,label,channel))
ws.factory("prod::%s_%s_forUse(%s_%s, %s_%s_nuis)"%(label,channel,label,channel,label,channel))
def provideSignalScaling(mass,spin2=False):
nz = 3401386
nsig_scale = 1./0.031377
eff = signalEff(mass,spin2)
result = (nsig_scale*nz*eff)
return result
def signalEff(mass,spin2=False):
eff_a = 0.01461
eff_b = 479.6
eff_c = 635.3
eff_d = -1.071e+05
eff_e = 8.345e+04
eff_f = 1.302e+07
eff_g = 2.337e+07
if spin2:
eff_a = 0.06212
eff_b = -7.192
eff_c = 56.72
eff_d = -43.69
eff_e = 822.9
eff_f = 3.579e08
eff_g = 3.048e09
return (eff_a+eff_b/(mass+eff_c)+eff_d/(mass*mass+eff_e))+eff_f/(mass**3+eff_g)
def provideUncertainties(mass):
result = {}
result["sigEff"] = [1.08] # must be list in case the uncertainty is asymmetric
result["massScale"] = 0.01
result ["bkgUncert"] = 1.4
result ["res"] = 0.0
result ["reco"] = [0.0]
result["bkgParams"] = {"bkg_a":0.00313740766932294824, "bkg_b":0.01135596583199909373, "bkg_c":0.54125714622824727673, "bkg_d":0.00000000000000000000, "bkg_e":0.00194931370350556223, "bkg_b2":0.01948124695032613443, "bkg_c2":0.28782235398250377578, "bkg_d2":0.41138999196844272532, "bkg_thr":0.01928153410885654132}
return result
def provideCorrelations():
result = {}
''' Combine correlates uncertainties that have the same name. So wa hve to adjust the names to achieve what we want.
1) put the full channel name. That will make it uncorrelated with all other channels
2) keep the channel name but remove the last bit: will correlate between the two subcategories within a year
3) Just keep the dimuon or dielectron name, so we correlate between the years
4) To correlate some specific combination of uncertainties, come up with a name and add it to all releavent channel configs
'''
#result['sigEff'] = 'dielectron'
#result['massScale'] = 'dielectron'
#result['bkgUncert'] = 'dielectron_Legacy2018_EBEE'
#result['res'] = 'dielectron'
#result['bkgParams'] = 'dielectron_Legacy2018_EBEE'
result['sigEff'] = 'dielectron'
result['massScale'] = 'dielectron_Legacy2018_EBEE'
result['bkgUncert'] = 'dielectron_Legacy2018_EBEE'
result['res'] = 'dielectron_Legacy2018_EBEE'
result['reco'] = 'dielectron_Legacy2018_EBEE'
result['bkgParams'] = 'dielectron_Legacy2018_EBEE'
return result
def getResolution(mass):
CBObject = DCB_para("dcb")
CBObject.get_value(mass,False)
result = {}
result["res"] = CBObject.sigma
result["scale"] = CBObject.mean
result["nR"] = CBObject.PowerR
result["nL"] = CBObject.PowerL
result["alphaL"] = CBObject.CutL
result["alphaR"] = CBObject.CutR
if result["nR"] < 0:
result["nR"] = 0.
return result
def loadBackgroundShape(ws,useShapeUncert=False):
bkg_a = RooRealVar('bkg_a_dielectron_Legacy2018_EBEE','bkg_a_dielectron_Legacy2018_EBEE',11.76585112)
bkg_b = RooRealVar('bkg_b_dielectron_Legacy2018_EBEE','bkg_b_dielectron_Legacy2018_EBEE',-0.003566666494)
bkg_c = RooRealVar('bkg_c_dielectron_Legacy2018_EBEE','bkg_c_dielectron_Legacy2018_EBEE',-2.513733207e-07)
bkg_d = RooRealVar('bkg_d_dielectron_Legacy2018_EBEE','bkg_d_dielectron_Legacy2018_EBEE',0.0)
bkg_e = RooRealVar('bkg_e_dielectron_Legacy2018_EBEE','bkg_e_dielectron_Legacy2018_EBEE',-2.860692377)
bkg_b2 = RooRealVar('bkg_b2_dielectron_Legacy2018_EBEE','bkg_b2_dielectron_Legacy2018_EBEE',-0.00159101029)
bkg_c2 = RooRealVar('bkg_c2_dielectron_Legacy2018_EBEE','bkg_c2_dielectron_Legacy2018_EBEE',-2.610407295e-08)
bkg_d2 = RooRealVar('bkg_d2_dielectron_Legacy2018_EBEE','bkg_d2_dielectron_Legacy2018_EBEE',2.822681727e-12)
bkg_thr = RooRealVar('bkg_thr_dielectron_Legacy2018_EBEE','bkg_thr_dielectron_Legacy2018_EBEE',537.7173207)
bkg_a.setConstant()
bkg_b.setConstant()
bkg_c.setConstant()
bkg_d.setConstant()
bkg_e.setConstant()
getattr(ws,'import')(bkg_a,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_b,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_c,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_d,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_e,ROOT.RooCmdArg())
bkg_b2.setConstant()
bkg_c2.setConstant()
bkg_d2.setConstant()
bkg_thr.setConstant()
getattr(ws,'import')(bkg_b2,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_c2,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_d2,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_thr,ROOT.RooCmdArg())
# background systematics
bkg_syst_a = RooRealVar('bkg_syst_a_dielectron_Legacy2018_EBEE','bkg_syst_a_dielectron_Legacy2018_EBEE',1.0)
bkg_syst_b = RooRealVar('bkg_syst_b_dielectron_Legacy2018_EBEE','bkg_syst_b_dielectron_Legacy2018_EBEE',0.0)
bkg_syst_a.setConstant()
bkg_syst_b.setConstant()
getattr(ws,'import')(bkg_syst_a,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_syst_b,ROOT.RooCmdArg())
# background shape
if useShapeUncert:
bkgParamsUncert = provideUncertainties(1000)["bkgParams"]
for uncert in bkgParamsUncert:
addBkgUncertPrior(ws,uncert,"dielectron_Legacy2018_EBEE",bkgParamsUncert[uncert] )
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_dielectron_Legacy2018_EBEE(mass_dielectron_Legacy2018_EBEE, bkg_a_dielectron_Legacy2018_EBEE_forUse, bkg_b_dielectron_Legacy2018_EBEE_forUse, bkg_c_dielectron_Legacy2018_EBEE_forUse,bkg_d_dielectron_Legacy2018_EBEE_forUse,bkg_e_dielectron_Legacy2018_EBEE_forUse, bkg_b2_dielectron_Legacy2018_EBEE_forUse, bkg_c2_dielectron_Legacy2018_EBEE_forUse,bkg_d2_dielectron_Legacy2018_EBEE_forUse,bkg_thr_dielectron_Legacy2018_EBEE_forUse,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_fullRange(massFullRange, bkg_a_dielectron_Legacy2018_EBEE_forUse, bkg_b_dielectron_Legacy2018_EBEE_forUse, bkg_c_dielectron_Legacy2018_EBEE_forUse,bkg_d_dielectron_Legacy2018_EBEE_forUse,bkg_e_dielectron_Legacy2018_EBEE_forUse, bkg_b2_dielectron_Legacy2018_EBEE_forUse, bkg_c2_dielectron_Legacy2018_EBEE_forUse,bkg_d2_dielectron_Legacy2018_EBEE_forUse,bkg_thr_dielectron_Legacy2018_EBEE_forUse,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
else:
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_dielectron_Legacy2018_EBEE(mass_dielectron_Legacy2018_EBEE, bkg_a_dielectron_Legacy2018_EBEE, bkg_b_dielectron_Legacy2018_EBEE, bkg_c_dielectron_Legacy2018_EBEE,bkg_d_dielectron_Legacy2018_EBEE,bkg_e_dielectron_Legacy2018_EBEE, bkg_b2_dielectron_Legacy2018_EBEE, bkg_c2_dielectron_Legacy2018_EBEE,bkg_d2_dielectron_Legacy2018_EBEE,bkg_thr_dielectron_Legacy2018_EBEE,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_fullRange(massFullRange, bkg_a_dielectron_Legacy2018_EBEE, bkg_b_dielectron_Legacy2018_EBEE, bkg_c_dielectron_Legacy2018_EBEE,bkg_d_dielectron_Legacy2018_EBEE,bkg_e_dielectron_Legacy2018_EBEE, bkg_b2_dielectron_Legacy2018_EBEE, bkg_c2_dielectron_Legacy2018_EBEE,bkg_d2_dielectron_Legacy2018_EBEE,bkg_thr_dielectron_Legacy2018_EBEE,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
return ws
| [
"jschulte@cern.ch"
] | jschulte@cern.ch |
33a273361603bc9162e66b2ca04a3dc441178775 | 8ef477149fdd8cd8c0ad88f160e2b8a445550a1e | /base_ecommerce_v13/models/stock.py | 86a7f7c483c94f799c99fe122bff69e115aeeda4 | [] | no_license | cokotracy/ebay_connector_v13 | 97f3e23ba951f14457514b71b408b389a7c16dc7 | 3c08603875d464e5bee818091fa704f5f7192499 | refs/heads/master | 2022-07-19T21:15:23.065880 | 2020-05-20T15:20:23 | 2020-05-20T15:20:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from odoo import api, fields, models, _
import odoo.netsvc
class stock_picking(models.Model):
_inherit = "stock.picking"
shop_id = fields.Many2one('sale.shop', string='Shop') | [
"https://Anjeel@bitbucket.org"
] | https://Anjeel@bitbucket.org |
4889cedcd8425ab04971c8f8ff25cf9734da01c9 | e5e8553fe434f399d24c1a8f981d5d258574e4af | /universal_landmark_detection/model/networks/globalNet.py | 0b2bab7140532e0dbb277efecde6822b98a6b8f8 | [
"MIT"
] | permissive | egozoro/YOLO_Universal_Anatomical_Landmark_Detection | 4dee00cf4ee140fa373b2dafeea8c95b69ff66c7 | 465a3d6afcdb23fdec609efe336beebdc9ed61f4 | refs/heads/main | 2023-08-24T02:04:40.543561 | 2021-10-07T13:58:16 | 2021-10-07T13:58:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,472 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class myConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1):
super(myConv2d, self).__init__()
padding = (kernel_size-1)//2
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, padding=padding)
def forward(self, x):
return self.conv(x)
class dilatedConv(nn.Module):
''' stride == 1 '''
def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):
super(dilatedConv, self).__init__()
# f = (kernel_size-1) * d +1
# new_width = (width - f + 2 * padding)/stride + stride
padding = (kernel_size-1) * dilation // 2
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size, dilation=dilation, padding=padding)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.bn(self.conv(x)))
class GlobalNet(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=0.25, kernel_size=3, dilations=None):
super(GlobalNet, self).__init__()
self.scale_factor = scale_factor
if not isinstance(in_channels, list):
in_channels = [in_channels]
if not isinstance(out_channels, list):
out_channels = [out_channels]
mid_channels = 128
if dilations is None:
dilations = [1, 2, 5]
for i, n_chan in enumerate(in_channels):
setattr(self, 'in{i}'.format(i=i),
myConv2d(n_chan, mid_channels, 3))
for i, n_chan in enumerate(out_channels):
setattr(self, 'out{i}'.format(i=i),
myConv2d(mid_channels, n_chan, 1))
convs = [dilatedConv(mid_channels, mid_channels,
kernel_size, dilation) for dilation in dilations]
convs = nn.Sequential(*convs)
setattr(self, 'convs{}'.format(i), convs)
def forward(self, x, task_idx=0):
size = x.size()[2:]
sf = self.scale_factor
x = F.interpolate(x, scale_factor=sf)
x = getattr(self, 'in{}'.format(task_idx))(x)
x = getattr(self, 'convs{}'.format(task_idx))(x)
x = getattr(self, 'out{}'.format(task_idx))(x)
x = F.interpolate(x, size=size)
return {'output': torch.sigmoid(x)}
| [
"zhuheqin1@gmail.com"
] | zhuheqin1@gmail.com |
847b6985e225a008a43c6455650f7843ffa2f1d6 | 99697559d046cdd04dd9068bd518e4da4177aaa2 | /Finish/M019_Remove_Nth_Node_From_End_of_List.py | d1a7c1a10eb90195f94c135192f33c5f6de79380 | [] | no_license | Azurisky/Leetcode | 3e3621ef15f2774cfdfac8c3018e2e4701760c3b | 8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04 | refs/heads/master | 2020-03-18T22:46:35.780864 | 2018-10-07T05:45:30 | 2018-10-07T05:45:30 | 135,364,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
first = head
ans = second = ListNode(0)
second.next = head
for i in range(n):
first = first.next
while first:
first = first.next
second = second.next
second.next = second.next.next
return ans.next
| [
"andrew0704us@gmail.com"
] | andrew0704us@gmail.com |
f8b639c29269c2aae5cac88c606ca55cac9e1ec6 | 1d871064c463d4e55c6eec8e479dd3a594e07593 | /tests/sequence_classification_tests/dataset_test.py | d892a00ba89dddefe0212dea0065e6d00cbed7d5 | [
"Apache-2.0"
] | permissive | little-bigtiger/transformers-keras | 76ad5f67e71a1286971735208a13b2b235afc281 | d8712a21e0a34a3f26d1e48459d7505c96931a5d | refs/heads/master | 2023-08-17T04:19:05.080257 | 2021-09-29T03:08:42 | 2021-09-29T03:08:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | import unittest
from transformers_keras.sequence_classification.dataset import SequenceClassificationDataset
class DatasetTest(unittest.TestCase):
"""Dataset test."""
def test_sequence_classification_dataset_examples(self):
print()
print("====from_jsonl_files")
d = SequenceClassificationDataset.from_jsonl_files(
"testdata/sequence_classify.jsonl", vocab_file="testdata/vocab.bert.txt", batch_size=2
)
print(next(iter(d)))
print("====jsonl_to_examples")
examples = SequenceClassificationDataset.jsonl_to_examples(
"testdata/sequence_classify.jsonl", vocab_file="testdata/vocab.bert.txt"
)
for i in range(2):
print(examples[i])
print("====from_examples")
d = SequenceClassificationDataset.from_examples(examples, batch_size=2)
print(next(iter(d)))
print("====examples_to_tfrecord")
SequenceClassificationDataset.examples_to_tfrecord(examples, output_files=["testdata/sequence_classify.tfrecord"])
print("====from_tfrecord_files")
d = SequenceClassificationDataset.from_tfrecord_files("testdata/sequence_classify.tfrecord", batch_size=2)
print(next(iter(d)))
if __name__ == "__main__":
unittest.main()
| [
"zhouyang.luo@gmail.com"
] | zhouyang.luo@gmail.com |
1c29f61d1b995d9e89dc5f3a52a350d1a6308b32 | 3ab7695bfc34355ba579bc43b9fea396933514dc | /dbaas_cloudstack/util/models.py | 79546d1939ff00c64b02437d9629be07d551d26d | [] | no_license | globocom/dbaas-cloudstack | d59ee6147235c5933eb5fa36a3047c61a9de9e5a | 8445dde83c231a6af932ef179821c3e0b62485ff | refs/heads/master | 2023-01-05T00:18:36.304237 | 2018-04-18T20:51:30 | 2018-04-18T20:51:30 | 18,342,699 | 8 | 1 | null | 2022-12-26T19:43:38 | 2014-04-01T20:02:04 | Python | UTF-8 | Python | false | false | 769 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
class BaseModel(models.Model):
"""Base model class"""
created_at = models.DateTimeField(verbose_name=_("created_at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated_at"), auto_now=True)
class Meta:
abstract = True
def __unicode__(self):
if hasattr(self, 'name'):
result = "%s" % self.name
if hasattr(self, 'region'):
result = result + " - %s" % self.region
return result
elif hasattr(self, '__unicode__'):
result = self.__unicode__()
return result
| [
"raposo.felippe@gmail.com"
] | raposo.felippe@gmail.com |
113a7bac74c95751ac2210ca63365a1b2e1fd96e | 42b61bf376b172a36759e6c3264562e585630d47 | /ascent/wsgi.py | e0b9ba7dc44cd2c055733c9d3a2cd7ec618e6449 | [] | no_license | destinymalone/ascent | 9ad4d9e1a0db017c6ff8d4820fb92e46b0f282e6 | 526227115ce6703f66f4c39a7bb2e12153427757 | refs/heads/master | 2020-09-23T05:03:44.071148 | 2020-01-27T14:54:59 | 2020-01-27T14:54:59 | 225,411,341 | 1 | 0 | null | 2019-12-05T18:50:43 | 2019-12-02T15:44:27 | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for ascent project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ascent.settings")
application = get_wsgi_application()
| [
"natec425@gmail.com"
] | natec425@gmail.com |
13284b6e642981ad9bab7c9f4b5bd1a8b32ea645 | 88553181929aa251bc5ae02d81f17970249bca40 | /django/angular/aenv/Scripts/painter.py | f7b0e2584de0e394bb34f5a8be649eb0ce7ac5de | [] | no_license | reddymadhira111/Python | 2d9c3e5dba8238df6be9a67d422468ac1ca16d35 | 0f1a0c87748e67a879cd8c31eda7b65c69c5d648 | refs/heads/master | 2022-11-05T07:23:30.211706 | 2017-06-13T19:11:25 | 2017-06-13T19:11:25 | 94,243,400 | 0 | 1 | null | 2022-10-29T10:56:29 | 2017-06-13T18:13:59 | Python | UTF-8 | Python | false | false | 2,205 | py | #!c:\users\reddy\desktop\python\django\angular\aenv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# painter widget
class PaintCanvas(tkinter.Canvas):
def __init__(self, master, image):
tkinter.Canvas.__init__(self, master,
width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=tkinter.NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| [
"reddymadhira111gmail.com"
] | reddymadhira111gmail.com |
62af4afbd3bef2ec5d05634ac926d9d41c739692 | 4a9dada02c749e9e5277fe1e35357d7b2b28ad5c | /顾天媛2018010980/操作系统实验/作业3.py | 7e4ba1e0148c936d956319f37d9a32e6f1f3efc9 | [] | no_license | wanghan79/2020_Option_System | 631cc80f52829390a128a86677de527472470348 | f37b870614edf7d85320da197d932df2f25a5720 | refs/heads/master | 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 | Python | UTF-8 | Python | false | false | 1,037 | py | # !/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
Author: Ty.Gu
Purpose: multiprocessing
Created: 26/6/2020
"""
# 作业3. 采用Python语言创建多进程;提示:采用Python内置工具包multiprocessing
from multiprocessing import Pool
import time, os
from random import random
def task(task_name):
    """Announce the task, simulate work with a random sleep, then report
    the elapsed time and the worker process id."""
    print('开始做任务啦!', task_name)
    started = time.time()
    time.sleep(random() * 2)
    elapsed = time.time() - started
    print('完成任务:{}! 耗时:{} ,进程ID:{}'.format(task_name, elapsed, os.getpid()))
# 容器
container = []
def callback_func(n):
    # Collect a worker's return value into the module-level `container`
    # list; meant to be passed as the `callback=` of Pool.apply_async.
    container.append(n)
if __name__ == '__main__':
    # Demo driver: fan the task list out to a 5-worker process pool.
    pool = Pool(5)
    tasks = ['听音乐', '看电影', '读书', '看报', '玩游戏', '打篮球', '弹钢琴']
    for t in tasks:
        # Register callback_func so each task's result is appended to
        # `container`; without it the result loop below prints nothing.
        pool.apply_async(task, args=(t,), callback=callback_func)
    pool.close()  # close the pool: no more tasks may be submitted
    pool.join()  # wait for every worker process to finish
    print('------' * 10)
    for c in container:
        print(c)
    print('over!!!!!!!!!!')
| [
"noreply@github.com"
] | wanghan79.noreply@github.com |
b7e58fa1fcf498998c1ed4a723ca290fc280f15c | 23514a0e2baf6da053690dd511f1eef75a573e6b | /log-mining/com/haodou/log-mining/log/searchKeyword45.py | 57442a3fa8cb219cd0a04d9be9ebe10fd6f97859 | [] | no_license | rainly/scripts-1 | b5f31880a1a917df23e4c110bb7661685851eff5 | 3ef01a58162b94fb36cdd38581c899d8a118eda0 | refs/heads/master | 2020-07-12T01:41:31.491344 | 2019-08-27T08:50:10 | 2019-08-27T08:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,296 | py | #encoding=utf-8
#
#专门为4.5版本定制
import sys
import operator
sys.path.append("./")
sys.path.append("../")
sys.path.append("../abtest")
import column
import column2
from clickCount import *
from cardClick import *
CardFix="##Card##_"
def getRidPos(ret):
    """Map each first-page recipe id in *ret* to its rank position."""
    positions = {}
    if "rids" in ret:
        # Only the first FirstPageNum results count as "first page".
        for rank, rid in enumerate(ret["rids"][:FirstPageNum]):
            positions[rid] = rank
    return positions
THead=".html?id="
THeadLen=len(THead)
inn=0
def searchFollowMapper(f):
lastIP=""
rids={}
oc=0
otherRids={}
otherAids={}
otherTids={}
searchRet={}
click=None
keywordClick={}
lastK=""
hc=0
keyword=""
cardBank=CardBank()
for line in f:
if True:
cols=line.strip().split("\t")
if len(cols) < 3:
continue
ip=cols[0]
if lastIP == "":
lastIP=ip
if lastIP != ip:
lastIP=ip
keyword=""
rids={}
uc={}
otherRids={}
otherAids={}
otherTids={}
searchRet={}
click=None
lastSearchRet={}
lastClick=None
cardBank.reset()
if line.find("m.haodou.com") > 0 and line.find("GET") > 0:
cols=cols[2].strip().split("\01")
if len(cols) < 5:
continue
p=cols[4].find(THead)
if p > 0:
end=cols[4].find("&",p+THeadLen)
if end < 0:
end=len(cols[4])
id=cols[4][p+THeadLen:end]
if id !="":
if "topicId" in searchRet and searchRet["topicId"] == id:
click.addTopicHit(id)
oc+=1
if "ttitle" in searchRet:
title=searchRet["ttitle"]
click.addTopicTitleHit(title,keyword)
elif id in otherTids:
oc+=1
click.getMs().tb+=1
else:
if cols[4].find("uuid") > 0:
sys.stderr.write(line)
mtid=id
continue
if len(cols) < column.APP_LOG_COLUMNS+1:
continue
version=cols[column.VERSION_CID+1]
v=column.intVersion(version)
if v < 400:
continue
u=column.uuidFirst(cols[1:]) #获得uuid
if u == None or u.find("{") >= 0:
u=""
para=cols[column.PARA_ID+1] #得到请求参数
method=cols[column.METHOD_CID+1] #获得请求的方法
hasSearch=False
if method == 'search.getsearchindex': #搜索方法
keyword=V45Fix+column.getValue(para,"keyword") #搜索的关键字
hasSearch=True
searchRet=column2.FuncMap[method](cols[-1]) #获得搜索返回的食谱列表
rids=getRidPos(searchRet)
if keyword not in keywordClick:
keywordClick[keyword]=Click()
click=keywordClick[keyword]
click.addSearchRet(searchRet,keyword)
cardBank.addSearch(keyword[len(V45Fix):],searchRet)
otherRids={}
otherAids={}
otherTids={}
if "rids" in searchRet:
for i in range(FirstPageNum,len(searchRet["rids"]),1):
otherRids[searchRet["rids"][i]]=i
elif method == "search.getlist" and (v < 450 or v >= 480):
offset=column.getValue(para,"offset")
scene=column.getValue(para,"scene")
if scene != "k1":
continue
#keyword=column.getValue(para,"keyword") #搜索的关键字
searchRet=column2.FuncMap[method](cols[-1]) #
if offset != "0":
kw=column.getValue(para,"keyword")
if kw != keyword:
continue
if "rids" in searchRet:
for i in range(len(searchRet["rids"])):
otherRids[searchRet["rids"][i]]=i
continue
keyword=column.getValue(para,"keyword")
hasSearch=True
rids=getRidPos(searchRet)
otherRids={}
#otherAids={}
#otherTids={}
if "rids" in searchRet:
for i in range(len(searchRet["rids"])):
if i >= FirstPageNum:
otherRids[searchRet["rids"][i]]=i
continue
rids[searchRet["rids"][i]]=i
if keyword not in keywordClick:
keywordClick[keyword]=Click()
click=keywordClick[keyword]
click.addSearchRet(searchRet,keyword)
cardBank.addSearch(keyword,searchRet,False)
if hasSearch:
if lastK != "":
keywordClick[lastK].addHitCount(hc)
if hc + oc > 0:
keywordClick[lastK].addHasHit()
hc=0
oc=0
lastK=keyword
if click == None:
continue
if method == "info.getinfo":
rid=column.getValue(para,"rid") #获得点击的食谱id
pos=-1
if rid in rids:
click.addRecipeHit(rid,rids[rid])
kw=keyword
if v >= 450:
kw=keyword[len(V45Fix):]
cardBank.addHit(kw,rid,rids[rid],(v >= 450))
hc+=1
pos=rids[rid]
elif rid in otherRids:
oc+=1
click.getMs().rb+=1
if pos >= 0:
ret=column2.FuncMap[method](cols[-1])
if ret == None:
pass
#sys.stderr.write(cols[-1])
elif "title" in ret:
title=ret["title"]
click.addTitleHit(title,pos,keyword)
elif method == "info.getfoodinfo":
fid=column.getValue(para,"foodid")
if fid != "" and "food" in searchRet and fid == searchRet["food"]:
click.addFoodHit(fid)
oc+=1
elif v >= 450:
if method == "info.getalbuminfo":
id=column.getValue(para,"aid") #获取点击的专辑id
if id !="":
if "aid" in searchRet and searchRet["aid"]==id :
click.addAlbumHit(id)
oc+=1
if "atitle" in searchRet:
title=searchRet["atitle"]
click.addAlbumTitleHit(title,keyword)
elif id in otherAids:
oc+=1
click.getMs().ab+=1
elif method == "search.gettags":
kw=V45Fix+column.getValue(para,"keyword") #搜索的关键字
if kw != keyword:
continue
tags=column2.FuncMap[method](cols[-1])
for tagid in tags:
if tagid not in click.getMs().rTagShow:
click.getMs().rTagShow[tagid]=1
else:
click.getMs().rTagShow[tagid]+=1
elif method == "search.getlist":
offset=column.getValue(para,"offset")
scene=column.getValue(para,"scene")
if scene != "k1":
continue
kw=V45Fix+column.getValue(para,"keyword") #搜索的关键字
if kw != keyword:
continue
searchRet=column2.FuncMap[method](cols[-1]) #
if "rids" in searchRet:
for i in range(len(searchRet["rids"])):
otherRids[searchRet["rids"][i]]=i
if offset != "0":
continue
tagid=column.getValue(para,"tagid")
if tagid == "" or tagid == "null":
click.getMs().r+=1
else:
if tagid not in click.getMs().rt:
click.getMs().rt[tagid]=1
else:
click.getMs().rt[tagid]+=1
click.getMs().rtn=1
elif method == "search.getalbumlist":
offset=column.getValue(para,"offset")
kw=V45Fix+column.getValue(para,"keyword") #搜索的关键字
if kw != keyword:
continue
ret=column2.getList(cols[-1],"AlbumId")
for aid in ret:
otherAids[aid]=1
if offset == "0":
click.getMs().a+=1
elif method == "search.gettopiclist":
offset=column.getValue(para,"offset")
kw=V45Fix+column.getValue(para,"keyword") #搜索的关键字
if kw != keyword:
continue
ret=column2.getList(cols[-1],"TopicId","int")
for tid in ret:
#sys.stderr.write("tid:"+tid+"\n")
otherTids[tid]=1
if offset == "0":
click.getMs().t+=1
if lastK != "":
keywordClick[lastK].addHitCount(hc)
if hc + oc > 0:
keywordClick[lastK].addHasHit()
ck45=Click()
ck44=Click()
for kw in keywordClick:
if kw.startswith(V45Fix):
ck45.merge(keywordClick[kw])
else:
ck44.merge(keywordClick[kw])
print kw+"\t"+str(keywordClick[kw])
print "ck45_##total##"+"\t"+str(ck45)
print "ck44_##total##"+"\t"+str(ck44)
for card in cardBank.bank:
print CardFix+str(cardBank.bank[card])
def searchFollowReducer(f):
lastK=""
ck=Click()
for line in sys.stdin:
cols=line.split("\t")
kw=cols[0]
#空搜索
if kw.strip() == "":
kw="[VOID]"
if lastK == "":
lastK=kw
if kw.startswith(CardFix):
cc=CardClick(kw)
if lastK != kw:
if lastK.startswith(CardFix):
print cc
else:
print lastK+"\t"+str(ck)
if kw.startswith(CardFix):
cc=CardClick(kw)
else:
ck=Click()
lastK=kw
try:
if line.startswith(CardFix):
tcc=readCardClick(cols)
cc.merge(tcc)
else:
(kw,tck)=readClick(cols)
ck.merge(tck)
except:
sys.stderr.write(line)
if lastK != "":
if lastK.startswith(CardFix):
print cc
else:
print lastK+"\t"+str(ck)
if __name__=="__main__":
if sys.argv[1] == "map":
searchFollowMapper(sys.stdin)
elif sys.argv[1] == "reduce":
searchFollowReducer(sys.stdin)
| [
"zhaoweiguo@vxiaoke360.com"
] | zhaoweiguo@vxiaoke360.com |
d8c7a38a8aeec36a0e7f7cb277b9cbcea34d348d | 1fbb308b15a83fd53d7d1eeee1ad103d3ba36ac4 | /venv/bin/pip | 87b74d7a8f139ff92ee41d635bc4ef98e35fbf19 | [] | no_license | jcohen66/twit | a2b585f523a7168c1438c4eea6cf872d7dc3a985 | 3d164947a63d3102b2b34c86199b26b2f17cc4db | refs/heads/master | 2023-03-25T01:29:25.814931 | 2021-03-16T00:33:07 | 2021-03-16T00:33:07 | 347,211,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/jcohen66/PycharmProjects/twit/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"jcohen66@optonline.net"
] | jcohen66@optonline.net | |
4266bb3abd19427b2cd8572b67e7b4af4d73d196 | 54708c482b13760fc4a747274b62849b5e8b8277 | /leetcode_python/Hash_table/contains-duplicate-ii.py | cc82bea65b88eb03d3c4b67b958a77ccc25b4d1e | [] | no_license | DataEngDev/CS_basics | 6520818caa61609eae9b026fb5b25ef9e4ea6961 | 05e8f5a4e39d448eb333c813093fc7c1df4fc05e | refs/heads/master | 2023-03-02T18:58:42.469872 | 2021-02-14T02:23:27 | 2021-02-14T02:23:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py | """
Given an array of integers and an integer k, find out whether there are two distinct indices i and j in the array such that nums[i] = nums[j] and the absolute difference between i and j is at most k.
Example 1:
Input: nums = [1,2,3,1], k = 3
Output: true
Example 2:
Input: nums = [1,0,1,1], k = 1
Output: true
Example 3:
Input: nums = [1,2,3,1,2,3], k = 2
Output: false
"""
# Time: O(n)
# Space: O(n)
#
# Given an array of integers and an integer k, return true if
# and only if there are two distinct indices i and j in the array
# such that nums[i] = nums[j] and the difference between i and j is at most k.
#
# V0
# V1
# https://blog.csdn.net/coder_orz/article/details/51674266
# IDEA : HASH TABLE
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool
        """
        # Remember the most recent index of each value; a nearby duplicate
        # exists when the gap to that index is at most k.
        last_seen = {}
        for idx, val in enumerate(nums):
            prev = last_seen.get(val)
            if prev is not None and idx - prev <= k:
                return True
            last_seen[val] = idx
        return False
# V1'
# https://blog.csdn.net/coder_orz/article/details/51674266
# IDEA : SET
# IDEA : SET OPERATION
# In [12]: window = set([1,3,4,])
# ...:
# In [13]: window
# Out[13]: {1, 3, 4}
# In [14]: window.discard(1)
# In [15]: window
# Out[15]: {3, 4}
# In [16]: window.discard(3)
# In [17]: window
# Out[17]: {4}
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool
        """
        # Sliding-window set holding the last k values seen so far.
        recent = set()
        for i, v in enumerate(nums):
            if i > k:
                # Evict the value that just fell out of the window.
                recent.discard(nums[i - k - 1])
            if v in recent:
                return True
            recent.add(v)
        return False
# V1'
# https://www.jiuzhang.com/solution/contains-duplicate-ii/#tag-highlight-lang-python
class Solution:
    """
    @param nums: the given array
    @param k: the given number
    @return: whether there are two distinct indices i and j in the array such that nums[i] = nums[j] and the absolute difference between i and j is at most k
    """
    def containsNearbyDuplicate(self, nums, k):
        # Write your code here
        # dic maps each value to the most recent index it was seen at.
        dic = {}
        for index, value in enumerate(nums):
            # A nearby duplicate exists when this value was last seen at
            # most k positions ago.
            if value in dic and index - dic[value] <= k:
                return True
            dic[value] = index
        return False
# V2
class Solution:
    # @param {integer[]} nums
    # @param {integer} k
    # @return {boolean}
    def containsNearbyDuplicate(self, nums, k):
        """Return True iff some value repeats within a window of size k."""
        last_index = {}
        for pos, value in enumerate(nums):
            # Seen before and close enough -> nearby duplicate found.
            if value in last_index and pos - last_index[value] <= k:
                return True
            # Otherwise record (or refresh) this value's latest position.
            last_index[value] = pos
        return False
| [
"f339339@gmail.com"
] | f339339@gmail.com |
af950af06cd4d8cb80ff701f37666dc4d78deba6 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Matlibplots/samples2/axes_demo.py | 4d3fa3d8cc09e86facd7b5e96a3059c143a7f1e4 | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
from pylab import *
# Demo: a noise trace with two inset axes (histogram + impulse response).
# NOTE(review): `axisbg` and `normed` are legacy matplotlib keyword names
# (renamed `facecolor` / `density` in newer releases) — confirm the
# targeted matplotlib version.
# create some data to use for the plot
dt = 0.001
t = arange(0.0, 10.0, dt)
r = exp(-t[:1000] / 0.05)  # impulse response
x = randn(len(t))
s = convolve(x, r)[:len(x)] * dt  # colored noise
# the main axes is subplot(111) by default
plot(t, s)
axis([0, 1, 1.1 * amin(s), 2 * amax(s) ])
xlabel('time (s)')
ylabel('current (nA)')
title('Gaussian colored noise')
# this is an inset axes over the main axes
a = axes([.65, .6, .2, .2], axisbg='y')
n, bins, patches = hist(s, 400, normed=1)
title('Probability')
setp(a, xticks=[], yticks=[])
# this is another inset axes over the main axes
a = axes([0.2, 0.6, .2, .2], axisbg='y')
plot(t[:len(r)], r)
title('Impulse response')
setp(a, xlim=(0, .2), xticks=[], yticks=[])
show()
| [
"bogdan.evanzo@gmail.com"
] | bogdan.evanzo@gmail.com |
027f90da6ae7a9f981c03a08fedff984b0d56959 | c309e7d19af94ebcb537f1e8655c0122dbe0cb13 | /Chapter03/01-chapter-content/read_video_file_all_properties.py | db4569016c150e9cb77c318253011c5af90209bc | [
"MIT"
] | permissive | PacktPublishing/Mastering-OpenCV-4-with-Python | 0fb82c88cb7205c7050c8db9f95a6deb3b1b3333 | 4194aea6f925a4b39114aaff8463be4d18e73aba | refs/heads/master | 2023-03-07T04:51:16.071143 | 2023-02-13T10:17:48 | 2023-02-13T10:17:48 | 151,057,527 | 375 | 226 | MIT | 2022-08-27T13:32:19 | 2018-10-01T08:27:29 | Python | UTF-8 | Python | false | false | 3,750 | py | """
Example to introduce how to read a video file and get all properties
"""
# Import the required packages:
import cv2
import argparse
def decode_fourcc(fourcc):
    """Decodes the fourcc value to get the four chars identifying it"""
    fourcc_int = int(fourcc)
    # We print the int value of fourcc
    print("int value of fourcc: '{}'".format(fourcc_int))
    # Each character occupies one byte, least-significant byte first.
    chars = []
    for shift in (0, 8, 16, 24):
        int_value = (fourcc_int >> shift) & 0xFF
        print("int_value: '{}'".format(int_value))
        chars.append(chr(int_value))
    return "".join(chars)
# We first create the ArgumentParser object
# The created object 'parser' will have the necessary information
# to parse the command-line arguments into data types.
parser = argparse.ArgumentParser()
# We add 'video_path' argument using add_argument() including a help.
parser.add_argument("video_path", help="path to the video file")
args = parser.parse_args()
# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
capture = cv2.VideoCapture(args.video_path)
# Get and print these values:
print("CV_CAP_PROP_FRAME_WIDTH: '{}'".format(capture.get(cv2.CAP_PROP_FRAME_WIDTH)))
print("CV_CAP_PROP_FRAME_HEIGHT : '{}'".format(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print("CAP_PROP_FPS : '{}'".format(capture.get(cv2.CAP_PROP_FPS)))
print("CAP_PROP_POS_MSEC : '{}'".format(capture.get(cv2.CAP_PROP_POS_MSEC)))
print("CAP_PROP_POS_FRAMES : '{}'".format(capture.get(cv2.CAP_PROP_POS_FRAMES)))
print("CAP_PROP_FOURCC : '{}'".format(decode_fourcc(capture.get(cv2.CAP_PROP_FOURCC))))
print("CAP_PROP_FRAME_COUNT : '{}'".format(capture.get(cv2.CAP_PROP_FRAME_COUNT)))
print("CAP_PROP_MODE : '{}'".format(capture.get(cv2.CAP_PROP_MODE)))
print("CAP_PROP_BRIGHTNESS : '{}'".format(capture.get(cv2.CAP_PROP_BRIGHTNESS)))
print("CAP_PROP_CONTRAST : '{}'".format(capture.get(cv2.CAP_PROP_CONTRAST)))
print("CAP_PROP_SATURATION : '{}'".format(capture.get(cv2.CAP_PROP_SATURATION)))
print("CAP_PROP_HUE : '{}'".format(capture.get(cv2.CAP_PROP_HUE)))
print("CAP_PROP_GAIN : '{}'".format(capture.get(cv2.CAP_PROP_GAIN)))
print("CAP_PROP_EXPOSURE : '{}'".format(capture.get(cv2.CAP_PROP_EXPOSURE)))
print("CAP_PROP_CONVERT_RGB : '{}'".format(capture.get(cv2.CAP_PROP_CONVERT_RGB)))
print("CAP_PROP_RECTIFICATION : '{}'".format(capture.get(cv2.CAP_PROP_RECTIFICATION)))
print("CAP_PROP_ISO_SPEED : '{}'".format(capture.get(cv2.CAP_PROP_ISO_SPEED)))
print("CAP_PROP_BUFFERSIZE : '{}'".format(capture.get(cv2.CAP_PROP_BUFFERSIZE)))
# Check if camera opened successfully
if capture.isOpened() is False:
print("Error opening video stream or file")
# Read until video is completed
while capture.isOpened():
# Capture frame-by-frame
ret, frame = capture.read()
if ret is True:
# Print current frame number per iteration
print("CAP_PROP_POS_FRAMES : '{}'".format(capture.get(cv2.CAP_PROP_POS_FRAMES)))
# Get the timestamp of the current frame in milliseconds
print("CAP_PROP_POS_MSEC : '{}'".format(capture.get(cv2.CAP_PROP_POS_MSEC)))
# Display the resulting frame
cv2.imshow('Original frame', frame)
# Convert the frame to grayscale:
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the grayscale frame
cv2.imshow('Grayscale frame', gray_frame)
# Press q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# Release everything:
capture.release()
cv2.destroyAllWindows()
| [
"fernandezvillan.alberto@gmail.com"
] | fernandezvillan.alberto@gmail.com |
418631b09fa5046da457ec6b6d12fde3127fe3a9 | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/f3_wrong_hints/scaling_software_termination/11-2Nested_false-termination_13.py | e93f6811be066b40a1149e24f28d064d54e66d26 | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,279 | py | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(x, y)))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.GE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
loc1 = Location(env, mgr.GE(y, i_0))
loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y6", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, i_0), mgr.GE(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, pc)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y7", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
return frozenset(res)
| [
"en.magnago@gmail.com"
] | en.magnago@gmail.com |
13cd64bc158351be30552a0174bbd7805c1ee073 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_8/ksskou001/question1.py | cbf7c2c649b94f8451d86b9956d6a62f42752649 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | '''This program uses a recursion function to shows whether or not a string
is palindromique
By Hermann KOUASSI: KSSKOU001
On 3 May 2014'''
def pal(the_str):
    '''Print whether *the_str* is palindromic, checking recursively.'''
    n = len(the_str)
    # Base case: the empty string is trivially a palindrome.
    if n == 0:
        print('Palindrome!')
        return
    # Base case: two or three characters — compare the outer pair directly.
    if n in (2, 3):
        print('Palindrome!' if the_str[0] == the_str[-1] else 'Not a palindrome!')
        return
    # Longer strings: mismatched ends settle it; otherwise recurse on the
    # string with the first and last characters stripped off.
    if the_str[0] != the_str[-1]:
        print('Not a palindrome!')
    else:
        pal(the_str[1:n - 1])
def main():
    '''Prompt the user for a string and report whether it is a palindrome.'''
    #get the string
    the_str = input('Enter a string:\n')
    #call palindromic function
    pal(the_str)
pal(the_str)
if __name__=="__main__":
main() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
856eaeeccbd6c812aee69d362e74757971e1e1b9 | 391937be6d8c8bcf5c3fe9fae8790024fc1e1727 | /Starctf_2019/girlfriend/changeld.py | 03c035cf38d567f31740c7a68a2268662cc4b46a | [] | no_license | n132/Watermalon | a9d93d3f0d598de7f8ed9bbe13ed02af364f770f | 79b4479458ae0884d9cdd52d317674298d601d0a | refs/heads/master | 2022-05-13T21:04:08.108705 | 2022-04-08T23:29:48 | 2022-04-08T23:29:48 | 154,925,917 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | import os
from pwn import *
def change_ld(binary, ld):
    """
    Force to use assigned new ld.so by changing the binary

    Rewrites the PT_INTERP segment of *binary* (a path or pwntools ELF)
    to point at *ld*, saves the patched copy under /tmp/pwn/, and returns
    it as a new ELF. Returns None on any validation failure.

    NOTE(review): `data` and `path` are only assigned inside the
    PT_INTERP branch — if the binary has no PT_INTERP segment the final
    success() line raises NameError; confirm whether that case matters.
    """
    if not os.access(ld, os.R_OK):
        log.failure("Invalid path {} to ld".format(ld))
        return None
    if not isinstance(binary, ELF):
        if not os.access(binary, os.R_OK):
            log.failure("Invalid path {} to binary".format(binary))
            return None
        binary = ELF(binary)
    for segment in binary.segments:
        if segment.header['p_type'] == 'PT_INTERP':
            size = segment.header['p_memsz']
            addr = segment.header['p_paddr']
            data = segment.data()
            # Need strictly more room than the new path so the padded
            # interpreter string stays NUL-terminated in place.
            if size <= len(ld):
                log.failure("Failed to change PT_INTERP from {} to {}".format(data, ld))
                return None
            binary.write(addr, ld.ljust(size, '\0'))
            if not os.access('/tmp/pwn', os.F_OK): os.mkdir('/tmp/pwn')
            path = '/tmp/pwn/{}_debug'.format(os.path.basename(binary.path))
            # Replace any stale patched copy from a previous run.
            if os.access(path, os.F_OK):
                os.remove(path)
                info("Removing exist file {}".format(path))
            binary.save(path)
            os.chmod(path, 0b111000000) #rwx------
    success("PT_INTERP has changed from {} to {}. Using temp file {}".format(data, ld, path))
    return ELF(path)
| [
"986256128@qq.com"
] | 986256128@qq.com |
db424bfcd1d912fa13fa0d54efe254c2af125fb6 | 93ccc1138fd28385e7cebf69fda8327cbf974d13 | /test/generate_runtime.py | 59710979953e46f6194872112714237b001bfb78 | [] | no_license | hbcbh1999/Phy-Net | 09613fbd146b6c70e4b52e7e00837ae92c9e6ef8 | 06ba854b3f281027546b8acfdec29fdbe6eb6649 | refs/heads/master | 2021-01-19T22:53:19.430191 | 2017-04-07T19:10:32 | 2017-04-07T19:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,327 | py | import math
import numpy as np
import tensorflow as tf
import sys
sys.path.append('../')
from model.ring_net import *
from model.loss import *
from utils.experiment_manager import make_checkpoint_path
from systems.fluid_createTFRecords import generate_feed_dict
import random
import time
from tqdm import *
FLAGS = tf.app.flags.FLAGS
# get restore dir
RESTORE_DIR = make_checkpoint_path(FLAGS.base_dir, FLAGS)
# shape of test simulation
shape = FLAGS.test_dimensions.split('x')
shape = map(int, shape)
def evaluate():
""" Eval the system"""
with tf.Graph().as_default():
# make inputs
state, boundary = inputs(empty=True)
state = state[0:1,0]
boundary = boundary[0:1,0]
# unwrap
y_1, small_boundary_mul, small_boundary_add, x_2, y_2 = continual_unroll_template(state, boundary)
# make variable to iterate
compressed_shape = [x / pow(2,FLAGS.nr_downsamples) for x in shape]
print(compressed_shape)
compressed_state_1 = tf.Variable(np.zeros([1] + compressed_shape + [FLAGS.filter_size_compression], dtype=np.float32), trainable=False)
small_boundary_mul_var = tf.Variable(np.zeros([1] + compressed_shape + [FLAGS.filter_size_compression], dtype=np.float32), trainable=False)
small_boundary_add_var = tf.Variable(np.zeros([1] + compressed_shape + [FLAGS.filter_size_compression], dtype=np.float32), trainable=False)
# make steps to init
assign_compressed_state_step = tf.group(compressed_state_1.assign(y_1))
assign_boundary_mul_step = tf.group(small_boundary_mul_var.assign(small_boundary_mul))
assign_boundary_add_step = tf.group(small_boundary_add_var.assign(small_boundary_add))
# computation!
compressed_state_1_boundary = (small_boundary_mul_var * compressed_state_1) + small_boundary_add_var
compressed_state_2 = compress_template(compressed_state_1_boundary)
run_step = tf.group(compressed_state_1.assign(compressed_state_2))
state_out = decoding_template(compressed_state_2)
# restore network
init = tf.global_variables_initializer()
#variables_to_restore = tf.trainable_variables()
#saver = tf.train.Saver(variables_to_restore)
sess = tf.Session()
sess.run(init)
#ckpt = tf.train.get_checkpoint_state(RESTORE_DIR)
#if ckpt and ckpt.model_checkpoint_path:
# print("restoring file from " + ckpt.model_checkpoint_path)
# saver.restore(sess, ckpt.model_checkpoint_path)
#else:
# print("no chekcpoint file found from " + RESTORE_DIR + ", this is an error")
# exit()
# make fake zero frame to test on
state_feed_dict = np.zeros([1]+shape+[FLAGS.lattice_size])
boundary_feed_dict = np.zeros([1]+shape+[1])
feed_dict = {state:state_feed_dict, boundary:boundary_feed_dict}
assign_compressed_state_step.run(session=sess, feed_dict=feed_dict)
assign_boundary_mul_step.run(session=sess, feed_dict=feed_dict)
assign_boundary_add_step.run(session=sess, feed_dict=feed_dict)
run_step.run(session=sess)
# open file to log results
with open("figs/" + "runtime_log.txt", "a") as myfile:
# run no state_out
t = time.time()
run_length = 1000
for step in tqdm(xrange(run_length)):
run_step.run(session=sess)
elapsed = time.time() - t
print("time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape) + "\n")
myfile.write("no decompression time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape) + "\n")
# run with state out
t = time.time()
run_length = 1000
for step in tqdm(xrange(run_length)):
run_step.run(session=sess)
state_out.eval(session=sess)
elapsed = time.time() - t
print("with decompression time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape))
myfile.write("with decompression time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape) + "\n")
def main(argv=None): # pylint: disable=unused-argument
evaluate()
if __name__ == '__main__':
tf.app.run()
| [
"loliverhennigh101@gmail.com"
] | loliverhennigh101@gmail.com |
a127a74237962f2453d6466dd7901670b2f745be | c67b74a8de4d60f2aba025dfba8351996cdaa46c | /tkinter/other/autologging/grid_layout/target_ui.py | f97b6f974a38b238aa9d4c893c0ccf4e81479fff | [] | no_license | texttest/storytext-selftest | a30a58a0ab75fd26e60056222150cf6ae773470e | 438977bf044c1ebc98089b667f0ae3d835bc7f37 | refs/heads/master | 2020-04-26T18:16:20.242220 | 2015-02-17T15:53:27 | 2015-02-17T15:53:27 | 173,739,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # File: hello2.py
try:
from tkinter import *
except ImportError:
from Tkinter import *
class App:
    """Minimal Tk grid-layout demo: three labels plus a QUIT button."""
    def __init__(self, master):
        Label(master, text="Top Left").grid(row=0, column=0)
        # This label spans all three rows of the right-hand column.
        Label(master, text="Top Right").grid(row=0, column=1, rowspan=3)
        Label(master, text="Bottom Left").grid(row=1, column=0)
        # Quits the mainloop of *master* when pressed.
        Button(master, text="QUIT", fg="red", command=master.quit).grid(row=2, column=0)
root = Tk()
app = App(root)
root.mainloop()
| [
"geoff.bache@jeppesen.com"
] | geoff.bache@jeppesen.com |
fb98535b70b7993474b77e91b3feff3d46b9b3de | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /7BYXC8befjYqzhMsc_11.py | 66660ac86c91acd077e3841dd8119a65d3ac8e96 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | """
Kathleen owns a beautiful rug store. She likes to group the rugs into 4
mutually exclusive categories.
* imperfect
* horizontally symmetric
* vertically symmetric
* perfect
An **imperfect** rug is one that is **neither horizontally nor vertically
symmetric**. Here is an example of an **imperfect** rug:
[
["a", "a", "a", "a"],
["a", "a", "a", "a"],
["a", "a", "b", "b"]
]
The following is an **horizontally symmetric** rug. You could "fold" the rug
across a hypothetical x-axis, and both sides would be identical. A
horizontally symmetric rug is **not** vertically symmetric (otherwise this rug
would be classified as **perfect** ).
[
["c", "a", "a", "a"],
["b", "b", "b", "b"],
["c", "a", "a", "a"]
]
The following is a **vertically symmetric** rug. You could "fold" the rug
across a hypothetical y-axis, and both sides would be identical. A vertically
symmetric is **not** horizontally symmetric (otherwise this rug would be
classified as **perfect** ).
[
["a", "b", "a"],
["b", "b", "b"],
["a", "b", "a"],
["a", "b", "a"]
]
Finally, a **perfect** rug is one that is **both vertically and horizontally
symmetric**. That is, folded either length-wise or width-wise will yield two
identical pieces.
[
["a", "b", "b", "a"],
["b", "b", "b", "b"],
["a", "b", "b", "a"]
]
Given a rug of `m x n` dimension, determine whether it is **imperfect,
horizontally symmetric, vertically symmetric or perfect**. Rugs are
represented using a two-dimensional list.
### Examples
classify_rug([
["a", "a"],
["a", "a"]
]) ➞ "perfect"
classify_rug([
["a", "a", "b"],
["a", "a", "a"],
["b", "a", "a"]
]) ➞ "imperfect"
classify_rug([
["b", "a"],
["b", "a"]
]) ➞ "horizontally symmetric"
classify_rug([
["a", "a"],
["b", "b"]
]) ➞ "vertically symmetric"
### Notes
You can consider a `1 x n` rug as being trivially **horizontally symmetric** ,
an `n x 1` rug as being trivially **vertically symmetric** , and a `1 x 1` rug
as being trivially **perfect**.
"""
def classify_rug(pattern):
p = pattern
c = [1,1]
for i in range(len(p)//2):
if not p[i]==p[-i-1]:
c[0]=0
break
for e in p:
if c[1]==0:
break
for j in range(len(e)//2):
if not e[j]==e[-j-1]:
c[1]=0
break
if sum(c)==0:
return "imperfect"
elif sum(c)==1:
if c[0]==1:
return "horizontally symmetric"
else:
return "vertically symmetric"
else:
return "perfect"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
8f17fad612ed72e9207bd0bc6151fd98258a7479 | c047518e0bc0be1d1a46b734fbf53610cb8a407f | /URI/1564.py | 3cf5007b193896b8264fe8720ab63225560887f8 | [] | no_license | fernandozanutto/competitive_programming | c3e006544ddba1702a37eeb437cb015713e8c2d1 | cf721a7bcce6c5d5fc9f739ad729079c939fc421 | refs/heads/master | 2020-06-19T06:57:32.288602 | 2020-04-04T14:58:45 | 2020-04-04T14:58:45 | 196,607,123 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | while True:
try:
x = int(input())
if x > 0:
print('vai ter duas!')
else:
print('vai ter copa!')
except EOFError:
break
| [
"ferzanutto1999@gmail.com"
] | ferzanutto1999@gmail.com |
6bafb7696525ab7f42a5402714c53c58186334ea | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/machinelearning/v20160501preview/get_web_service.py | 0310879ee63fd0a9c81caf2427c8125f783e25fc | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 3,930 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWebServiceResult',
'AwaitableGetWebServiceResult',
'get_web_service',
]
@pulumi.output_type
class GetWebServiceResult:
"""
Instance of an Azure ML web service resource.
"""
def __init__(__self__, location=None, name=None, properties=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.WebServicePropertiesForGraphResponse':
"""
Contains the property payload that describes the web service.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetWebServiceResult(GetWebServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebServiceResult(
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_web_service(resource_group_name: Optional[str] = None,
web_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebServiceResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: Name of the resource group in which the web service is located.
:param str web_service_name: The name of the web service.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['webServiceName'] = web_service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearning/v20160501preview:getWebService', __args__, opts=opts, typ=GetWebServiceResult).value
return AwaitableGetWebServiceResult(
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
9821c77472261b94840d40045f8030e18e1d5e13 | 927748a4de2b1388d83e554eb76deaa61c1ef167 | /namer/admin.py | b47d64e5ce90b02485e6bdd8686abc92b7c2286c | [
"Apache-2.0"
] | permissive | grahamgilbert/macnamer | f2486758ac25ce8fb93a9cabaa8d56600f7f5d71 | 809345a5c82d890ece2ee6a26e797f540561f49c | refs/heads/master | 2021-06-06T20:50:29.651924 | 2014-11-01T12:06:29 | 2014-11-01T12:06:29 | 6,016,337 | 23 | 6 | Apache-2.0 | 2019-03-01T01:36:12 | 2012-09-30T06:08:49 | Python | UTF-8 | Python | false | false | 158 | py | from django.contrib import admin
from namer.models import *
admin.site.register(Computer)
admin.site.register(ComputerGroup)
admin.site.register(Network) | [
"graham@grahamgilbert.com"
] | graham@grahamgilbert.com |
cffd172dc336a08ee43dd702322e72e587f814a8 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /csrgxtu/maxent/src/basketball/CVKNN.py | 5cd2ae78495e3c298550c337b20cafe503e6473c | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 5,299 | py | #!/usr/bin/env python
# coding = utf-8
#
# Author: Archer Reilly
# Date: 23/DEC/2014
# File: CVKNN.py
# Desc: KNN -- K Nearest Neighbours, use KNN classifier
#
# Produced By CSRGXTU
import cv2
import numpy as np
from Utility import loadMatrixFromFile, loadSeasons, loadTeamIds
# buildTrainingSets
# build training sets from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTrainingSets(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if (float(row[1]) - float(row[2])) < 0:
leaguerank = 0
else:
leaguerank = 1
res.append([row[0], leaguerank])
return np.array(res).astype(np.float32)
# buildTrainingLabels
# build training labels from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTrainingLabels(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if row[3] == 'W':
WIN = 1
else:
WIN = 0
res.append([[WIN]])
return np.array(res).astype(np.float32)
# buildTestingSets
# build testing sets from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTestingSets(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if (float(row[1]) - float(row[2])) < 0:
leaguerank = 0
else:
leaguerank = 1
res.append([row[0], leaguerank])
return np.array(res).astype(np.float32)
# buildTestingLabels
# build testing labels from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTestingLabels(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if row[3] == 'W':
WIN = 1
else:
WIN = 0
res.append([[WIN]])
return np.array(res).astype(np.float32)
# teamMain
# train and test for team
def teamMain():
DIR = '/home/archer/Documents/maxent/data/basketball/leaguerank/'
teamIds = loadTeamIds(DIR + 'teamidshortname.csv')
teamNames = [x[1] for x in loadMatrixFromFile(DIR + 'teamidshortname.csv')]
countTotal = 0
total = 0
for team in teamIds:
trainData = buildTrainingSets(DIR + team + '-train.csv')
trainLabels = buildTrainingLabels(DIR + team + '-train.csv')
testData = buildTestingSets(DIR + team + '-test.csv')
testLabels = buildTestingLabels(DIR + team + '-test.csv')
total = total + len(testLabels)
knn = cv2.KNearest()
knn.train(trainData, trainLabels)
# Accuracy
count = 0
for i in range(len(testLabels)):
ret, results, neighbours, dist = knn.find_nearest(np.array([testData[i]]), 11)
if results[0][0] == testLabels[i][0]:
count = count + 1
countTotal = countTotal + count
print 'INFO: Accuracy(', teamNames[teamIds.index(team)], ')', count/float(len(testLabels))
print 'INFO: Total Accuracy: ', countTotal/float(total)
# seasonMain
# train and test for seasons
def seasonMain():
DIR = '/home/archer/Documents/maxent/data/basketball/leaguerank/'
seasons = loadSeasons(DIR + 'seasons-18-Nov-2014.txt')
countTotal = 0
total = 0
for season in seasons:
trainData = buildTrainingSets(DIR + season + '-train.csv')
testData = buildTestingSets(DIR + season + '-test.csv')
trainLabels = buildTestingLabels(DIR + season + '-train.csv')
testLabels = buildTestingLabels(DIR + season + '-test.csv')
total = total + len(testLabels)
knn = cv2.KNearest()
knn.train(trainData, trainLabels)
# Accuracy
count = 0
for i in range(len(testLabels)):
ret, results, neighbours, dist = knn.find_nearest(np.array([testData[i]]), 11)
if results[0][0] == testLabels[i][0]:
count = count + 1
countTotal = countTotal + count
print 'INFO: Accuracy(', season, ')', count/float(len(testLabels))
print 'INFO: Total Accuracy: ', countTotal/float(total)
# main
# train and test for all
def main():
DIR = '/home/archer/Documents/maxent/data/basketball/leaguerank/'
seasons = loadSeasons(DIR + 'seasons-18-Nov-2014.txt')
total = 0
count = 0
trainData = []
trainLabels = []
testData = []
testLabels = []
for season in seasons:
tmpTrainData = buildTrainingSets(DIR + season + '-train.csv').tolist()
tmpTrainLabels = buildTestingLabels(DIR + season + '-train.csv').tolist()
tmpTestData = buildTestingSets(DIR + season + '-test.csv').tolist()
tmpTestLabels = buildTestingLabels(DIR + season + '-test.csv').tolist()
trainData.extend(tmpTrainData)
trainLabels.extend(tmpTrainLabels)
testData.extend(tmpTestData)
testLabels.extend(tmpTestLabels)
trainData = np.array(trainData).astype(np.float32)
trainLabels = np.array(trainLabels).astype(np.float32)
testData = np.array(testData).astype(np.float32)
testLabels = np.array(testLabels).astype(np.float32)
total = len(testLabels)
knn = cv2.KNearest()
knn.train(trainData, trainLabels)
for i in range(len(testLabels)):
ret, results, neighbours, dist = knn.find_nearest(np.array([testData[i]]), 21)
if results[0][0] == testLabels[i][0]:
count = count + 1
print 'INFO: Total Accuracy: ', count/float(total)
if __name__ == '__main__':
print "+++++++++++++++++Main+++++++++++++++++++++++++"
main()
print "+++++++++++++++++teamMain+++++++++++++++++++++++++"
teamMain()
print "+++++++++++++++++seasonMain+++++++++++++++++++++++++"
seasonMain()
| [
"xenron@outlook.com"
] | xenron@outlook.com |
1c7d4811f1c3dfd5bc63c22ca4fa582315a02824 | 0809673304fe85a163898983c2cb4a0238b2456e | /tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/site-packages/setuptools/command/test.py | 39746a02bf60246a3d7f1feb3e2f9243977c6177 | [
"Apache-2.0"
] | permissive | jasonwee/asus-rt-n14uhp-mrtg | 244092292c94ff3382f88f6a385dae2aa6e4b1e1 | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | refs/heads/master | 2022-12-13T18:49:02.908213 | 2018-10-05T02:16:41 | 2018-10-05T02:16:41 | 25,589,776 | 3 | 1 | Apache-2.0 | 2022-11-27T04:03:06 | 2014-10-22T15:42:28 | Python | UTF-8 | Python | false | false | 7,134 | py | import sys
import contextlib
from distutils.errors import DistutilsOptionError
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self):
with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as a as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| [
"peichieh@gmail.com"
] | peichieh@gmail.com |
ee5cb4dba4207a57b701f57cb2ad43198828c213 | 096fde1f6e629de8b921de621c1e2eaed3d149e7 | /src/android/toga_android/widgets/label.py | 66e971a274fe62d5a88ba01b124bb779f3afec2b | [
"BSD-3-Clause"
] | permissive | zeerorg/toga | 78079ccde9fc33970f03dd0718cb191d037d9db8 | be8182e6131938982dc757f847f938349009de7e | refs/heads/master | 2021-01-23T03:16:17.750455 | 2017-03-19T06:44:37 | 2017-03-19T06:44:37 | 86,062,968 | 0 | 0 | null | 2017-03-24T11:54:09 | 2017-03-24T11:54:09 | null | UTF-8 | Python | false | false | 1,250 | py | # from ..app import MobileApp
# from .base import Widget
#
# from toga.constants import *
#
#
# class Label(Widget):
# def __init__(self, text=None, alignment=LEFT_ALIGNED):
# super(Label, self).__init__()
#
# self.startup()
#
# self.alignment = alignment
# self.text = text
#
# def startup(self):
# print ("startup label")
# self._impl = TextView(MobileApp._impl)
#
# @property
# def alignment(self):
# return self._alignment
#
# @alignment.setter
# def alignment(self, value):
# self._alignment = value
# self._impl.setGravity({
# LEFT_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.LEFT,
# RIGHT_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.RIGHT,
# CENTER_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.CENTER_HORIZONTAL,
# JUSTIFIED_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.CENTER_HORIZONTAL,
# NATURAL_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.CENTER_HORIZONTAL,
# }[value])
#
# @property
# def text(self):
# return self._text
#
# @text.setter
# def text(self, value):
# self._text = value
# self._impl.setHint(self._text)
| [
"russell@keith-magee.com"
] | russell@keith-magee.com |
c5ee9c15ca65ada5c73aba41feef5b69a74b50d4 | 9805edf2b923c74cf72a3cfb4c2c712255256f15 | /python/120_triangle.py | 719010ee065e6c6d8cc995ea1c85fc78889cc7a4 | [
"MIT"
] | permissive | jixinfeng/leetcode-soln | 5b28e49c2879cdff41c608fc03628498939b0e99 | 24cf8d5f1831e838ea99f50ce4d8f048bd46c136 | refs/heads/master | 2022-10-12T17:02:53.329565 | 2022-10-06T03:21:56 | 2022-10-06T03:21:56 | 69,371,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | """
Given a triangle, find the minimum path sum from top to bottom. Each step you
may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is
the total number of rows in the triangle.
In oldMinSum=newMinSum[:] the [:] can not be omitted.
"""
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
if triangle is None or triangle == []:
return 0
height = len(triangle)
newMinSum = [0] * height
for row in triangle:
oldMinSum = newMinSum[:] #deep copy
for i in range(len(row)):
if i == 0:
newMinSum[i] = oldMinSum[i] + row[i]
elif i == len(row) - 1:
newMinSum[i] = oldMinSum[i - 1] + row[i]
else:
newMinSum[i] = min(oldMinSum[i], oldMinSum[i - 1]) + row[i]
return min(newMinSum)
a = Solution()
print(a.minimumTotal([[-1],[2,3],[1,-1,-3]]))
| [
"ufjfeng@users.noreply.github.com"
] | ufjfeng@users.noreply.github.com |
3605b8c88df214a553152d099f2136282236394f | bd01fbbc28b98814c0deb428b412aeec456a3712 | /make_train_target.py | 360bc19136e82616fd92eb8929c7c0ac37f2beb0 | [] | no_license | thusodangersimon/sanral_hack | d693a8fedf6de4f10dde5efcdb1bc5e0a95b0b9d | 69c71fc80935ee42488226962b29d643d0a6e75a | refs/heads/master | 2020-08-11T18:09:23.997842 | 2019-10-12T14:31:24 | 2019-10-12T14:31:24 | 214,606,084 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | """
This file creates training data set.
"""
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class MakeTarget(BaseEstimator, TransformerMixin):
def __init__(self, start, end, time_col, segment_col, agg_col):
self.start = start
self.end = end
self.time_col = time_col
self.segment_col = segment_col
self.agg_col = agg_col
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
train_df = X.set_index(self.time_col)
train_df = train_df.groupby([pd.Grouper(freq='H'), self.segment_col])[self.agg_col].count()
# get daterange
date_range = pd.date_range(self.start, self.end, freq='H')
# fill in data
out_df = train_df.reset_index().groupby(self.segment_col).apply(self._reindex, date_range=date_range)
return out_df
def _reindex(self, df, date_range):
df = df.set_index(self.time_col)
out_df = df.reindex(date_range, fill_value=0)
out_df = out_df[[self.agg_col]]
return out_df
if __name__ == '__main__':
train_path = 'data/train.csv'
train_df = pd.read_csv(train_path)
# make col datetime
train_df['Occurrence Local Date Time'] = pd.to_datetime(train_df['Occurrence Local Date Time'])
# init transformer
make_target = MakeTarget('2016-01-01', '2019-01-01', 'Occurrence Local Date Time', 'road_segment_id', 'EventId')
target = make_target.fit_transform(train_df)
print('found events =', target.EventId.sum())
target.to_csv('train_target.csv')
| [
"admin@example.com"
] | admin@example.com |
213eae2e246ee6a732fe0b4e2584202dfc940337 | b6a97526938a923f442d54b3c02e82051c0df1ff | /tests/functions_tests/test_concat.py | 32c9def4fe7e3bdeef21c63267e77160d6a381ac | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | amoliu/chainer | 96992ff235dec616e39edb0a840d621fbea4ce12 | df4362bc8a2a7bef6513ac788f373f0b5028e03b | refs/heads/master | 2021-01-22T16:38:16.727714 | 2015-06-18T08:31:13 | 2015-06-18T08:31:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | from unittest import TestCase
import numpy
from chainer import cuda, Variable
from chainer.cuda import to_gpu, GPUArray
from chainer.gradient_check import assert_allclose
from chainer.functions import concat
cuda.init()
class Concat(TestCase):
def setUp(self):
self.y0 = numpy.arange(42, dtype=numpy.float32).reshape(2, 7, 3)
self.xs0 = [self.y0[:, :2], self.y0[:, 2:5], self.y0[:, 5:]]
self.y1 = numpy.arange(21, dtype=numpy.float32).reshape(7, 3)
self.xs1 = [self.y1[:2], self.y1[2:5], self.y1[5:]]
def check_forward(self, xs_data, y_data, axis):
xs = tuple(Variable(x_data) for x_data in xs_data)
y = concat(xs, axis=axis)
assert_allclose(y_data, y.data, atol=0, rtol=0)
def test_forward_cpu_0(self):
self.check_forward(self.xs0, self.y0, axis=1)
def test_forward_cpu_1(self):
self.check_forward(self.xs1, self.y1, axis=0)
def test_forward_gpu_0(self):
self.check_forward(
[to_gpu(x.copy()) for x in self.xs0], to_gpu(self.y0), axis=1)
def test_forward_gpu_1(self):
self.check_forward(
[to_gpu(x.copy()) for x in self.xs1], to_gpu(self.y1), axis=0)
def check_backward(self, xs_data, axis):
xs = tuple(Variable(x_data) for x_data in xs_data)
y = concat(xs, axis=axis)
y.grad = y.data
y.backward()
for x in xs:
assert_allclose(x.data, x.grad, atol=0, rtol=0)
def test_backward_cpu_0(self):
self.check_backward(self.xs0, axis=1)
def test_backward_cpu_1(self):
self.check_backward(self.xs1, axis=0)
def test_backward_gpu_0(self):
self.check_backward([to_gpu(x.copy()) for x in self.xs0], axis=1)
def test_backward_gpu_1(self):
self.check_backward([to_gpu(x.copy()) for x in self.xs1], axis=0)
| [
"beam.web@gmail.com"
] | beam.web@gmail.com |
efd3b2095fe805965d530e8825bb361b4af2b186 | dfab6798ece135946aebb08f93f162c37dd51791 | /core/luban/cli/db/help.py | efa834580fb7667beb71a842f62daa146103d562 | [] | no_license | yxqd/luban | 405f5f7dcf09015d214079fe7e23d644332be069 | 00f699d15c572c8bf160516d582fa37f84ac2023 | refs/heads/master | 2020-03-20T23:08:45.153471 | 2012-05-18T14:52:43 | 2012-05-18T14:52:43 | 137,831,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import os
def run(*args, **kwds):
from . import public_commands
print( 'luban db -- db management commands')
print( 'http://lubanui.org')
print()
print('Commands:')
for cmd in public_commands:
print(' luban db %s' % cmd)
continue
return
def parse_cmdline():
return [], {}
# End of file
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
bc78baed56d1faf996c6ec825a93f0f64c9c943f | 8e18c91dae4787b53e1ff6b35dc04fa38aa374d3 | /Pautas Interrogaciones/Examen/Pregunta 2/P2a.py | ccb0956454e0a77b4a1a6193255f4836793b511d | [] | no_license | GbPoblete/syllabus | 4a0cb0a2d92fea04b7891c84efef678e4596fa08 | ca0e8c44d7c13c98ce4e4b99b4559daf99804cab | refs/heads/master | 2020-05-16T00:49:10.663749 | 2015-12-01T23:22:14 | 2015-12-01T23:22:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | __author__ = 'figarrido'
def _promedio(datos):
return sum(datos) / len(datos)
def _varianza(datos):
prom = _promedio(datos)
suma = 0
for i in datos:
suma += (i - prom)**2
return suma / len(datos)
class Estrella:
def __init__(self, clase, RA, DEC, id, observaciones=[]):
self.clase = clase
self.RA = RA
self.DEC = DEC
self.id = id
self.observaciones = observaciones
def get_brillos(self):
return [i.brillo for i in self.observaciones]
@property
def promedio(self):
brillos = self.get_brillos()
return _promedio(brillos)
@property
def varianza(self):
brillos = self.get_brillos()
return _varianza(brillos)
def agregar_observacion(self, observacion):
self.observaciones.append(observacion)
class Observacion(object):
def __init__(self, brillo, tiempo, error):
self.brillo = brillo
self.tiempo = tiempo
self.error = error
class Field:
def __init__(self, estrellas=[]):
self.estrellas = estrellas
def agregar_estrella(self, estrella):
self.estrella.append(estrella)
class Cielo:
def __init__(self, fields=[]):
self.fields = fields
def agregar_field(self, field):
self.fields.append(field)
| [
"lopezjuripatricio@gmail.com"
] | lopezjuripatricio@gmail.com |
9d6b2500746c6557a739357ecf53aed73bbf15d8 | 55a849e02a9a3819c72d67e0ef52cee2b5223db2 | /ftrace/parsers/sched_task_usage_ratio.py | e6f8c72c3122c2121fd387d2e76aaa87edd6c514 | [
"Apache-2.0"
] | permissive | Gracker/SystraceAnalysis | 08055bceea6018f6e1dd425d1976c1893bd9f945 | b27217e33a260614a9fe9ff3f8c3e470efdbd9a3 | refs/heads/master | 2023-02-18T19:34:26.071277 | 2021-01-21T01:51:37 | 2021-01-21T01:57:40 | 248,390,276 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | #!/usr/bin/python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors:
# Chuk Orakwue <chuk.orakwue@huawei.com>
import re
from ftrace.common import ParserError
from .register import register_parser
try:
from ftrace.third_party.cnamedtuple import namedtuple
except ImportError:
from collections import namedtuple
TRACEPOINT = 'sched_task_usage_ratio'
__all__ = [TRACEPOINT]
SchedTaskUsageRatioBase = namedtuple(TRACEPOINT,
[
'comm',
'pid',
'ratio'
]
)
class SchedTaskUsageRatio(SchedTaskUsageRatioBase):
"""
Tracked task cpu usage ratio [0..1023].
"""
__slots__ = ()
def __new__(cls, comm, pid, ratio):
pid = int(pid)
ratio = float(self.ratio)/1023.0
return super(cls, SchedTaskUsageRatio).__new__(
cls,
comm=comm,
pid=pid,
ratio=ratio,
)
sched_task_usage_ratio_pattern = re.compile(
r"""comm=(?P<comm>.*)\s+
pid=(?P<pid>\d+)\s+
ratio=(?P<ratio>\d+)\s+
""",
re.X|re.M
)
@register_parser
def sched_task_usage_ratio(payload):
    """Parser for `sched_task_usage_ratio` tracepoint.

    Returns a SchedTaskUsageRatio on a successful match, None when the
    payload does not match, and raises ParserError if construction fails.
    """
    try:
        match = re.match(sched_task_usage_ratio_pattern, payload)
        if match:
            match_group_dict = match.groupdict()
            return SchedTaskUsageRatio(**match_group_dict)
    except Exception as e:
        # Fix: `except Exception, e` is Python 2-only syntax (a
        # SyntaxError on Python 3) and `e.message` does not exist on
        # Python 3 exceptions; `as e` + str(e) works on 2.6+ and 3.x.
        raise ParserError(str(e))
| [
"dreamtale.jg@gmail.com"
] | dreamtale.jg@gmail.com |
d2791fd72bd25721bcc5818685206a2359cb9cb0 | 3f100a1002a1f8ed453c8b81a9b403444d77b4c6 | /assignment_2/assignment_2_final.py | d1cdbd0f6ced27c57032a5e8f8d15b90444bac53 | [] | no_license | Kimuda/Phillip_Python | c19c85a43c5a13760239e4e94c08436c99787ebf | 59d56a0d45839656eb15dbe288bdb0d18cb7df2b | refs/heads/master | 2016-09-09T22:19:02.347744 | 2015-05-01T10:56:49 | 2015-05-01T10:56:49 | 32,330,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,362 | py |
# Raw UniProt export pasted inline: a header row followed by one record
# per protein (Entry, Gene names, PDB cross-references, Length).
# NOTE(review): the parsing below splits on "\t", so the fields in this
# literal must really be tab-separated -- confirm the tabs survived
# copy/paste.
uniprot_text="""Entry	Gene names	Cross-reference (PDB)	Length
O95813	CER1 DAND4	267
Q8N907	DAND5 CER2 CKTSF1B3 GREM3 SP1	189
O60565	GREM1 CKTSF1B1 DAND2 DRM PIG2	184
P41271	NBL1 DAN DAND1	4X1J;	181
Q96S42	NODAL	4N1D;	347
Q15465	SHH	3HO5;3M1N;3MXW;	462"""
# Drop the header row; keep one raw line per protein.
uniprot_text_list=uniprot_text.split("\n")
protein_list=uniprot_text_list[1:]
#print(uniprot_text_list,protein_list)
# Split each record line into its tab-separated fields.
d=[]
newlist=[]
for i in protein_list:
    d=i.split("\t")
    newlist+=[d]
#print(newlist)
# Build list3: one dict per protein.  counter2 walks newlist backwards,
# so list3 holds the proteins in REVERSE file order.
# All values (including Length) remain strings here.
list3=[]
counter=0
counter2=len(newlist)-1
while counter<len(newlist):
    newlist2={}
    newlist2["Entry"]=newlist[counter2][0]
    newlist2["Gene_name"]=newlist[counter2][1]
    newlist2["Cross_ref_pdb"]=newlist[counter2][2]
    newlist2["Length"]=newlist[counter2][3]
    list3+=[newlist2]
    counter=counter+1
    counter2=counter2-1
#print(list3)
#----Question 1. Create a function that returns the protein ID of the shortest protein----
def ID_of_shortest_protein():
    """Print the Entry (protein ID) of the protein with the smallest Length."""
    # Bug fix: Length is stored as a string, so the original sorted
    # lexicographically (e.g. "1000" < "200"); convert to int so the
    # comparison is numeric.
    list_sorted_by_length = sorted(list3, key=lambda k: int(k['Length']))
    protein_with_shortest_length = list_sorted_by_length[0]
    print("The protein ID of the shortest protein is-", protein_with_shortest_length["Entry"])
ID_of_shortest_protein()
#----Question 2. Create a function that receives a gene name and returns the protein ID.----
def genesearch():
    """Interactively look up protein IDs by (partial) gene name.

    Loops until the user submits a blank line.  Matching is a plain
    substring test against the raw gene-name field, so even a single
    letter can match (see the GLITCHES note below).
    """
    query=(input("Enter a gene name to retrieve the protein ID or a blank line to exit: ")).upper()
    listofgenes=""
    while query!="":
        for gene in list3:
            # Substring test against e.g. "CER1 DAND4".
            listofgenes=gene["Gene_name"]
            if query in listofgenes:
                print("Protein ID for",query,"is",gene["Entry"])
        query=(input("To exit enter a blank line or Enter a gene name to continue: ")).upper()
genesearch()
#GLITCHES; even a single letter in query, returns a result, and when no match is found the user is not informed.
#----Question 3. Create a function that receives protein ID and returns the PDB IDs. If the protein doesn’t have PDBs reported, the function should return False.
def pdbqueryusingproteinID():
    """Interactively list the PDB IDs for a protein ID (prints "False" if none).

    Loops until the user submits a blank line.  Matching is a substring
    test against the Entry field (see the GLITCHES note below).
    """
    query=(input("Enter a protein ID to retrieve the protein PDB IDs or a blank line to exit: ")).upper()
    listofproteinIDs=""
    while query!="":
        for proteinID in list3:
            listofproteinIDs=proteinID["Entry"]
            if query in listofproteinIDs:
                if proteinID["Cross_ref_pdb"]!="":
                    # PDB field looks like "3HO5;3M1N;3MXW;", so the
                    # trailing ';' yields an empty fragment to skip.
                    commaseperatedpdbs=proteinID["Cross_ref_pdb"].split(";")
                    for i in commaseperatedpdbs:
                        if i!="":
                            print(i+(","),end="")
                    print()
                else:
                    print("False")
        query=(input("To exit enter a blank line or Enter a protein ID to continue: ")).upper()
pdbqueryusingproteinID()
#GLITCHES; even a single letter can return a result, and when no match is found the user is not informed.
#----Question 4. Create a function that prints the proteins IDs and the number of reported genes. The list should be sorted by the number of genes.
def proteinIDsnumberofgenes():
    """Print each protein ID with its gene-name count, ascending by count."""
    print("Protein ID\tNumber of genes")
    # Pair every protein ID with how many whitespace-separated gene
    # names it reports.
    gene_counts = [(record["Entry"], len(record["Gene_name"].split()))
                   for record in list3]
    # Stable sort keeps ties in list3 order.
    for protein_id, gene_total in sorted(gene_counts, key=lambda pair: pair[1]):
        print(protein_id, "\t\t", gene_total)
proteinIDsnumberofgenes()
#----Question 5. Create a function that prints a list of pairs of all the reported combinations of genes and PDBs
def gene_names_paired_with_pdbs():
    """Print every (gene name, PDB ID) combination, one pair per line."""
    print("Gene_name\tCross_ref_pdb")
    for record in list3:
        # split(";") leaves an empty fragment after the trailing ';'.
        for pdb_id in record["Cross_ref_pdb"].split(";"):
            if pdb_id != "":
                for gene in record["Gene_name"].split():
                    print(gene, '\t\t', pdb_id)
gene_names_paired_with_pdbs()
#GlITCHES; the tabulation, sometimes makes the results look misaligned when gene_names of varying word length are used (i tried it with a different batch of uniprot data)
| [
"pjkanywa@gmail.com"
] | pjkanywa@gmail.com |
0fa399848f39ede5587c2770483c24ccf4e954b1 | dd14fd0545093bc7fd3ff8b93e32180cec00e24d | /data_utils/load_dataset.py | fb31455fa3afab4b18465f204cfa5061c220c449 | [] | no_license | bigdatasciencegroup/PyTorch-GAN-Shop | ce38abf86185c6894ee46166bd41419260b026e5 | 95d4c3f8d836255cea162b8af38f810b5c638f2a | refs/heads/master | 2022-11-08T20:59:29.839442 | 2020-06-19T05:32:56 | 2020-06-19T05:32:56 | 273,416,695 | 2 | 0 | null | 2020-06-19T06:03:16 | 2020-06-19T06:03:16 | null | UTF-8 | Python | false | false | 4,837 | py | from torch.utils.data import Dataset
import os
import h5py as h5
import numpy as np
from scipy import io
import torch
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10, STL10
from torchvision.datasets import ImageFolder
class LoadDataset(Dataset):
    """Unified Dataset for CIFAR-10 / ImageNet / Tiny-ImageNet.

    Images come either from an HDF5 dump (`hdf5_path`) or from the
    torchvision / ImageFolder datasets on disk.  __getitem__ returns
    float CHW tensors scaled to [-1, 1].  When `consistency_reg` or
    `make_positive_aug` is set, a deterministically augmented copy
    (flip + translation derived from the sample index) is returned as a
    third element.
    """
    def __init__(self, dataset_name, data_path, train, download, resize_size, hdf5_path=None, consistency_reg=False, make_positive_aug=False):
        super(LoadDataset, self).__init__()
        self.dataset_name = dataset_name          # 'cifar10' | 'imagenet' | 'tiny_imagenet'
        # NOTE(review): data_path is stored but never used below; the
        # loaders hard-code their roots under 'data/' -- confirm intended.
        self.data_path = data_path
        self.train = train                        # train split vs val/test split
        self.download = download                  # passed through to torchvision
        self.resize_size = resize_size            # target H=W for ImageNet resize
        self.hdf5_path = hdf5_path                # optional pre-dumped HDF5 file
        self.consistency_reg = consistency_reg
        self.make_positive_aug = make_positive_aug
        self.load_dataset()
    def load_dataset(self):
        """Populate self.data (and self.labels for HDF5) for the chosen dataset."""
        if self.dataset_name == 'cifar10':
            if self.hdf5_path is not None:
                # HDF5 dump: whole dataset loaded into memory as arrays.
                print('Loading %s into memory...' % self.hdf5_path)
                with h5.File(self.hdf5_path, 'r') as f:
                    self.data = f['imgs'][:]
                    self.labels = f['labels'][:]
            else:
                # torchvision dataset returning (PIL image, label) pairs.
                self.data = CIFAR10(root=os.path.join('data', self.dataset_name),
                                    train=self.train,
                                    download=self.download)
        elif self.dataset_name == 'imagenet':
            if self.hdf5_path is not None:
                print('Loading %s into memory...' % self.hdf5_path)
                with h5.File(self.hdf5_path, 'r') as f:
                    self.data = f['imgs'][:]
                    self.labels = f['labels'][:]
            else:
                mode = 'train' if self.train == True else 'val'
                root = os.path.join('data','ILSVRC2012', mode)
                self.data = ImageFolder(root=root)
        elif self.dataset_name == "tiny_imagenet":
            if self.hdf5_path is not None:
                print('Loading %s into memory...' % self.hdf5_path)
                with h5.File(self.hdf5_path, 'r') as f:
                    self.data = f['imgs'][:]
                    self.labels = f['labels'][:]
            else:
                mode = 'train' if self.train == True else 'val'
                root = os.path.join('data','TINY_ILSVRC2012', mode)
                self.data = ImageFolder(root=root)
        else:
            raise NotImplementedError
    def __len__(self):
        # HDF5 data is a plain array; the torchvision datasets define len().
        if self.hdf5_path is not None:
            num_dataset = self.data.shape[0]
        else:
            num_dataset = len(self.data)
        return num_dataset
    @staticmethod
    def _decompose_index(index):
        """Map a sample index onto one of 18 fixed augmentations.

        18 = 2 flip choices x 3 horizontal shifts x 3 vertical shifts.
        """
        index = index % 18
        flip_index = index // 9
        index = index % 9
        tx_index = index // 3
        index = index % 3
        ty_index = index
        return flip_index, tx_index, ty_index
    def __getitem__(self, index):
        if self.hdf5_path is not None:
            # HDF5 images are assumed already CHW uint8 -- TODO confirm
            # against the dump script; scale to [-1, 1].
            img = np.asarray((self.data[index]-127.5)/127.5, np.float32)
            label = int(self.labels[index])
        elif self.hdf5_path is None and self.dataset_name == 'imagenet':
            # PIL image: center-crop to square, resize, HWC -> CHW, [-1, 1].
            img, label = self.data[index]
            size = (min(img.size), min(img.size))
            i = (0 if size[0] == img.size[0]
                 else (img.size[0] - size[0]) // 2)
            j = (0 if size[1] == img.size[1]
                 else (img.size[1] - size[1]) // 2)
            img = img.crop((i, j, i + size[0], j + size[1]))
            img = np.asarray(img.resize((self.resize_size, self.resize_size)), np.float32)
            img = np.transpose((img-127.5)/127.5, (2,0,1))
        else:
            # CIFAR-10 / Tiny-ImageNet PIL image, already the target size.
            img, label = self.data[index]
            img = np.asarray(img, np.float32)
            img = np.transpose((img-127.5)/127.5, (2,0,1))
        if self.consistency_reg or self.make_positive_aug:
            # Deterministic augmentation: optional horizontal flip, then a
            # reflect-padded translation by 0/1/2 eighths of the image.
            flip_index, tx_index, ty_index = self._decompose_index(index)
            img_aug = np.copy(img)
            c,h,w = img_aug.shape
            if flip_index == 0:
                img_aug = img_aug[:,:,::-1]
            pad_h = int(h//8)
            pad_w = int(w//8)
            # np.pad copies, so the flipped (negative-stride) view becomes
            # contiguous before torch.from_numpy below.
            img_aug = np.pad(img_aug, [(0, 0), (pad_h, pad_h), (pad_w, pad_w)], mode='reflect')
            if ty_index == 0:
                i = 0
            elif ty_index == 1:
                i = pad_h
            else:
                i = 2*pad_h
            if tx_index == 0:
                j = 0
            elif tx_index == 1:
                j = pad_w
            else:
                j = 2*pad_w
            img_aug = img_aug[:, i:i+h, j:j+w]
            img = torch.from_numpy(img)
            img_aug = torch.from_numpy(img_aug)
            return img, label, img_aug
        img = torch.from_numpy(img)
        return img, label
| [
"noreply@github.com"
] | bigdatasciencegroup.noreply@github.com |
e0e2a417665d20c8069db0592caec6107e06bf18 | 328afd873e3e4fe213c0fb4ce6621cb1a450f33d | /W3School/conditional_statement_loops/s.py | 46f9fed08f807249747610045fd405d9e9df72d3 | [] | no_license | TorpidCoder/Python | 810371d1bf33c137c025344b8d736044bea0e9f5 | 9c46e1de1a2926e872eee570e6d49f07dd533956 | refs/heads/master | 2021-07-04T08:21:43.950665 | 2020-08-19T18:14:09 | 2020-08-19T18:14:09 | 148,430,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | row=15
# Render a block-letter "S" out of "o" characters on a 15-row grid.
# (row=15 was assigned on the line above in the original file.)
row = 15
col = 18
full_bar = "o" * (col - 1)           # rows 1-3, 7-9, 13-15: full-width bar
left_stub = "o" * 4                  # rows 4-6: short bar on the left
right_stub = " " * 13 + "o" * 4      # rows 10-12: short bar pushed right
lines = []
for r in range(1, row + 1):
    if r <= 3 or 7 <= r <= 9 or 13 <= r <= 15:
        lines.append(full_bar)
    elif 4 <= r <= 6:
        lines.append(left_stub)
    else:
        lines.append(right_stub)
# Every row, including the last, keeps its trailing newline, matching
# the original string-accumulation behaviour.
result_str = "".join(line + "\n" for line in lines)
print(result_str)
| [
"sahilexemplary@gmail.com"
] | sahilexemplary@gmail.com |
81e30fa5ec864972411e2d70f75db454d407a91c | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2020/November/Longest Mountain in Array.py | 027f86eb75dd45b219ebf43ad294d9e1baa32db8 | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | class Solution:
def longestMountain(self, A):
if len(A) < 3:
return 0
peak = False
max_length = curr_length = 1
for i in range(1, len(A)):
if A[i] > A[i - 1]:
if peak:
max_length = max(max_length, curr_length)
curr_length = 2
peak = False
else:
curr_length += 1
elif A[i] < A[i - 1]:
if curr_length == 1:
continue
if not peak:
peak = True
curr_length += 1
else:
if curr_length == 1:
continue
if not peak:
curr_length = 1
else:
max_length = max(curr_length, max_length)
curr_length = 1
if peak:
max_length = max(max_length, curr_length)
if max_length < 3:
return 0
else:
return max_length
| [
"mariandanaila01@gmail.com"
] | mariandanaila01@gmail.com |
010db5b2f3c269b146f0cb527d435487f4a08c5e | c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6 | /keras/keras35_4_load_model.py | b8b3c7da89651bd7a87c1f5e27aeb77c05f64640 | [] | no_license | sswwd95/Study | caf45bc3c8c4301260aaac6608042e53e60210b6 | 3c189090c76a68fb827cf8d6807ee1a5195d2b8b | refs/heads/master | 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import numpy as np
a = np.array(range(1,11))
size = 5
def split_x(seq, size):
    """Slide a window of length `size` over `seq` with stride 1.

    Returns an ndarray of shape (len(seq) - size + 1, size); empty when
    size > len(seq).
    """
    # Fix: the original printed type(aaa) on every iteration -- debug
    # noise removed.  The append loop becomes a comprehension.
    windows = [seq[i:i + size] for i in range(len(seq) - size + 1)]
    return np.array(windows)
# Window the 1..10 sequence into (6, 5) samples; first 4 columns are the
# inputs, the last column is the target.
dataset = split_x(a, size)
print("=======================")
print(dataset)
x = dataset[:,:4]
y = dataset[:,4]
print(x.shape)
print(y.shape)
# LSTM-style input: (samples, timesteps, features=1).
x = x.reshape(x.shape[0],x.shape[1],1)
from tensorflow.keras.models import load_model
model = load_model("../data/h5/save_keras35.h5")
'''
###############테스트 #####################
from tensorflow.keras.layers import Dense
model.add(Dense(5)) # summary 이름 : dense
model.add(Dense(1)) # summary 이름 : dense_1
# 에러 뜨는 이유 : 이름 중복
###########################################
'''
from tensorflow.keras.layers import Dense
# Explicit names avoid the duplicate-layer-name error shown in the
# commented-out experiment above.
model.add(Dense(10, name = 'kingkeras1'))
model.add(Dense(1, name = 'kingkeras2'))
# New layers can be appended on top of the loaded model.
# dense_3 (Dense)              (None, 110)               12210
# _________________________________________________________________
# dense_4 (Dense)              (None, 1)                 111
# _________________________________________________________________
# kingkeras1 (Dense)           (None, 10)                20
# _________________________________________________________________
# kingkeras2 (Dense)           (None, 1)                 11
# =================================================================
model.summary()
# 3. Compile, train
model.compile(loss = 'mse', optimizer='adam')
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss',patience=20, mode='min')
model.fit(x, y, batch_size=1, callbacks=[early_stopping],epochs=1000)
# 4. Evaluate, predict
loss = model.evaluate(x,y, batch_size=1)
print('loss : ',loss)
# Predict the value following 7,8,9,10 (expected ~11).
x_pred = np.array([7,8,9,10])
x_pred = x_pred.reshape(1,4,1)
result = model.predict(x_pred)
print('result : ',result)
# loss :  0.0750507190823555
# result :  [[10.734548]]
| [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
e3d34ccc3b810f8a408faaffe681a75d70af1e98 | 213b8cab639c7d45cbf6a4fd46eb23e379d9d374 | /python/curses_examples/tutorial/05_center_text.py | a255e9f843510cbe683534ba35f7dd302d8b021c | [] | no_license | DevDungeon/Cookbook | f85b04b690ea0a202ddfaeda6460b6ba5797cb70 | a49a1c77f2b89dc303fa9f2563bb3c19777e4c6c | refs/heads/master | 2023-05-12T06:58:50.606019 | 2022-03-30T04:48:16 | 2022-03-30T04:48:16 | 34,371,982 | 307 | 94 | null | 2023-05-03T22:53:45 | 2015-04-22T06:02:53 | HTML | UTF-8 | Python | false | false | 651 | py | # Draw text to center of screen
import curses
import time
# Take over the terminal; must be paired with curses.endwin() below.
screen = curses.initscr()
# Current terminal dimensions (rows, columns).
num_rows, num_cols = screen.getmaxyx()
def print_center(message):
    """Draw *message* centered on the screen and refresh the display."""
    # Vertical midpoint of the terminal.
    mid_row = int(num_rows / 2)
    # Shift left from the horizontal midpoint by half the text length so
    # the message straddles the center column.
    mid_col = int(num_cols / 2)
    start_col = mid_col - int(len(message) / 2)
    screen.addstr(mid_row, start_col, message)
    screen.refresh()
print_center("Hello from the center!")
# Wait and cleanup
time.sleep(3)
# Restore the terminal to its normal state.
curses.endwin()
| [
"nanodano@devdungeon.com"
] | nanodano@devdungeon.com |
1e4ec58320505450428e6b302e9b75e278535852 | cbd2f187fb60939c49a00f154570f53d4bb19910 | /rxpy/src/rxpy/engine/parallel/beam/engine.py | 3fab110bdf23a65bf2b9be38f20a95c3820dac6c | [] | no_license | andrewcooke/rxpy | 3c4443f3ccba479d936f0e49d7d009a64dfc89b3 | e7f330dc8c5fa49392a1a018ceda6312270e9a93 | refs/heads/master | 2021-01-10T13:46:37.129155 | 2011-06-03T23:29:58 | 2011-06-03T23:29:58 | 52,740,676 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,500 | py |
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is RXPY (http://www.acooke.org/rxpy)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
from rxpy.engine.parallel.base import ParallelEngine
from rxpy.engine.parallel.beam.support import States
class BeamEngine(ParallelEngine):
    '''
    Restrict the total number of states under consideration, doubling on
    failure until we either match, or fail with no discards.
    '''
    def __init__(self, parser_state, graph, hash_state=False,
                 beam_start=1, beam_scale=2):
        super(BeamEngine, self).__init__(parser_state, graph,
                                         hash_state=hash_state)
        # Initial beam width and the factor by which it is widened on
        # each overflow retry (passed to the States container).
        self.__beam_start = beam_start
        self.__beam_scale = beam_scale
    def _new_states(self, initial):
        '''Create a beam-limited States container for one search.'''
        return States(initial, self._hash_state,
                      beam_start=self.__beam_start, beam_scale=self.__beam_scale)
    def _outer_loop(self, states, search, new_state):
        '''
        Run the parent outer loop; if the beam overflowed (states were
        discarded) without reaching a final state, widen the beam, rewind
        the input to the starting offset and retry.  Stops once a final
        state is found or a pass completes with no discards.
        '''
        initial_offset = self._offset
        growing = True
        while not states.final_state and growing:
            super(BeamEngine, self)._outer_loop(states, search, new_state)
            if not states.final_state and states.overflowed:
                growing = True
                states.grow()
                # Rewind so the retry re-reads the input from the start.
                self._set_offset(initial_offset)
            else:
                growing = False
class HashingBeamEngine(BeamEngine):
    '''
    A BeamEngine whose only difference is that state hashing is enabled
    by default.
    '''
    def __init__(self, parser_state, graph, hash_state=True,
                 beam_start=1, beam_scale=2):
        super(HashingBeamEngine, self).__init__(
            parser_state, graph,
            hash_state=hash_state,
            beam_start=beam_start,
            beam_scale=beam_scale)
| [
"andrew@acooke.org"
] | andrew@acooke.org |
d6d449b80e62d56b88fbfb2e7e3bc89530bb83f6 | 1aa5216e8ed1fc53999637a46c6af0716a8a8cdf | /disk_snapshot_service/business_logic/locker_manager.py | 3668ebcb7834e64b93ca17fbb9226de6467e3525 | [] | no_license | ShawnYi5/DiskInProgress | c3a47fd5c52b1efeeaeee5b0de56626077a947a4 | b13d0cdcd0ab08b6dd5b106cda739d7c8ac9e41a | refs/heads/master | 2020-05-17T06:19:40.579905 | 2019-08-22T03:29:31 | 2019-08-22T03:29:31 | 183,555,060 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import threading
class LockWithTrace(object):
    """A mutex that remembers *why* it is currently held.

    Usage::

        with locker.acquire(trace):
            ...  # lock held; locker.current_trace == trace

    `acquire` blocks until the lock is taken, records the caller-supplied
    trace object and returns the locker itself so it can drive a `with`
    block; leaving the block clears the trace and releases the lock.
    """
    def __init__(self):
        self._locker = threading.Lock()
        self._current_trace = None
    def acquire(self, trace):
        """Block until the lock is held, record `trace`, return self."""
        self._locker.acquire()
        self._current_trace = trace
        return self
    def release(self):
        """Clear the recorded trace and release the lock."""
        self._current_trace = None
        self._locker.release()
    def __enter__(self):
        # Fix: the lock is already held (taken by acquire()); returning
        # self instead of the implicit None lets callers write
        # `with locker.acquire(trace) as held:`.  Backward compatible.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()
    @property
    def current_trace(self):
        # NOTE(review): read without holding the lock, so a racing
        # release() can make this momentarily stale -- confirm callers
        # use it only for diagnostics.
        return self._current_trace
| [
"yi.shihong@aliyun.com"
] | yi.shihong@aliyun.com |
576462d84b48c93ce54baefe84067f27897cbe9b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_neutral.py | 49c9babdf333c189682df5fb0fa4e46a59f17ec3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py |
#calss header
class _NEUTRAL():
def __init__(self,):
self.name = "NEUTRAL"
self.definitions = [u'not saying or doing anything that would encourage or help any of the groups involved in an argument or war: ', u'A neutral ground or field is a sports stadium that does not belong to either of the two teams taking part in a competition or game: ', u'having features or characteristics that are not easily noticed: ', u'A neutral chemical substance is neither an acid nor an alkali: ', u'A neutral object in physics has no electrical charge: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7847b19733a6494cd75d660ff936f2f6c1b72c56 | 17658f39e410b6a179f4640e49ff86d86a6541ba | /taxonomic-annotation/reconcile.py | f76cf0b4962f05e96ca6ebeb9d0de3c6da90d91f | [
"MIT"
] | permissive | luispedro/Coelho2021_GMGCv1 | f91f82d3c78cf0773b69ce39cd0143d872933716 | caa9b5e156f5f74e147fde371e36ea259fee1407 | refs/heads/main | 2023-04-16T05:49:27.375227 | 2022-02-22T14:38:21 | 2022-02-22T14:38:21 | 368,166,901 | 14 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | import pandas as pd
from taxonomic import ncbi
# NCBI taxonomy helper: exposes .ancestors, .names and .ranks lookups
# keyed by string taxids.
n = ncbi.NCBI()
# Per-gene predicted taxonomy (taxid + rank), species-level matches, and
# superkingdom annotations, all indexed by gene name.
taxonomic = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.taxonomic.map', index_col=0, engine='c')
species = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.species.match.map', header=None, usecols=[1,2], index_col=0, squeeze=True, names=['gene', 'TaxID'], engine='c')
superkingdom = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.kingdom.annotation', header=None, names=['gene', 'superkingdom'], index_col=0, squeeze=True, engine='c')
# NOTE(review): this first `taxid` assignment is dead -- it is
# overwritten by the species-rank-only version two lines below.
taxid = taxonomic['NCBI TaxID'].to_dict()
d_superkingdom = superkingdom.to_dict()
d_species = species.to_dict()
d_predicted_taxid = taxonomic['NCBI TaxID'].to_dict()
# Restrict to genes whose predicted rank is already 'species'.
taxid = taxonomic['NCBI TaxID'][taxonomic.Rank == 'species'].to_dict()
# For each species-rank gene, look up its ancestor taxid (falling back
# to '1', the NCBI root), with progress output every 10M genes.
gs = {}
for g,t in taxid.items():
    gs[g] = n.ancestors.get(str(t), '1')
    if len(gs) % 10_000_000 == 0:
        print(len(gs) // 1_000_000)
no_match = {'None', 'no_match'}
prok = {'Bacteria', 'Archaea'}
# Merge, in priority order: species match > non-prokaryote superkingdom
# label > species-level ancestor > predicted taxid > root (1).
final = d_species.copy()
for g,sk in d_superkingdom.items():
    if sk in no_match:
        continue
    if g in d_species:
        continue
    elif sk not in prok:
        final[g] = sk
    elif g in gs:
        final[g] = gs[g]
    else:
        final[g] = d_predicted_taxid.get(g, 1)
# Any gene with a prediction but no annotation falls back to the root.
for g,p in d_predicted_taxid.items():
    if g not in final:
        final[g] = 1
final = pd.Series(final)
# Resolve taxids to human-readable names and ranks and write the table.
finalstr = final.map(str)
finalnames = finalstr.map(n.names)
finalranks = finalstr.map(n.ranks)
finalframe = pd.DataFrame({'taxid' : finalstr, 'rank' : finalranks, 'name': finalnames})
finalframe.to_csv('taxonomic.final.tsv', sep='\t')
| [
"luis@luispedro.org"
] | luis@luispedro.org |
a873f56004bb76bdee75e786e67d6e1897053095 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/FastSCNN/segmentron/models/unet.py | f6425505373e615f46bcd51c550b3f3410fe3164 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,555 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F

from .segbase import SegBaseModel
from .model_zoo import MODEL_REGISTRY
from ..modules import _FCNHead
from ..config import cfg
__all__ = ['UNet']
@MODEL_REGISTRY.register()
class UNet(SegBaseModel):
    """Classic U-Net encoder/decoder without a pretrained backbone.

    The encoder (inc / down1..down4) is defined inline; the decoder lives
    in _UNetHead.  forward() returns a 1-tuple of logits upsampled back
    to the input resolution (the framework's multi-output convention).
    """
    def __init__(self):
        # need_backbone=False: no pretrained backbone, encoder built here.
        super(UNet, self).__init__(need_backbone=False)
        self.inc = DoubleConv(3, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 512)
        # self.nclass is presumably set by SegBaseModel from cfg -- TODO confirm.
        self.head = _UNetHead(self.nclass)
        # Submodule names treated as the "decoder" group; self.aux comes
        # from SegBaseModel.
        self.__setattr__('decoder', ['head', 'auxlayer'] if self.aux else ['head'])
    def forward(self, x):
        size = x.size()[2:]
        # Encoder: progressively downsampled feature maps x1..x5.
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        outputs = list()
        # Decoder fuses all encoder scales, then upsample to input size.
        x = self.head(x1, x2, x3, x4, x5)
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)
        return tuple(outputs)
class _UNetHead(nn.Module):
    """U-Net decoder: four up-sampling/skip-fusion stages plus a 1x1 classifier."""
    def __init__(self, nclass, norm_layer=nn.BatchNorm2d):
        super(_UNetHead, self).__init__()
        use_bilinear = True
        self.up1 = Up(1024, 256, use_bilinear)
        self.up2 = Up(512, 128, use_bilinear)
        self.up3 = Up(256, 64, use_bilinear)
        self.up4 = Up(128, 64, use_bilinear)
        self.outc = OutConv(64, nclass)
    def forward(self, x1, x2, x3, x4, x5):
        # Walk back up the pyramid, fusing each encoder skip in turn.
        fused = self.up1(x5, x4)
        fused = self.up2(fused, x3)
        fused = self.up3(fused, x2)
        fused = self.up4(fused, x1)
        return self.outc(fused)
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2, preserving spatial size."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Built as a list first; the Sequential layout (and the attribute
        # name `double_conv`) matches the original state_dict keys.
        stages = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)
    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Downscaling with maxpool then double conv"""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        # 2x2 max-pool halves H and W; DoubleConv then refines channels.
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels)
        )
    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Upscaling then double conv: upsample x1, pad it to x2's spatial
    size, concatenate along channels and refine with DoubleConv."""
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # if bilinear, use the normal convolutions to reduce the number of channels
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)
    def forward(self, x1, x2):
        x1 = self.up(x1)
        # input is CHW
        # Fix: use plain ints for the size differences -- the original
        # wrapped them in 1-element tensors via `torch.tensor`, which was
        # unnecessary (F.pad expects ints) and depended on a `torch`
        # module name the file never actually imported.
        diffY = x2.size(2) - x1.size(2)
        diffX = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
class OutConv(nn.Module):
    """Final 1x1 convolution mapping decoder features to per-class logits."""
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        # Attribute name `conv` kept so checkpoint state_dict keys match.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
    def forward(self, x):
        return self.conv(x)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
c59757992997a99fd2580f5c29f1c4843bfa9307 | e4ee9f2ca60b60ea9fa1b05c982594a2c1b10484 | /day30 课堂笔记以及代码/day30/验证客户端合法性/server.py | 2e9c5a0e7e6055064d96994f662687f7d8d4dcdc | [] | no_license | tianshang486/Pythonlaonanhai | 100df2cc437aad1ee1baf45bdfc4500b1302092b | 2a5b46986f5ca684b2ae350596e293db54e1e2f4 | refs/heads/master | 2022-09-19T02:16:56.972160 | 2020-06-04T09:24:30 | 2020-06-04T09:24:30 | 269,314,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | import os
import socket
import hashlib
# Pre-shared secret known to legitimate clients.
secret_key = b'alex_sb'
sk = socket.socket()
sk.bind(('127.0.0.1',9001))
sk.listen()
conn,addr = sk.accept()
# Create a random challenge (nonce)
rand = os.urandom(32)
# Send the random challenge to the client
conn.send(rand)
# Compute the expected digest from secret key + challenge.
# NOTE(review): sha1(secret || nonce) is vulnerable to length-extension;
# hmac.new(secret_key, rand, 'sha1') would be the robust construction --
# confirm before changing the wire protocol.
sha = hashlib.sha1(secret_key)
sha.update(rand)
res = sha.hexdigest()
# Wait for the client's digest of the same challenge
res_client = conn.recv(1024).decode('utf-8')
# Compare the two digests
if res_client == res:
    print('是合法的客户端')
    # If they match, this is a legitimate client
    # and the conversation can continue.
    conn.send(b'hello')
else:
    conn.close()
    # On mismatch, close the connection immediately.
"tianshang486@.com"
] | tianshang486@.com |
0007bb4b25661e5bdbf01bf24eb7cc44d2721b2d | b11899d2edfa17f88da4f45cc828f092125091a0 | /udacity/wsgi.py | 79b9e9c0657245bbc91b43c1df6900d525977d0f | [] | no_license | chemalle/udacity | 15726c03a108dc0e68952027e63b5689870cc5b0 | 69ee8e5acda4776df1f46c922b30ec799f5589af | refs/heads/master | 2020-03-09T14:11:32.381362 | 2018-04-09T20:17:43 | 2018-04-09T20:17:43 | 128,828,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | """
WSGI config for udacity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module unless the environment
# already provides one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "udacity.settings")
# Module-level WSGI callable that application servers import and serve.
application = get_wsgi_application()
| [
"chemalle@econobilidade.com"
] | chemalle@econobilidade.com |
84cd1164a4f1b0551e720d90ed604c3b8a83c45b | d83fde3c891f44014f5339572dc72ebf62c38663 | /_bin/google-cloud-sdk/.install/.backup/lib/surface/access_context_manager/levels/__init__.py | fd037667cf0efbce2318d315932364a60be5025f | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | gyaresu/dotfiles | 047cc3ca70f4b405ba272856c69ee491a79d2ebe | e5e533b3a081b42e9492b228f308f6833b670cfe | refs/heads/master | 2022-11-24T01:12:49.435037 | 2022-11-01T16:58:13 | 2022-11-01T16:58:13 | 17,139,657 | 1 | 1 | null | 2020-07-25T14:11:43 | 2014-02-24T14:59:59 | Python | UTF-8 | Python | false | false | 1,021 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for the Access Context Manager levels CLI."""
from __future__ import absolute_import
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class AccessContextManager(base.Group):
"""Manage Access Context Manager levels.
An access level is a classification of requests based on raw attributes of
that request (e.g. IP address, device identity, time of day, etc.).
"""
| [
"me@gareth.codes"
] | me@gareth.codes |
71b112d6742fd0c9701974a2bddaacf7c0f5fc91 | dde1cf596cf5969812ecda999828baa9c73e788d | /isi_sdk_8_1_1/api/sync_policies_api.py | b89f81af89e7ae2f788de9d37722a0e1d4e5d839 | [] | no_license | dctalbot/isilon_sdk_python3.7 | bea22c91096d80952c932d6bf406b433af7f8e21 | 4d9936cf4b9e6acbc76548167b955a7ba8e9418d | refs/heads/master | 2020-04-25T20:56:45.523351 | 2019-02-28T19:32:11 | 2019-02-28T19:32:11 | 173,065,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,319 | py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_1_1.api_client import ApiClient
class SyncPoliciesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_policy_reset_item(self, policy_reset_item, policy, **kwargs): # noqa: E501
"""create_policy_reset_item # noqa: E501
Reset a SyncIQ policy incremental state and force a full sync/copy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_policy_reset_item(policy_reset_item, policy, async=True)
>>> result = thread.get()
:param async bool
:param Empty policy_reset_item: (required)
:param str policy: (required)
:return: CreateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_policy_reset_item_with_http_info(policy_reset_item, policy, **kwargs) # noqa: E501
else:
(data) = self.create_policy_reset_item_with_http_info(policy_reset_item, policy, **kwargs) # noqa: E501
return data
def create_policy_reset_item_with_http_info(self, policy_reset_item, policy, **kwargs): # noqa: E501
"""create_policy_reset_item # noqa: E501
Reset a SyncIQ policy incremental state and force a full sync/copy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_policy_reset_item_with_http_info(policy_reset_item, policy, async=True)
>>> result = thread.get()
:param async bool
:param Empty policy_reset_item: (required)
:param str policy: (required)
:return: CreateResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_reset_item', 'policy'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_policy_reset_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_reset_item' is set
if ('policy_reset_item' not in params or
params['policy_reset_item'] is None):
raise ValueError("Missing the required parameter `policy_reset_item` when calling `create_policy_reset_item`") # noqa: E501
# verify the required parameter 'policy' is set
if ('policy' not in params or
params['policy'] is None):
raise ValueError("Missing the required parameter `policy` when calling `create_policy_reset_item`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy' in params:
path_params['Policy'] = params['policy'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'policy_reset_item' in params:
body_params = params['policy_reset_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/sync/policies/{Policy}/reset', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"dctalbot@umich.edu"
] | dctalbot@umich.edu |
a720749f0cff19ebb480f74da0728b668a147c1e | 276e15e7426b97ae550e45f2f7ffb6c3acc3ce07 | /company/meituan/1.py | 2f23806abc88bfd559908d4e1caca1b0600cb164 | [] | no_license | JDer-liuodngkai/LeetCode | 1115b946a1495622b1a5905257a8c92523022b8b | 4ca0ec2ab9510b12b7e8c65af52dee719f099ea6 | refs/heads/master | 2023-03-25T03:43:16.630977 | 2020-11-13T00:44:01 | 2020-11-13T00:44:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | """
每天最多 n 个,各有1个正整数重量
已做好 m 个
买 最重 最轻 a,b; 保证 a/b 大小关系
剩余 n - m 在烤
"""
# 不保证 a/b 大小关系
# 1 ≤ n,m,a,b ≤ 1000 , m≤n , 蛋糕重量不会超过1000
def cake():
vmax, vmin = max(arr), min(arr)
tmax, tmin = max(a, b), min(a, b)
# 已做出的蛋糕 不满足要求
if vmax > tmax or vmin < tmin:
return 'NO'
# 仍在区间内
remain = n - m
if remain == 0:
if vmax == tmax and vmin == tmin: # 比如两个都相等
return 'YES'
else:
return 'NO'
elif remain == 1:
if vmax == tmax or vmin == tmin: # 只要有1个已经相等 即可
return 'YES'
else:
return 'NO'
else: # 仍在区间内,并且还有两个能做
return 'YES'
while True:
n, m, a, b = list(map(int, input().split()))
arr = list(map(int, input().split()))
print(cake())
| [
"shuaixie@zju.edu.cn"
] | shuaixie@zju.edu.cn |
1c15f3b8d623086176e436fd2f27d05307235689 | caed98915a93639e0a56b8296c16e96c7d9a15ab | /Walmart Labs/Container With Most Water.py | fa9f0a5f15d485592776bc34920e9a6c3966b44c | [] | no_license | PiyushChandra17/365-Days-Of-LeetCode | 0647787ec7e8f1baf10b6bfc687bba06f635838c | 7e9e9d146423ca2c5b1c6a3831f21dd85fa376d5 | refs/heads/main | 2023-02-13T10:41:36.110303 | 2021-01-17T11:58:51 | 2021-01-17T11:58:51 | 319,974,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | class Solution:
def maxArea(self, height: List[int]) -> int:
i,j = 0,len(height)-1
water = 0
while i < j:
water = max(water,(j-i)*min(height[i],height[j]))
if height[i] < height[j]:
i += 1
else:
j -= 1
return water | [
"noreply@github.com"
] | PiyushChandra17.noreply@github.com |
7221f4e4fe00f96cc27131f8659619b01f6e8124 | 84bdc0fd6aaaac7c519866fef855be8eae88a80f | /0x07-python-classes/3-square.py | 1c50d5d34600f02b131a03adfde770787a230345 | [] | no_license | KatyaKalache/holbertonschool-higher_level_programming | b74ca3e3c32ded6f54a40748775d0d4475e32409 | e746a41ccb3f268c9d6d4578b80a0b9e7cf7d067 | refs/heads/master | 2021-01-20T02:29:04.972257 | 2017-10-11T06:24:17 | 2017-10-11T06:24:17 | 89,413,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | #!/usr/bin/python3
class Square:
def __init__(self, size=0):
if not isinstance(size, int):
raise TypeError("size must be an integer")
if (size < 0):
raise ValueError("size must be >= 0")
self.__size = size
def area(self):
return self.__size ** 2
| [
"katya@kalache.fr"
] | katya@kalache.fr |
f3ae81f967741b44a1451c0540bfd316e877f45c | 9804b20e9bbd2b4ac405700b920b93fb0b4394c0 | /server/plugins/admin.py | 2d1770caa66eb0d80b9d35ee8f10c5ece83289ef | [] | no_license | jonathanverner/brython-misc | ba5c53989159fdf1684cc956e51e6a22728ca75b | b9d6d243000aa0c4dc587fbcd0e6cf3e7359a9fe | refs/heads/master | 2020-04-06T07:02:30.362571 | 2016-08-20T11:28:22 | 2016-08-20T11:28:22 | 58,376,490 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from ..lib.tornado import RPCService, export
from tornado.gen import coroutine
class AdminService(RPCService):
SERVICE_NAME='admin'
@coroutine
@export
def persists_storage(self):
yield self._api.store.persist()
services = [AdminService] | [
"jonathan.verner@matfyz.cz"
] | jonathan.verner@matfyz.cz |
1e3830f608817ee5f8ae218d7fe951a627fe886e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq2360.py | e567cdcc03d3850100d2ad44ef059ff6d601d9e8 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=36
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[2])) # number=18
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=33
c.append(cirq.Z.on(input_qubit[3])) # number=34
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=21
c.append(cirq.H.on(input_qubit[3])) # number=22
c.append(cirq.X.on(input_qubit[3])) # number=13
c.append(cirq.H.on(input_qubit[3])) # number=23
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=24
c.append(cirq.H.on(input_qubit[3])) # number=25
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.Y.on(input_qubit[1])) # number=26
c.append(cirq.Y.on(input_qubit[1])) # number=27
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=29
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=30
c.append(cirq.X.on(input_qubit[0])) # number=31
c.append(cirq.X.on(input_qubit[0])) # number=32
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2360.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
5bdfd26ec6382992c81f1083de5d813e77df1989 | 41710e9133d660739f8f9f17040a2a8a6082e9fb | /python/aa_modules/argsparse/mutually_exclusive1.py | 922f775acf7b00de572185f03d88335e0519a460 | [] | no_license | hanjiangxue007/Programming | 591678150e2e300051fdeaf09124d3893076d3a9 | 7a545ef2300b004497f30d27d1f2aaa032e26af5 | refs/heads/master | 2020-06-29T18:50:27.776557 | 2016-10-27T18:31:39 | 2016-10-27T18:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : Oct-15-2016 Sat
# Last update :
#
# Ref: https://docs.python.org/3.5/howto/argparse.html
#
# Imports
import argparse
parser = argparse.ArgumentParser(description="calculate X to the power of Y")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("x", type=int, help="the base")
parser.add_argument("y", type=int, help="the exponent")
args = parser.parse_args()
answer = args.x**args.y
if args.quiet:
print(answer)
elif args.verbose:
print("{} to the power {} equals {}".format(args.x, args.y, answer))
else:
print("{}^{} == {}".format(args.x, args.y, answer))
| [
"bhishantryphysics@gmail.com"
] | bhishantryphysics@gmail.com |
92aea1fc2467f5278d784d1a69ffb5aecdbbc629 | 1858001ecc913ad5270f12b2e69e46eead3c7970 | /awx/main/tests/functional/models/test_job_options.py | c601413a5d8f46e7ff479e88b5d1b8d2466d3b78 | [
"Apache-2.0"
] | permissive | mabashian/awx | e3274046cabe3e539bf842f1f8ee2409efb28677 | 904cb4af34141ca0cfec6f597eb8016e575a670e | refs/heads/devel | 2023-09-04T04:30:31.256861 | 2017-09-22T17:39:24 | 2017-09-22T17:39:24 | 104,504,771 | 0 | 1 | Apache-2.0 | 2021-03-22T20:41:15 | 2017-09-22T18:12:24 | Python | UTF-8 | Python | false | false | 1,245 | py | import pytest
from django.core.exceptions import ValidationError
from awx.main.models import Credential
@pytest.mark.django_db
def test_clean_credential_with_ssh_type(credentialtype_ssh, job_template):
credential = Credential(
name='My Credential',
credential_type=credentialtype_ssh
)
credential.save()
job_template.credential = credential
job_template.full_clean()
@pytest.mark.django_db
def test_clean_credential_with_invalid_type_xfail(credentialtype_aws, job_template):
credential = Credential(
name='My Credential',
credential_type=credentialtype_aws
)
credential.save()
with pytest.raises(ValidationError):
job_template.credential = credential
job_template.full_clean()
@pytest.mark.django_db
def test_clean_credential_with_custom_types(credentialtype_aws, credentialtype_net, job_template):
aws = Credential(
name='AWS Credential',
credential_type=credentialtype_aws
)
aws.save()
net = Credential(
name='Net Credential',
credential_type=credentialtype_net
)
net.save()
job_template.extra_credentials.add(aws)
job_template.extra_credentials.add(net)
job_template.full_clean()
| [
"rpetrell@redhat.com"
] | rpetrell@redhat.com |
7963ef741b939e0ecb1eb8a00e0e97309e07b69b | 8a378ddae37f834a1c00ba6e63d3ff8f9cabbffb | /tailpos_sync/tailpos_sync/report/product_bundle__register_report/other_methods.py | 44e1da2be0547326648c3e5adb24f39630bb769d | [
"MIT"
] | permissive | aakvatech/tailpos-sync | 2985d998d0e7ee394c68da578e19819f00cc4acd | 7f199459769395d8d21e8effad1af39b1512c205 | refs/heads/master | 2020-09-08T19:16:57.633724 | 2020-04-22T05:56:49 | 2020-04-22T05:56:49 | 221,221,323 | 2 | 0 | NOASSERTION | 2020-04-22T05:56:50 | 2019-11-12T13:13:32 | null | UTF-8 | Python | false | false | 5,896 | py | import frappe
def get_columns(columns):
columns.append({"fieldname": "invoice_date", "label": "Invoice Date", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "invoice_number", "label": "Invoice Number", "fieldtype": "Link", "options": "Sales Invoice", "width": 150, })
columns.append({"fieldname": "receipt_no", "label": "Receipt Number", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "receipt_date", "label": "Receipt Date", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "cost_center", "label": "Cost Center", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "pos_profile", "label": "POS Profile", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "store_id", "label": "Store ID", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "item_code", "label": "Item Code", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "item_name", "label": "Item Name", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "uom", "label": "UOM", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "qty", "label": "QTY", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "rate", "label": "Rate", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "amount", "label": "Amount", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "packed_items", "label": "Packed Items", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_item_code", "label": "Item Code", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_item_name", "label": "Item Name", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_qty", "label": "Qty", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "packed_uom", "label": "UOM", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_valuation_rate", "label":"Valuation Rate", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "warehouse", "label":"Warehouse", "fieldtype": "Data", "width": 150})
def get_invoices(filters, data):
from_date = filters.get("from_date")
to_date = filters.get("to_date")
print(from_date)
print(to_date)
query = """
SELECT
SI.name as name,
SI.posting_date as posting_date,
R.receiptnumber as receiptnumber,
R.date as date,
R.deviceid as deviceid
FROM `tabSales Invoice` AS SI
INNER JOIN `tabReceipts` AS R ON SI.name = R.reference_invoice
WHERE SI.posting_date BETWEEN '{0}' AND '{1}' ORDER BY SI.name
""".format(from_date,to_date)
invoices = frappe.db.sql(query, as_dict=1)
modify_records(invoices, data)
return invoices
def modify_records(invoices, data):
for idx, value in enumerate(invoices):
total = {
"qty": "Total",
"rate": 0,
"amount": 0,
"packed_valuation_rate": 0,
}
sales_invoice_item = frappe.db.sql(""" SELECT * FROM `tabSales Invoice Item` WHERE parent=%s """, value.name, as_dict=True)
device = frappe.db.sql(""" SELECT * FROM `tabDevice` WHERE name=%s""",value.deviceid, as_dict=True)
obj = {
"invoice_date": value.posting_date,
"invoice_number": value.name,
"receipt_no": value.receiptnumber,
"receipt_date": value.date,
"packed_items": "",
}
if len(device) > 0:
pos_profile = frappe.db.sql(""" SELECT * FROM `tabPOS Profile` WHERE name=%s""", device[0].pos_profile, as_dict=True)
if len(pos_profile) > 0:
obj['cost_center'] = pos_profile[0].cost_center
obj['pos_profile'] = device[0].pos_profile
obj['store_id'] = device[0].name
for idxx, i in enumerate(sales_invoice_item):
if idxx == 0:
obj['item_code'] = i.item_code
obj['item_name'] = i.item_name
obj['qty'] = i.qty
obj['rate'] = i.rate
obj['amount'] = i.amount
else:
obj = {
"item_code": i.item_code,
"item_name": i.item_name,
"qty": i.qty,
"rate": i.rate,
"amount": i.amount,
}
total["rate"] += i.rate
total["amount"] += i.amount
packed_items = frappe.db.sql(""" SELECT * FROM `tabPacked Item` WHERE parent_item=%s and parent=%s """, (i.item_code, value.name) , as_dict=True)
for idxxx,ii in enumerate(packed_items):
if idxxx == 0:
obj['packed_item_code'] = ii.item_code
obj['packed_item_name'] = ii.item_name
obj['packed_qty'] = ii.qty
obj['packed_uom'] = ii.uom
obj['warehouse'] = ii.warehouse
else:
obj = {
"packed_item_code": ii.item_code,
"packed_item_name": ii.item_name,
"packed_qty": ii.qty,
"packed_uom": ii.uom,
"warehouse": ii.warehouse,
}
valuation_rate = frappe.db.sql(""" SELECT * FROM tabItem WHERE name=%s""", ii.item_code, as_dict=True)
if len(valuation_rate) > 0:
obj['packed_valuation_rate'] = valuation_rate[0].valuation_rate
total["packed_valuation_rate"] += valuation_rate[0].valuation_rate
data.append(obj)
data.append(obj)
data.append(total)
| [
"jangeles@bai.ph"
] | jangeles@bai.ph |
ada51a31e0faed934a41299946478c420f2573b6 | 4d51aa09c7b65d2c54212a3004794a50f36a7a89 | /leetcode/DP/213.HouseRobber/213HouserRoober.py | 87c13157a361c4607ac3426c9c4425ec78b843c3 | [] | no_license | letterbeezps/leetcode-algorithm | 30bd7335e96fdcca3c3ec1269c1c6fa78afd2f3b | b6211b1bfd699e45164a8cb5a8fbf2b4ec9756f9 | refs/heads/master | 2021-07-06T18:29:18.666607 | 2020-08-06T17:52:50 | 2020-08-06T17:52:50 | 158,728,987 | 10 | 2 | null | 2020-08-06T17:50:17 | 2018-11-22T16:56:13 | JavaScript | UTF-8 | Python | false | false | 674 | py | class Solution:
def rob(self, nums: List[int]) -> int:
if not nums:
return 0
if len(nums) <= 3:
return max(nums)
if len(nums) == 4:
return max(nums[0]+nums[2], nums[1]+nums[3])
n = len(nums)
f = [0] * n
num1 = nums[:-1]
f[1] = num1[0]
for i in range(2, n):
f[i] = max(f[i-2]+num1[i-1], f[i-1])
temp1 = f[n-1]
f = [0] * n
num2 = nums[1:]
f[1] = num2[0]
for i in range(2, n):
f[i] = max(f[i-2]+num2[i-1], f[i-1])
temp2 = f[n-1]
return max(temp1, temp2) | [
"unlimitedzazp@outlook.com"
] | unlimitedzazp@outlook.com |
5d758d4d95cc1b6405db90c73d4118106a45b53a | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/aobongdacom.py | 73b1780602e6b41472488d0a26444813f9b8abeb | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@class='product-title']",
'price' : "//div[@class='price-box']/h4[@class='product-price']",
'category' : "//ol[@class='breadcrumb pull-left']/li/a",
'description' : "//div[@class='art-content']",
'images' : "//div[@class='main-slider']//li/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'aobongda.com'
allowed_domains = ['aobongda.com']
start_urls = ['http://aobongda.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-id\d+-c\d+\.htm$']), 'parse_item'),
Rule(LinkExtractor(deny=['/[a-zA-Z0-9-]+-id\d+-c\d+\.htm$']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
4a1e2aa3c3a7ae5b3de5e9f843d7313f73b1a3d0 | 9b3f578e63a7e17e2b1bab5f38aa8625b8a80251 | /descarteslabs/workflows/types/array/array_.py | 30e0fa674c82ddb4c8c20b53cc3d60ea823a17f7 | [
"Apache-2.0"
] | permissive | carderne/descarteslabs-python | e6f7000f08cd1569e0ddd0f7fb8e53abb6765183 | 757b480efb8d58474a3bf07f1dbd90652b46ed64 | refs/heads/master | 2022-12-09T23:19:02.361226 | 2020-08-13T11:52:30 | 2020-08-13T11:52:30 | 287,264,851 | 0 | 0 | NOASSERTION | 2020-08-13T11:46:58 | 2020-08-13T11:46:57 | null | UTF-8 | Python | false | false | 3,060 | py | import numpy as np
from descarteslabs.common.graft import client
from ...cereal import serializable
from ..core import ProxyTypeError
from ..containers import List
from ..primitives import Int, Float, Bool
from .base_array import BaseArray
DTYPE_KIND_TO_WF = {"b": Bool, "i": Int, "f": Float}
WF_TO_DTYPE_KIND = dict(zip(DTYPE_KIND_TO_WF.values(), DTYPE_KIND_TO_WF.keys()))
@serializable()
class Array(BaseArray):
"""
Proxy Array representing a multidimensional, homogenous array of fixed-size items.
Can be instantiated from a NumPy ndarray (via `from_numpy`), or a Python iterable.
Currently, Arrays can only be constructed from small local arrays (< 10MB).
Array follows the same syntax as NumPy arrays. It supports vectorized operations, broadcasting,
and multidimensional indexing. There are some limitations including slicing with lists/arrays in multiple
axes (``x[[1, 2, 3], [3, 2, 1]]``) and slicing with a multidimensional list/array of integers.
Note
----
Array is an experimental API. It may be changed in the future, will not necessarily be
backwards compatible, and may have unexpected bugs. Please contact us with any feedback!
Examples
--------
>>> import descarteslabs.workflows as wf
>>> # Create a 1-dimensional Array of Ints
>>> arr = wf.Array([1, 2, 3, 4, 5])
>>> arr
<descarteslabs.workflows.types.array.array_.Array object at 0x...>
>>> arr.compute(geoctx) # doctest: +SKIP
array([1, 2, 3, 4, 5])
>>> import numpy as np
>>> import descarteslabs.workflows as wf
>>> ndarray = np.ones((3, 10, 10))
>>> # Create an Array from the 3-dimensional numpy array
>>> arr = wf.Array(ndarray)
>>> arr
<descarteslabs.workflows.types.array.array_.Array object at 0x...>
"""
def __init__(self, arr):
if isinstance(arr, np.generic):
arr = arr.tolist()
if isinstance(arr, (int, float, bool)):
self._literal_value = arr
self.graft = client.apply_graft("wf.array.create", arr)
elif isinstance(arr, (Int, Float, Bool, List)):
self.graft = client.apply_graft("wf.array.create", arr)
else:
if not isinstance(arr, np.ndarray):
try:
arr = np.asarray(arr)
except Exception:
raise ValueError("Cannot construct Array from {!r}".format(arr))
if arr.dtype.kind not in ("b", "i", "f"):
raise TypeError("Invalid dtype {} for an Array".format(arr.dtype))
self._literal_value = arr
arr_list = arr.tolist()
self.graft = client.apply_graft("wf.array.create", arr_list)
@classmethod
def _promote(cls, obj):
if isinstance(obj, cls):
return obj
try:
return obj.cast(cls)
except Exception:
try:
return Array(obj)
except Exception as e:
raise ProxyTypeError("Cannot promote {} to Array: {}".format(obj, e))
| [
"support@descarteslabs.com"
] | support@descarteslabs.com |
395840ba68ac389c3468cbd55fcc66294d3322da | c2f42e145c03feb891d83ea294cdda9f37cfc717 | /src/modelling/capacity_planning/erlang/erlangb.py | 7695f16ce57e10ce6be5b40622e5f76eb9e2d211 | [] | no_license | FelixKleineBoesing/queuingSystem | 5b38c123f206d9c71014064404b2f50f0f4491a5 | 09ff583831aa7f8b604f01dc97cf0284ed342f77 | refs/heads/master | 2023-04-12T00:00:20.309232 | 2021-04-25T11:55:04 | 2021-04-25T11:55:04 | 361,413,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | import numpy as np
from src.modelling.capacity_planning.erlang.erlang_base import ErlangBase
from src.modelling.helpers import power_faculty
class ErlangB(ErlangBase):
def get_probability(self, number_agents: int, lambda_: float, mu: float):
"""
calculates the probability that there are c number people in the system
:param number_agents: number of available agents
:param lambda_: average arrival time in times per second
:param mu: average supply time in times per second
:return: probability of a blocked queue
"""
workload = lambda_ / mu
sum = 0.0
for i in range(number_agents + 1):
sum += power_faculty(workload, i)
if np.isnan(sum):
break
return power_faculty(workload, number_agents) / sum | [
"felix.boesing@t-online.de"
] | felix.boesing@t-online.de |
9899e3436a3df3f0622c8329670adccfbfa8ae22 | f8961fc56e95ea75f2edbb08fae5ad1af102f6f0 | /chapter_12/when_to_use_threading.py | 08613b1bc4647ea050623289f895564f0a8b60d5 | [] | no_license | larago/python_cookbook | eb2cea7425033802775a168d63199eb8e43e2d50 | 34abdb173c78d9eea046707f88a4bd976dfa26e9 | refs/heads/master | 2021-01-09T20:12:50.605774 | 2016-07-13T14:06:22 | 2016-07-13T14:06:22 | 63,230,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # GIL limit python only run in single thread, not proper to run cpu concentration task
# Better to handle them with mutiple cpu
# But thread is appropriate to use when handling potential blocking muti-task
# like IO / get result from databse these kind of jobs
from threading import Thread
class CountdownThread(Thread):
def __init__(self, n):
super(CountdownThread,self).__init__()
self.n = 0
def run(self):
while self.n > 0:
print('T-minus', self.n)
self.n -= 1
time.sleep(5)
c = CountdownThread(5)
c.start() | [
"bingeye@foxmail.com"
] | bingeye@foxmail.com |
a8b67575d0a9a5cbdc8f1fe45b98a99aa43eb729 | 1978a9455159b7c2f3286e0ad602652bc5277ffa | /exercises/05_basic_scripts/task_5_1c.py | 4613a5511bcd55aadf4fb864116bb31116c9e7d4 | [] | no_license | fortredux/py_net_eng | 338fd7a80debbeda55b5915dbfba4f5577279ef0 | 61cf0b2a355d519c58bc9f2b59d7e5d224922890 | refs/heads/master | 2020-12-03T17:32:53.598813 | 2020-04-08T20:55:45 | 2020-04-08T20:55:45 | 231,409,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | # -*- coding: utf-8 -*-
'''
Задание 5.1c
Переделать скрипт из задания 5.1b таким образом, чтобы, при запросе параметра,
которого нет в словаре устройства, отображалось сообщение 'Такого параметра нет'.
> Попробуйте набрать неправильное имя параметра или несуществующий параметр,
чтобы увидеть какой будет результат. А затем выполняйте задание.
Если выбран существующий параметр,
вывести информацию о соответствующем параметре, указанного устройства.
Пример выполнения скрипта:
$ python task_5_1c.py
Введите имя устройства: r1
Введите имя параметра (ios, model, vendor, location, ip): ips
Такого параметра нет
Ограничение: нельзя изменять словарь london_co.
Все задания надо выполнять используя только пройденные темы.
То есть эту задачу можно решить без использования условия if.
'''
london_co = {
'r1': {
'location': '21 New Globe Walk',
'vendor': 'Cisco',
'model': '4451',
'ios': '15.4',
'ip': '10.255.0.1'
},
'r2': {
'location': '21 New Globe Walk',
'vendor': 'Cisco',
'model': '4451',
'ios': '15.4',
'ip': '10.255.0.2'
},
'sw1': {
'location': '21 New Globe Walk',
'vendor': 'Cisco',
'model': '3850',
'ios': '3.6.XE',
'ip': '10.255.0.101',
'vlans': '10,20,30',
'routing': True
}
}
device = input('Введите имя устройства: ')
'''
vocab = london_co[device].keys()
vocab = str(vocab)
vocab = vocab[11:-2]
'''
vocab = str(london_co[device].keys()) #Shorter version
vocab = vocab[11:-2]
vocab = vocab.replace("'", "")
paste = 'Введите имя параметра (' + vocab + '):'
param = input(paste)
variable0 = london_co[device]
'''
variable1 = variable0.get(param, 'Такого параметра нет') # Вариант менее громоздкий
# variable1 = variable0[param] # Так было
print(variable1)
'''
print(variable0.get(param, 'Такого параметра нет')) # Вариант сложнее | [
"fortunaredux@protonmail.com"
] | fortunaredux@protonmail.com |
e48b036b804b5b19e1e4bd9da499b64f55ab174e | fcc955fd5b3fc997f5b1651c5c8b9032a6b9b177 | /bqskit/passes/search/generator.py | e9d081a87e21f8745e3f3d6b693ef54e11204717 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | BQSKit/bqskit | cf393d75b26349f7258e9caf9d5c8fa37d0c8de6 | c89112d15072e8ffffb68cf1757b184e2aeb3dc8 | refs/heads/main | 2023-09-01T04:11:18.212722 | 2023-08-29T17:34:38 | 2023-08-29T17:34:38 | 331,370,483 | 54 | 18 | NOASSERTION | 2023-09-14T14:33:26 | 2021-01-20T16:49:36 | OpenQASM | UTF-8 | Python | false | false | 926 | py | """This module implements the LayerGenerator base class."""
from __future__ import annotations
import abc
from bqskit.compiler.passdata import PassData
from bqskit.ir.circuit import Circuit
from bqskit.qis.state.state import StateVector
from bqskit.qis.state.system import StateSystem
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
class LayerGenerator(abc.ABC):
"""
The LayerGenerator base class.
Search based synthesis uses the layer generator to generate the root node
and the successors of a node.
"""
@abc.abstractmethod
def gen_initial_layer(
self,
target: UnitaryMatrix | StateVector | StateSystem,
data: PassData,
) -> Circuit:
"""Generate the initial layer for search."""
@abc.abstractmethod
def gen_successors(self, circuit: Circuit, data: PassData) -> list[Circuit]:
"""Generate the successors of a circuit node."""
| [
"edyounis123@gmail.com"
] | edyounis123@gmail.com |
5856221a71dc642ec3f93b8731bbf1ea07fa2377 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03806/s174409855.py | f3f10e7dddb189ad08ecede3f07387e8d52479df | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | import sys
input = sys.stdin.buffer.readline
N, Ma, Mb = map(int, input().split())
ABC = [list(map(int, input().split())) for _ in range(N)]
sumA = sum([ABC[i][0] for i in range(N)])
sumB = sum([ABC[i][1] for i in range(N)])
INF = 10 ** 15
dp = [[INF for j in range(sumB + 1)] for i in range(sumA + 1)]
dp[0][0] = 0
for a, b, c in ABC:
for i in range(sumA, -1, -1):
for j in range(sumB, -1, -1):
if dp[i][j] != INF:
dp[i + a][j + b] = min(dp[i + a][j + b], dp[i][j] + c)
answer = INF
for i in range(1, sumA + 1):
for j in range(1, sumB + 1):
if dp[i][j] != INF and i / j == Ma / Mb:
answer = min(answer, dp[i][j])
print(answer if answer != INF else -1) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4348f18e7053dec8ee530cf954bd323e2780a2a6 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-drds/aliyunsdkdrds/request/v20190123/DescribeDbInstancesRequest.py | 4fc670ea5087265f9b54638259b120fe48033076 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,127 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class DescribeDbInstancesRequest(RpcRequest):
    """RPC request object for the Drds ``DescribeDbInstances`` API
    (product ``Drds``, API version ``2019-01-23``).

    Each query parameter is exposed through a ``get_X``/``set_X`` pair
    that reads from / writes to the underlying query-parameter dict.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Drds', '2019-01-23', 'DescribeDbInstances','drds')
        self.set_method('POST')
        # Wire up endpoint resolution data when the installed core
        # library version exposes these attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_DrdsInstanceId(self):
        """Return the DrdsInstanceId query parameter (None if unset)."""
        params = self.get_query_params()
        return params.get('DrdsInstanceId')

    def set_DrdsInstanceId(self,DrdsInstanceId):
        """Set the DrdsInstanceId query parameter."""
        self.add_query_param('DrdsInstanceId',DrdsInstanceId)

    def get_PageNumber(self):
        """Return the PageNumber query parameter (None if unset)."""
        params = self.get_query_params()
        return params.get('PageNumber')

    def set_PageNumber(self,PageNumber):
        """Set the PageNumber query parameter."""
        self.add_query_param('PageNumber',PageNumber)

    def get_Search(self):
        """Return the Search query parameter (None if unset)."""
        params = self.get_query_params()
        return params.get('Search')

    def set_Search(self,Search):
        """Set the Search query parameter."""
        self.add_query_param('Search',Search)

    def get_PageSize(self):
        """Return the PageSize query parameter (None if unset)."""
        params = self.get_query_params()
        return params.get('PageSize')

    def set_PageSize(self,PageSize):
        """Set the PageSize query parameter."""
        self.add_query_param('PageSize',PageSize)

    def get_DbInstType(self):
        """Return the DbInstType query parameter (None if unset)."""
        params = self.get_query_params()
        return params.get('DbInstType')

    def set_DbInstType(self,DbInstType):
        """Set the DbInstType query parameter."""
        self.add_query_param('DbInstType',DbInstType)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
429edffa026b5304c1b8fa929f0ee668951964d3 | 43e2e801f6df426a9b923828e8ee3c0b0f022c66 | /vocab.py | ea1b9279eff7d539a5a6955ac364e0923c3847ee | [
"MIT"
] | permissive | mrdrozdov/knnlm | 0333eadbd1d0c6e16521475dc07d57d7dce8b02e | 61419077eb7f79c3ba7a196b4cc7cf722f4ba8f4 | refs/heads/master | 2023-04-19T14:13:58.242980 | 2021-05-20T02:34:17 | 2021-05-20T02:34:17 | 317,321,152 | 0 | 0 | MIT | 2021-01-26T17:44:06 | 2020-11-30T19:12:33 | null | UTF-8 | Python | false | false | 5,773 | py | import collections
class Dictionary(object):
    """
    A mapping from symbols to consecutive integers.
    Taken from fairseq repo.

    Symbols are stored in three parallel structures:
    ``symbols`` (index -> string), ``count`` (index -> frequency) and
    ``indices`` (string -> index).  The special symbols (bos, pad, eos,
    unk, plus any extras) always occupy the first ``nspecial`` indices.
    """

    def __init__(
        self,
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        bos="<s>",
        extra_special_symbols=None,
    ):
        # Keep the string forms of the specials for later rendering.
        self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
        self.symbols = []   # index -> symbol string
        self.count = []     # index -> occurrence count
        self.indices = {}   # symbol string -> index
        # Special symbols are added first, fixing their indices.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # finalize() never reorders the first nspecial entries.
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        # Two dictionaries are equal iff they map the same symbols to the
        # same indices (counts are ignored).
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — confirm callers never use a Dictionary
        # as a dict key or set member.
        return self.indices == other.indices

    def __getitem__(self, idx):
        """Return the symbol at *idx*, or the unk word if out of range."""
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    def index(self, sym):
        """Returns the index of the specified symbol"""
        assert isinstance(sym, str)
        if sym in self.indices:
            return self.indices[sym]
        # Unknown symbols fall back to the unk index rather than raising.
        return self.unk_index

    def unk_string(self, escape=False):
        """Return unknown string, optionally escaped as: <<unk>>"""
        if escape:
            return "<{}>".format(self.unk_word)
        else:
            return self.unk_word

    def add_symbol(self, word, n=1):
        """Adds a word to the dictionary.

        If *word* is already present, its count is incremented by *n*;
        otherwise it is appended with count *n*.  Returns the word's index.
        """
        if word in self.indices:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def update(self, new_dict):
        """Updates counts from new dictionary."""
        # Merge symbols from new_dict: existing words accumulate counts,
        # unseen words are appended with new_dict's count.
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if word in self.indices:
                idx = self.indices[word]
                self.count[idx] = self.count[idx] + new_dict.count[idx2]
            else:
                idx = len(self.symbols)
                self.indices[word] = idx
                self.symbols.append(word)
                self.count.append(new_dict.count[idx2])

    def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
        """Sort symbols by frequency in descending order, ignoring special ones.

        Args:
        - threshold defines the minimum word count
        - nwords defines the total number of words in the final dictionary,
            including special symbols
        - padding_factor can be used to pad the dictionary size to be a
            multiple of 8, which is important on some hardware (e.g., Nvidia
            Tensor Cores).
        """
        if nwords <= 0:
            nwords = len(self)

        # Specials keep their original indices.
        new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[: self.nspecial]
        new_count = self.count[: self.nspecial]

        # Build the Counter from alphabetically-sorted (symbol, count)
        # pairs so that most_common() breaks frequency ties
        # alphabetically (Counter preserves insertion order for ties).
        c = collections.Counter(
            dict(
                sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
            )
        )
        for symbol, count in c.most_common(nwords - self.nspecial):
            if count >= threshold:
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                # Counts are descending, so everything after the first
                # below-threshold word is also below threshold.
                break

        assert len(new_symbols) == len(new_indices)

        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices

        self.pad_to_multiple_(padding_factor)

    def pad_to_multiple_(self, padding_factor):
        """Pad Dictionary size to be a multiple of *padding_factor*."""
        if padding_factor > 1:
            i = 0
            while len(self) % padding_factor != 0:
                # Filler tokens carry a zero count and are never produced
                # by real data.
                symbol = "madeupword{:04d}".format(i)
                self.add_symbol(symbol, n=0)
                i += 1

    def bos(self):
        """Helper to get index of beginning-of-sentence symbol"""
        return self.bos_index

    def pad(self):
        """Helper to get index of pad symbol"""
        return self.pad_index

    def eos(self):
        """Helper to get index of end-of-sentence symbol"""
        return self.eos_index

    def unk(self):
        """Helper to get index of unk symbol"""
        return self.unk_index

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols
        to this instance.

        *f* may be a file path or an open text-file object; each line must
        be ``<token> <count>`` with the count after the last space.

        NOTE(review): unlike add_symbol(), this appends every line even if
        the word already exists, leaving a stale duplicate entry in
        ``symbols``/``count`` — confirm input files contain no duplicates.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(
                    "Incorrect encoding detected in {}, please "
                    "rebuild the dataset".format(f)
                )
            return

        for line in f.readlines():
            # Split on the LAST space so tokens may themselves contain
            # spaces; everything after it is the integer count.
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError(
                    "Incorrect dictionary format, expected '<token> <cnt>'"
                )
            word = line[:idx]
            count = int(line[idx + 1 :])
            self.indices[word] = len(self.symbols)
            self.symbols.append(word)
            self.count.append(count)
| [
"andrew@mrdrozdov.com"
] | andrew@mrdrozdov.com |
0ec7be58bdfad20324b305d2f182ce221998241b | 721406d87f5086cfa0ab8335a936ece839ab2451 | /.venv/lib/python3.8/site-packages/opencensus/trace/config_integration.py | a0d8e5d3c146b4424f29d79fecff324c6344b651 | [
"MIT"
] | permissive | MarkusMeyer13/graph-teams-presence | 661296b763fe9e204fe1e057e8bd6ff215ab3936 | c302b79248f31623a1b209e098afc4f85d96228d | refs/heads/main | 2023-07-09T03:34:57.344692 | 2021-07-29T07:16:45 | 2021-07-29T07:16:45 | 389,268,821 | 0 | 0 | MIT | 2021-07-29T07:16:46 | 2021-07-25T05:23:08 | Python | UTF-8 | Python | false | false | 1,330 | py | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
log = logging.getLogger(__name__)
def trace_integrations(integrations, tracer=None):
    """Enable tracing on the selected integrations.

    Each name in *integrations* is resolved to the module
    ``opencensus.ext.<name>.trace`` and that module's
    ``trace_integration`` hook is invoked with *tracer*.  Names whose
    module fails to import or initialise are skipped with a warning.

    :type integrations: list
    :param integrations: The integrations to be traced.
    :returns: The list of integration names that were enabled.
    """
    integrated = []
    for name in integrations:
        module_path = 'opencensus.ext.{}.trace'.format(name)
        try:
            ext_module = importlib.import_module(module_path)
            ext_module.trace_integration(tracer=tracer)
        except Exception as exc:
            # Best-effort: a broken extension must not break the others.
            log.warning('Failed to integrate module: {}'.format(module_path))
            log.warning('{}'.format(exc))
        else:
            integrated.append(name)
    return integrated
| [
"meyer_markus@gmx.de"
] | meyer_markus@gmx.de |
fd29d423ff124d7289cca464d2dbd43fdc7dae05 | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /__OLD_CODE_STORAGE/tensorflow_PLAYGROUND/lessons/from_internet/uuuuu.py | 3fc0fc1f64c4d43958978fe8396f928cf7251a2f | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 2,703 | py | import numpy as np
import tensorflow_playground as tf
from tensorflow_playground.examples.tutorials.mnist import input_data
# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Hyper-parameters for the classifier defined below.
image_size = 28      # MNIST images are 28x28 pixels
labels_size = 10     # digits 0-9
learning_rate = 0.05  # NOTE(review): unused — cnn_model_fn hard-codes 0.001; confirm which is intended
steps_number = 1000
batch_size = 100
tf.logging.set_verbosity(tf.logging.INFO)
# NOTE(review): this guard appears before any `main(argv)` is defined in
# this file, and tf.app.run() requires one — confirm a `main` exists,
# and conventionally move the guard to the bottom of the file.
if __name__ == "__main__":
    tf.app.run()
def cnn_model_fn(features, labels, mode):
    """Model function for a small MNIST CNN (tf.estimator model_fn).

    Args:
        features: dict with key "x" holding a batch of flattened 28x28
            grayscale images.
        labels: sparse integer class labels in [0, 10).
        mode: a tf.estimator.ModeKeys value (TRAIN, EVAL or PREDICT).

    Returns:
        A tf.estimator.EstimatorSpec configured for the requested mode.
    """
    # Input Layer: reshape flat pixels to NHWC; batch size inferred (-1).
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])

    # Convolutional Layer #1: 32 5x5 filters, ReLU, same-padding.
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu
    )

    # Pooling Layer #1: 2x2 max-pool, stride 2 (28x28 -> 14x14).
    pool1 = tf.layers.max_pooling2d(
        inputs=conv1,
        pool_size=[2, 2],
        strides=2
    )

    # Convolutional Layer #2 and Pooling Layer #2 (14x14 -> 7x7, 64 maps).
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu
    )
    pool2 = tf.layers.max_pooling2d(
        inputs=conv2,
        pool_size=[2, 2],
        strides=2
    )

    # Dense Layer with dropout (dropout active only while training).
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(
        inputs=pool2_flat,
        units=1024,
        activation=tf.nn.relu
    )
    dropout = tf.layers.dropout(
        inputs=dense,
        rate=0.4,
        training= mode == tf.estimator.ModeKeys.TRAIN
    )

    # Logits Layer: one raw score per class.
    logits = tf.layers.dense(inputs=dropout, units=10)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step()
        )
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            # BUGFIX: was `predctions["classes"]` — a NameError typo for
            # the `predictions` dict defined above.
            labels=labels, predictions=predictions["classes"]
        )
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops
    )
"geemguang@gmail.com"
] | geemguang@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.