blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6794fead579bf4d1b7bfe5452a1893e0a5b910d7 | cc1b87f9368e96e9b3ecfd5e0822d0037e60ac69 | /telemetry/telemetry/internal/platform/desktop_platform_backend_unittest.py | fe8ed448e677934ec0a9d9f2589cb9bca8531572 | [
"BSD-3-Clause"
] | permissive | CTJyeh/catapult | bd710fb413b9058a7eae6073fe97a502546bbefe | c98b1ee7e410b2fb2f7dc9e2eb01804cf7c94fcb | refs/heads/master | 2020-08-19T21:57:40.981513 | 2019-10-17T09:51:09 | 2019-10-17T18:30:16 | 215,957,813 | 1 | 0 | BSD-3-Clause | 2019-10-18T06:41:19 | 2019-10-18T06:41:17 | null | UTF-8 | Python | false | false | 1,019 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from telemetry.internal.platform import linux_platform_backend
from telemetry.internal.platform import win_platform_backend
from telemetry.internal.platform import cros_platform_backend
from telemetry.internal.platform import mac_platform_backend
class DesktopPlatformBackendTest(unittest.TestCase):
def testDesktopTagInTypExpectationsTags(self):
desktop_backends = [
linux_platform_backend.LinuxPlatformBackend,
win_platform_backend.WinPlatformBackend,
cros_platform_backend.CrosPlatformBackend,
mac_platform_backend.MacPlatformBackend]
for db in desktop_backends:
with mock.patch.object(db, 'GetOSVersionDetailString', return_value=''):
with mock.patch.object(db, 'GetOSVersionName', return_value=''):
self.assertIn('desktop', db().GetTypExpectationsTags())
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
7ceea105afcc67fa44c8cad1a7685eb46fca8ee6 | 214230d0796377be0bfdda286c2c389b92a19555 | /SCTF/2018 Quals/ding_j_max/solver.py | deea8232606f93e63bd58390cfbbbbe41597a98c | [
"Unlicense"
] | permissive | Qwaz/solved-hacking-problem | fa5ebfeb98ec979cf57dac1470a651199f2dc50d | cda0db4888322cce759a7362de88fff5cc79f599 | refs/heads/master | 2023-08-24T03:45:12.481496 | 2023-07-16T12:38:08 | 2023-07-16T12:38:08 | 49,208,719 | 100 | 28 | null | 2022-03-24T00:51:04 | 2016-01-07T14:18:18 | HTML | UTF-8 | Python | false | false | 1,379 | py | import sys
from pwn import *
# SCTF{I_w0u1d_l1k3_70_d3v3l0p_GUI_v3rs10n_n3x7_t1m3}
GDB_HEADER = '(gdb) '
BREAK_ADDR = 0x401412
PATCH_ADDR = 0x401415
def gdb_command(cmd):
gdb.recvuntil(GDB_HEADER)
gdb.sendline(cmd)
gdb = process(['gdb', './dingJMax', sys.argv[1]])
gdb_command('b *0x%x' % BREAK_ADDR)
context.arch = 'amd64'
press_none = asm('mov %eax, 0')
press_d = asm('mov %eax, 0x64')
press_f = asm('mov %eax, 0x66')
press_j = asm('mov %eax, 0x6a')
press_k = asm('mov %eax, 0x6b')
for i in range(42259):
gdb_command('c')
gdb_command('x/gd ($rbp-0x40)')
timing = int(gdb.recvline().strip().split()[1])
code = press_none
if timing % 20 == 0 and timing // 20 >= 19:
print timing
gdb_command('x/gx 0x%x' % (0x603280 + 8*(timing // 20 - 19)))
str_addr = int(gdb.recvline().strip().split()[1], 16)
print '0x%x' % str_addr
gdb_command('x/s 0x%x' % str_addr)
keypress = gdb.recvline().strip().split('"')[1]
print keypress
try:
code = [
press_d,
press_f,
press_j,
press_k,
][keypress.index('o')]
except ValueError:
pass
assert len(code) == 5
for i in range(5):
gdb_command('set *(unsigned char*)0x%x = %d' % (PATCH_ADDR + i, ord(code[i])))
gdb.interactive()
| [
"qwazpia@gmail.com"
] | qwazpia@gmail.com |
08b3ed5e21ee24030807a466581e83ed6918823d | 3bc089a77598694aace6b060c3aca5e9bb1e156b | /exercises/1901100254/1001S02E05_string.py | ce51182891da02309452743bc68fdcb04a6192ba | [] | no_license | Lily0905/selfteaching-python-camp | 8a91dc47b707a0e605c0722e7a50c402e3c61968 | bf1b8ea3b064937f650d50e83d98847bfc567bae | refs/heads/master | 2020-07-29T19:56:24.761264 | 2019-09-20T08:46:48 | 2019-09-20T08:46:48 | 209,940,920 | 1 | 0 | null | 2019-09-21T07:06:42 | 2019-09-21T07:06:42 | null | UTF-8 | Python | false | false | 1,720 | py |
text = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
string = text
string.replace("better", "worse") #替换函数str.replace()
print('\n替换better后的结果:',string.replace("better","worse"))
#从第2步的结果⾥,将单词中包含 ea 的单词剔除
text1 = text.replace('better','worse')
text2 =text1.replace('ea', '__')
print ('\n删除含ea的单词的结果:\n',text2)
#将第3步的结果里的字⺟进⾏大小写翻转(将大写字母转成小写,⼩写字母转成大写)
text3 = ' '.join(text2) #列表转换成字符串
text4 = text3.swapcase() #逐个字符更替大小写
print('\n大小写翻转后新字符串text4为:\n',text4)
#将第4步的结里所有单词按a...z升序排列,并输出结果
text5 = text4.split() # 排序需要通过列表,上一步结果字符串转换成列表
text5.sort()
print('\n排列结果如下:\n',text5)
| [
"43633521+liujiayi0042@users.noreply.github.com"
] | 43633521+liujiayi0042@users.noreply.github.com |
aff0f112469e96ffe9c9ca4c1b77779cb217de75 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/portal/v20181001/get_console.py | e0491ed0df58d54c014286ff88faba9f6cd5b828 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 1,891 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetConsoleResult',
'AwaitableGetConsoleResult',
'get_console',
]
@pulumi.output_type
class GetConsoleResult:
"""
Cloud shell console
"""
def __init__(__self__, properties=None):
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def properties(self) -> 'outputs.ConsolePropertiesResponse':
"""
Cloud shell console properties.
"""
return pulumi.get(self, "properties")
class AwaitableGetConsoleResult(GetConsoleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConsoleResult(
properties=self.properties)
def get_console(console_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConsoleResult:
"""
Use this data source to access information about an existing resource.
:param str console_name: The name of the console
"""
__args__ = dict()
__args__['consoleName'] = console_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:portal/v20181001:getConsole', __args__, opts=opts, typ=GetConsoleResult).value
return AwaitableGetConsoleResult(
properties=__ret__.properties)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
ecf43cb2b8cf8ecea51db2d3ff7ead25d563ea67 | 70026e9e5a6d9f70057574c749d0ef8b603763f5 | /audits/admin.py | b0e64b5c5137ec5c2e55b741815363af7c6ef548 | [
"MIT"
] | permissive | GreenBankObservatory/nrqz_admin | 96b113e640bfdec221631626114665ef1f684cc5 | c8e0876c3ab7a3feae578fa725dbfd1bdbe52a61 | refs/heads/master | 2023-05-11T16:28:21.731339 | 2023-05-01T14:25:25 | 2023-05-01T14:25:25 | 159,407,852 | 2 | 0 | MIT | 2022-12-14T04:13:35 | 2018-11-27T22:22:48 | Python | UTF-8 | Python | false | false | 307 | py | from django.contrib import admin
from django_import_data.models import (
ModelImportAttempt,
FileImporter,
FileImportAttempt,
)
@admin.register(FileImporter)
class FileImporterAdmin(admin.ModelAdmin):
fields = ("file_path",)
admin.site.register([ModelImportAttempt, FileImportAttempt])
| [
"tchamber@nrao.edu"
] | tchamber@nrao.edu |
4e75fe3dfeffd6dd6d9727a0a14677fe8e3f681b | bd10d096a40f6ac88ea4ade678297cb4552626b3 | /core/nginx/config.py | 360ce683f0779e1c31e947f3ddf4d8ce13b88a42 | [
"MIT"
] | permissive | rageOS/Mailu | 26db34b082251673de5e6ff91f4668578bb996ac | 319965a4afa461a3cb63e6cf20100d9d7fe80c48 | refs/heads/master | 2021-08-23T11:38:20.771680 | 2017-12-03T18:37:36 | 2017-12-03T18:37:36 | 113,082,186 | 0 | 0 | null | 2017-12-04T18:57:07 | 2017-12-04T18:57:06 | null | UTF-8 | Python | false | false | 1,036 | py | #!/usr/bin/python
import jinja2
import os
convert = lambda src, dst, args: open(dst, "w").write(jinja2.Template(open(src).read()).render(**args))
args = os.environ.copy()
# Get the first DNS server
with open("/etc/resolv.conf") as handle:
content = handle.read().split()
args["RESOLVER"] = content[content.index("nameserver") + 1]
# TLS configuration
args["TLS"] = {
"cert": ("/certs/cert.pem", "/certs/key.pem"),
"mail": ("/certs/cert.pem", "/certs/key.pem"),
"letsencrypt": ("/certs/letsencrypt/live/mailu/fullchain.pem",
"/certs/letsencrypt/live/mailu/privkey.pem"),
"notls": None
}[args["TLS_FLAVOR"]]
if args["TLS"] and not all(os.path.exists(file_path) for file_path in args["TLS"]):
print("Missing cert or key file, disabling TLS")
args["TLS_ERROR"] = "yes"
# Build final configuration paths
convert("/conf/tls.conf", "/etc/nginx/tls.conf", args)
convert("/conf/nginx.conf", "/etc/nginx/nginx.conf", args)
if os.path.exists("/var/log/nginx.pid"):
os.system("nginx -s reload")
| [
"pierre@jaury.eu"
] | pierre@jaury.eu |
22dc7e14aaa24e3ff8644caf46c78a53ff96d37b | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /GIT-USERS/TOM-Lambda/CSEUFLEX_Intro_Python_GP/equipment.py | 8b4dff5975c361bf9cd8643cdde3ddad059ce7ea | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 676 | py | <<<<<<< HEAD
# make an equipment class with the fields of
=======
# make an equipment class with the fields of
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# name, price, style and weight
# that inherits from the product class
from product import Product
<<<<<<< HEAD
=======
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
class Equipment(Product):
def __init__(self, name, price, style, weight):
super().__init__(name, price)
self.style = style
self.weight = weight
def __str__(self):
return super().__str__() + f" comes in {self.style}, {self.weight}"
<<<<<<< HEAD
=======
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
312b2e57144f885c8394f8deee7110fb7f0dddc8 | a6ff5be50b499ffb36294e1e93ce59b138bfe622 | /test/test_integration_event.py | 636dbfb935bca44a3e065c688ab7c799920b2635 | [
"MIT"
] | permissive | MostafaSalah222/talon_one.py | 7221ebc54831dce33f1724fe0856093145d7add8 | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | refs/heads/master | 2023-07-27T06:03:43.849686 | 2021-09-07T08:56:41 | 2021-09-07T09:01:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.integration_event import IntegrationEvent # noqa: E501
from talon_one.rest import ApiException
class TestIntegrationEvent(unittest.TestCase):
"""IntegrationEvent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IntegrationEvent
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = talon_one.models.integration_event.IntegrationEvent() # noqa: E501
if include_optional :
return IntegrationEvent(
profile_id = '0',
type = '0',
attributes = None
)
else :
return IntegrationEvent(
type = '0',
attributes = None,
)
def testIntegrationEvent(self):
"""Test IntegrationEvent"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | MostafaSalah222.noreply@github.com |
123ace045db3cd03e44cc51df57ed19c1a98d7f3 | 15b12d69ac3123d1562986970ce01d7a47d171de | /SetOperation.py | 34a7d487b53786c9b2b62ea9c17e5f2fef42dc6c | [
"Apache-2.0"
] | permissive | simplymanas/python-learning | 9b67b5a7acfb3a7c2455a7d1fc66203a2b419c37 | 75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0 | refs/heads/master | 2021-07-11T06:40:24.803589 | 2021-06-20T12:06:02 | 2021-06-20T12:06:02 | 241,769,614 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py |
# Date: 27th Jun 2020
# Lets learn Set Theory in Python
# Few Operations on Sets
# Let's take two sets
first_set = {11, 21, 31, 41, 51}
second_set = {11, 61, 71, 81, 31}
print('First Set : ' + str(first_set))
print('Second Set : ' + str(second_set))
# The basic operations are:
# 1. Union of Sets
print('\nUNION of the two sets are (Both in first and second)')
print(set(first_set) | set(second_set))
# inbuilt function
print(first_set.union(second_set))
# 2. Intersection of sets
print('\nIntersection of the two sets are (common to both)')
print(set(first_set) & set(second_set))
# inbuilt function
print(first_set.intersection(second_set))
# 3. Difference of two sets
print('\nDifference of the two sets are (in first but not in second) ')
print(set(first_set) - set(second_set))
# inbuilt function
print(first_set.difference(second_set))
# 4. Symmetric difference of two sets
print('\nSymmetric Difference of the two sets are (excluding the common element of both) ')
print(set(first_set) ^ set(second_set))
# inbuilt function
print(first_set.symmetric_difference(second_set))
print() | [
"manas.dash@tesco.com"
] | manas.dash@tesco.com |
9bb15e41ae62f4d5977caeaa5b592386b4a87562 | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/kemlrocrecom.py | 4b4717cfcc86c97a8622f2359d87ec9eb1eaf697 | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='detai_product_right']/h1",
'price' : "//div[@class='detai_product_gia']/span",
'category' : "//h2[@class='dita_name_home']/a",
'description' : "//div[@class='ibm-active ibm-columns']/div[@class='dita_detail']",
'images' : "//div[@class='detai_product']/div/div/img/@src",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : ""
}
name = 'kemlrocre.com'
allowed_domains = ['kemlrocre.com']
start_urls = ['http://kemlrocre.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/\d+-[a-zA-Z0-9-]+\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['/.+\.html'], deny=['/\d+-[a-zA-Z0-9-]+\.html$']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
aa1f36de03e34eaff8ca75902441da8ade24fd46 | 2106e17ab0e564e8ad0b2c907e47795ec6d4835b | /examples/neural_networks/plot_mlp_alpha.py | f04f3462ba3d1a818977763991e332ac8f655e0d | [
"BSD-3-Clause"
] | permissive | smartscheduling/scikit-learn | 31eca6d5894399f003fcc607224c28831bdf86e8 | f773bb5413bf367167ce265df019c3237096ef49 | refs/heads/master | 2021-01-24T15:44:04.473676 | 2014-10-13T20:42:16 | 2014-10-13T20:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different regularization term 'alpha' values on synthetic
datasets. The plot shows that different alphas yield different decision
functions.
Alpha is a regularization term, or also known as penalty term, that combats
overfitting by constraining the weights' size. Increasing alpha may fix high
variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision function plot that may appear with lesser curvatures.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in more curvatures in the
decision function plot.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MultilayerPerceptronClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-4, 4, 5)
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MultilayerPerceptronClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| [
"issam.laradji@gmail.com"
] | issam.laradji@gmail.com |
f1cde5699db50e9afd62a6633e1d18b2c8f11428 | 9a6ae760c478f136e09c78eeff06770929f93afb | /demo2/polls/migrations/0003_auto_20190705_1417.py | 31d6dc20dd57a17db99af07be3c9ff60fe917e9e | [] | no_license | zzy0371/Py1904 | adb0faa2e29abefe08ed81835573626ce2bcd899 | 47e24a34d49356f64ffdf87bb7e1b7009b215511 | refs/heads/master | 2022-12-11T08:10:06.160540 | 2019-07-18T08:11:33 | 2019-07-18T08:11:33 | 194,625,297 | 0 | 0 | null | 2022-04-22T21:54:48 | 2019-07-01T07:50:54 | JavaScript | UTF-8 | Python | false | false | 377 | py | # Generated by Django 2.2.1 on 2019-07-05 06:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_temp'),
]
operations = [
migrations.AlterField(
model_name='temp',
name='desc',
field=models.CharField(max_length=20, null=True),
),
]
| [
"496575233@qq.com"
] | 496575233@qq.com |
fb1c05fe450cfdeacced4b7a11ff507c7d783914 | 1f79d9d02810a944c45fc962c62159035c5a2247 | /migrations/versions/37878b76721_.py | b8ea073ef84a3fa65312468c4ebe2d712c125626 | [] | no_license | qsq-dm/mff | 5f17d6ffd1d4742dc46d1367cff35233af08a450 | d7f1e6f3fba95fe0d8ebb8937dda64a17e71f048 | refs/heads/master | 2020-12-29T02:19:29.037394 | 2016-08-01T15:40:42 | 2016-08-01T15:40:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | """empty message
Revision ID: 37878b76721
Revises: 3107ca470fdf
Create Date: 2016-02-27 10:16:36.087236
"""
# revision identifiers, used by Alembic.
revision = '37878b76721'
down_revision = '3107ca470fdf'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('school', sa.Column('pics_count', sa.Integer(), nullable=True))
op.create_index(op.f('ix_school_pics_count'), 'school', ['pics_count'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_school_pics_count'), table_name='school')
op.drop_column('school', 'pics_count')
### end Alembic commands ###
| [
"root@localhost"
] | root@localhost |
c3f357915308c406450f3af0a4a588a2ddcb9b30 | 7d667b70c8ae1c8f214b85d613d3a98462af9d0c | /froide/foirequestfollower/tests.py | 68fc03b7be15659bbbca939ec138dc0a38e27a53 | [
"MIT"
] | permissive | handlingar/froide | c57653a87a05fb402c1fe61f0df1ff480391f911 | 5ed80cf6550fb4cbc757029b2c860b53e784eb93 | refs/heads/master | 2021-05-28T18:13:17.573095 | 2015-06-18T13:00:16 | 2015-06-18T13:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,564 | py | from __future__ import with_statement
import re
import factory
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.auth import get_user_model
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from froide.foirequest.models import FoiRequest
from froide.foirequest.tests import factories
from .models import FoiRequestFollower
from .tasks import _batch_update
User = get_user_model()
class FoiRequestFollowerFactory(factory.DjangoModelFactory):
FACTORY_FOR = FoiRequestFollower
request = factory.SubFactory(factories.FoiRequestFactory)
user = factory.SubFactory(factories.UserFactory)
email = ''
confirmed = True
class FoiRequestFollowerTest(TestCase):
def setUp(self):
self.site = factories.make_world()
def test_following(self):
req = FoiRequest.objects.all()[0]
user = User.objects.get(username='sw')
self.client.login(username='sw', password='froide')
response = self.client.post(reverse('foirequestfollower-follow',
kwargs={"slug": req.slug}))
# Can't follow my own requests
self.assertEqual(response.status_code, 400)
followers = FoiRequestFollower.objects.filter(request=req, user=user)
self.assertEqual(followers.count(), 0)
self.client.logout()
user = User.objects.get(username='dummy')
self.client.login(username='dummy', password='froide')
response = self.client.post(reverse('foirequestfollower-follow',
kwargs={"slug": req.slug}))
self.assertEqual(response.status_code, 302)
follower = FoiRequestFollower.objects.get(request=req, user=user)
self.assertEqual(len(mail.outbox), 0)
req.add_postal_reply.send(sender=req)
self.assertEqual(len(mail.outbox), 1)
mes = mail.outbox[0]
match = re.search('/%d/(\w+)/' % follower.pk, mes.body)
check = match.group(1)
response = self.client.get(
reverse('foirequestfollower-confirm_unfollow',
kwargs={'follow_id': follower.id,
'check': "a" * 32}))
self.assertEqual(response.status_code, 302)
follower = FoiRequestFollower.objects.get(request=req, user=user)
response = self.client.get(
reverse('foirequestfollower-confirm_unfollow',
kwargs={'follow_id': follower.id,
'check': check}))
self.assertEqual(response.status_code, 302)
try:
FoiRequestFollower.objects.get(request=req, user=user)
except FoiRequestFollower.DoesNotExist:
pass
else:
self.assertTrue(False)
def test_unfollowing(self):
req = FoiRequest.objects.all()[0]
user = User.objects.get(username='dummy')
self.client.login(username='dummy', password='froide')
response = self.client.post(reverse('foirequestfollower-follow',
kwargs={"slug": req.slug}))
self.assertEqual(response.status_code, 302)
follower = FoiRequestFollower.objects.filter(request=req, user=user).count()
self.assertEqual(follower, 1)
response = self.client.post(reverse('foirequestfollower-follow',
kwargs={"slug": req.slug}))
self.assertEqual(response.status_code, 302)
follower = FoiRequestFollower.objects.filter(request=req, user=user).count()
self.assertEqual(follower, 0)
def test_updates(self):
mail.outbox = []
req = FoiRequest.objects.all()[0]
comment_user = factories.UserFactory()
user = User.objects.get(username='dummy')
self.client.login(username='dummy', password='froide')
response = self.client.post(reverse('foirequestfollower-follow',
kwargs={"slug": req.slug}))
self.assertEqual(response.status_code, 302)
self.client.logout()
self.client.login(username=comment_user.username, password='froide')
mes = list(req.messages)[-1]
d = {
'name': 'Jim Bob',
'email': 'jim.bob@example.com',
'url': '',
'comment': 'This is my comment',
}
f = CommentForm(mes)
d.update(f.initial)
self.client.post(reverse("comments-post-comment"), d)
_batch_update()
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].to[0], req.user.email)
self.assertEqual(mail.outbox[1].to[0], user.email)
def test_updates_avoid(self):
mail.outbox = []
req = FoiRequest.objects.all()[0]
dummy_user = User.objects.get(username='dummy')
req2 = factories.FoiRequestFactory.create(
site=self.site, user=req.user)
mes = list(req.messages)[-1]
mes2 = factories.FoiMessageFactory.create(request=req2)
self.client.login(username=req.user.username, password='froide')
d = {
'name': 'Jim Bob',
'email': 'jim.bob@example.com',
'url': '',
'comment': 'This is my comment',
}
f = CommentForm(mes)
d.update(f.initial)
self.client.post(reverse("comments-post-comment"), d)
_batch_update(update_requester=False)
self.assertEqual(len(mail.outbox), 0)
mail.outbox = []
self.client.logout()
def do_follow(req, username):
self.client.login(username=username, password='froide')
response = self.client.post(reverse('foirequestfollower-follow',
kwargs={"slug": req.slug}))
self.assertEqual(response.status_code, 302)
self.client.logout()
def do_comment(mes, username):
self.client.login(username=username, password='froide')
f = CommentForm(mes)
d.update(f.initial)
self.client.post(
reverse("comments-post-comment"),
d
)
do_follow(req, 'dummy')
do_comment(mes, 'sw')
do_follow(req2, 'dummy')
do_comment(mes2, 'sw')
_batch_update()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to[0], dummy_user.email)
Comment.objects.all().delete()
mail.outbox = []
do_comment(mes2, 'dummy')
_batch_update()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to[0], req.user.email)
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
99206d1556504fc1696b6afc8d4d1e3e31d90434 | 5ef87d7308fd7a6a56c1fa4795e733820144fdac | /dataloader.py | 17f7c33ace3b82ace471b974c9a8a181e6639d91 | [
"Apache-2.0"
] | permissive | pprp/SimpleClassifier | 19bdcdbad5a9f3d3cd6b22f545fa0037fd94f659 | ad6d664364ebdba0efcab7366a75a179995e43cb | refs/heads/master | 2020-09-27T10:41:20.682317 | 2019-12-16T13:43:12 | 2019-12-16T13:43:12 | 226,497,408 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | import torch
from config import cfg
from torchvision import transforms, datasets
# part 0: parameter
input_size = cfg.INPUT_SIZE
batch_size = cfg.BATCH_SIZE
# part 1: transforms
train_transforms = transforms.Compose([
transforms.RandomRotation(5),
transforms.RandomResizedCrop(input_size[0]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
valid_transforms = transforms.Compose([
transforms.Resize(input_size),
transforms.RandomResizedCrop(input_size[0]),
transforms.ToTensor(),
transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
# part 2: dataset
train_dataset = datasets.ImageFolder(root=cfg.TRAIN_DATASET_DIR,
transform=train_transforms)
valid_dataset = datasets.ImageFolder(root=cfg.VALID_DATASET_DIR,
transform=valid_transforms)
# part 3: dataloader
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=1)
valid_dataloader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=1)
# part 4: test
if __name__ == "__main__":
for image, label in train_dataloader:
print(image.shape, label.shape, len(train_dataloader)) | [
"1115957667@qq.com"
] | 1115957667@qq.com |
c79bbac857dfd71d4ab4da5645e78f296e69721d | 49a15ea9bdbf68575c034f2428ddc5bdc9b897d2 | /mysite/polls/urls.py | 5d787f4eae86d603e7098327aedff2e742373a0f | [] | no_license | chenhanfang/djangotest | 277a23c62cbf6b4d5e336642352e06d16c0238f3 | 96eeb865a4bc51fea345e54108081ae08a150e4f | refs/heads/master | 2020-12-30T18:03:24.955470 | 2017-05-17T09:41:52 | 2017-05-17T09:41:52 | 90,949,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django.conf.urls import url
from . import views
app_name = 'polls'
urlpatterns=[
url(r'^$',views.index,name='index'),
url(r'^(?P<question_id>[0-9]+)/$',views.detail,name='detail'),
url(r'^(?P<question_id>[0-9]+)/results/$',views.results,name='results'),
url(r'^(?P<question_id>[0-9]+)/vote/$',views.vote,name='vote'),
url(r'^latest/.html$',views.index,name='index')
] | [
"chenhanfang@zhixunkeji.cn"
] | chenhanfang@zhixunkeji.cn |
cff098258b5b5b7e9de922770351d0c19a9f6d4d | 7d4e8492de331f8bed4ef625132a3c8bb1e44b3e | /src/exceptions/aws_exceptions.py | 90b29576c3400d52412ae100877495c039908dec | [
"ISC"
] | permissive | uk-gov-mirror/dwp.dataworks-behavioural-framework | f6d1d7a94a2b18be659acd444ae8d88615e4a162 | d7c143c0fc0c4ae9e86ece34ccc1a480df1f65ad | refs/heads/master | 2023-04-09T01:09:37.313078 | 2021-04-14T15:43:44 | 2021-04-14T15:43:44 | 356,707,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | class AthenaQueryException(Exception):
"""Used when an exception occurs during an Athena query"""
pass
class HBaseQueryException(Exception):
"""Used when an exception occurs during an HBase query"""
pass
| [
"noreply@github.com"
] | uk-gov-mirror.noreply@github.com |
7323ef87e2046adea00e9391a8c96ad2e572d5a9 | fc5734ad9b0dc154b3a36ec2f5d848b3d693473f | /solutions/Trees and Graphs/Graphs/max_area_of_island.py | d75e31146c0bbabbdf6159e062b5edde686ccf2e | [
"MIT"
] | permissive | aimdarx/data-structures-and-algorithms | 8e51ec2144b6e0c413bc7ef0c46aba749fd70a99 | 1659887b843c5d20ee84a24df152fb4f763db757 | refs/heads/master | 2023-08-28T12:00:33.073788 | 2021-11-07T08:31:28 | 2021-11-07T08:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | """
Max Area of Island:
You are given an m x n binary matrix grid. An island is a group of 1's (representing land) connected 4-directionally (horizontal or vertical.)
You may assume all four edges of the grid are surrounded by water.
The area of an island is the number of cells with a value 1 in the island.
Return the maximum area of an island in grid. If there is no island, return 0.
Example 1:
Input: grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,1,1,0,1,0,0,0,0,0,0,0,0],[0,1,0,0,1,1,0,0,1,0,1,0,0],[0,1,0,0,1,1,0,0,1,1,1,0,0],[0,0,0,0,0,0,0,0,0,0,1,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Output: 6
Explanation: The answer is not 11, because the island must be connected 4-directionally.
Example 2:
Input: grid = [[0,0,0,0,0,0,0,0]]
Output: 0
Example 3:
[[1,1,0],[0,0,0]]
2
https://leetcode.com/problems/max-area-of-island/
"""
class Solution:
def maxAreaOfIsland(self, grid):
maximum = 0
for row in range(len(grid)):
for col in range(len(grid[0])):
if grid[row][col] == 1:
maximum = max(
maximum, self.areOfIsland(grid, row, col))
return maximum
def areOfIsland(self, grid, row, col):
if not (row >= 0 and row < len(grid) and col >= 0 and col < len(grid[0]) and grid[row][col] == 1):
return 0
grid[row][col] = 0 # remove
count = 1
# up
count += self.areOfIsland(grid, row-1, col)
# down
count += self.areOfIsland(grid, row+1, col)
# left
count += self.areOfIsland(grid, row, col-1)
# right
count += self.areOfIsland(grid, row, col+1)
return count
| [
"noreply@github.com"
] | aimdarx.noreply@github.com |
314d7d7f762ea47030a3ad526c45bdbd5b548225 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0825+428/sdB_pg_0825+428_lc.py | 6a60c6b876ec9ccf2d2ed0b809b4bb5139d1047f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[127.239792,42.678167], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_0825+428/sdB_pg_0825+428_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
c45a634d4051faca587362c4c6d02f814a05afe5 | 260133e46c0c88fd20f2ed18309c5f46508b7fb9 | /opengever/base/monkey/patches/action_info.py | e473fd4111bb55218ed7b680d6aba6c87f420f45 | [] | no_license | robertmuehsig/opengever.core | 4180fbea1436fade9b33232a293b0d43ebfc6c51 | 63b3747793d5b824c56eb3659987bb361d25d8d8 | refs/heads/master | 2020-09-08T14:55:00.340222 | 2019-11-08T10:16:02 | 2019-11-08T10:16:02 | 221,163,734 | 0 | 0 | null | 2019-11-12T08:08:59 | 2019-11-12T08:08:54 | null | UTF-8 | Python | false | false | 1,603 | py | from opengever.base.monkey.patching import MonkeyPatch
from Products.CMFCore.utils import _checkPermission
class PatchActionInfo(MonkeyPatch):
"""We patch the _checkPermissions() method of the ActionInfo object
in order to also consider our 'file_actions' category one that should
have its actions' permissions checked on the context.
Without this, the permissions would be checked on the Plone Site instead.
"""
def __call__(self):
def _checkPermissions(self, ec):
""" Check permissions in the current context.
"""
category = self['category']
object = ec.contexts['object']
if object is not None and ( category.startswith('object') or
category.startswith('workflow') or
category.startswith('file') or # <-- patched
category.startswith('document') ):
context = object
else:
folder = ec.contexts['folder']
if folder is not None and category.startswith('folder'):
context = folder
else:
context = ec.contexts['portal']
for permission in self._permissions:
if _checkPermission(permission, context):
return True
return False
from Products.CMFCore.ActionInformation import ActionInfo
locals()['__patch_refs__'] = False
self.patch_refs(ActionInfo, '_checkPermissions', _checkPermissions)
| [
"lukas.graf@4teamwork.ch"
] | lukas.graf@4teamwork.ch |
9f1ce72275c26f6742415bae951f4b266f69a90a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03711/s673158923.py | 24d0cec24c92d3886a0f6f29f107232db5bf4165 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | l = [4, 6, 9, 11]
x, y = map(int, input().split())
if x == 2 or y == 2:
print('No')
elif x in l and y in l:
print('Yes')
elif x not in l and y not in l:
print('Yes')
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b6933de33d4267ec3bf240b424e9fd2bfef3e627 | 39c5e93c07f1d41cb2dd632a858b58ccf9955ab9 | /Methods and Functions/map_Filter_lambda/filter.py | cd5f5b820360883d191acfe3efd4b26516ef0f2a | [] | no_license | amitarvindpatil/Python-Study-Material | 0e8b1ca4361c7ff92f94cc7bf76d4ef2866dddac | b52f96ceb2b9a7dcb0dd979c2f688ea2460cdece | refs/heads/master | 2023-04-21T03:54:01.894374 | 2021-05-11T06:19:06 | 2021-05-11T06:19:06 | 224,249,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | # Filter
# -----The filter() method filters the given sequence with the help
# of a function that tests each element in the sequence to be true or not.
# Example
# def check_even(num):
# return num % 2 == 0
# num = [0, 3, 2, 4, 5, 2, 6, 7, 34, 23, 5, 78, 32, 2, 1, 1, 0]
# filter_even = filter(check_even, num)
# print(list(filter_even))
# Example 2
def vovels(letters):
vows = ['e', 'a', 'i', 'o', 'u']
for v in vows:
print(v)
if letters[0] == v:
return True
else:
return False
letters = ["amit", "patil", "iskon", "ervind"]
filter_vovels = filter(vovels, letters)
print(list(filter_vovels))
| [
"amitpatil04041993@gmail.com"
] | amitpatil04041993@gmail.com |
f72dbe571f55a2c24215509833ecbcfcbfeb6bbd | 7259dbcc9e32502945d362caa43d4ad380cd04ea | /企业数据库爬虫/badwork-master/uptoaliyun/bxzd_uptoaliyun.py | 83b56fccbcfe8eb45924f5fbc87fecc2f32fedf8 | [
"MIT"
] | permissive | Doraying1230/Python-Study | daa143c133262f4305624d180b38205afe241163 | 8dccfa2108002d18251053147ccf36551d90c22b | refs/heads/master | 2020-03-29T13:46:13.061373 | 2018-07-26T15:19:32 | 2018-07-26T15:19:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,964 | py | # coding:utf-8
import requests
import time
import uuid
import threading
import Queue
import MySQLdb
import config
import re
from lxml import etree
from lxml.html import fromstring
from download_center.store.store_mysql_pool import StoreMysqlPool
# from StoreMysqlPool import StoreMysqlPool
from download_center.store.store_oss import StoreOSS
import sys
import base64
reload(sys)
sys.setdefaultencoding('utf8')
class BaiduImage:
def __init__(self):
self.db = StoreMysqlPool(**config.CONN_DB)
self.oss = StoreOSS(**config.EHCO_OSS)
self.q = Queue.Queue()
def get_image_respone(self, url):
'''
下载指定url二进制的文件
'''
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
}
try:
r = requests.get(url, timeout=20, stream=True, headers=headers)
r.raise_for_status()
print '图片下载成功!url: {}'.format(url)
time.sleep(1)
return r.content
except:
# print '图片下载失败!url: {}'.format(url)
time.sleep(1)
return None
def up_to_server(self, respone, filename):
'''
将原图下载,并上传到阿里云服务器
Args:
url :图片的源地址
filename:图片文件名
'''
# 设置文件目录
web_folder = "comments/" + filename
try:
status = self.oss.put(web_folder, respone).status
if status != 200:
print '图片上传失败了'
else:
pass
# print filename, '上传成功'
except:
pass
else:
# print("deal_response_image", url)
pass
def format_img_url(self):
img_head = 'http://website201710.oss-cn-shanghai.aliyuncs.com/comments/'
img_name = '{}.jpg'.format(uuid.uuid1())
aliyun_url = '{}{}'.format(img_head, img_name)
return aliyun_url, img_name
def strip_img(self, html):
try:
tree = fromstring(html.decode('utf-8'))
imgs = tree.xpath('.//img')
for img in imgs:
img_src = img.get('src')
# st = time.time()
response = self.get_image_respone(img_src)
# print("get_image_respone end time:{}".format(time.time() - st))
if response:
aliyun_url,filename = self.format_img_url()
img.set('src',aliyun_url)
self.up_to_server(response, filename)
else:
img.getparent().remove(img)
content = etree.tostring(tree, encoding='utf-8', method='html').strip()
return content[5:-6]
except:
pass
def get_all_id_content(self,id_num=0):
sql = """select id,content from comments limit {},500""".format(id_num)
data = self.db.query(sql)
if data:
for row in data:
_id = row[0]
content = row[1]
yield (_id,content)
else:
time.sleep(60*5)
def get_tasks(self):
while 1:
# if self.q.qsize() < 400:
print("get_tasks")
for each in self.get_all_id_content():
self.q.put(each)
else:
time.sleep(60*5)
@staticmethod
def find_img(s):
pattern = re.compile(r'src="(.*?)"')
return re.search(pattern,s)
def deal_task(self):
time.sleep(2)
while 1:
try:
id_content = self.q.get()
_id = id_content[0]
html = id_content[1]
if self.find_img(id_content[1]):
content = self.strip_img(html)
update_sql = """update `comments` set content="{}" where id = {}""".format(MySQLdb.escape_string(base64.b64encode(str(content))), _id)
self.db.do(update_sql)
print("insert: {}".format(_id))
else:
# i = time.time()
update_sql = """update `comments` set content="{}" where id = {}""".format(MySQLdb.escape_string(base64.b64encode(str(html))), _id)
self.db.do(update_sql)
# print("update_sql:{}".format(time.time() -i))
except:
print('queue is empty!')
time.sleep(60*5)
def start(self):
thread_list = []
thread_list.append(threading.Thread(target=self.get_tasks))
for i in range(10):
t = threading.Thread(target=self.deal_task)
thread_list.append(t)
for t in thread_list:
t.start()
if __name__ == '__main__':
baidu = BaiduImage()
baidu.start()
| [
"2829969299@qq.com"
] | 2829969299@qq.com |
f5ce343944e0e5aa368aec4ed178529bc5d92d25 | 46dc1ef28634ea1a2fdf419aeec4f60a001a3045 | /aldryn_blog/search_indexes.py | 415c439b8ce26c9dfe6cc975732aedf6b932d585 | [] | no_license | growlf/aldryn-blog | f143d256628f9d2584f23ef8c4680c89324b7d21 | 88d484677cb54f8650c4b69e3856e1d84dc7ef73 | refs/heads/master | 2021-01-21T03:09:44.545168 | 2014-09-11T22:32:37 | 2014-09-11T22:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | # -*- coding: utf-8 -*-
from django.db.models import Q
from django.template import RequestContext
from aldryn_search.utils import get_index_base, strip_tags
from .conf import settings
from .models import Post
class BlogIndex(get_index_base()):
haystack_use_for_indexing = settings.ALDRYN_BLOG_SEARCH
INDEX_TITLE = True
def get_title(self, obj):
return obj.title
def get_description(self, obj):
return obj.lead_in
def get_language(self, obj):
return obj.language
def prepare_pub_date(self, obj):
return obj.publication_start
def get_index_queryset(self, language):
queryset = self.get_model().published.all()
return queryset.filter(Q(language=language)|Q(language__isnull=True))
def get_model(self):
return Post
def get_search_data(self, obj, language, request):
lead_in = self.get_description(obj)
text_bits = [strip_tags(lead_in)]
plugins = obj.content.cmsplugin_set.filter(language=language)
for base_plugin in plugins:
instance, plugin_type = base_plugin.get_plugin_instance()
if not instance is None:
content = strip_tags(instance.render_plugin(context=RequestContext(request)))
text_bits.append(content)
return ' '.join(text_bits)
| [
"commonzenpython@gmail.com"
] | commonzenpython@gmail.com |
274610d865dddc45fbe3bc9b639412a3e22bd912 | 56495b71151fb304957a6e4478bcd9538efc3ae4 | /sites/scnews/management/commands/crawl.py | 5a9681735b7b85197bef9db53008ac6c44c75697 | [] | no_license | qq40660/scnews | 4ea6ec18966f9b0662f8c3dc25cd2385bcb1b568 | 565701c3ba42d97cf7fb057b88793c6de2a582e2 | refs/heads/master | 2021-01-24T22:52:13.038343 | 2011-09-22T14:46:58 | 2011-09-22T14:46:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from datetime import datetime
from django.core.management.base import NoArgsCommand
from scnews.models import Resource
from scnews.management.crawl_helper import fetch
class Command(NoArgsCommand):
help = "do crawl"
def handle_noargs(self, **options):
reses = Resource.objects.all()
for res in reses:
if res.updated_on:
t = datetime.now() - res.updated_on
if (t.seconds + t.days * 3600 * 24)< res.interval:
continue
fetch(res)
| [
"zbirder@gmail.com"
] | zbirder@gmail.com |
494c511f36bae45d4e6f16d0623ac7874be3ea7d | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200627214717.py | 4371ba152db9d2cf063ba237ef9633f663fbe53a | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 993 | py | # -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
# import xlml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
name = 'movies'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/board/4']
# def parse(self, response):
# pass
def start_requests(self):
url = f'https://maoyan.com/board/4'
print(url)
yield scrapy.Request(url=url,callback=self.parse)
def parse(self, response):
soup = bs(response.text,'html.parser')
print(soup.text)
return soup
for i in soup.find_all('div',attrs={'class' : 'movie-item-info'}):\
item = MaoyanspidersItem()
link = 'https://maoyan.com/'+i.get('href'.text)
item['films_name'] = 'name'
item['release_time'] = "tiome"
yield scrapy.Request(url=link, meta={'item':item},callback=self.parse1)
return item
def parse1()
| [
"31039587+ydbB@users.noreply.github.com"
] | 31039587+ydbB@users.noreply.github.com |
20b05c2331ef559dbd95fad901ddccaa652a44fe | 4c44c593048fa4e00fb0334209632a286886efd9 | /import_template_pricelist_item/wizards/__init__.py | e9e9d3a81880c84d81cb7990a27496f16cf660a7 | [] | no_license | treytux/trey-addons | 0c3fec43c584d46bd299b4bca47dcc334bedca60 | 1cda42c0eae702684badce769f9ec053c59d6e42 | refs/heads/12.0 | 2023-06-08T21:56:09.945084 | 2023-05-29T10:05:53 | 2023-05-29T10:05:53 | 114,281,765 | 19 | 49 | null | 2023-05-29T10:05:55 | 2017-12-14T18:10:39 | Python | UTF-8 | Python | false | false | 285 | py | ###############################################################################
# For copyright and license notices, see __manifest__.py file in root directory
###############################################################################
from . import import_template_pricelist_item
| [
"roberto@trey.es"
] | roberto@trey.es |
32b9d52f74f5208eb15784bcc0eb738b10f01bcc | ec45bee420713f64d2d00a5d1c15a9a5f66a940b | /my_cv/polyp/images_checker.py | 229bbefbb64ab79b935446639eb8c5a0caf9188c | [
"MIT"
] | permissive | strawsyz/straw | a7dc5afef9525eeb3b1a471b5a90d869a3ba5084 | cdf785856941f7ea546aee56ebcda8801cbb04de | refs/heads/master | 2023-06-08T17:18:53.073514 | 2023-06-05T05:51:41 | 2023-06-05T05:51:41 | 253,447,370 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | import os
from PIL import Image
from matplotlib import pyplot as plt
def read_img(file_name):
img = Image.open(file_name)
# 防止一个通道的图像无法正常显示
img = img.convert('RGB')
return img
def on_key_release(event):
if event.key == 'n':
if index[0] < len(file_names) - 1:
index[0] += 1
show_images(file_names[index[0]])
else:
print("It's the last image")
elif event.key == "b":
if index[0] > 0:
index[0] -= 1
show_images(file_names[index[0]])
else:
print("It's the first image")
def show_images(file_name):
fig.suptitle(file_name)
for dir_path, ax in zip(dir_paths, axs):
image_path = os.path.join(dir_path, file_name)
ax.imshow(read_img(image_path), cmap='gray')
ax.set_title(file_name)
ax.imshow(read_img(image_path))
plt.axis("off")
# ubuntu上调用两次的plt.show()的话会报错,要用下面的函数
fig.canvas.draw()
if __name__ == '__main__':
"""比较不同文件下的同名图像"""
file_names = []
# MASK_PATH = "D:\Download\datasets\polyp\\06\mask"
# EDGE_PATH = 'D:\Download\datasets\polyp\\06\edge'
# EDGE_PATH1 = "D:\Download\datasets\polyp\\06\edge1"
# dir_paths = [MASK_PATH, EDGE_PATH, EDGE_PATH1]
data_path = "/home/straw/Downloads/dataset/polyp/TMP/07/data"
mask_path = "/home/straw/Downloads/dataset/polyp/TMP/07/mask"
predict_path = "/home/straw/Download\models\polyp\\result/2020-08-06/"
predict_path = "/home/straw/Download\models\polyp\\result/2020-09-01/"
dir_paths = [data_path, mask_path, predict_path]
for file_name in os.listdir(dir_paths[-1]):
file_names.append(file_name)
fig, (axs) = plt.subplots(1, len(dir_paths))
fig.canvas.mpl_connect("key_release_event", on_key_release)
# 取消默认快捷键的注册
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
index = [0]
show_images(file_names[index[0]])
plt.show()
| [
"836400042@qq.com"
] | 836400042@qq.com |
44eee656ac7d9e1c47346f6e1961f4b82dae1008 | 2c7f025568bceb560888d26828aef30e5ae23393 | /src/concursos/migrations/0002_auto_20170321_1830.py | eb0f05da9b9d6366bebeae666869ac539f4f5492 | [] | no_license | GustavoCruz12/educacao | 6271ebc71830ee1964f8311d3ef21ec8abf58e50 | d0faa633ed1d588d84c74a3e15ccf5fa4dd9839e | refs/heads/master | 2022-12-08T09:34:42.066372 | 2018-08-03T06:38:49 | 2018-08-03T06:38:49 | 143,387,426 | 0 | 0 | null | 2022-12-08T00:01:52 | 2018-08-03T06:31:03 | Python | UTF-8 | Python | false | false | 615 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-21 18:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('concursos', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='concurso',
name='ano',
field=models.IntegerField(verbose_name='ano'),
),
migrations.AlterField(
model_name='concurso',
name='numero',
field=models.IntegerField(verbose_name='numero'),
),
]
| [
"gustavocruz201419@gmail.com"
] | gustavocruz201419@gmail.com |
60686257e8848aa55f3f6ecb9c5d55e0fc77b012 | 00b405a49ac6108d24986243c4b52fa53fb58acc | /0376_wiggle_subsequence.py | 7fd34c5d4608ece36be102ba9360cc42b4674715 | [] | no_license | Shin-jay7/LeetCode | 0325983fff95bfbc43a528812582cbf9b7c0c2f2 | 953b0b19764744753f01c661da969bdab6521504 | refs/heads/master | 2023-07-19T07:17:21.513531 | 2023-07-15T06:05:06 | 2023-07-15T06:05:06 | 231,285,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | from __future__ import annotations
from typing import List
from functools import cache
# Define by dp(i, 1) the biggest length of wiggle subsequense,
# which ends with element nums[i] and has and increasing status,
# and dp(i, -1) is the biggest length of wiggle subsequence,
# which ends with element nums[i] and has decreasing status.
class Solution:
def wiggleMaxLength(self, nums: List[int]) -> int:
n = len(nums)
@cache
def dp(i, s):
if i == 0:
return 1
return dp(i-1, -s) + 1 if (nums[i]-nums[i-1])*s < 0 else dp(i-1, s)
return max(dp(n-1, -1), dp(n-1, 1))
| [
"shin@jay7.net"
] | shin@jay7.net |
3a78b3c9f78cf699f6619052bb18e27fc24b052f | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Opraz_gastro-resistant_capsule,_hard_SmPC.py | 7a328ba25d2c90a2e76c4dff8053f689782d112c | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py | {'_data': [['Common',
[['Nervous system', u'Huvudv\xe4rk'],
['GI',
u'diarr\xe9, f\xf6rstoppning, buksm\xe4rtor, illam\xe5ende/ kr\xe4kningar och gasbildning']]],
['Uncommon',
[['Psychiatric', u'S\xf6mnbesv\xe4r Agitation,'],
['Nervous system', u'Yrsel, parestesier, d\xe5sighet'],
['Ear', u'Vertigo'],
['Hepato', u'\xd6kade lever-enzymer'],
['Skin', u'Dermatit, kl\xe5da, hudutslag, urtikaria'],
['Musculoskeletal',
u'H\xf6ft-, handleds-eller kotfrakture r (se Varningar och f\xf6rsiktighet [4.4.])'],
['General', u'Sjukdoms-k\xe4nsla, perifera \xf6dem']]],
['Rare',
[['Blood', u'Leukopeni, trombocytopeni'],
['Immune system',
u'\xd6verk\xe4nslighets-reaktioner s\xe5som feber, angio\xf6dem och anafylaktisk reaktion/chock'],
['Metabolism', u'Hyponatremi'],
['Psychiatric', u'f\xf6rvirring, depression'],
['Nervous system', u'Smakf\xf6r\xe4ndringar'],
['Eye', u'Dimsyn'],
['Respiratory', u'Bronkospasm'],
['GI', u'Muntorrhet, stomatit, gastrointestinal candida'],
['Hepato', u'Hepatit med eller utan gulsot'],
['Skin', u'H\xe5ravfall, fotosensibilitet'],
['Musculoskeletal', u'Artralgi, myalgi'],
['Renal', u'Interstitiell nefrit'],
['General', u'\xd6kad svettning']]],
['Very rare',
[['Blood', u'Pancytopeni, agranulocytos'],
['Psychiatric', u'Aggression, hallucinationer'],
['Hepato', u'Leversvikt, encefalopati hos leversjuka patienter'],
['Skin',
u'Erythema multiforme, Stevens-Johnsons syndrom, toxisk epidermal nekrolys (TEN)'],
['Musculoskeletal', u'Muskeltr\xf6tthet'],
['Reproductive system', u'Gynekomasti']]],
['Unknown',
[['Metabolism', u'Hypo-magnesemi (se Varningar och f\xf6rsiktighet [4.4.])']]]],
'_note': u' ?MSFU',
'_pages': [7, 9],
u'_rank': 29,
u'_type': u'MSFU'} | [
"urudaro@gmail.com"
] | urudaro@gmail.com |
28490f6396d9d23366dc94354780a129da17a33c | b3b713f0a713e14cdab774f5d9703add02fbb136 | /layouts/inconsistencias.py | a0c0f8ce9b95fe65eedb3422c72061127e722d22 | [] | no_license | DS4A-team34/ds4a_application | dba9da1d271396c2f50095ea86230cf2cf9f0c4d | 736c69e002cf4a46f83cbd8c522ee6b0029f0793 | refs/heads/master | 2023-01-10T08:48:44.084758 | 2020-11-16T02:26:31 | 2020-11-16T02:26:31 | 306,533,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,858 | py | import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from controls import df_x, grupo_dict, grupo_options, fig_top_contratistas_good, fig_top_contratistas_bad, figure_fields_incons,figure_similarity
available_indicators = df_x.entidadnombre.unique()
# Colors
bgcolor = "#f3f3f1" # mapbox light map land color
bar_bgcolor = "#b0bec5" # material blue-gray 200
bar_unselected_color = "#78909c" # material blue-gray 400
bar_color = "#546e7a" # material blue-gray 600
bar_selected_color = "#37474f" # material blue-gray 800
bar_unselected_opacity = 0.8
# Figure template
row_heights = [150, 500, 300]
template = {"layout": {"paper_bgcolor": bgcolor, "plot_bgcolor": bgcolor}}
def blank_fig(height):
"""
Build blank figure with the requested height
"""
return {
"data": [],
"layout": {
"height": height,
"template": template,
"xaxis": {"visible": False},
"yaxis": {"visible": False},
},
}
layout = html.Div(
[
html.H3('Métricas generales de inconsistencias'),
html.Div(
[
html.Div(
[dcc.Graph(id="avg-similarity", figure=figure_similarity)],
className="pretty_container twelve columns",
),
],
className="row flex-display",
),
html.Div(
[
html.Div(
[dcc.Graph(id="fields-inconsistencias", figure=figure_fields_incons)],
className="pretty_container twelve columns",
),
],
className="row flex-display",
),
html.Div(
[
html.Div(
[dcc.Graph(id="contratistas-bad", figure=fig_top_contratistas_bad)],
className="pretty_container twelve columns",
),
],
className="row flex-display",
),
# html.Div(
# [
# html.Div(
# [dcc.Graph(id="contratistas-good", figure=fig_top_contratistas_good)],
# className="pretty_container twelve columns",
# ),
# ],
# className="row flex-display",
# ),
html.H3('Control de inconsistencias por entidades'),
html.Div(id='main-selector', children=[
html.Div(id="select-container", children=[
html.P(
id="chart-selector", children="Filtrar por entidad:"),
dcc.Dropdown(id="entidad-dropdown",
options=[
{'label': i, 'value': i} for i in available_indicators],
value="Nombre de entidad",
)
],),
html.Div(id='select-grupo', children=[
html.P(
id="text-grupo", className="control_label", children="Filtrar por grupo del contrato:"),
dcc.RadioItems(id='radio-item-grupo',
className="dcc_control",
options=grupo_options,
value='Grupo',
labelStyle={'display': 'inline-block'}),
]),
],
# className="pretty_container"
),
html.Div(id='contenedor', children=[
html.Div(id='valor-contrato', children=[
html.H4(id='vc1', children=" Total valor cuantías"),
html.H5(id='total-valor-contrato-text', className="valor-text"),
],),
html.Div(id='valor-contrato1', children=[
html.H4(id='vc2', children=" Total valor cuantía con adiciones"),
html.H5(id='total-valor-adiciones-text', className="valor-text"),
],),
html.Div(id='valor-contrato2', children=[
html.H4(id='vc3', children=" Porcentaje promedio de similitud"),
daq.GraduatedBar(
id='ooc_graph_id',
color={
"gradient": True,
"ranges": {
"red": [0, 7],
"yellow": [7, 9],
"green": [9, 10],
}
},
showCurrentValue=True,
max=10,
value=0,
),
],),
html.Div(id='valor-contrato3', children=[
html.H4(id='vc4', children=" Cantidad de contratos"),
html.H5(id='total-cantidad-text', className="valor-text"),
],),
], style={'columnCount': 2}),
]
)
| [
"jjescobar@uninorte.edu.co"
] | jjescobar@uninorte.edu.co |
4c9f7a10aac9bcbb4fdd921caa723ca73f12358e | d475a6cf49c0b2d40895ff6d48ca9b0298643a87 | /pyleecan/Classes/Surface.py | 41c01627adfcca5da4eb333e28f6be696120ded0 | [
"Apache-2.0"
] | permissive | lyhehehe/pyleecan | 6c4a52b17a083fe29fdc8dcd989a3d20feb844d9 | 421e9a843bf30d796415c77dc934546adffd1cd7 | refs/heads/master | 2021-07-05T17:42:02.813128 | 2020-09-03T14:27:03 | 2020-09-03T14:27:03 | 176,678,325 | 2 | 0 | null | 2019-03-20T07:28:06 | 2019-03-20T07:28:06 | null | UTF-8 | Python | false | false | 6,513 | py | # -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Geometry/Surface.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Geometry/Surface
"""
from os import linesep
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Geometry.Surface.comp_mesh_dict import comp_mesh_dict
except ImportError as error:
comp_mesh_dict = error
try:
from ..Methods.Geometry.Surface.draw_FEMM import draw_FEMM
except ImportError as error:
draw_FEMM = error
try:
from ..Methods.Geometry.Surface.plot import plot
except ImportError as error:
plot = error
try:
from ..Methods.Geometry.Surface.split_line import split_line
except ImportError as error:
split_line = error
from ._check import InitUnKnowClassError
class Surface(FrozenClass):
"""SurfLine define by list of lines that delimit it, label and point reference."""
VERSION = 1
# Check ImportError to remove unnecessary dependencies in unused method
# cf Methods.Geometry.Surface.comp_mesh_dict
if isinstance(comp_mesh_dict, ImportError):
comp_mesh_dict = property(
fget=lambda x: raise_(
ImportError(
"Can't use Surface method comp_mesh_dict: " + str(comp_mesh_dict)
)
)
)
else:
comp_mesh_dict = comp_mesh_dict
# cf Methods.Geometry.Surface.draw_FEMM
if isinstance(draw_FEMM, ImportError):
draw_FEMM = property(
fget=lambda x: raise_(
ImportError("Can't use Surface method draw_FEMM: " + str(draw_FEMM))
)
)
else:
draw_FEMM = draw_FEMM
# cf Methods.Geometry.Surface.plot
if isinstance(plot, ImportError):
plot = property(
fget=lambda x: raise_(
ImportError("Can't use Surface method plot: " + str(plot))
)
)
else:
plot = plot
# cf Methods.Geometry.Surface.split_line
if isinstance(split_line, ImportError):
split_line = property(
fget=lambda x: raise_(
ImportError("Can't use Surface method split_line: " + str(split_line))
)
)
else:
split_line = split_line
# save method is available in all object
save = save
# generic copy method
def copy(self):
"""Return a copy of the class
"""
return type(self)(init_dict=self.as_dict())
# get_logger method is available in all object
get_logger = get_logger
def __init__(self, point_ref=0, label="", init_dict=None, init_str=None):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for Matrix, None will initialise the property with an empty Matrix
for pyleecan type, None will call the default constructor
- __init__ (init_dict = d) d must be a dictionnary with every properties as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Initialisation by str
from ..Functions.load import load
assert type(init_str) is str
# load the object from a file
obj = load(init_str)
assert type(obj) is type(self)
point_ref = obj.point_ref
label = obj.label
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "point_ref" in list(init_dict.keys()):
point_ref = init_dict["point_ref"]
if "label" in list(init_dict.keys()):
label = init_dict["label"]
# Initialisation by argument
self.parent = None
self.point_ref = point_ref
self.label = label
# The class is frozen, for now it's impossible to add new properties
self._freeze()
def __str__(self):
"""Convert this objet in a readeable string (for print)"""
Surface_str = ""
if self.parent is None:
Surface_str += "parent = None " + linesep
else:
Surface_str += "parent = " + str(type(self.parent)) + " object" + linesep
Surface_str += "point_ref = " + str(self.point_ref) + linesep
Surface_str += 'label = "' + str(self.label) + '"' + linesep
return Surface_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
if other.point_ref != self.point_ref:
return False
if other.label != self.label:
return False
return True
def as_dict(self):
"""Convert this objet in a json seriable dict (can be use in __init__)
"""
Surface_dict = dict()
Surface_dict["point_ref"] = self.point_ref
Surface_dict["label"] = self.label
# The class name is added to the dict fordeserialisation purpose
Surface_dict["__class__"] = "Surface"
return Surface_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)."""
        # Assignments go through the property setters
        self.point_ref = None
        self.label = None
    def _get_point_ref(self):
        """getter of point_ref"""
        return self._point_ref

    def _set_point_ref(self, value):
        """setter of point_ref"""
        # check_var (module-level helper) validates the type, raising on mismatch
        check_var("point_ref", value, "complex")
        self._point_ref = value

    # Read/write property wrapping the private _point_ref with validation
    point_ref = property(
        fget=_get_point_ref,
        fset=_set_point_ref,
        doc=u"""Center of symmetry

        :Type: complex
        """,
    )
    def _get_label(self):
        """getter of label"""
        return self._label

    def _set_label(self, value):
        """setter of label"""
        # check_var (module-level helper) validates the type, raising on mismatch
        check_var("label", value, "str")
        self._label = value

    # Read/write property wrapping the private _label with validation
    label = property(
        fget=_get_label,
        fset=_set_label,
        doc=u"""Label of the surface

        :Type: str
        """,
    )
| [
"sebgue@gmx.net"
] | sebgue@gmx.net |
ce83b7e7acb77eba6650ed2dfdf71e0df86e7df0 | feabe8532bfd7656d9a7d72c574ab8bb1bead896 | /py3-study/面向对象课上代码/1901/9-10/作业.py | 5df7340251a636066cdc866a812b5f4d91d93abf | [] | no_license | liuluyang/mk | bbbc887a432d40d23c20bf59453bbece8dc6e72f | 167c86be6241c6c148eb586b5dd19275246372a7 | refs/heads/master | 2020-08-03T15:02:24.406937 | 2020-01-04T08:20:32 | 2020-01-04T08:20:32 | 211,793,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # ! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Miller"
# Datetime: 2019/9/10 16:55
"""
创建一个Person类
1.
有私有属性:name. age, gender
但是有两个公开属性info, isAdult
info属性以元组的形式返回该对象所有私有属性
isAdult属性返回该对象是否成年 True or False 注:>=18是成年
2.
有一个方法birthday
每次调用这个方法,都会长一岁
"""
class Person:
    """A person with private name, age and gender attributes.

    Exposes two read-only properties:
    - ``info``: all private attributes as a tuple (name, age, gender)
    - ``isAdult``: True when the person is at least 18 years old

    Calling :meth:`birthday` increases the age by one year.
    """

    def __init__(self, name, age, gender):
        self.__name = name
        self.__age = age
        self.__gender = gender

    @property
    def info(self):
        """Return (name, age, gender) as a tuple."""
        return (self.__name, self.__age, self.__gender)

    @property
    def isAdult(self):
        """Return True if the person is 18 or older, otherwise False."""
        return self.__age >= 18

    def birthday(self):
        """Celebrate a birthday: the person becomes one year older."""
        self.__age += 1
"1120773382@qq.com"
] | 1120773382@qq.com |
cd2dc31139ee6be90efa1bbf27d589a6121b26ea | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/batch/jm/parser_errors_2/840141986.py | 0988245bb5abe2feafffcf2b9486dfd0c79acc20 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 1,335 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 840141986
"""
"""
random actions, total chaos
"""
# 2x3 board, 3 players, each player limited to 2 areas
board = gamma_new(2, 3, 3, 2)
assert board is not None
# Basic moves: 1 = accepted, 0 = rejected
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_golden_possible(board, 3) == 1
# Snapshot of the board rendering (top row first)
board143215469 = gamma_board(board)
assert board143215469 is not None
assert board143215469 == ("..\n"
                          ".2\n"
                          ".1\n")
del board143215469
board143215469 = None
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_busy_fields(board, 3) == 0
assert gamma_golden_move(board, 3, 0, 1) == 0
# Free the underlying C structure
gamma_delete(board)
| [
"jakub@molinski.dev"
] | jakub@molinski.dev |
8163b7b83be82baad2d6aafd4d992123d86b5b7d | ae53410a837876abae440d14c243619dedba51f1 | /Solutions/5.py | 22ecdbd42cc865b3db442ae37a6dc36a2c054b12 | [] | no_license | AvivYaniv/Project-Euler | 68e839ae6d4d1a683aa8723f7c9ab3e55ee1dd28 | ec271404a7280129cdc0af9cf3a07f8faa3ab2f4 | refs/heads/master | 2021-06-25T11:20:12.010985 | 2021-04-25T18:39:12 | 2021-04-25T18:39:12 | 225,074,588 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | import math
N = 20
# All primes below 1000: sufficient to factor every number considered here.
PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]

def GetNumberPrimeFactors(n):
    """Return the prime factorization of n as a list of (prime, exponent) pairs.

    Only primes from the PRIMES table are used, so n must have all of its
    prime factors below 1000. Returns [] for n == 1.
    """
    prime_factors = []
    if n in PRIMES:
        # n itself is prime: single factor with exponent 1
        prime_factors.append((n, 1))
    else:
        d = n
        for p in PRIMES:
            if d == 1:
                break
            c = 0
            while d % p == 0:
                # Floor division keeps d an int; the original `d = d / p` is
                # Python-2 division and makes d a float (losing exactness) on
                # Python 3.
                d //= p
                c += 1
            if c > 0:
                prime_factors.append((p, c))
    return prime_factors
def GetLeastCommonDivisorTill(n):
    """Return the least common multiple of all integers in [2, n]."""
    # For every prime, remember the highest exponent seen in any factorization.
    highest_power = {}
    for value in range(2, n + 1):
        for prime, count in GetNumberPrimeFactors(value):
            if count > highest_power.get(prime, 0):
                highest_power[prime] = count
    # The LCM is the product of each prime raised to its highest exponent.
    result = 1
    for prime, count in highest_power.items():
        result *= prime ** count
    return result
# Main
def main():
    """Print the LCM of 1..N (Project Euler #5; expected answer: 232792560)."""
    # Parenthesized call works on both Python 2 and Python 3; the original used
    # the Python-2-only `print` statement, which is a SyntaxError on Python 3.
    print(GetLeastCommonDivisorTill(N))

if __name__ == "__main__":
    main()
| [
"avivyaniv@gmx.com"
] | avivyaniv@gmx.com |
dc913812fd4788f60339109e8dd78e0fe48bcd66 | 1ac96e752d08b1b74676262137bdef9071f12827 | /test/pipeline/test_build.py | ba98dade73263dbbca782da216e4aab0ba710c45 | [] | no_license | nornir/nornir-buildmanager | cf8e06e4e0ff769f07ea46345be386152bf02f84 | f9493538945984626e921b453b378b8bcbc117d7 | refs/heads/master | 2023-06-08T21:17:32.049308 | 2018-02-27T01:24:13 | 2018-02-27T01:24:13 | 14,442,428 | 2 | 0 | null | 2013-12-17T22:09:07 | 2013-11-16T05:44:07 | Python | UTF-8 | Python | false | false | 2,340 | py | '''
Created on Feb 22, 2013
@author: u0490822
'''
import glob
import unittest
from setup_pipeline import *
class PrepareThenMosaicTest(PrepareThroughAssembleSetup):
    '''Run the build with prepare, then run again with mosaic'''

    # Transform names expected under each channel after the prepare stage.
    TransformNames = ["translate", "grid", "zerogrid", "stage"]

    @property
    def VolumePath(self):
        # Test volume directory name
        return "6750"

    @property
    def Platform(self):
        return "PMG"

    def CheckTransformsExist(self, VolumeObj, TransformNames=None):
        """Assert that every expected transform exists under the first channel."""
        if TransformNames is None:
            TransformNames = PrepareThenMosaicTest.TransformNames
        ChannelNode = VolumeObj.find("Block/Section/Channel")
        self.assertIsNotNone(ChannelNode)
        for tname in TransformNames:
            TransformNode = ChannelNode.GetChildByAttrib("Transform", "Name", tname)
            # Bug fix: the original asserted ChannelNode again, leaving
            # TransformNode unchecked (and unused).
            self.assertIsNotNone(TransformNode)

    def TileFiles(self, tilesetNode, downsample):
        """Return the tile files for the given downsample level, asserting some exist."""
        levelNode = tilesetNode.GetLevel(downsample)
        self.assertIsNotNone(levelNode)
        files = glob.glob(os.path.join(levelNode.FullPath, "*" + tilesetNode.FilePostfix))
        self.assertGreater(len(files), 0, "Missing tiles")
        return files

    def CheckTilesetExists(self, VolumeObj):
        """Assert that a tileset exists with consistent tiles across downsample levels."""
        TilesetNode = VolumeObj.find("Block/Section/Channel/Filter/Tileset")
        self.assertIsNotNone(TilesetNode)
        FullResTiles = self.TileFiles(TilesetNode, 1)
        DSTwoTiles = self.TileFiles(TilesetNode, 2)
        # Each downsample-2 tile covers 4 full-res tiles, so at least 1/4 as many.
        self.assertGreaterEqual(len(DSTwoTiles), len(FullResTiles) / 4, "Downsample level seems to be missing assembled tiles")
        FullResTiles.sort()
        DSTwoTiles.sort()
        self.assertEqual(os.path.basename(FullResTiles[0]),
                         os.path.basename(DSTwoTiles[0]),
                         "Tiles at different downsample levels should use the same naming convention")

    def runTest(self):
        # Import the files
        # self.CheckTransformsExist(VolumeObj)
        buildArgs = self._CreateBuildArgs('AssembleTiles', '-Shape', '512,512')
        build.Execute(buildArgs)
        # Load the meta-data from the volumedata.xml file
        VolumeObj = VolumeManager.Load(self.TestOutputPath)
        self.CheckTilesetExists(VolumeObj)
if __name__ == "__main__":
    # To run a single test: import sys; sys.argv = ['', 'Test.testName']
    unittest.main()
| [
"james.r.andreson@utah.edu"
] | james.r.andreson@utah.edu |
a2e7148df94a9924ffa6228fc87c8ae3b5ed95bd | 66ac12d64422cfbe1aedf34b66ee8750b595ca58 | /spensum/module/position.py | 39645869b7aa8c6d1bf776f418344966744727b0 | [] | no_license | kedz/spensum | 884c2bbfeaacce875adf8fe5dff230e7c47b68ca | 989f5036543abadc616f7ce10477e716f6e88105 | refs/heads/master | 2021-09-13T21:07:33.727862 | 2018-02-02T19:13:14 | 2018-02-02T19:13:14 | 112,295,203 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | from .spen_module import SpenModule
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class Position(SpenModule):
def __init__(self, num_positions=50, name="Position", mask_value=-1,
burn_in=0):
super(Position, self).__init__(
name=name, mask_value=mask_value, burn_in=burn_in)
self.num_positions_ = num_positions
self.embedding = nn.Embedding(num_positions + 1, 1, padding_idx=0)
@property
def num_positions(self):
return self.num_positions_
def compute_features(self, inputs, inputs_mask=None, targets_mask=None):
position = inputs.position.squeeze(2).clamp(0, self.num_positions)
logits = self.embedding(position).squeeze(2)
return logits
def forward_pass(self, inputs, features, inputs_mask=None,
targets_mask=None):
return features
def compute_energy(self, inputs, features, targets, inputs_mask=None,
targets_mask=None):
if targets_mask is None:
targets_mask = inputs.embedding[:,:,0].eq(self.mask_value)
pos_probs = torch.sigmoid(features)
pos_energy = -targets * pos_probs
neg_probs = 1 - pos_probs
neg_energy = -(1 - targets) * neg_probs
pointwise_energy = (pos_energy + neg_energy).masked_fill(
targets_mask, 0)
length = Variable(inputs.length.data.float().view(-1, 1))
total_energy = pointwise_energy.sum(1, keepdim=True)
mean_energy = total_energy / length
return mean_energy
| [
"kedzie@cs.columbia.edu"
] | kedzie@cs.columbia.edu |
cd3406d17daddd03c1243d992e77947b8c7b3e31 | bdbf05347487bc94da2ab43c069030491aad30c1 | /bd_log_analysis.py | 133b4facc0f461d050719f6f7891244e197fe37c | [] | no_license | birkin/bd_log_analysis | d1baf6375bb73ae615ece3ea006f2c78cca93574 | 59b46c92be5beb2fe88590a4d316e55901e6c2d8 | refs/heads/master | 2021-01-22T11:48:00.706991 | 2015-05-20T12:52:20 | 2015-05-20T12:52:20 | 35,901,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,048 | py | # -*- coding: utf-8 -*-
""" Compares new borrowdirect api results against current in-production deprecated code.
Parses logging added to controller that summarizes differences between new and old tunneler calls. """
import glob, json, os, pprint
class Analyzer( object ):
    """Parses easyborrow controller logs and tallies how often each
    new-vs-old API comparison pattern occurs.

    NOTE: this is a Python 2 codebase (uses ``unicode`` and ``str.decode``).
    """

    def __init__( self ):
        # Directory holding the `easyborrow_controller.log*` files to scan.
        self.LOGS_DIR = unicode( os.environ[u'BDLOG_ANALYSIS__LOGS_DIR'] )
        self.filepaths_list = []
        # The four labels whose lines make up one comparison entry.
        self.labels = [ u'new_api_found', u'new_api_requestable', u'old_api_found', u'old_api_requestable' ]
        # Maps normalized-pattern json -> occurrence count, plus u'total_entries'.
        self.summary = {}

    def prep_filepaths_list( self ):
        """ Creates array of log filepaths.
            Called by if __name__ """
        os.chdir( self.LOGS_DIR )
        for f in glob.glob("easyborrow_controller.log*"):
            filepath = os.path.join( self.LOGS_DIR, f )
            self.filepaths_list.append( filepath )
        return

    def process_log_files( self ):
        """ Processes each log file, updating counts.
            Called by if __name__ """
        for filepath in self.filepaths_list:
            with open( filepath ) as f:
                lines_utf8 = f.readlines()
            self.parse_log_file( lines_utf8 )
        return

    ## helpers

    def parse_log_file( self, lines_utf8 ):
        """ Parses given lines to update counts.
            Called by process_log_files() """
        relevant_segments = self.find_relevant_segments( lines_utf8 )
        cleaned_lines = self.clean_relevant_segments( relevant_segments )
        self.update_counts( cleaned_lines )
        return

    def find_relevant_segments( self, lines_utf8 ):
        """ Finds comparison lines and merges each group of four into one string.
            Called by parse_log_file() """
        ( segments, segment ) = ( [], [] )
        for line_utf8 in lines_utf8:
            line = line_utf8.decode( u'utf-8' )
            for label in self.labels:
                if label in line:
                    segment.append( line )
            if len( segment ) == 4:
                joined_segment = u''.join( segment )
                segments.append( joined_segment )
                segment = []
        return segments

    def clean_relevant_segments( self, relevant_segments ):
        """ Turns each messy segment into a normalized json string; json isn't
            consumed, it's used to canonicalize the strings for counting.
            Called by parse_log_file() """
        cleaned_lines = []
        for line in relevant_segments:
            # The payload sits between the first and last backtick.
            start = line.find( u'`' ) + 1
            end = line.rfind( u'`' )
            str1 = line[start:end]
            str2 = self.run_replaces( str1 )
            dct = json.loads( str2 )
            jsn = json.dumps( dct, sort_keys=True )
            cleaned_lines.append( jsn.decode(u'utf-8') )
        return cleaned_lines

    def run_replaces( self, str1 ):
        """ Runs a series of replaces to turn a python-repr dict into json.
            Called by clean_relevant_segments() """
        str2 = str1.replace( u'\n', u'' )
        str3 = str2.replace( u"'", u'"' )
        str4 = str3.replace( u'u"', u'"' )
        str5 = str4.replace( u'True', u'true' )
        str6 = str5.replace( u'False', u'false' )
        str7 = str6.replace( u'None', u'null' )
        return str7

    def update_counts( self, cleaned_lines ):
        """ Updates per-pattern counts and the overall entry total.
            Called by parse_log_file() """
        if u'total_entries' in self.summary.keys():
            self.summary[u'total_entries'] += len(cleaned_lines)
        else:
            self.summary[u'total_entries'] = len(cleaned_lines)
        for pattern in cleaned_lines:
            if pattern in self.summary.keys():
                self.summary[pattern] += 1
            else:
                # Bug fix: a pattern's first occurrence must count as 1, not 0;
                # the original initialized to 0, making every count off by one.
                self.summary[pattern] = 1
        return
if __name__ == u'__main__':
    """ Loads and parses logs and prints summary.
        Called manually. """
    # Requires the BDLOG_ANALYSIS__LOGS_DIR environment variable to be set.
    anlyzr = Analyzer()
    anlyzr.prep_filepaths_list()
    # pprint.pprint( anlyzr.filepaths_list )
    anlyzr.process_log_files()
    pprint.pprint( anlyzr.summary )
| [
"birkin.diana@gmail.com"
] | birkin.diana@gmail.com |
8b58e97d148a2a4044dc82a11131e9a37053dbee | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pbase/day10/jiangyi/day10/exercise/lambda2.py | 60afaa4052e86373221e016444b15042f5ec9539 | [] | no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # 2. 写一个lambda 表达式来创建函数,此函数返回两个参数的最大值
# def mymax(x, y):
# ...
# mymax = lambda .....
# print(mymax(100, 200)) # 200
# print(mymax("ABC", '123')) # ABC
# Lambda wrapping the built-in max: returns the larger of its two arguments.
mymax = lambda a, b: max(a, b)
print(mymax(100, 200))     # 200
print(mymax("ABC", '123')) # ABC
| [
"442315617@qq.com"
] | 442315617@qq.com |
d287f8c476b7d0115a839f5172afcaeee380108d | d8c07694387202f7c72b30ddc9fc7835637f2f96 | /faith_pms/forms.py | 4cb7682f59fae2453eab3b88c3b57f3418d842f3 | [] | no_license | shuvro-zz/Patient-Management-System-2 | 7f6c357dc64d4e81e1ca976a42b83f915ba7fee2 | f36db339b680e8bbdff1ef3d42ba07809e985bd1 | refs/heads/master | 2020-08-18T20:06:06.021595 | 2018-03-23T12:10:55 | 2018-03-23T12:10:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | from django import forms
from .models import Doctor, Patient, NextOfKin, Medicine, MedicalCover, AllergiesAndDirectives, Treatment
#......
class UpdateProfileForm(forms.ModelForm):
    """ModelForm for a doctor to edit their own profile details."""
    class Meta:
        model = Doctor
        fields = ('profile_photo', 'name', 'hospital', 'email', 'description', 'title')
class Meta:
model = Patient
exclude = ('doctor',)
class NewNextOfKinForm(forms.ModelForm):
class Meta:
model = NextOfKin
fields = ('name', 'relationship', 'phone_number', 'email')
class NewMedicineForm(forms.ModelForm):
class Meta:
model = Medicine
fields = ('name','date_given', 'doctor_prescribed')
class MedicalCoverForm(forms.ModelForm):
class Meta:
model = MedicalCover
fields = ('name', 'email', 'type_of_cover')
class AllergiesAndDirectivesForm(forms.ModelForm):
class Meta:
model = AllergiesAndDirectives
fields = ('name', 'level')
class TreatmentForm(forms.ModelForm):
class Meta:
model = Treatment
fields = ('symptoms', 'diagnosis', 'recommendations', 'consultation_fee')
| [
"biinewton382@gmail.com"
] | biinewton382@gmail.com |
6c96a3056472a5a6f6bfccf6a0a581f1dff5d3dc | c5bc4b7f885ca87804feb9cb7d416a6a4e9bed82 | /images/fish-u-quartlet-2.py | b16a5e56225a2e55790b47d4348de08d8ed10237 | [] | no_license | anandology/the-joy-of-programming | d3d4a439665d81bf499aabf127b04a5a5a9cd5bb | 231be9dc97fb8935f490237277d1cf16b28fe366 | refs/heads/master | 2023-01-23T07:45:25.811908 | 2020-12-06T12:34:14 | 2020-12-06T12:34:14 | 317,910,429 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py |
# Builds an Escher-style fish tiling (functional-geometry exercise).
# get_fish/flip/rot45/rot/over/quartlet/show are presumably provided by the
# exercise framework -- confirm against the surrounding chapter code.
fish = get_fish(border=True)
fish2 = flip(rot45(fish))
# u: four rotated copies of the tilted fish overlaid into one "pinwheel" tile
u = over(
    fish2, rot(fish2),
    rot(rot(fish2)), rot(rot(rot(fish2)))
)
# Tile u into a 2x2 grid, then tile that again into a 4x4 picture
x = quartlet(u, u, u, u)
y = quartlet(x, x, x, x)
show(y, scale=True)
| [
"anandology@gmail.com"
] | anandology@gmail.com |
48ffe60aba4596db17b05c747beb4dd394451e84 | 6edd5a50f07843de18175c04796348f7fdc4f74d | /Python/simrank.py | 6016316641b030b8fa58298c85fdf50013b95b5c | [] | no_license | rogergranada/_utilsdev | 4f14a1e910103c33e3a8e820bb3e55483bd27e69 | 977a8d98a6934b9354ec233da6e0ef31621282f3 | refs/heads/master | 2021-06-01T17:07:14.773949 | 2020-10-22T22:06:43 | 2020-10-22T22:06:43 | 124,608,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | import copy
from collections import defaultdict
def simrank(G, r=0.9, max_iter=100):
    """Compute SimRank similarity scores for every node pair of graph G.

    Iterates the SimRank recurrence until scores converge (see _is_converge)
    or max_iter iterations have run. Returns a dict-of-dicts: sim[u][v] is the
    similarity of nodes u and v; sim[n][n] is fixed at 1.

    NOTE(review): uses len(G.neighbors(u)) -- this assumes networkx 1.x, where
    neighbors() returns a list; on networkx >= 2.0 it returns an iterator and
    this would raise. Also divides by zero if a node has no neighbors; confirm
    inputs are connected.
    """
    sim_old = defaultdict(list)
    sim = defaultdict(list)
    # Initialize: self-similarity 1, everything else defaults to 0
    for n in G.nodes():
        sim[n] = defaultdict(int)
        sim[n][n] = 1
        sim_old[n] = defaultdict(int)
        sim_old[n][n] = 0
    # recursively calculate simrank
    for iter_ctr in range(max_iter):
        if _is_converge(sim, sim_old):
            break
        sim_old = copy.deepcopy(sim)
        for u in G.nodes():
            for v in G.nodes():
                if u == v:
                    continue
                # Average the previous-iteration similarity over all neighbor pairs
                s_uv = 0.0
                for n_u in G.neighbors(u):
                    for n_v in G.neighbors(v):
                        s_uv += sim_old[n_u][n_v]
                sim[u][v] = (r * s_uv / (len(G.neighbors(u)) * len(G.neighbors(v))))
    return sim
def _is_converge(s1, s2, eps=1e-4):
for i in s1.keys():
for j in s1[i].keys():
if abs(s1[i][j] - s2[i][j]) >= eps:
return False
return True
# Bug fix: the original guard compared against "main", which never matches, so
# this demo block could never run. The correct sentinel is "__main__".
if __name__ == "__main__":
    import networkx
    G = networkx.Graph()
    G.add_edges_from([('a','b'), ('b', 'c'), ('c','a'), ('c','d')])
    simrank(G)
#S(a,b) = r * (S(b,a)+S(b,c)+S(c,a)+S(c,c))/(2*2) = 0.9 * (0.6538+0.6261+0.6261+1)/4 = 0.6538,
"roger.leitzke@gmail.com"
] | roger.leitzke@gmail.com |
b823f84539a45d2ea8af50cf386fc0f68d48c289 | 2b45cbccd03fb09be78b2241d05beeae171a2e18 | /LeetCode 热题 HOT 100/letterCombinations.py | 23b65251a336177d70ae1440f8769a587bcc6b62 | [
"Apache-2.0"
] | permissive | MaoningGuan/LeetCode | c90f78ce87a8116458a86c49dbe32e172036f7b4 | 62419b49000e79962bcdc99cd98afd2fb82ea345 | refs/heads/master | 2023-01-03T14:52:04.278708 | 2020-11-01T12:15:41 | 2020-11-01T12:15:41 | 282,859,997 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | # -*- coding: utf-8 -*-
"""
17. 电话号码的字母组合
给定一个仅包含数字 2-9 的字符串,返回所有它能表示的字母组合。
给出数字到字母的映射如下(与电话按键相同)。注意 1 不对应任何字母。
https://assets.leetcode-cn.com/aliyun-lc-upload/original_images/17_telephone_keypad.png
示例:
输入:"23"
输出:["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
说明:
尽管上面的答案是按字典序排列的,但是你可以任意选择答案输出的顺序。
"""
from typing import List
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
"""
方法:回溯(使用递归来实现回溯算法)
时间复杂度:O(3^m x 4^n)
空间复杂度:O(m+n)
其中 m 是输入中对应 3 个字母的数字个数(包括数字 2、3、4、5、6、8),
n 是输入中对应 4 个字母的数字个数(包括数字 7、9),m+n 是输入数字的总个数。
:param digits:
:return:
"""
if not digits:
return list()
phoneMap = {
"2": "abc",
"3": "def",
"4": "ghi",
"5": "jkl",
"6": "mno",
"7": "pqrs",
"8": "tuv",
"9": "wxyz",
}
def backtrack(index: int):
if index == len(digits):
combinations.append("".join(combination))
else:
digit = digits[index]
for letter in phoneMap[digit]:
combination.append(letter)
backtrack(index + 1)
combination.pop()
combination = list()
combinations = list()
backtrack(0)
return combinations
if __name__ == '__main__':
digits = "23"
solution = Solution()
print(solution.letterCombinations(digits))
| [
"1812711281@qq.com"
] | 1812711281@qq.com |
a7034ac4e883245f425483f76977c4b5f25b3a3b | 3be95bfd788472dfd73826c6214355788f05f2cc | /rest_framework_swagger/urlparser.py | a6e3ce1f28bfbef884c7eead592e44bdaf2f9a8c | [] | no_license | pleasedontbelong/mystore | a2acba4d3b8dd070471139dfddc5baa5f54393c0 | 8e156f4c9c5d6cd273dfbbd57eb90c65c3986e9f | refs/heads/master | 2020-12-31T03:26:27.074955 | 2016-11-09T23:14:01 | 2016-11-09T23:14:01 | 56,080,261 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,362 | py | from importlib import import_module
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.contrib.admindocs.views import simplify_regex
from django.conf import settings
from rest_framework.views import APIView
class UrlParser(object):
def __init__(self, config, request):
self.urlconf = settings.ROOT_URLCONF
self.exclude_namespaces = config.get('exclude_namespaces', [])
self.exclude_module_paths = config.get('exclude_module_paths', [])
self.include_module_paths = config.get('include_module_paths', [])
self.exclude_url_patterns = config.get('exclude_url_patterns', [])
self.exclude_url_patterns_names = config.get('exclude_url_patterns_names', [])
def get_apis(self):
"""
Returns all the DRF APIViews found in the project URLs
"""
urls = import_module(self.urlconf)
return self.__flatten_patterns_tree__(urls.urlpatterns)
def __assemble_endpoint_data__(self, pattern, prefix=''):
"""
Creates a dictionary for matched API urls
pattern -- the pattern to parse
prefix -- the API path prefix (used by recursion)
"""
callback = self.__get_pattern_api_callback__(pattern)
if callback is None or self.__exclude_router_api_root__(callback):
return
path = simplify_regex(prefix + pattern.regex.pattern)
path = path.replace('<', '{').replace('>', '}')
if self.__exclude_format_endpoints__(path):
return
return {
'path': path,
'pattern': pattern,
'callback': callback,
}
def __flatten_patterns_tree__(self, patterns, prefix=''):
"""
Uses recursion to flatten url tree.
patterns -- urlpatterns list
prefix -- (optional) Prefix for URL pattern
"""
pattern_list = []
for pattern in patterns:
if isinstance(pattern, RegexURLPattern):
endpoint_data = self.__assemble_endpoint_data__(pattern, prefix)
if endpoint_data is None:
continue
if any(excluded in endpoint_data['path'] for excluded in self.exclude_url_patterns):
continue
if endpoint_data['pattern'].name in self.exclude_url_patterns_names:
continue
pattern_list.append(endpoint_data)
elif isinstance(pattern, RegexURLResolver):
api_urls_module = pattern.urlconf_name.__name__ if hasattr(pattern.urlconf_name, '__name__') else ""
# only modules included on the include_module_paths list
if self.include_module_paths and api_urls_module not in self.include_module_paths:
continue
# except modules included on the exclude_module_paths list
if api_urls_module in self.exclude_module_paths:
continue
if pattern.namespace is not None and pattern.namespace in self.exclude_namespaces:
continue
pref = prefix + pattern.regex.pattern
pattern_list.extend(self.__flatten_patterns_tree__(
pattern.url_patterns,
prefix=pref
))
return pattern_list
def __get_pattern_api_callback__(self, pattern):
"""
Verifies that pattern callback is a subclass of APIView, and returns the class
Handles older django & django rest 'cls_instance'
"""
if not hasattr(pattern, 'callback'):
return
if (hasattr(pattern.callback, 'cls') and
issubclass(pattern.callback.cls, APIView)):
return pattern.callback.cls
elif (hasattr(pattern.callback, 'cls_instance') and
isinstance(pattern.callback.cls_instance, APIView)):
return pattern.callback.cls_instance
def __exclude_router_api_root__(self, callback):
"""
Returns True if the URL's callback is rest_framework.routers.APIRoot
"""
return callback.__module__ == 'rest_framework.routers'
def __exclude_format_endpoints__(self, path):
"""
Excludes URL patterns that contain .{format}
"""
return '.{format}' in path
| [
"pleasedontbelong@gmail.com"
] | pleasedontbelong@gmail.com |
dd1b73525526e6198b35c04f116baf278eed4316 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/6b4c314f44d841bbb2b4d39674eb65c3.py | e9fd5ef53db94a80a06e2239ef9d865b8e7e86d3 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 166 | py | def is_leap_year(year):
    # Non-int input is rejected with a sentinel string rather than an exception.
    if type(year) != int: return "Not a year."
    # Leap year: divisible by 4 and not by 100, unless also divisible by 400.
    if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
        return True
    else:
        return False
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
7852381fd94bab144af4ccb32cb16d716772262b | ce0a3a73c7825f7327b8319fb2593b6b01659bb0 | /mysite/mysite/settings.py | cd4b00fbac4cb694ac71bec09ea83e0b482d97fd | [] | no_license | soccergame/deeplearning | 28b0a6ed85df12e362b3a451050fab5a2a994be7 | cbc65d3eba453992a279cfd96a9d3640d8fe6b9f | refs/heads/master | 2020-03-28T22:38:26.085464 | 2018-08-31T11:22:39 | 2018-08-31T11:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here; load it from an
# environment variable before deploying.
SECRET_KEY = '826f-sjl7t%0g9cff#7g90x7fjw3%5226!^8j$^(ec52a(k#na'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'cmdb',  # project app
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is disabled below, leaving POST forms
    # unprotected -- re-enable before production.
    #'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS=(
    os.path.join(BASE_DIR, 'static'),
)
"18811442380@163.com"
] | 18811442380@163.com |
83f8896cb49a3f55ad7b3291aaa0ac1a5e0330fb | 59a8c7b332c2cd182c9267cfcc5a4d0c3d4edb59 | /convert_to_onnx.py | d8669511953448ba8c4ce443d25a56a72616e252 | [
"MIT"
] | permissive | QLHua001/bsj_retinaface_train_2 | ffce230f50e9b0709c0a77200d4070f52624c8de | 4ac72ffee38779876aff4acd2577f5e8b20470fc | refs/heads/main | 2023-06-06T07:39:54.522042 | 2021-06-26T02:33:32 | 2021-06-26T02:33:32 | 380,394,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,377 | py | from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
import onnx
# Command-line options for the ONNX export script.
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('-m', '--trained_model', default='./20-point-weights/Retinaface_192_v0529a-1/mobilenet0.25_Final.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--long_side', default=192, help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
    """Report key overlap between a model's state dict and a checkpoint.

    Prints how many model keys are missing from the checkpoint, how many
    checkpoint keys the model does not use, and how many are shared; asserts
    that at least one pretrained key matches the model.
    """
    checkpoint_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    shared_keys = model_keys & checkpoint_keys
    print('Missing keys:{}'.format(len(model_keys - checkpoint_keys)))
    print('Unused checkpoint keys:{}'.format(len(checkpoint_keys - model_keys)))
    print('Used keys:{}'.format(len(shared_keys)))
    assert len(shared_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
    print('remove prefix \'{}\''.format(prefix))
    stripped = {}
    for key, value in state_dict.items():
        # Only the first occurrence of the prefix is removed, and only when the
        # key actually starts with it.
        new_key = key.split(prefix, 1)[-1] if key.startswith(prefix) else key
        stripped[new_key] = value
    return stripped
def load_model(model, pretrained_path, load_to_cpu):
    """Load a checkpoint into *model*, stripping any DataParallel 'module.' prefix.

    When load_to_cpu is True the tensors are mapped to CPU; otherwise they are
    mapped onto the currently selected CUDA device.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        #Returns the index of a currently selected device.
        device = torch.cuda.current_device()
        print("device:")
        print(device)
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
    # Checkpoints saved from DataParallel wrap the weights under 'state_dict'
    # and/or prefix every key with 'module.'.
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    # strict=False: tolerate keys present in the model but absent from the checkpoint
    model.load_state_dict(pretrained_dict, strict=False)
    return model
if __name__ == '__main__':
    # Disable autograd globally: export only needs forward passes.
    torch.set_grad_enabled(False)
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50
    # net and model
    net = RetinaFace(cfg=cfg, phase = 'test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    #A torch.device is an object representing the device on which a torch.Tensor is or will be allocated.
    device = torch.device("cuda:0")
    net = net.to(device)

    # ------------------------ export -----------------------------
    output_onnx = 'Retinaface_192_v0529a-1.onnx'
    print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
    input_names = ["input0"]
    output_names = ["output0","output1", "output2"]
    print("args.long_side: ", args.long_side)
    # Dummy input that fixes the traced shape to (1, 3, long_side, long_side)
    inputs = torch.randn(1, 3, args.long_side, args.long_side).to(device)
    torch_out = torch.onnx._export(net, inputs, output_onnx, export_params=True, verbose=False,
                                   input_names=input_names, output_names=output_names)

    # Post-process the saved graph: rename the H and W input dimensions to
    # symbolic parameters so the exported model accepts variable input sizes.
    model = onnx.load("./Retinaface_192_v0529a-1.onnx")
    dim_proto0 = model.graph.input[0].type.tensor_type.shape.dim[2]
    dim_proto0.dim_param = 'input.0_2'
    dim_proto1 = model.graph.input[0].type.tensor_type.shape.dim[3]
    dim_proto1.dim_param = 'input.0_3'
    onnx.save(model, 'Retinaface_192_v0529a-1_dynaInput.onnx')
| [
"gogs@fake.local"
] | gogs@fake.local |
8cfe933c73e083c3c4df4862e73b9e2e558780e8 | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /2014/codefestival/thanksb/d.py | 6f6c56dc67aa702914e96ae0c45553f0234736fc | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | N,T = map(int,raw_input().split())
A = [int(raw_input()) for _ in range(N)]
ans = 0
for t in range(1,T+1):
tmp = sum(1 for i in range(N) if t%A[i] == 0)
ans = max(ans,tmp)
print ans
| [
"roiti46@gmail.com"
] | roiti46@gmail.com |
f38803a4c4eb6386cc7d3348d9d2bc33e9cda9dd | 4de03eecadc4c69caf792f4773571c2f6dbe9d68 | /seahub/api2/endpoints/admin/file_audit.py | d8968ed22469ef16846f9475a1ba6b1103a19aff | [
"Apache-2.0"
] | permissive | Tr-1234/seahub | c1663dfd12f7584f24c160bcf2a83afdbe63a9e2 | ed255e0566de054b5570218cb39cc320e99ffa44 | refs/heads/master | 2022-12-23T16:20:13.138757 | 2020-10-01T04:13:42 | 2020-10-01T04:13:42 | 300,138,290 | 0 | 0 | Apache-2.0 | 2020-10-01T04:11:41 | 2020-10-01T04:11:40 | null | UTF-8 | Python | false | false | 2,138 | py | from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from seaserv import seafile_api
from .utils import check_time_period_valid, \
get_log_events_by_type_and_time
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.utils.timeutils import datetime_to_isoformat_timestr
from seahub.utils import is_pro_version
class FileAudit(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication )
permission_classes = (IsAdminUser,)
throttle_classes = (UserRateThrottle,)
def get(self, request):
if not is_pro_version():
error_msg = 'Feature disabled.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check the date format, should be like '2015-10-10'
start = request.GET.get('start', None)
end = request.GET.get('end', None)
if not check_time_period_valid(start, end):
error_msg = 'start or end date invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
result = []
events = get_log_events_by_type_and_time('file_audit', start, end)
if events:
for ev in events:
tmp_repo = seafile_api.get_repo(ev.repo_id)
tmp_repo_name = tmp_repo.name if tmp_repo else ''
result.append({
'repo_id': ev.repo_id,
'repo_name': tmp_repo_name,
'time': datetime_to_isoformat_timestr(ev.timestamp),
'etype': ev.etype,
'ip': ev.ip,
'file_path': ev.file_path,
'etype': ev.etype,
'user_name': email2nickname(ev.user),
'user_email': ev.user
})
return Response(result)
| [
"colinsippl@gmx.de"
] | colinsippl@gmx.de |
341ea1610a3b5ed9e737b2c4b2bbc9bd7ceb736f | 549317bc0a7230ec163914c75f75dd008900c57b | /pyroomacoustics/tests/tests_libroom/test_ccw3p.py | 1c037631a6a39a0deba7194dbded459392295e0b | [
"MIT"
] | permissive | oucxlw/pyroomacoustics | 0bb633427cd7ce3e93392cdc9d0bc3afc5f2dbf3 | 0adc91579c9c6daf1b73d2c4863a9fc66b308dbb | refs/heads/master | 2023-06-17T17:43:49.743201 | 2021-07-21T05:36:46 | 2021-07-21T05:36:46 | 288,884,904 | 1 | 0 | MIT | 2021-07-21T05:36:47 | 2020-08-20T02:22:54 | Python | UTF-8 | Python | false | false | 2,575 | py | # Test of the CCW3P routine
# Copyright (C) 2019 Robin Scheibler, Cyril Cadoux
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
import numpy as np
import pyroomacoustics as pra
cases = {
'anti-clockwise' : {
'points' : np.array([
[1,-1], [2,-1], [1,0]
]),
'expected' : 1, # anti-clockwise
'label' : 'Test: CCW3P anti-clockwise',
},
'clockwise' : {
'points' : np.array([
[1,-1], [1,0], [2,-1]
]),
'expected' : -1, # clockwise
'label' : 'Test: CCW3P clockwise',
},
'co-linear' : {
'points' : np.array([
[0,0], [0.5,0.5], [1,1]
]),
'expected' : 0, # co-linear
'label' : 'Test: CCW3P co-linear',
},
}
def ccw3p(case):
p1, p2, p3 = case['points']
r = pra.libroom.ccw3p(p1, p2, p3)
assert r == case['expected'], (case['label']
+ ' returned: {}, expected {}'.format(r, case['expected']))
def test_ccw3p_anticlockwise():
ccw3p(cases['anti-clockwise'])
def test_ccw3p_clockwise():
ccw3p(cases['clockwise'])
def test_ccw3p_colinear():
ccw3p(cases['co-linear'])
if __name__ == '__main__':
for lbl, case in cases.items():
try:
ccw3p(case)
except:
print('{} failed'.format(lbl))
| [
"fakufaku@gmail.com"
] | fakufaku@gmail.com |
f835cfa380b740abef9cdc32dc9ca46d5a6b1db0 | cc6ea4b0422ba4c0d0ab2815333330b22e6a2b6f | /py_headless_daw/processing/event/envelope_param_value_emitter.py | 03b96a541bf18c17983d069972fc8bf8f0ac8f40 | [
"MIT"
] | permissive | Catsvilles/py_headless_daw | 07494e39a07510d852af1eda1d611eb34e4d96a8 | 596d2da39e14cda13544601b71714a8ebe6b8874 | refs/heads/master | 2022-11-05T12:28:18.335337 | 2020-06-18T08:14:00 | 2020-06-18T08:14:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | from typing import List
import numpy as np
from em.platform.rendering.dto.time_interval import TimeInterval
from em.platform.rendering.primitives.envelope import Envelope
from em.platform.rendering.schema.events.event import Event
from em.platform.rendering.schema.events.parameter_value_event import ParameterValueEvent
from em.platform.rendering.schema.processing_strategy import ProcessingStrategy
class EnvelopeParamValueEmitter(ProcessingStrategy):
def __init__(self, envelope: Envelope, parameter: str):
self.envelope: Envelope = envelope
self.parameter: str = parameter
def render(self, interval: TimeInterval, stream_inputs: List[np.ndarray], stream_outputs: List[np.ndarray],
event_inputs: List[List[Event]], event_outputs: List[List[Event]]):
# a simplified approach:
# only one event is generated, in the very beginning of buffer
# must be good enough for the beginning
event = ParameterValueEvent(0, self.parameter, self.envelope.get_value_at(interval.start_in_bars))
for output in event_outputs:
output.append(event)
| [
"grechin.sergey@gmail.com"
] | grechin.sergey@gmail.com |
5dd3d6567d4a2ec9f3f1a5da2e2856a9b40aa58f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/24743e5cdca47da694bad41b1c623c1fb6f76d96-<storage_client>-fix.py | cf3c119ef92e771508a7688684103ed9eed03cc2 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | @property
def storage_client(self):
self.log('Getting storage client...')
if (not self._storage_client):
self._storage_client = self.get_mgmt_svc_client(StorageManagementClient, base_url=self._cloud_environment.endpoints.resource_manager, api_version='2017-06-01')
return self._storage_client | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
9d22ea778fb77968652fa8f01e3e11aeb552d468 | 8ab7e102c01d436f37ad221802f601f2206b59a8 | /Tray.py | 8be528c31afbe961604a27f970b2104009cdcb7d | [] | no_license | shanto268/muon_simulator | 9900129fd0fab418b53002cde47191ac667ace36 | c80812edb1800720570b8d3b792a46f7c83f3cbb | refs/heads/master | 2023-02-17T17:33:37.099252 | 2021-01-16T18:56:25 | 2021-01-16T18:56:25 | 330,234,282 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | import matplotlib.pyplot as plt
import numpy as np
class Tray:
"""Docstring for Tray. """
def __init__(self, nbarx, nbary, bar_size, z_pos):
"""TODO: to be defined.
:nbarx: TODO
:nbary: TODO
:bar_size: TODO
:z_pos: TODO
:tray_id: TODO
"""
self._nbarx = nbarx
self._nbary = nbary
self._bar_size = bar_size
self._z_pos = z_pos
self.default_data = self.createPlane()
def createTray(self, n):
return [0 for i in range(n)]
def createPlane(self):
x = self.createTray(self._nbarx)
y = self.createTray(self._nbary)
return np.array([x, y])
def getHit(self, hitTuple):
data = np.array(self.default_data)
for i in range(len(hitTuple)):
if hitTuple[i] != -1:
data[i, hitTuple[i] - 1] = 1
return data
if __name__ == "__main__":
x = Tray(11, 11, 0.5, 1)
hit = x.getHit((2, 3))
hit = x.getHit((-1, 11)) #-1 means missing
hit = x.getHit((-1, -1)) #-1 means missing
| [
"sadman-ahmed.shanto@ttu.edu"
] | sadman-ahmed.shanto@ttu.edu |
9a616dfa2333cd6ed238d4d246c059e82877b52b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03072/s215726876.py | ace993f320aeb75bab4c12c97f84521fdf740fd3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | n = int(input())
a = list(map(int,input().split()))
k = -1
ans = 0
for i in a :
if (k <= i):
ans += 1
k = i
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2cc4855a8aab578a5cd1e23e4c2ac49e1815d8c6 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /dp_multiq/smooth.py | 00ee23c34be2fbb33604d1da53a84bde719e8fa4 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,302 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Smooth sensitivity method for computing differentially private quantiles.
Lemmas 2.6 and 2.9 from "Smooth Sensitivity and Sampling in Private Data
Analysis" by Nissim, Radkhodnikova, and Smith
(https://cs-people.bu.edu/ads22/pubs/NRS07/NRS07-full-draft-v1.pdf) describe the
noise scaled to the smooth sensitivity.
"""
import numpy as np
from dp_multiq import base
from dp_multiq import smooth_utils
def smooth(sorted_data, data_low, data_high, qs, divided_eps, divided_delta):
"""Returns (eps, delta)-differentially private quantile estimates for qs.
Args:
sorted_data: Array of data points sorted in increasing order.
data_low: Lower limit for any differentially private quantile output value.
data_high: Upper limit for any differentially private quantile output value.
qs: Increasing array of quantiles in [0,1].
divided_eps: Privacy parameter epsilon, assumed to be already divided for
the desired overall eps.
divided_delta: Privacy parameter delta, assumed to be already divided for
the desired overall delta.
"""
sorted_data = np.clip(sorted_data, data_low, data_high)
o = np.empty(len(qs))
n = len(sorted_data)
alpha = divided_eps / 2.0
beta = divided_eps / (2 * np.log(2 / divided_delta))
for i in range(len(qs)):
true_quantile_idx = base.quantile_index(n, qs[i])
true_quantile_value = sorted_data[true_quantile_idx]
log_sensitivity = smooth_utils.compute_log_smooth_sensitivity(
sorted_data, data_low, data_high, true_quantile_idx, beta)
noise = np.exp(log_sensitivity) * np.random.laplace() / alpha
o[i] = true_quantile_value + noise
o = np.clip(o, data_low, data_high)
return np.sort(o)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
4f776f4688760e7bdf2e1608574bf13a02b8c879 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/84/usersdata/224/52670/submittedfiles/lista1.py | 406d010dc5782205ffb282a4d8df23827cd7c80d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # -*- coding: utf-8 -*-
def funçao(lista):
cont=0
cont2=0
soma2=0
soma=0
for i in range(0,len(lista),1):
if lista[i]%2!=0:
cont=cont+1
soma=soma+lista[i]
else:
soma2=soma2+1
cont2=cont2+1
return cont
return cont2
return soma
return soma2
n=int(input('Digite o tamanho da lista: '))
a=[]
for i in range(1,n+1,1):
numero=int(input('Digite o numero: '))
a.append(numero)
print(funçao(a))
print(a) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f52ff10a37130d0173b23d7ea2e9932e6209ca88 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03317/s687010919.py | 6fd7ffb65d80691f364264801e1f5e0043da55f5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import sys
import itertools
sys.setrecursionlimit(1000000000)
from heapq import heapify,heappop,heappush,heappushpop
import math
import collections
MOD = 10**9+7
MAX = 10**18
MIN = -10**18
n,k = map(int,input().split())
a = list(map(int,input().split()))
print(math.ceil((n-1)/(k-1))) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
579a281ff5e8d5080677b812effa12d2b3b09d5f | 44e8285d6851e8e709f124acc490c714578ece68 | /app/recipe/tests/test_tags_api.py | 76ca7c39f7ade31e57dacb543544686456c0c83d | [
"MIT"
] | permissive | alanclaros/recipe-app-api | 628445c41eab175be8472294fbe9f6a1e1971add | 4434209772bdb0d785796ec65d631100ee2d6843 | refs/heads/master | 2022-08-01T20:35:28.539951 | 2020-05-24T18:33:26 | 2020-05-24T18:33:26 | 265,684,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,530 | py | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""test the public available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""test that login is required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@gmail.com',
'test123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user(
'user2@gmail.com',
'test123'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Confort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
"""test creating a new tag"""
payload = {'name': 'test tag'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
""" test creating a new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| [
"alan_claros13@hotmail.com"
] | alan_claros13@hotmail.com |
16e899a2352d374fe7ac99c47ee632c96186479d | 37194bcee20e66e84360010d98a45adcced57963 | /02_PS_I/00_pascals_triangle/2005_pascals_triangle.py | 95f53393c42adbb1cf009aec1daaa6c22326f7c2 | [] | no_license | dmdekf/algo | edcd1bbd067102a622ff1d55b2c3f6274126414a | 544a531799295f0f9879778a2d092f23a5afc4ce | refs/heads/master | 2022-09-13T14:53:31.593307 | 2020-06-05T07:06:03 | 2020-06-05T07:06:03 | 237,857,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import sys
sys.stdin = open('input.txt')
T = int(input())
for tc in range(1, T+1):
print('#{} '.format(tc))
N = int(input())
tmp = []
result = [1]
print(1)
# temp = []일경우 for문 처음은 돌아가지 않고 temp에 result가 대입된 후 두번째행부터 포문 실행.
for i in range(N-1):
result = [1]
for j in range(i):
result.append(tmp[j]+tmp[j+1])
result.append(1)
print(' '.join(map(str, result)))
tmp = result
| [
"dmdekf@gmail.com"
] | dmdekf@gmail.com |
b083bff66cdb6a9442d1c4e864a68d3db574d737 | 557a5e8ac000718959281d1d31da8e1e4947a155 | /examples/translating_a_file.py | d77465a93203a15d5376b62e0dcbe45e1aa21447 | [
"MIT"
] | permissive | PiotrDabkowski/Js2Py | 66f20a58912d2df719ce5952d7fe046512717d4d | 2e017b86e2f18a6c8a842293b1687f2ce7baa12e | refs/heads/master | 2023-08-17T08:47:00.625508 | 2022-11-06T09:56:37 | 2022-11-06T10:12:00 | 24,736,750 | 2,419 | 318 | MIT | 2023-08-03T18:06:40 | 2014-10-02T21:08:48 | JavaScript | UTF-8 | Python | false | false | 886 | py | import js2py
# there are 2 easy methods to run js file from Js2Py
# Method 1:
eval_result, example = js2py.run_file('example.js')
# Method 2:
js2py.translate_file('example.js', 'example.py') # this translates and saves equivalent Py file
from example import example # yes, it is: import lib_name from lib_name
##### Now you can use your JS code as if it was Python!
print(example.someVariable)
print(example.someVariable.a)
print(example.someVariable['a'])
example.sayHello('Piotrek!')
example.sayHello() # told you, just like JS.
example['$nonPyName']() # non py names have to be accessed through [] example.$ is a syntax error in Py.
# but there is one problem - it is not possible to write 'new example.Rectangle(4,3)' in Python
# so you have to use .new(4,3) instead, to create the object.
rect = example.Rectangle.new(4,3)
print(rect.getArea()) # should print 12
| [
"piodrus@gmail.com"
] | piodrus@gmail.com |
fd1311d8e71ac932322f58c81588dc5b61c5c3b7 | 34de2b3ef4a2478fc6a03ea3b5990dd267d20d2d | /Python/plotting/plotting1/polarPlotting/circle/using_function.py | 20830fc34fff99d77824caa09a7a93d271124a56 | [
"MIT"
] | permissive | bhishanpdl/Programming | d4310f86e1d9ac35483191526710caa25b5f138e | 9654c253c598405a22cc96dfa1497406c0bd0990 | refs/heads/master | 2020-03-26T06:19:01.588451 | 2019-08-21T18:09:59 | 2019-08-21T18:09:59 | 69,140,073 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Bhishan Poudel
# Date : Apr 01, 2016
# Imports
import matplotlib.pyplot as plt
import numpy as np
def xy(r,phi):
return r*np.cos(phi), r*np.sin(phi)
fig = plt.figure()
ax = fig.add_subplot(111,aspect='equal')
phis=np.arange(0,6.28,0.01) # 2pi = 6.28
r =1.5
ax.plot( *xy(r,phis), c='r',ls='-' )
plt.show()
| [
"bhishantryphysics@gmail.com"
] | bhishantryphysics@gmail.com |
ca4f212c3ddc1bf31eec951c5286cc6ffc708e07 | 27398b2a8ed409354d6a36c5e1d2089dad45b4ac | /backend/admin/setup.py | cbca0ce536eb06ebf9c7b662c3d19799d6e8c7b6 | [
"Apache-2.0"
] | permissive | amar266/ceph-lcm | e0d6c1f825f5ac07d2926bfbe6871e760b904340 | 6b23ffd5b581d2a1743c0d430f135261b7459e38 | refs/heads/master | 2021-04-15T04:41:55.950583 | 2018-03-23T12:51:26 | 2018-03-23T12:51:26 | 126,484,605 | 0 | 0 | null | 2018-03-23T12:50:28 | 2018-03-23T12:50:27 | null | UTF-8 | Python | false | false | 2,358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
REQUIREMENTS = (
"decapod-api~=1.2.dev1",
"decapod-common~=1.2.dev1",
"decapod-controller~=1.2.dev1",
"decapodlib~=1.2.dev1",
"python-keystoneclient>=3.9,<4",
"click>=6,<7",
"cryptography>=1.4,<2",
"asyncssh[libnacl,bcrypt]>=1.8,<2",
# 3.15.1 brings Babel!=2.4.0 line which is controversal
# to requirements in Keystone. Therefore installation is broken.
# next version will eliminate runtime dependency to Babel
# completely (first commit after tag 3.15.1)
"oslo.i18n<3.15.1"
)
setuptools.setup(
name="decapod-admin",
description="Admin scripts for Decapod",
long_description="", # TODO
version="1.2.0.dev1",
author="Sergey Arkhipov",
author_email="sarkhipov@mirantis.com",
maintainer="Sergey Arkhipov",
maintainer_email="sarkhipov@mirantis.com",
license="Apache2",
url="https://github.com/Mirantis/ceph-lcm",
packages=setuptools.find_packages(),
python_requires=">=3.4",
install_requires=REQUIREMENTS,
zip_safe=False,
include_package_data=True,
extras_require={
"uvloop": ["uvloop>=0.7"]
},
package_data={
"decapod_admin": [
"migration_scripts/*"
]
},
entry_points={
"console_scripts": [
"decapod-admin = decapod_admin.main:cli"
]
},
classifiers=(
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5"
)
)
| [
"sarkhipov@mirantis.com"
] | sarkhipov@mirantis.com |
5b72a3bf4afaf327b4f303040f4616e683291caa | 0204dc09d72da99fb35968f493e9499106be7dca | /BasicPython/codes/temploop/index.py | 109e5f907746e838c4501989b3563048baddf21f | [] | no_license | jamesblunt/ITArticles | 4b95a5586b3158672a05c76ea97c4c1c0f1190a2 | 5c1fc6e64ce32bf0143488897ae37bb10a52df91 | refs/heads/master | 2021-01-22T14:39:48.233415 | 2014-11-03T10:15:16 | 2014-11-03T10:15:16 | 26,259,126 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | #! /usr/bin/env python
#-*- coding:utf-8 -*-
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
lst = ["python","www.itdiffer.com","qiwsir@gmail.com"]
self.render("index.html", info=lst)
handlers = [(r"/", IndexHandler),]
template_path = os.path.join(os.path.dirname(__file__), "temploop")
static_path = os.path.join(os.paht.dirname(__file__), "static")
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(handlers, template_path, static)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| [
"qiwsir@gmail.com"
] | qiwsir@gmail.com |
adc32f48782610a4a73107a6acc7052b70daad5b | 7c54b892aec3fd9241ee0d134a093a01b4f0c2e6 | /server/pvwproxy/server/__init__.py | eeaa3dc3c079e4cde488826c113d11f2e71aea1a | [
"Apache-2.0"
] | permissive | Cloudydew/HPCCloud | f75861e653d55ac4bdf668be95baa489397c0f75 | 692e270420c9c681c38e6346c19a9df4a7268a07 | refs/heads/master | 2020-04-12T19:16:00.174809 | 2018-04-05T17:47:11 | 2018-04-05T17:47:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder import events
from .proxy import Proxy
from . import constants
def validate_settings(event):
key = event.info['key']
if key == constants.PluginSettings.PROXY_FILE_PATH:
event.preventDefault().stopPropagation()
def load(info):
events.bind('model.setting.validate', 'pvwproxy', validate_settings)
info['apiRoot'].proxy = Proxy()
| [
"chris.harris@kitware.com"
] | chris.harris@kitware.com |
9d348c74cfa0509c0a01aa7f0a597a277a85211d | 11a0fab712b139bcba9e90f6acdc7597dff68dbb | /mestrado/ppgmcs/m07-elaboracao-de-dissertacao-i/projeto/codigo/teste1/parametros/cadastrarturmas.py | 98f78e068419fccbd2707980275f28f217c91717 | [] | no_license | fapers/MeusTreinamentos | 17ba096d518df533433ae2528b70d18717f3cf96 | 32a6b791b0c3dbb8b29ffd177597919e768b09b5 | refs/heads/master | 2023-06-04T14:00:37.847808 | 2021-06-28T02:37:11 | 2021-06-28T02:37:11 | 292,962,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | from bancodados.modelo import Turma
# Cadastrar todas as turma da escola
# Nome, Nível, Turno, Número de aulas por semana
# Nível 16 é o 6º ano do ensino fundamental
# Nível 17 é o 7º ano do ensino fundamental
# Nível 18 é o 8º ano do ensino fundamental
# Nível 19 é o 97º ano do ensino fundamental
# Nível 21 é o 1º ano do ensino médio
# Nível 22 é o 2º ano do ensino médio
# Nível 23 é o 3º ano do ensino médio
def cadastrar_turmas():
turma = Turma('6º4', 16, 1, 25)
turma.salvar()
turma = Turma('7º4', 17, 1, 25)
turma.salvar()
turma = Turma('8º4', 18, 1, 25)
turma.salvar()
turma = Turma('9º4', 19, 1, 25)
turma.salvar()
turma = Turma('1º6', 21, 1, 25)
turma.salvar()
turma = Turma('1º7', 21, 1, 25)
turma.salvar()
turma = Turma('1º8', 21, 1, 25)
turma.salvar()
turma = Turma('1º10', 21, 1, 25)
turma.salvar()
turma = Turma('2º6', 22, 1, 25)
turma.salvar()
turma = Turma('2º7', 22, 1, 25)
turma.salvar()
turma = Turma('2º8', 22, 1, 25)
turma.salvar()
turma = Turma('3º5', 23, 1, 25)
turma.salvar()
turma = Turma('3º6', 23, 1, 25)
turma.salvar()
turma = Turma('3º7', 23, 1, 25)
turma.salvar()
turma = Turma('6º3', 16, 2, 25)
turma.salvar()
turma = Turma('7º3', 17, 2, 25)
turma.salvar()
turma = Turma('8º3', 18, 2, 25)
turma.salvar()
turma = Turma('9º3', 19, 2, 25)
turma.salvar()
turma = Turma('1º9', 21, 2, 25)
turma.salvar()
turma = Turma('2º5', 22, 2, 25)
turma.salvar()
turma = Turma('1ºEJA', 21, 3, 20)
turma.salvar()
turma = Turma('3ºEJA', 23, 3, 20)
turma.salvar()
return turma.get_turmas()
| [
"fpsmoc@yahoo.com.br"
] | fpsmoc@yahoo.com.br |
053bd60526c036a98495e45eb02c3f6d5bd9d452 | ecd630f54fefa0a8a4937ac5c6724f9a3bb215c3 | /projeto/emprestimo/migrations/0008_auto_20200516_2138.py | b43edde81f941e7f9a7ebe225aff2c97d42e02e4 | [] | no_license | israelwerther/Esctop_Israel_Estoque | 49968751464a38c473298ed876da7641efedf8de | d6ab3e502f2a97a0d3036351e59c2faa267c0efd | refs/heads/master | 2023-01-07T20:21:38.381593 | 2020-11-12T17:35:14 | 2020-11-12T17:35:14 | 258,642,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Generated by Django 3.0.5 on 2020-05-16 21:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('emprestimo', '0007_remove_emprestimo_datetime'),
]
operations = [
migrations.AlterModelOptions(
name='emprestimo',
options={'ordering': ('-data_emprestimo',)},
),
]
| [
"israelwerther48@outlook.com"
] | israelwerther48@outlook.com |
1ec51963bbc5d440bca3ef080c9ca0c7a669dd12 | 99f6c5b7a6b6840163b32d633e658678d5829b46 | /practice/leetcode/algorithm/295_NimGame.py | c5eee37e98f68ac5e3d5cd09b891f4cc40a0854a | [] | no_license | aliceayres/leetcode-practice | 32f2695a567317013b567a68863f2c95c75b438b | 0743cbeb0e9aa4a8a25f4520a1e3f92793fae1ee | refs/heads/master | 2021-06-02T15:11:29.946006 | 2020-02-06T04:06:55 | 2020-02-06T04:06:55 | 131,126,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | """
292. Nim Game
You are playing the following Nim Game with your friend: There is a heap of stones on the table,
each time one of you take turns to remove 1 to 3 stones. The one who removes the last stone will
be the winner. You will take the first turn to remove the stones.
Both of you are very clever and have optimal strategies for the game. Write a function to determine
whether you can win the game given the number of stones in the heap.
For example, if there are 4 stones in the heap, then you will never win the game:
no matter 1, 2, or 3 stones you remove, the last stone will always be removed by your friend.
"""
class Solution:
    """Nim game: the first player wins iff the pile size is not a multiple of 4."""

    def canWinNim(self, n):
        """Return True when the first player can force a win with n stones.

        :type n: int
        :rtype: bool
        """
        # Losing positions are exactly the multiples of 4.
        return bool(n % 4)
if __name__ == '__main__':
    # Quick manual check: 5 stones is a winning position.
    solver = Solution()
    print(solver.canWinNim(5))
"yeziqian@ctsig.com"
] | yeziqian@ctsig.com |
2e694107b84f48482c364a4d684d0bae288ffd4d | 8fa191cd4a67431a04eff62d35122ee83cc7b0af | /bookwyrm/migrations/0143_merge_0142_auto_20220227_1752_0142_user_hide_follows.py | b36fa9f9c491bd522697b8a4626c47e3c6ab624c | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | bookwyrm-social/bookwyrm | 24678676a7a58dba96641194dfae3fffbf01574d | 0f8da5b738047f3c34d60d93f59bdedd8f797224 | refs/heads/main | 2023-08-20T21:45:30.957277 | 2023-08-19T23:41:50 | 2023-08-19T23:41:50 | 236,415,735 | 1,398 | 216 | NOASSERTION | 2023-09-08T20:43:06 | 2020-01-27T03:51:54 | Python | UTF-8 | Python | false | false | 270 | py | # Generated by Django 3.2.12 on 2022-02-28 21:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0142_auto_20220227_1752"),
("bookwyrm", "0142_user_hide_follows"),
]
operations = []
| [
"mousereeve@riseup.net"
] | mousereeve@riseup.net |
2949520275b4940d93e7ccbc937e113936d50e93 | bad9d42860b9c85bf7316cad108cc6ff071bb705 | /tensorflow_estimator/python/estimator/canned/linear_test.py | 3fac9c57415b5a20acb924ed7d7e73c181871496 | [
"Apache-2.0"
] | permissive | tensorflow/estimator | 1a7e469608094f17bece71867c01f22d51d28080 | 359acd5314462c05ef97f9a820d4ace876550c7e | refs/heads/master | 2023-08-17T09:54:38.668302 | 2023-08-04T00:01:29 | 2023-08-04T00:02:02 | 143,069,012 | 331 | 249 | Apache-2.0 | 2023-09-06T21:19:22 | 2018-07-31T20:55:45 | Python | UTF-8 | Python | false | false | 7,789 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for linear.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow_estimator.python.estimator.canned import linear_testing_utils
def _linear_regressor_fn(*args, **kwargs):
  """Factory used by the shared test suites to build a LinearRegressorV2."""
  return linear.LinearRegressorV2(*args, **kwargs)
def _linear_classifier_fn(*args, **kwargs):
  """Factory used by the shared test suites to build a LinearClassifierV2."""
  return linear.LinearClassifierV2(*args, **kwargs)
# Tests for Linear Regressor.
class LinearRegressorEvaluationV2Test(
    linear_testing_utils.BaseLinearRegressorEvaluationTest, tf.test.TestCase):
  """Runs the shared regressor evaluation suite against LinearRegressorV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
        self, _linear_regressor_fn, fc_lib=feature_column_v2)
class LinearRegressorPredictV2Test(
    linear_testing_utils.BaseLinearRegressorPredictTest, tf.test.TestCase):
  """Runs the shared regressor predict suite against LinearRegressorV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
        self, _linear_regressor_fn, fc_lib=feature_column_v2)
class LinearRegressorIntegrationV2Test(
    linear_testing_utils.BaseLinearRegressorIntegrationTest, tf.test.TestCase):
  """Runs the shared regressor integration suite against LinearRegressorV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
        self, _linear_regressor_fn, fc_lib=feature_column_v2)
class LinearRegressorTrainingV2Test(
    linear_testing_utils.BaseLinearRegressorTrainingTest, tf.test.TestCase):
  """Runs the shared regressor training suite against LinearRegressorV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
        self, _linear_regressor_fn, fc_lib=feature_column_v2)
# Tests for Linear Classifier.
class LinearClassifierTrainingV2Test(
    linear_testing_utils.BaseLinearClassifierTrainingTest, tf.test.TestCase):
  """Runs the shared classifier training suite against LinearClassifierV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
        self,
        linear_classifier_fn=_linear_classifier_fn,
        fc_lib=feature_column_v2)
class LinearClassifierEvaluationV2Test(
    linear_testing_utils.BaseLinearClassifierEvaluationTest, tf.test.TestCase):
  """Runs the shared classifier evaluation suite against LinearClassifierV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
        self,
        linear_classifier_fn=_linear_classifier_fn,
        fc_lib=feature_column_v2)
class LinearClassifierPredictV2Test(
    linear_testing_utils.BaseLinearClassifierPredictTest, tf.test.TestCase):
  """Runs the shared classifier predict suite against LinearClassifierV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
        self,
        linear_classifier_fn=_linear_classifier_fn,
        fc_lib=feature_column_v2)
class LinearClassifierIntegrationV2Test(
    linear_testing_utils.BaseLinearClassifierIntegrationTest, tf.test.TestCase):
  """Runs the shared classifier integration suite against LinearClassifierV2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
        self,
        linear_classifier_fn=_linear_classifier_fn,
        fc_lib=feature_column_v2)
# Tests for Linear logit_fn.
class LinearLogitFnV2Test(linear_testing_utils.BaseLinearLogitFnTest,
                          tf.test.TestCase):
  """Runs the shared linear logit_fn tests with feature_column_v2."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearLogitFnTest.__init__(
        self, fc_lib=feature_column_v2)
# Tests for warm-starting with Linear logit_fn.
class LinearWarmStartingV2Test(linear_testing_utils.BaseLinearWarmStartingTest,
                               tf.test.TestCase):
  """Runs the shared warm-starting tests against the V2 estimators."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    tf.test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearWarmStartingTest.__init__(
        self,
        _linear_classifier_fn,
        _linear_regressor_fn,
        fc_lib=feature_column_v2)
class ComputeFractionOfZeroTest(tf.test.TestCase):
  """Unit tests for linear._compute_fraction_of_zero (weight sparsity metric)."""

  def _assertSparsity(self, expected_sparsity, tensor):
    # Helper: sparsity of a single tensor wrapped in a list.
    sparsity = linear._compute_fraction_of_zero([tensor])
    self.assertAllClose(expected_sparsity, sparsity)

  def test_small_float32(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.float32))
    self._assertSparsity(
        0.5, ops.convert_to_tensor([0, 1, 0, 1], dtype=tf.dtypes.float32))

  def test_small_int32(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.int32))

  def test_small_float64(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.float64))

  def test_small_int64(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.int64))

  def test_nested(self):
    # Sparsity is computed over the concatenation of all tensors.
    self._assertSparsity(
        0.75, [ops.convert_to_tensor([0, 0]),
               ops.convert_to_tensor([0, 1])])

  def test_none(self):
    # An empty list of tensors is an error (nothing to measure).
    with self.assertRaises(ValueError):
      linear._compute_fraction_of_zero([])

  def test_empty(self):
    # A single zero-element tensor yields NaN (0/0).
    sparsity = linear._compute_fraction_of_zero([ops.convert_to_tensor([])])
    self.assertTrue(
        self.evaluate(tf.math.is_nan(sparsity)),
        'Expected sparsity=nan, got %s' % sparsity)

  def test_multiple_empty(self):
    sparsity = linear._compute_fraction_of_zero([
        ops.convert_to_tensor([]),
        ops.convert_to_tensor([]),
    ])
    self.assertTrue(
        self.evaluate(tf.math.is_nan(sparsity)),
        'Expected sparsity=nan, got %s' % sparsity)

  def test_some_empty(self):
    # Empty tensors contribute no elements; remaining values dominate.
    with self.test_session():
      self._assertSparsity(0.5, [
          ops.convert_to_tensor([]),
          ops.convert_to_tensor([0.]),
          ops.convert_to_tensor([1.]),
      ])

  def test_mixed_types(self):
    with self.test_session():
      self._assertSparsity(0.6, [
          ops.convert_to_tensor([0, 0, 1, 1, 1], dtype=tf.dtypes.float32),
          ops.convert_to_tensor([0, 0, 0, 0, 1], dtype=tf.dtypes.int32),
      ])

  def test_2_27_zeros__using_512_MiB_of_ram(self):
    # Large-tensor stress test: all zeros -> sparsity 1.
    self._assertSparsity(1., tf.zeros([int(2**27 * 1.01)],
                                      dtype=tf.dtypes.int8))

  def test_2_27_ones__using_512_MiB_of_ram(self):
    # Large-tensor stress test: all ones -> sparsity 0.
    self._assertSparsity(0., tf.ones([int(2**27 * 1.01)], dtype=tf.dtypes.int8))
if __name__ == '__main__':
tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
5b9fad623f650cdb47cb01210f6c4dfeb85a775b | b37b39ed5f4af5bf1455d200976b7e83b2888cab | /src/sensors_apps/workers/event_logger.py | d1b3143663554d2da3ce4e683bbd7d54d39f86f7 | [] | no_license | jldupont/sensors-apps | 71c67ce3a5226411685bd89ad4dd65590ba455ed | cdc930ed18c4a6ab0c55ceea24d765a00b649a0e | refs/heads/master | 2021-01-25T03:54:26.022483 | 2010-03-03T20:24:36 | 2010-03-03T20:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,326 | py | """
@author: jldupont
Created on 2010-03-03
"""
__all__=[]
from system.mbus import Bus
from system.worker import WorkerClass, WorkerManager
from system.amqp import AMQPCommRx
class AmqpListenerAgent(WorkerClass):
    """Worker thread consuming 'state.io.#' messages from the 'org.sensors'
    AMQP exchange and relaying each one to the parent via txMsg.
    """
    EXCH="org.sensors"
    RKEY="state.io.#"

    def __init__(self, config):
        WorkerClass.__init__(self, "AmqpListenerAgent")
        self.comm=None
        # AMQP connection parameters -- presumably host/credentials; verify
        # against AMQPCommRx's expectations.
        self.config=config

    def doRun(self):
        # Blocking receive loop: runs until asked to quit or the broker
        # connection drops.
        self.comm=AMQPCommRx(self.config, self.EXCH, rkey=self.RKEY, rq="q.sensors.listener")
        self.comm.connect()
        while not self.quit and self.comm.isOk():
            self.comm.wait()
            self.processMsgQueue()
        print "AmqpListenerAgent: exiting"

    def processMsgQueue(self):
        # Drain every message currently buffered by the AMQP receiver.
        while True:
            mtype, rkey, mdata=self.comm.gMsg()
            if mtype is None:
                break
            self.txMsg(rkey, mdata)
## -------------------------------------------------------------------------
## -------------------------------------------------------------------------
class Manager(object):
    """ Manages the lifecycle of the Listener Agents.

    Spawns an AmqpListenerAgent once AMQP configuration is known,
    re-spawns it (rate limited by RETRY_INTERVAL poll ticks) after a
    connection failure, and drains the agent's message queue on every
    poll tick, re-publishing each message on the local bus.
    """
    RETRY_INTERVAL=4*10  # poll ticks to wait between re-spawn attempts

    def __init__(self):
        self.currentWorker=None
        self.cpc=0                 # current poll count
        self.last_spawn_count=0    # poll count at the last spawn attempt
        # Fix: self.config was read in _hpoll/maybeSpawn before _hconfig
        # ever ran, raising AttributeError on the first poll.
        self.config=None

    def _hconfig(self, config):
        # '%config-amqp' handler: store the config and recycle the worker
        # so the next poll spawns one with the new settings.
        self.config=config
        self.update()

    def update(self):
        """ A new worker will get spawned on the next 'poll'. """
        if self.currentWorker is not None:
            WorkerManager.terminate(self.currentWorker)
            self.currentWorker=None

    def maybeSpawn(self):
        # Spawn a fresh agent if none is running, the config is known, and
        # either this is the first attempt or RETRY_INTERVAL ticks elapsed.
        if self.currentWorker is None and self.config:
            delta=self.cpc - self.last_spawn_count
            if delta >= self.RETRY_INTERVAL or self.last_spawn_count==0:
                self.currentWorker=AmqpListenerAgent(self.config)
                self.currentWorker.start()
                self.last_spawn_count=self.cpc

    def _hpoll(self, pc):
        # '%poll' handler: drives spawning, failure detection and draining.
        self.cpc=pc
        if not self.config:
            Bus.publish(self, "%config-amqp?")
        self.maybeSpawn()
        if self.currentWorker is not None:
            if not self.currentWorker.is_alive():
                # Worker thread died => broker connection failed.
                Bus.publish(self, "%conn-error", "warning", "Connection to AMQP broker failed")
                del self.currentWorker
                self.currentWorker = None
        if self.currentWorker is not None:
            self.processMsgQueue()

    def processMsgQueue(self):
        # Re-publish every pending worker message on the local bus.
        while True:
            msg=self.currentWorker.rxFromWorker()
            if msg is None:
                break
            try:
                mtype=msg.pop(0)
                mdata=msg.pop(0)
            except:
                # Malformed message (fewer than 2 elements or not a list).
                Bus.publish(self, "%llog", "%msg-error", "error", "Error whilst decoding message from AMQP exchange 'org.sensors' ")
                continue
            ## e.g. "state.io.din"
            Bus.publish(self, mtype, mdata)

    def _hquit(self):
        # '%quit' handler: terminate the current worker.
        self.update()
_mng=Manager()
Bus.subscribe("%config-amqp", _mng._hconfig)
Bus.subscribe("%poll", _mng._hpoll)
Bus.subscribe("%quit", _mng._hquit) | [
"github@jldupont.com"
] | github@jldupont.com |
5a0cca812ad7e8c9c393f058d8056c663951a197 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/pandas/tests/arrays/test_array.py | 5c0cf0147c8d8dd41430888dd8c4ec491f63c637 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c10e1eac29b51da09a84897d1e569c85604c251b75ab5162ca2142102cf54674
size 12601
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
5232c8c1d73ead0f086fcec3d0368fe12341dd5c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /mrrKngM2fqDEDMXtS_0.py | f5c5ee5871ccd936356d15e43beec5e361c917a5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py |
def can_patch(bridge, planks):
    """Return True if every hole in the bridge can be covered.

    `bridge` is a list of 1s (solid) and 0s (holes); a maximal run of 0s
    of length L is a hole of size L.  Holes of size <= 1 need no plank.
    A hole of size L can be covered by a plank of length L or L - 1, and
    there must be at least as many such planks as holes of that size.
    """
    surface = ''.join(map(str, bridge))
    gap_sizes = [len(gap) for gap in surface.split('1')]
    if max(gap_sizes, default=0) <= 1:
        return True
    for size in gap_sizes:
        if size > 1 and planks.count(size - 1) + planks.count(size) < gap_sizes.count(size):
            return False
    return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9764ca21187a78fb9872c99429d46e64c77f15ee | f62e4c46fb0f98879fb63977fa29631b02e3928c | /15 задание/Неравенства_009.py | b2bc0ff150714baa3cfe5be28d8e3cd6b0939094 | [] | no_license | SeveralCamper/USE-2020-2021 | c34f4d7a2c3e0f51529141781f523b63242a835d | ac1122649f2fd431a91af5dda5662492e2565109 | refs/heads/master | 2023-09-03T13:36:05.822568 | 2021-10-27T12:54:10 | 2021-10-27T12:54:10 | 392,303,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Задание 15 № 16045
# Для какого наибольшего целого неотрицательного числа A выражение
# (y + 2x ≠ 48) ∨ (A < x) ∨ (A < y)
# тождественно истинно, то есть принимает значение 1 при любых целых неотрицательных x и y?
A = 1
while True:
for x in range(1,1000):
for y in range(1, 1000):
if not((y + 2 * x != 48) or (A < x) or (A < y)):
break
else:
continue
break
else:
print(A)
A += 1
# Ответ: 15
| [
"mikha.alkhimovich@mail.ru"
] | mikha.alkhimovich@mail.ru |
788d599d353ccd9b4e3f54de3dc812e97de6ad65 | 96e0dd08563b1f579992c14207d103ee80222b1b | /0408/MR_ACT_TIPS_CLOSE.py | dfa55827fd59522ce3b45c0fbef414e64f266327 | [] | no_license | tonygodspeed/pytest | 4030e21f3206e3c5cb58aac870e3a1a57cd6943d | 2e87b91c148ff6966096bb8b197c0a84f5a1e7e2 | refs/heads/master | 2020-04-02T13:14:20.811887 | 2018-10-24T09:00:57 | 2018-10-24T09:00:57 | 154,472,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | #!/usr/bin/env python
# coding=utf8
from MR_BASE import *
reload(sys)
sys.setdefaultencoding("utf-8")
str_act = "ACT_TIPS_CLOSE"
class MR_ACT_TIPS_CLOSE(mr_base):
    """Log-line matcher for the 'ACT_TIPS_CLOSE' action.

    Declares each expected key of the log record, its value type
    ('s' = string, 'i' = integer) and the regex used to extract it.
    """
    def __init__(self):
        mt_type = [
            {'key': 'DISVER', 'type': 's', 'mt': r'.*DISVER:' + common_def.MT_VALUE_INVALID_POSTFIX},
            {'key': 'VER', 'type': 's', 'mt': r'VER:' + common_def.MT_VALUE_INVALID_POSTFIX},
            {'key': 'CHID', 'type': 's', 'mt': r'CHID:' + common_def.MT_VALUE_INVALID_POSTFIX},
            {'key': 'MAC', 'type': 's', 'mt': r'MAC:' + common_def.MT_VALUE_INVALID_POSTFIX},
            {'key': 'MCID', 'type': 's', 'mt': r'MCID:' + common_def.MT_VALUE_INVALID_POSTFIX},
            {'key': 'CFGVER', 'type': 's', 'mt': r'CFGVER:' + common_def.MT_VALUE_INVALID_POSTFIX},
            {'key': 'tips_name', 'type': 's', 'mt': r'tips_name:' + common_def.MT_VALUE_VALID_POSTFIX},
            {'key': 'ret', 'type': 'i', 'mt': r'ret:' + common_def.MT_VALUE_INVALID_POSTFIX},
            {'key': 'append_info', 'type': 's', 'mt': r'append_info:' + common_def.MT_VALUE_VALID_POSTFIX},
        ]
        mr_base.__init__(self, mt_type, str_act)
mr_obj = MR_ACT_TIPS_CLOSE()
if __name__ == '__main__':
test_str = r'03:12| [INFO]: <SRC:KWSHELLEXT_1.0.6.9051_MUSICDR8021PE|S:1012|PROD:KWSHELLEXT|DISVER:1.0.6.9077|OS:6.1.7601.2_Service Pack 1|PLAT:X64|VER:1.0.0.7|GID:71|CHID:MUSICDR8021PE|PN:rundll32.exe|MAC:F832E4A3AE08|UAC:0|ADMIN:1|MVER:MUSIC_8.5.2.0_P2T2|MCID:81018516|ST:1497888579|CFGVER:37|ACT:ACT_TIPS_CLOSE|tips_name:tips_competitor|ret:102|append_info:c_type=3|{}|U:>(175.43.235.16)TM:1497888194'
mr_obj.LocalTest(test_str)
pass
| [
"412291198@qq.com"
] | 412291198@qq.com |
278032d631fba53001449d91e03a872faa35bca7 | 7300fc72162568f886e04509431359a62a09da79 | /lino_xl/lib/invoicing/fixtures/demo.py | 71831baa9fb747e6c2a93c3f3faffa9c31c2133a | [
"BSD-2-Clause"
] | permissive | forexblog/xl | ad27aa1e9f5669f8a78ec55f4b7d0bd952da6327 | 130303647d01c0d8271f770f3054907c183dc1e8 | refs/heads/master | 2023-03-04T01:44:39.485452 | 2021-02-13T08:18:16 | 2021-02-13T08:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # -*- coding: UTF-8 -*-
# Copyright 2019 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from lino.api import dd, rt, _
def DEP(name, **kwargs):
    """Build an invoicing Area whose (translatable) designation is *name*."""
    kwargs = dd.str2kw('designation', name, **kwargs)
    # kwargs.update(designation=name)
    return rt.models.invoicing.Area(**kwargs)
def objects():
    """Demo fixture generator: yield three default invoicing areas."""
    yield DEP(_("First"))
    yield DEP(_("Second"))
    yield DEP(_("Third"))
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
ae19267c6dda3313c40bbe157565655c4a567a5d | 56b63ee537f872af0fc028016d1508b4c1dd5c60 | /school/migrations/0283_auto_20210430_1342.py | dacf5ab3f5b6d36ae57960a11a16a4d70f68b1f3 | [] | no_license | jacknjillsolutionsrevanth/EMS1 | 01fc571120f765b0fbfe3aa654b15ff578d6e9b9 | db14d8e6c15669b5938aa9276c5e22006218814a | refs/heads/main | 2023-08-03T19:40:50.073133 | 2021-10-01T07:02:37 | 2021-10-01T07:02:37 | 410,202,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # Generated by Django 3.2 on 2021-04-30 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make report-table numeric columns nullable with a default of zero."""

    dependencies = [
        ('school', '0282_rpt_dailydata'),
    ]
    operations = [
        migrations.AlterField(
            model_name='rpt_dailydata',
            name='sampno',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='rpt_dailydata',
            name='sampno2',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='rpt_excel_bankwise',
            name='net',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
    ]
| [
"jacknjillsolutions.revanth@gmail.com"
] | jacknjillsolutions.revanth@gmail.com |
a895ffff2e8eef3681fcd33e7078e5fe1d048327 | 56014da6ebc817dcb3b7a136df8b11cf9f976d93 | /Python基础笔记/12-tkinter图形界面/25.鼠标点击事件.py | 78be4991790e176ac02a8f6d5493e2c7c805b998 | [] | no_license | sunday2146/notes-python | 52b2441c981c1106e70a94b999e986999334239a | e19d2aee1aa9433598ac3c0a2a73b0c1e8fa6dc2 | refs/heads/master | 2022-01-12T22:55:45.401326 | 2019-01-18T03:18:26 | 2019-01-18T03:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | import tkinter
# Create the main window
win = tkinter.Tk()
# Set the window title
win.title("sunck")
# Set the window size (400x400) and screen position (+200+200)
win.geometry("400x400+200+200")
def func(event):
    # Print the (x, y) coordinates of the mouse event inside the widget.
    print(event.x,event.y)
# <Button-1>        left mouse button
# <Button-2>        middle mouse button (wheel)
# <Button-3>        right mouse button
# <Double-Button-1> left button double-click
# <Triple-Button-1> left button triple-click
button1 = tkinter.Button(win,text = "leftMouse button")
# bind() attaches an event handler to the widget
button1.bind("<Button-1>",func)
button1.pack()
win.mainloop()
"964640116@qq.com"
] | 964640116@qq.com |
2e2fa323eea5fc934489a50ee7d2abd689370fc0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py | 8db3bebe5c1db6d3d139f2718d06141af12b6c16 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,034 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Mask R-CNN with a ResNeXt-101 64x4d backbone on LVIS v0.5; training
# schedule, data pipeline and heads are inherited from the r50 base config.
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,  # ResNeXt cardinality
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + first stage during fine-tuning
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
5e2fabfb02162670a52913d64f726d0eb8816046 | 1aebf8a65f27304a135748683cd100b648b816fc | /easydoc/wsgi.py | 8ee0729e30a3a0d708770b13c2992e6ca098d826 | [] | no_license | Shatki/easydoc | 5e16f31ded11ca123d820c34c525a6edd1f8cbfa | eee96f6857c6486ef16c4eb7b4822f12a160d1a9 | refs/heads/master | 2020-04-18T06:19:35.896596 | 2019-02-18T05:50:12 | 2019-02-18T05:50:12 | 167,315,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for easydoc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'easydoc.settings')
application = get_wsgi_application()
| [
"Shatki@mail.ru"
] | Shatki@mail.ru |
d718b9f7eb54f07326e127b321bdd783d7613634 | 4a83d8f34a50aea012491058925296fdc3edac0d | /pandasdmx/tests/test_remote.py | 36f3a6f035b4ff2ea6f8329e467fea639c3e5ce0 | [
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | openpolis/pandaSDMX | 8af55fd24272ee359fd6700414258101b5a948f2 | 0c4e2ad0c25a63d2bcb75703e577d2723162a9b5 | refs/heads/master | 2020-05-30T14:20:10.821106 | 2020-04-05T22:54:55 | 2020-04-05T22:54:55 | 189,787,828 | 0 | 0 | Apache-2.0 | 2019-06-02T00:08:58 | 2019-06-01T23:55:55 | Python | UTF-8 | Python | false | false | 785 | py | import pytest
from pandasdmx.remote import Session
from . import has_requests_cache
@pytest.mark.skipif(has_requests_cache, reason='test without requests_cache')
def test_session_without_requests_cache():  # pragma: no cover
    """Session must warn when cache options are given but the optional
    requests_cache dependency is not installed."""
    # Passing cache= arguments when requests_cache is not installed triggers a
    # warning
    with pytest.warns(RuntimeWarning):
        Session(cache_name='test')
@pytest.mark.remote_data
def test_session_init_cache(tmp_path):
    """A Session with a sqlite cache backend writes the cache file to disk."""
    # Instantiate a REST object with cache
    cache_name = tmp_path / 'pandasdmx_cache'
    s = Session(cache_name=str(cache_name), backend='sqlite')
    # Get a resource
    s.get('https://registry.sdmx.org/ws/rest/dataflow')
    # Test for existence of cache file
    assert cache_name.with_suffix('.sqlite').exists()
| [
"mail@paul.kishimoto.name"
] | mail@paul.kishimoto.name |
96d51106afa543e4c0fa9dd52a64b843819f1c4e | 23130cd12e38dbce8db8102810edaad70b240ae2 | /lintcode/604.py | 87ad2b4e294171a68625eef8adc80e2ab6a38d0d | [
"MIT"
] | permissive | kangli-bionic/algorithm | ee6687c82101088db20f10fb958b4e45e97d3d31 | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | refs/heads/master | 2023-01-05T09:29:33.204253 | 2020-10-25T17:29:38 | 2020-10-25T17:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | """
604. Window Sum
https://www.lintcode.com/problem/window-sum/description
"""
class Solution:
    """
    @param nums: a list of integers.
    @param k: length of window.
    @return: the sum of the element inside the window at each moving.
    """
    def winSum(self, nums, k):
        """Return the sum of every length-k sliding window over nums.

        Uses an O(n) rolling sum instead of re-summing each window.
        If k exceeds len(nums) a single window covering all of nums is
        returned (matching the original behavior); for k <= 0 or empty
        input the result is [] (the original produced garbage for k <= 0).
        """
        if not nums or k <= 0:
            return []
        # Sum of the first window (the whole list when k > len(nums)).
        window = sum(nums[:k])
        result = [window]
        # Slide: add the entering element, drop the leaving one.
        for i in range(k, len(nums)):
            window += nums[i] - nums[i - k]
            result.append(window)
        return result
| [
"hipaulshi@gmail.com"
] | hipaulshi@gmail.com |
252e619211ed66f1c15ae2ad2e998cdfb7eb987a | a8289cb7273245e7ec1e6079c7f266db4d38c03f | /Django_Viewsets_RESTAPI/Viewsets_RESTAPI/urls.py | 4fc6746613611cd39c4e03d1f948db1a31cf403b | [] | no_license | palmarytech/Python_Snippet | 6acbd572d939bc9d5d765800f35a0204bc044708 | 41b4ebe15509d166c82edd23b713a1f3bf0458c5 | refs/heads/master | 2022-10-06T22:51:00.469383 | 2020-03-13T08:32:11 | 2020-03-13T08:32:11 | 272,350,189 | 1 | 0 | null | 2020-06-15T05:30:44 | 2020-06-15T05:30:44 | null | UTF-8 | Python | false | false | 1,449 | py | """Viewsets_RESTAPI URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
# from musics import views # If import mutiple models at the same time, can't write this way.
from musics.views import MusicViewSet
from shares.views import ShareViewSet
router = DefaultRouter()
router.register(r'music', MusicViewSet, base_name='music')
router.register(r'shares', ShareViewSet, base_name='share')
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^api/', include(router.urls), name="api"),
# path("api/", include("languages.urls")),
path("", include("languages.urls")),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) # Adding this line will make the login button show up
]
| [
"leamon.lee13@gmail.com"
] | leamon.lee13@gmail.com |
5837f376ec08bb98bb6bcdad25b085acba191a75 | 8dbc30ab4f0c76bfc08784a6e06b68cae61888a7 | /collective/fbshare/browser/view.py | c2f66ea6010a51dbbeb56be090b22a4499fd00fd | [] | no_license | RedTurtle/collective.fbshare | 11d19b5ab91b7ae728d3602b30d0f1b9bec3fe1a | 66d8a5092a31a23d6b508c8fb23c19da548d7c2f | refs/heads/master | 2021-07-11T13:36:37.216587 | 2016-04-19T06:56:13 | 2016-04-19T06:56:13 | 5,167,545 | 1 | 2 | null | 2021-03-25T13:55:45 | 2012-07-24T15:39:35 | Python | UTF-8 | Python | false | false | 934 | py | # -*- coding: utf-8 -*-
from collective.fbshare.interfaces import IFbShareSettings
from plone.registry.interfaces import IRegistry
from Products.Five.browser import BrowserView
from zExceptions import NotFound
from zope.component import queryUtility
class ShareDefaultImage(BrowserView):
    """Return a bytestream with the default image"""

    def data(self):
        # Read the site-wide default share image from the plone.registry
        # settings; may be None when no image has been configured.
        registry = queryUtility(IRegistry)
        settings = registry.forInterface(IFbShareSettings, check=False)
        return settings.default_image

    def __call__(self, *args, **kwargs):
        # Serve the stored image inline as JPEG, or 404 when unset.
        bytes = self.data()
        if bytes:
            response = self.request.response
            response.setHeader('Content-Type','image/jpg')
            response.setHeader('Content-Disposition', 'inline; filename=collective.fbshare.default_image.jpg')
            response.write(bytes)
            return
        # no data? no image
        raise NotFound()
"luca@keul.it"
] | luca@keul.it |
1ac4d89906a70db4ec6a361cbe5ae3f6b506fe00 | 12ee4c670e3be681376b07af291c22175b680447 | /videos/migrations/0002_video_category.py | 32a4b76909e59df49cff7196d5ca3d8c4321a06e | [] | no_license | chayan007/miworld | b1cd0febac40d665e8925fea5872df8f27456f05 | 969c551acbd9b3872988756aa4bbab7f42a990cc | refs/heads/master | 2022-12-11T08:37:22.173629 | 2019-02-24T12:19:33 | 2019-02-24T12:19:33 | 172,792,993 | 0 | 0 | null | 2022-11-22T03:22:19 | 2019-02-26T21:25:19 | JavaScript | UTF-8 | Python | false | false | 384 | py | # Generated by Django 2.1.4 on 2019-02-04 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('videos', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='category',
field=models.CharField(max_length=50, null=True),
),
]
| [
"sonicxxx7@gmail.com"
] | sonicxxx7@gmail.com |
a749526e7102540e5a156df975af7fdbfaa9190d | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/001_classes/_exercises/_templates/Python_3_Deep_Dive_Part_4/Section 14 Metaprogramming/156. Metaprogramming Application 2.py | ff9aa57dd6c6305e1ed6ad98c8b863c7473ec381 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 7,619 | py | # %%
'''
### Metaprogramming - Application 2
'''
# %%
'''
There's another pattern we can implement using metaprogramming - Singletons.
'''
# %%
'''
If you read online, you'll see that singleton objects are controversial in Python.
I'm not going to get into a debate on this, other than to say I do not use singleton objects, not because I have deep
thoughts about it (or even shallow ones for that matter), but rather because I have never had a need for them.
'''
# %%
'''
However, the question often comes up, so here it is - the metaclass way of implementing the singleton pattern.
Whether you think you should use it or not, is entirely up to you!
'''
# %%
'''
We have seen singleton objects - objects such as `None`, `True` or `False` for example.
'''
# %%
'''
No matter where we create them in our code, they always refer to the **same** object.
'''
# %%
'''
We can recover the type used to create `None` objects:
'''
# %%
NoneType = type(None)
# %%
'''
And now we can create multiple instances of that type:
'''
# %%
n1 = NoneType()
n2 = NoneType()
# %%
id(n1), id(n2)
# %%
'''
As you can see, any instance of `NoneType` is actually the **same** object.
'''
# %%
'''
The same holds true for booleans:
'''
# %%
b1 = bool([])
b2 = bool("")
# %%
id(b1), id(b2)
# %%
'''
These are all examples of singleton objects. Now matter how we create them, we always end up with a reference to
the same instance.
'''
# %%
'''
There is no built-in mechanism to Python for singleton objects, so we have to do it ourselves.
'''
# %%
'''
The basic idea is this:
When an instance of the class is being created (but **before** the instance is actually created), check if an instance
has already been created, in which case return that instance, otherwise, create a new instance and store that instance
reference somewhere so we can recover it the next time an instance is requested.
'''
# %%
'''
We could do it entirely in the class itself, without any metaclasses, using the `__new__` method.
We can start with this:
'''
# %%
class Hundred:
    """Tutorial class: __new__ attaches 'name' and 'value' attributes.

    Every call creates a distinct instance -- this is NOT a singleton.
    """
    def __new__(cls):
        instance = super().__new__(cls)
        instance.name = 'hundred'
        instance.value = 100
        return instance
# %%
h1 = Hundred()
# %%
vars(h1)
# %%
'''
But of course, this is not a singleton object.
'''
# %%
h2 = Hundred()
# %%
print(h1 is h2)
# %%
'''
So, let's fix this to make it a singleton:
'''
# %%
class Hundred:
    """Singleton via __new__: every call returns the same cached instance."""

    _existing_instance = None  # class-level cache of the single instance

    def __new__(cls):
        if cls._existing_instance:
            print('instance exists already, using that one...')
        else:
            print('creating new instance...')
            instance = super().__new__(cls)
            instance.name = 'hundred'
            instance.value = 100
            cls._existing_instance = instance
        return cls._existing_instance
# %%
h1 = Hundred()
# %%
h2 = Hundred()
# %%
print(h1 is h2)
# %%
'''
And there you are, we have a singleton object.
'''
# %%
'''
So this works, but if you need to have multiple of these singleton objects, the code will just become repetitive.
'''
# %%
'''
Metaclasses to the rescue!
'''
# %%
'''
Remember what we are trying to do:
If we create two instances of our class `Hundred` we expect the same instance back.
'''
# %%
'''
But how do we create an instance of a class - we **call** it, so `Hundred()`.
'''
# %%
'''
Which `__call__` method is that? It is not the one in the `Hundred` class, that would make **instances** of `Hundred`
callable, it is the `__call__` method in the **metaclass**.
'''
# %%
'''
So, we need to override the `__call__` in our metaclass.
'''
# %%
class Singleton(type):
    """Metaclass hook: logs every instantiation request, then delegates."""

    def __call__(cls, *args, **kwargs):
        print(f'Request received to create an instance of class: {cls}...')
        instance = super().__call__(*args, **kwargs)
        return instance
# %%
class Hundred(metaclass=Singleton):
value = 100
# %%
h = Hundred()
# %%
h.value
# %%
'''
OK, that works, but now we need to make it into a singleton instance.
'''
# %%
'''
We have to be careful here. Initially we had used the class itself (`Hundred`) to store, as a class variable, whether
an instance had already been created.
And here we could try to do the same thing.
We could store the instance as a class variable in the class of the instance being created
That's actually quite simple, since the class is received as the first argument of the `__call__` method.
'''
# %%
class Singleton(type):
    """Metaclass caching the instance as an attribute of the class itself.

    NOTE: as the surrounding text goes on to show, storing the cache on the
    class breaks under inheritance (subclasses inherit the attribute); that
    behavior is deliberately preserved here.
    """

    def __call__(cls, *args, **kwargs):
        print(f'Request received to create an instance of class: {cls}...')
        cached = getattr(cls, 'existing_instance', None)
        if cached is None:
            print('Creating instance for the first time...')
            cached = super().__call__(*args, **kwargs)
            setattr(cls, 'existing_instance', cached)
        else:
            print('Using existing instance...')
        return cached
# %%
class Hundred(metaclass=Singleton):
value = 100
# %%
h1 = Hundred()
# %%
h2 = Hundred()
# %%
print(h1 is h2, h1.value, h2.value)
# %%
'''
So that seems to work just fine. Let's create another singleton class and see if things still work.
'''
# %%
class Thousand(metaclass=Singleton):
value = 1000
# %%
t1 = Thousand()
# %%
t2 = Thousand()
# %%
print(h1 is h2, h1.value, h2.value)
# %%
print(t1 is t2, t1.value, t2.value)
# %%
print(h1 is t1, h2 is t2)
# %%
'''
So far so good.
'''
# %%
'''
Finally let's make sure everything works with **inheritance** too - if we inherit from a Singleton class, that subclass
should also be a singleton.
'''
# %%
class HundredFold(Hundred):
value = 100 * 100
# %%
hf1 = HundredFold()
# %%
'''
Whaaat? Using existing instance? But this is the first time we created it!!
'''
# %%
'''
The problem is this: How are we checking if an instance has already been created?
'''
# %%
'''
We did this:
```if getattr(cls, 'existing_instance')```
'''
# %%
'''
But since `HundredFold` inherits from `Hundred`, it also inherited the class attribute `existing_instance`.
'''
# %%
'''
This means we have to be a bit more careful in our metaclass, we need to see if we have an instance of the **specific**
class already created - and we cannot rely on storing a class attribute in the classes themselves since that breaks
the pattern when subclassing.
'''
# %%
'''
So, instead, we are going to store the class, and the instance of that class, in a dictionary **in the metaclass**
itself, and use that dictionary to lookup the existing instance (if any) for a specific class.
'''
# %%
class Singleton(type):
    """Metaclass keeping one instance per class in a registry on the metaclass.

    Because the registry is keyed by the concrete class (not stored on it),
    subclasses of a singleton class get their own independent instance.
    """

    instances = {}  # maps each class -> its unique instance

    def __call__(cls, *args, **kwargs):
        print(f'Request received to create an instance of class: {cls}...')
        if cls in Singleton.instances:
            print('Using existing instance...')
        else:
            print('Creating instance for the first time...')
            Singleton.instances[cls] = super().__call__(*args, **kwargs)
        return Singleton.instances[cls]
# %%
class Hundred(metaclass=Singleton):
value = 100
class Thousand(metaclass=Singleton):
value = 1000
class HundredFold(Hundred):
value = 100 * 100
# %%
h1 = Hundred()
h2 = Hundred()
# %%
t1 = Thousand()
t2 = Thousand()
# %%
hf1 = HundredFold()
hf2 = HundredFold()
# %%
print(h1 is h2, t1 is t2, hf1 is hf2)
# %%
print(h1.value, h2.value, t1.value, t2.value, hf1.value, hf2.value)
# %%
'''
And just to make sure :-)
'''
# %%
print(h1 is hf1) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
176397951d78145067e5b871383ecc4d42840ee2 | 05ba1957e63510fd8f4f9a3430ec6875d9ecb1cd | /.history/fh/c_20200819004934.py | 6424d258230e0c58a1cdbfe7cdc81b12ec0b9785 | [] | no_license | cod-lab/try | 906b55dd76e77dbb052603f0a1c03ab433e2d4d1 | 3bc7e4ca482459a65b37dda12f24c0e3c71e88b6 | refs/heads/master | 2021-11-02T15:18:24.058888 | 2020-10-07T07:21:15 | 2020-10-07T07:21:15 | 245,672,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,919 | py | import fileinput as fi
import requests as r
import pprint as p
import sys as s
# Get Current List
def read_file(file, current_list):
    """Load lines 38-42 (0-based) of *file* into *current_list*, keyed by index.

    These are the lines of the README that hold the repo-list bullets.
    """
    with open(file, 'r') as handle:
        for index, text in enumerate(handle):
            if 38 <= index <= 42:
                current_list[index] = text
# Get New List
def get_repos_list(user, new_list):
    """Fetch the 5 newest non-forked public repos of *user* from the GitHub API
    and store them in *new_list* as markdown bullet lines keyed 38-42
    (matching the line indices read_file() collects); exits if fewer than 5.
    """
    payload = {'sort': 'created'}
    # response = re.get('https://api.github.com/users/cod-lab/repos?sort=created, timeout=10)	# also works
    response = r.get('https://api.github.com/users/' + user + '/repos', params=payload, timeout=10)
    result = response.json()		# got list of all public repos
    # getting filtered list (latest 5 public repos which r not forked)
    j=1
    for i in range(len(result)):
        if result[i]['fork'] == False:
            # j runs 1..5, so keys are 38..42 — the README lines being replaced.
            new_list[j+37] = "* [" + result[i]['name'] + "](" + result[i]['html_url'] + ")\n"
            j+=1
            if j>5: break
    if len(new_list)<5: s.exit("\nError: less than 5 repos available") # terminate prgm right away after printing msg
# OverWrite New List to file if there's any difference
def write_file(file, current_list, new_list):
    """Rewrite *file* in place, replacing each line that contains a tracked
    old entry (current_list) with its replacement (new_list), same key."""
    # FileInput(inplace=1) redirects stdout into the file, so print() writes it.
    for row in fi.FileInput(file, inplace=1):
        for key, old_text in current_list.items():
            if old_text in row:
                row = new_list[key]
        print(row, end='')
# --- Script driver: refresh the repo-list section inside the markdown file ---
user='cod-lab'
file='a.md'
current_list={}
new_list={}
# Step 1: read the current repo-list lines from the file.
# print('\nread_file block----------------------\n')
# try: read_file(file,current_list)
read_file(file,current_list)
# except FileNotFoundError: s.exit('No such file!!\nEnter correct file name..') # terminate prgm right away after printing msg
# print('\nread_file block end------------------\n')
# Step 2: fetch the fresh list from the GitHub API.
# print('\nget_repos block----------------------\n')
# try: get_repos_list(user,new_list)
get_repos_list(user,new_list)
# except r.exceptions.ConnectTimeout: s.exit('The server is not responding currently!!\nPlease try again later..') # problem connecting srvr or srvr not responding # terminate prgm right away after printing msg
# except r.exceptions.ReadTimeout: s.exit('The server is not responding currently!!\nPlease try again later..') # unable to read received response # terminate prgm right away after printing msg
print('\nget_repos block end------------------\n')
# '''
print("current_list: ")
p.pprint(current_list, indent=2, width=3)
print("\nnew_list: ")
p.pprint(new_list, indent=2, width=3)
# '''
# Step 3: count changed lines; only rewrite the file when something differs.
diff=0
for i in range(5):
    if current_list[i+38] != new_list[i+38]: diff+=1
print("\ndiff: ",diff,"\n")
if diff>0:
    print('\nwrite_file block----------------------\n')
    write_file(file,current_list,new_list)
    print('\nwrite_file block end------------------\n')
| [
"arihant806@gmail.com"
] | arihant806@gmail.com |
c7d7aa6f65faeb715e9af1f895734602000e741c | df3c8c521a51f2b412118bd9d0e477da06a3b7cc | /views/get_reactions_to_post/tests/test_case_01.py | e8cc5b4a38bcf767434778e105fc1ab98edd39e3 | [] | no_license | bharatmudragada/fb_post | c30b900731db5844df6b438e5d38a0dfb607412a | c5e7bb185a561bdcfcd7b2e30264554b07106044 | refs/heads/master | 2020-06-21T04:05:22.296755 | 2019-07-17T07:48:22 | 2019-07-17T07:48:22 | 197,339,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | """
# TODO: Update test case description
"""
from django_swagger_utils.utils.test import CustomAPITestCase
from . import APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX
REQUEST_BODY = """
"""
TEST_CASE = {
"request": {
"path_params": {"post_id": "ibgroup"},
"query_params": {},
"header_params": {},
"securities": {"oauth": {"tokenUrl": "http://auth.ibtspl.com/oauth2/", "flow": "password", "scopes": ["read"], "type": "oauth2"}},
"body": REQUEST_BODY,
},
}
class TestCase01GetReactionsToPostAPITestCase(CustomAPITestCase):
    """Declarative API test: runs TEST_CASE against the get_reactions_to_post
    endpoint via the swagger-utils CustomAPITestCase harness."""
    app_name = APP_NAME
    operation_name = OPERATION_NAME
    request_method = REQUEST_METHOD
    url_suffix = URL_SUFFIX
    test_case_dict = TEST_CASE
    def test_case(self):
        # default_test_case() issues the request described by TEST_CASE and
        # runs the framework's standard response validations.
        self.default_test_case()  # Returns response object.
        # Which can be used for further response object checks.
        # Add database state checks here.
"bharathmudragada123@gmail.com"
] | bharathmudragada123@gmail.com |
1492bd5f614068c35c19883d6fc5a9624c0ae5c8 | bcd878b91e75fc14c66943911908447078cd581e | /tensorflow_serving/example/inception_export.py | 6bcd2b36b41b18999ed0c54c1bc4ae212cfa7433 | [
"Apache-2.0"
] | permissive | cfregly/serving | 677367ffa0263f52bf4c3b06287566e20f61e77a | bdd0b47e715a596a8a927deac5018bfb656848d2 | refs/heads/master | 2021-01-14T09:42:24.046049 | 2016-04-04T21:54:08 | 2016-04-04T21:54:08 | 55,674,476 | 1 | 1 | null | 2016-04-07T07:46:01 | 2016-04-07T07:46:01 | null | UTF-8 | Python | false | false | 4,618 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/grte/v4/bin/python2.7
"""Export inception model given existing training checkpoints.
"""
import os.path
import sys
# This is a placeholder for a Google-internal import.
import tensorflow as tf
from inception import inception_model
from tensorflow_serving.session_bundle import exporter
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/inception_train',
"""Directory where to read training checkpoints.""")
tf.app.flags.DEFINE_string('export_dir', '/tmp/inception_export',
"""Directory where to export inference model.""")
tf.app.flags.DEFINE_integer('image_size', 299,
"""Needs to provide same value as in training.""")
FLAGS = tf.app.flags.FLAGS
NUM_CLASSES = 1000
NUM_TOP_CLASSES = 5
def export():
  """Build the Inception inference graph, restore the newest training
  checkpoint from FLAGS.checkpoint_dir and export a TF-Serving session
  bundle (with a classification signature) to FLAGS.export_dir.

  Uses the legacy TF 0.x API (tf.sub/tf.mul, session_bundle exporter).
  """
  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.
    # Input transformation.
    # TODO(b/27776734): Add batching support.
    jpegs = tf.placeholder(tf.string, shape=(1))
    image_buffer = tf.squeeze(jpegs, [0])
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1). The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)
    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image,
                                     [FLAGS.image_size, FLAGS.image_size],
                                     align_corners=False)
    image = tf.squeeze(image, [0])
    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    images = tf.expand_dims(image, 0)
    # Run inference.
    # NUM_CLASSES + 1 presumably accounts for a background class -
    # verify against the inception training setup.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)
    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)
    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        # /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return
      # Export inference model.
      model_exporter = exporter.Exporter(saver)
      signature = exporter.classification_signature(
          input_tensor=jpegs, classes_tensor=indices, scores_tensor=values)
      model_exporter.init(default_graph_signature=signature)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
print('Successfully exported model to %s' % FLAGS.export_dir)
def main(unused_argv=None):
  """Entry point for tf.app.run(); ignores CLI args and runs the export."""
  export()
if __name__ == '__main__':
tf.app.run() | [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
da54578f4d643573bbc62d2a04f74d415e2671eb | a6996ea8cc1f0bdcbdb0b82d514da22db48065f9 | /create_index.py | fab09a2f24c5e683db95537aa77063742160ff7d | [
"MIT"
] | permissive | katsby-skye/arxiv-search | ac275d42c2128f693653682cc8dc57a6ef9ccd99 | e5c714b474a2ac5e54452642428cee35e2a0c8b9 | refs/heads/master | 2020-04-28T07:11:50.521900 | 2019-04-18T07:32:47 | 2019-04-18T07:32:47 | 175,083,748 | 2 | 1 | MIT | 2019-04-18T07:32:49 | 2019-03-11T21:06:17 | Python | UTF-8 | Python | false | false | 387 | py | """Use this to initialize the search index for testing."""
import json
import click
from search.factory import create_ui_web_app
from search.services import index
app = create_ui_web_app()
app.app_context().push()
@app.cli.command()
def create_index():
    """Initialize the search index.

    Registered as a Flask CLI command; delegates to the index service's
    current session to create the (empty) index.
    """
    index.current_session().create_index()
if __name__ == '__main__':
create_index()
| [
"brp53@cornell.edu"
] | brp53@cornell.edu |
989e8a3dff4b10f57408941766688533677e929a | 3dff4bef08954fadb7cc83c4f212fffa81b7d27e | /api_site/src/api_x/config/etc/beta.py | 319f422b8199fc39e3ad1d54c83a6af7b95695a7 | [] | no_license | webee/pay | 3ec91cb415d9e3addabe961448533d861c0bd67a | b48c6892686bf3f9014bb67ed119506e41050d45 | refs/heads/master | 2020-04-29T14:31:09.643993 | 2016-02-02T07:14:14 | 2016-02-02T07:14:14 | 176,198,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | # coding=utf-8
class App:
    """Beta-environment settings for the pay API site."""
    TESTING = True
    # Base URL for the API host; the trailing '__' segment is presumably a
    # route prefix placeholder - verify against the URL routing config.
    HOST_URL = 'http://pay.lvye.com/api/__'
    # Checkout page template; '{sn}' is filled with the transaction serial number.
    CHECKOUT_URL = 'http://pay.lvye.com/__/checkout/{sn}'
    # Payment channels treated as test channels in this environment.
    TEST_CHANNELS = {'zyt_sample'}
| [
"yiwang@lvye.com"
] | yiwang@lvye.com |
87b406089d8124860e5df6cee98e2d1dc37d6514 | 92c6aa579d06d3ff58c9e6d8f5cfa696622623f5 | /flask_mysql/server.py | f8769b77b04fb73d9249cabc9e4b6c7b1d20691b | [] | no_license | askrr3/demo | 8ce6cd1cad3192b46a8583776c654ecfe2992c44 | cb7dbebf2c8cd1e363bad134e67db4a87389a0fe | refs/heads/master | 2021-01-18T22:53:26.127132 | 2016-07-20T19:38:50 | 2016-07-20T19:38:50 | 63,808,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py |
# Minimal Flask + MySQL demo script (Python 2 syntax: bare print statement).
from flask import Flask
# import the Connector function
from mysqlconnection import MySQLConnector
app = Flask(__name__)
# connect and store the connection in "mysql" note that you pass the database name to the function
mysql = MySQLConnector(app, 'mydb')
# an example of running a query
print mysql.query_db("SELECT * FROM users")
app.run(debug=True)
"johndoe@example.com"
] | johndoe@example.com |
f252cdd46cf777f6aa3ce61f7216a0d67440387b | dd205a3cd8c457cfee9a1c0c1df2d3ef9d4e69d8 | /easy/jump_cloud_revisit.py | fcfd8fa7a335724596c4d29e857d137c2ceaca81 | [] | no_license | NaNdalal-dev/hacker-rank-problems | c86a2c28979391336517e6c151cbaf57542f9d58 | cce957364c4920af622afea9244b8bcc984deb62 | refs/heads/master | 2023-03-29T19:04:53.720657 | 2021-04-04T14:12:49 | 2021-04-04T14:12:49 | 332,231,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | '''
Jumping on the Clouds: Revisited
https://www.hackerrank.com/challenges/jumping-on-the-clouds-revisited/problem
'''
def jumpingOnClouds(c, k):
    """HackerRank 'Jumping on the Clouds: Revisited'.

    Start at cloud 0 with 100 units of energy and repeatedly jump k clouds
    ahead, wrapping around the circle. Each jump costs 1 energy, plus 2 more
    when landing on a thundercloud (c[i] == 1). Return the energy remaining
    once the game returns to cloud 0.

    Args:
        c: list of 0/1 ints, one per cloud (1 = thundercloud).
        k: jump length.

    Returns:
        Remaining energy (int).
    """
    n = len(c)
    energy = 100
    i = 0
    while True:
        i = (i + k) % n           # land on the next cloud (wrap-around)
        energy -= 1 + 2 * c[i]    # thunderclouds cost 2 extra
        if i == 0:                # back at the start: the trip is over
            return energy
| [
"dnandalal7@gmail.com"
] | dnandalal7@gmail.com |
651f7888323079293dfce203eae4791834c80408 | a394b1053f018ff8be63221c61682df03af4937b | /osf/migrations/0058_merge_20170913_2232.py | dae51095b0957ad3237d3cd6dd2a591ff6789da0 | [
"Apache-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"MIT",
"AGPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | RCOSDP/RDM-osf.io | 81b11d9511f6248ec9bccb6c586b54a58429e1e7 | 5d632eb6d4566d7d31cd8d6b40d1bc93c60ddf5e | refs/heads/develop | 2023-09-01T09:10:17.297444 | 2023-08-28T04:59:04 | 2023-08-28T04:59:04 | 123,298,542 | 12 | 24 | Apache-2.0 | 2023-09-12T08:58:28 | 2018-02-28T14:46:05 | Python | UTF-8 | Python | false | false | 351 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-14 03:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0057_order_fileversion_by_date_created'),
('osf', '0054_add_file_version_indices'),
]
operations = [
]
| [
"sloria1@gmail.com"
] | sloria1@gmail.com |
6f11d373f2b16a3dfaf1af6154056173d748cf2f | 18f7d65bda4e5f55d71f78a69f30dd2e58c2f37b | /script.module.7of9-pirateslife4me/resources/root/webscrapers.py | d6704cf8c6081d12de71be4c8860d27ba6d95f25 | [] | no_license | Iph47/kdc_git_repo | 13700d229436a69b4b22e87668154b783aca05cc | 8d903bb5efa7e189f3faaf7eb1637820148f0985 | refs/heads/master | 2022-11-08T11:30:22.323438 | 2020-06-18T01:11:11 | 2020-06-18T01:11:11 | 273,349,618 | 3 | 0 | null | 2020-06-18T22:10:39 | 2020-06-18T22:10:39 | null | UTF-8 | Python | false | false | 7,451 | py | import xbmc,os
addon_id = 'script.module.7of9-pirateslife4me'
icon = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id, 'icon.png'))
fanart = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id , 'fanart.jpg'))
def cat():
    """Build the top-level Kodi directory: one entry per supported streaming site.

    The second addDir argument is a site tag that get() later dispatches on.
    """
    addDir('[COLOR white][B]Arconaitv.me[/COLOR][/B]','arconaitv',2,'https://pbs.twimg.com/profile_images/590745210000433152/2u_nu2TM.png',fanart,'')
    addDir('[COLOR white][B]Fluxus TV[/COLOR][/B]','fluxustv',2,'https://pbs.twimg.com/profile_images/858019601820467200/FWi_rtsG.jpg',fanart,'')
    addDir('[COLOR white][B]iBrod.tv[/COLOR][/B]','ibrod',2,'https://www.ibrod.tv/images/logo.png',fanart,'')
    addDir('[COLOR white][B]LiveonlineTv247.to[/COLOR][/B]','liveonlinetv',2,'https://lh3.googleusercontent.com/_QDQuHHm1aj1wyBTRVBoemhvttNZ5fF4RhLG4BWoYpx0z69OKsbvg568hxup5oBqsyrJs7XV-w=s640-h400-e365',fanart,'')
    addDir('[COLOR white][B]Mamahd.com[/COLOR][/B]','mamahd',2,'http://www.topmanzana.com/static/mamahd.jpg',fanart,'')
    addDir('[COLOR white][B]Shadownet.ro[/COLOR][/B]','shadownet',2,'https://s4.postimg.org/iy7lkmw8d/logon.png',fanart,'')
    addDir('[COLOR white][B]Ustreamix.com[/COLOR][/B]','ustreamix',2,'https://cdn6.aptoide.com/imgs/a/7/8/a78c34966c4e443e7235d839b5856c0d_icon.png?w=256',fanart,'')
    addDir('[COLOR white][B]Youtube.com[/COLOR][/B]','youtube',2,'https://pbs.twimg.com/profile_images/877566581135597568/PkjTkC0V_400x400.jpg',fanart,'')
def get(url):
    """Dispatch *url* (a site tag produced by cat(), or a prefixed channel
    URL such as 'shadownetchan:<url>') to the matching scraper function."""
    if url == 'shadownet':
        shadownet()
    elif 'shadownetchan:' in url:
        shadownetchannels(url)
    elif url == 'ustreamix':
        ustreamix()
    elif url == 'ibrod':
        ibrod()
    elif url == 'fluxustv':
        fluxustv()
    elif url == 'liveonlinetv':
        liveonlinetv()
    elif url == 'arconaitv':
        arconaitv()
    elif url == 'youtube':
        # Opens the YouTube add-on's live-TV search directly.
        xbmc.executebuiltin('ActivateWindow(Videos,plugin://plugin.video.youtube/kodion/search/query/?event_type=live&q=live%20tv&search_type=video)')
    elif url == 'mamahd':
        mamahd()
    elif url == 'crichd':
        crichd()
def mamahd():
    """List mamahd.com channels, skipping aggregate/placeholder entries."""
    import re
    open = OPEN_URL('http://mamahd.com')  # NOTE(review): local name shadows the builtin open()
    part = regex_from_to(open,'<div class="standard row channels">','</div>')
    regex = re.compile('href="(.+?)".+?src="(.+?)".+?span>(.+?)<',re.MULTILINE|re.DOTALL).findall(part)
    for url,icon,name in regex:
        # Filter out non-channel entries (event streams, league pages, etc.).
        if not 'Stream' in name:
            if not 'Bundesliga' in name:
                if not 'Channel' in name:
                    if not 'HD ' in name:
                        addDir(name,url,10,icon,fanart,'')
def arconaitv():
    """List arconaitv.me channels from the 'cable' section of the home page."""
    url = 'https://www.arconaitv.me'
    page = OPEN_URL(url)
    part = regex_from_to(page,'id="cable">','id="donate">')
    all_vids=regex_get_all(part,"div class='box-content'",'</a>')
    for a in all_vids:
        url = regex_from_to(a,"href='","'")
        name = regex_from_to(a,"title='","'").replace('#038;','')
        if not url=='https://www.arconaitv.me/':
            # Skip the alphabetical index links; keep only real channel entries.
            if not name == 'A-E':
                if not name == 'F-J':
                    if not name == 'K-O':
                        if not name == 'P-T':
                            if not name == 'U-Z':
                                addDir('[B][COLOR white]%s[/COLOR][/B]'%name,'https://www.arconaitv.me/'+url,10,icon,fanart,'')
def liveonlinetv():
    """List liveonlinetv247 channels; items carry a 'liveonlinetv247:' prefix
    so the resolver can recognize them later."""
    open = OPEN_URL('http://liveonlinetv247.info/tvchannels.php')
    all = regex_get_all(open,'<li>','</li>')
    for a in all:
        name = regex_from_to(a,'">','<')
        url = regex_from_to(a,'href=".*?channel=','"')
        # Skip sports-event placeholder entries.
        if not 'Live' in name:
            if not 'UEFA' in name:
                if not 'Barclays Premier League' in name:
                    if not 'IPL' in name:
                        addDir('[B][COLOR white]%s[/COLOR][/B]'%name,'liveonlinetv247:'+url,10,icon,fanart,'')
def fluxustv():
    """List channels from the Fluxus TV public M3U playlist on GitHub."""
    import re
    open = OPEN_URL('https://raw.githubusercontent.com/fluxustv/IPTV/master/list.m3u')
    # Each playlist entry looks like '#EXTINF:...,<name>\n<stream-url>\n'.
    regex = re.compile('#EXTINF:.+?\,(.+?)\n(.+?)\n', re.MULTILINE|re.DOTALL).findall(open)
    for name,url in regex:
        addDir('[B][COLOR white]%s[/COLOR][/B]'%name,url,10,icon,fanart,'')
def ibrod():
    """List ibrod.tv channels from its channel index page."""
    open = OPEN_URL('https://www.ibrod.tv/tvchans.php')
    all = regex_get_all(open,'<li> <span>','</a></li>')
    for a in all:
        name = regex_from_to(a,'</span> <span>','</span>')
        url = regex_from_to(a,'href="','"')
        addDir('[B][COLOR white]%s[/COLOR][/B]'%name,'http://www.ibrod.tv/'+url,10,'https://www.ibrod.tv/images/logo.png',fanart,'')
def shadownet():
    """List shadownet.me categories; each is tagged 'shadownetchan:<url>' so
    get() routes it to shadownetchannels()."""
    open = OPEN_URL('http://www.shadownet.me')
    part = regex_from_to(open,'id="SideCategoryList">','class="afterSideCategoryList">')
    all = regex_get_all(part,'<li class="">','</a>')
    for a in all:
        name = regex_from_to(a,'/">','<').replace('amp;','')
        url = regex_from_to(a,'href="','"')
        addDir('[B][COLOR white]%s[/COLOR][/B]'%name,'shadownetchan:' + url,2,icon,fanart,'')
def shadownetchannels(url):
    """List the channels of one shadownet category page, with pagination.

    *url* carries the 'shadownetchan:' prefix added by shadownet(); a
    'Next Page' entry is appended when the page links to one.
    """
    import urllib
    url = (url).replace('shadownetchan:','')
    open = OPEN_URL(url)
    part = regex_from_to(open,'id="CategoryContent">','<br class="Clear" />')
    all = regex_get_all(part,'<div class="ProductImage">','</li>')
    for a in all:
        name = regex_from_to(a,'alt="','"')
        url1 = regex_from_to(a,'href="','"')
        icon = regex_from_to(a,'img src="','"')
        addDir('[B][COLOR white]%s[/COLOR][/B]'%name,url1,10,icon,fanart,name)
    try:
        # The 'FloatRight' link, when present, points to the next result page.
        np = regex_from_to(open,'<div class="FloatRight"><a href="','"')
        addDir('[COLOR red][B]Next Page >[/COLOR][/B]','shadownetchan:'+urllib.quote_plus(np),2,icon,fanart,'')
    except:
        pass
def ustreamix():
    """List ustreamix.com channels alphabetically."""
    open = OPEN_URL('http://v2.ustreamix.com')
    # NOTE(review): debug leftover - fetches an unrelated page and dumps it
    # to log.txt on every listing; confirm it can be removed.
    t = OPEN_URL('http://www.newtvworld.com/livetv/india/DiscoveryChannel.html')
    log(t)
    all = regex_get_all(open,'<p><a','</a>')
    for a in sorted(all):
        name = regex_from_to(a,'target="_blank">','<')
        url = regex_from_to(a,'href="','"')
        addDir('[B][COLOR white]%s[/COLOR][/B]'%name,'http://v2.ustreamix.com'+url,10,icon,fanart,'')
logfile = xbmc.translatePath(os.path.join('special://home/addons/script.module.7of9-pirateslife4me', 'log.txt'))
def log(text):
    """Overwrite the add-on's log.txt with *text* (coerced to str).

    Uses a context manager so the handle is flushed and closed; the original
    opened the file and never closed it, leaking the handle.
    """
    with open(logfile, "w+") as handle:
        handle.write(str(text))
######################################################################################################
def regex_from_to(text, from_string, to_string, excluding=True):
    """Extract the first case-insensitive span of *text* delimited by
    *from_string* and *to_string* (both interpreted as regex fragments).

    Returns the text between the delimiters when excluding=True, the whole
    match (delimiters included) otherwise, and '' when nothing matches or
    the assembled pattern is invalid.
    """
    import re
    # Raw strings avoid the invalid '\S'/'\s' escape warnings of the original;
    # the unused 'string' import is dropped.
    if excluding:
        pattern = "(?i)" + from_string + r"([\S\s]+?)" + to_string
    else:
        pattern = "(?i)(" + from_string + r"[\S\s]+?" + to_string + ")"
    try:
        # search() returns None on no match; .group() then raises AttributeError.
        return re.search(pattern, text).group(1)
    except (AttributeError, re.error):
        return ''
def regex_get_all(text, start_with, end_with):
    """Return every non-overlapping, case-insensitive span of *text* running
    from *start_with* to *end_with*, delimiters included.

    Both delimiters are treated as regex fragments. Returns [] when nothing
    matches. (The original's unused 'string' import is dropped.)
    """
    import re
    return re.findall("(?i)(" + start_with + r"[\S\s]+?" + end_with + ")", text)
def OPEN_URL(url):
    """GET *url* with a desktop Chrome User-Agent and return the body with
    all non-ASCII characters dropped.

    NOTE(review): on Python 3, .encode() returns bytes - presumably this
    add-on runs on Kodi's Python 2, where the result is a str; verify.
    """
    import requests
    headers = {}
    headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
    # NOTE(review): verify=False disables TLS certificate checking - confirm
    # this is intentional for these scrape targets.
    link = requests.session().get(url, headers=headers, verify=False).text
    link = link.encode('ascii', 'ignore')
    return link
def addDir(name,url,mode,iconimage,fanart,description):
    """Append one entry to the Kodi plugin directory listing.

    Builds a plugin:// callback URL carrying url/mode/name/icon/description.
    mode 7 marks the item as a directly playable stream; any other mode
    produces a folder entry. (urllib.quote_plus implies Python 2.)
    """
    import xbmcgui,xbmcplugin,urllib,sys
    u=sys.argv[0]+"?url="+url+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&description="+urllib.quote_plus(description)
    ok=True
    liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo( type="Video", infoLabels={"Title": name,"Plot":description})
    liz.setProperty('fanart_image', fanart)
    if mode==7:
        liz.setProperty("IsPlayable","true")
        ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
    else:
        ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
    return ok
xbmcplugin.endOfDirectory | [
"36279834+tox303@users.noreply.github.com"
] | 36279834+tox303@users.noreply.github.com |
0d2ba5f81ca457b54a1682575851fcf8f6ed0ae9 | e97c5e5beb22444b7eabd743a35493ab6fd4cb2f | /libs/log.py | e05a450020120351fef15832250286ba1117b8fc | [
"BSD-2-Clause-Patent"
] | permissive | greenelab/phenoplier | bea7f62949a00564e41f73b361f20a08e2e77903 | b0e753415e098e93a1f206bb90b103a97456a96f | refs/heads/main | 2023-08-23T20:57:49.525441 | 2023-06-15T06:00:32 | 2023-06-22T16:12:37 | 273,271,013 | 5 | 2 | NOASSERTION | 2023-06-20T20:35:45 | 2020-06-18T15:13:58 | Jupyter Notebook | UTF-8 | Python | false | false | 590 | py | """
Provides logging functions.
"""
import logging
import logging.config
import yaml
import conf
def _get_logger_config():
    """Reads the logging config file in YAML format.

    Returns the parsed configuration (a dict suitable for
    logging.config.dictConfig); the path comes from conf.GENERAL.
    """
    with open(conf.GENERAL["LOG_CONFIG_FILE"], "r") as f:
        return yaml.safe_load(f.read())
logging.config.dictConfig(_get_logger_config())
def get_logger(log_name: str = None) -> logging.Logger:
    """
    Returns a Logger instance.

    Args:
        log_name: logger name; None yields the root logger.

    Returns:
        A Logger instance configured with default settings.
    """
    logger = logging.getLogger(log_name)
    return logger
| [
"miltondp@gmail.com"
] | miltondp@gmail.com |
eab3ff73caf97a4d6ce6cb79b317e5aaa74dd265 | 3d1a8ccef4153b6154c0aa0232787b73f45137ba | /services/customer/db.py | 8e5a6ce81812465a01743fb386158c3603de9757 | [] | no_license | jan25/hotrod-python | a0527930b2afc33ca3589c1cf7ae07814148535a | dbce7df1bc2d764351dd2ba1122078fc525caed7 | refs/heads/master | 2020-06-03T14:59:35.627093 | 2019-06-22T16:52:19 | 2019-06-22T16:52:19 | 191,616,546 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from .client import Customer
# In-memory fixture data: known customers keyed by string id. "location" is
# a "x,y" string - presumably grid coordinates consumed by the demo's route
# service; verify against the client code.
customers = {
    "123": Customer(id="123", name="Rachel's Floral Designs", location="115,277"),
    "567": Customer(id="567", name="Amazing Coffee Roasters", location="211,653"),
    "392": Customer(id="392", name="Trom Chocolatier", location="577,322"),
    "731": Customer(id="731", name="Japanese Deserts", location="728,326")
}
def get_customer_by_id(customer_id):
    """Return the Customer for *customer_id*, or None when unknown.

    dict.get collapses the original membership test + lookup while keeping
    the implicit-None contract for missing ids.
    """
    return customers.get(customer_id)
| [
"abhilashgnan@gmail.com"
] | abhilashgnan@gmail.com |
8d00ae753d8fc0aca6f67ec1de5de4edebd5cbf2 | ca539b0df7ca5a91f80b2e2f64e7379e69243298 | /312.py | 50ff4fd4692929c4fe89ae5e5d19a20f67879cb1 | [] | no_license | yorick76ee/leetcode | 9a9e5d696f3e32d9854c2ed9804bd0f98b03c228 | d9880892fe15f9bb2916beed3abb654869945468 | refs/heads/master | 2020-03-18T22:59:29.687669 | 2016-07-18T19:56:55 | 2016-07-18T19:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | class Solution(object):
def h(self, nums, l, r, d):
if (l,r) not in d:
d[(l,r)] = 0
for i in range(l, r+1):
d[(l,r)] = max(d[(l,r)], self.h(nums, l, i-1, d) + self.h(nums, i+1, r, d) + nums[i]*nums[l-1]*nums[r+1])
if l == 1 and r == 4:
print i,':',d[(l,r)]
return d[(l,r)]
def maxCoins(self, nums):
return self.h([1]+nums+[1], 1, len(nums), {})
if __name__ == '__main__':
    # Parenthesized print works on both Python 2 and 3; the original bare
    # "print expr" statement is a syntax error on Python 3.
    wds = Solution()
    print(wds.maxCoins([3, 1, 5, 8]))
| [
"641614152@qq.com"
] | 641614152@qq.com |
e7fce07a2a9175afc2ab1e85bb8ee860ae8f77dd | d755aaaf905b48baf31aa90332f03c45f4c8dad3 | /tests/test_utils.py | 494f1115abbc6feaa46c734931099119ef47fe06 | [
"Apache-2.0"
] | permissive | django-ftl/fluent-compiler | 36ffe0c76678e82f4f15bbccef057c8e4cd0e6bc | d8f19b47161788fbdea9822b130ef136fb839540 | refs/heads/master | 2023-08-08T04:16:52.368218 | 2023-07-21T10:37:17 | 2023-07-21T10:37:17 | 248,319,322 | 20 | 1 | NOASSERTION | 2023-04-18T16:14:05 | 2020-03-18T19:04:42 | Python | UTF-8 | Python | false | false | 1,560 | py | import unittest
from fluent_compiler.errors import FluentFormatError
from fluent_compiler.utils import Any, inspect_function_args
class TestInspectFunctionArgs(unittest.TestCase):
    """Tests for fluent_compiler.utils.inspect_function_args.

    From these cases, inspect_function_args apparently returns a pair
    (positional_arg_count, keyword_arg_names), using the sentinel Any when a
    count/list is unbounded (*args / **kwargs), and appends a
    FluentFormatError to the errors list for invalid keyword names.
    """
    def test_inspect_function_args_positional(self):
        self.assertEqual(inspect_function_args(lambda: None, "name", []), (0, []))
        self.assertEqual(inspect_function_args(lambda x: None, "name", []), (1, []))
        self.assertEqual(inspect_function_args(lambda x, y: None, "name", []), (2, []))

    def test_inspect_function_args_var_positional(self):
        self.assertEqual(inspect_function_args(lambda *args: None, "name", []), (Any, []))

    def test_inspect_function_args_keywords(self):
        self.assertEqual(inspect_function_args(lambda x, y=1, z=2: None, "name", []), (1, ["y", "z"]))

    def test_inspect_function_args_var_keywords(self):
        self.assertEqual(inspect_function_args(lambda x, **kwargs: None, "name", []), (1, Any))

    def test_inspect_function_args_var_positional_plus_keywords(self):
        self.assertEqual(inspect_function_args(lambda x, y=1, *args: None, "name", []), (Any, ["y"]))

    def test_inspect_function_args_bad_keyword_args(self):
        def foo():
            pass

        # ftl_arg_spec lets a function declare its FTL-visible signature;
        # names with spaces are invalid and must be reported + dropped.
        foo.ftl_arg_spec = (0, ["bad kwarg", "good", "this-is-fine-too"])
        errors = []
        self.assertEqual(inspect_function_args(foo, "FOO", errors), (0, ["good", "this-is-fine-too"]))
        self.assertEqual(
            errors,
            [FluentFormatError("FOO() has invalid keyword argument name 'bad kwarg'")],
        )
| [
"L.Plant.98@cantab.net"
] | L.Plant.98@cantab.net |
b9881ef2ec773ce08737733c455d00c4e0f5a07e | bcf0e03ebd7e55588dcf48ab5d990534f8d9ab0c | /CodeChef/Archive 2019/dijikstra algo.py | 8a5245f7d11e94a49ff2d6015c4aea7afa726d55 | [] | no_license | nsky80/competitive_programming | 731321aaf42d9ae546f1d13bbb05215a1fbcfe45 | 9b0c0ffccf092d4d4bbf50cac1746f44dd977d57 | refs/heads/master | 2022-02-06T11:58:44.313635 | 2022-01-30T09:20:15 | 2022-01-30T09:20:15 | 199,516,791 | 1 | 2 | null | 2022-01-30T09:20:16 | 2019-07-29T19:43:17 | Python | UTF-8 | Python | false | false | 2,697 | py | # Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
# Library for INT_MAX
import sys
class Graph():
    """Dijkstra single-source shortest paths on a dense adjacency matrix.

    A weight of 0 in self.graph means "no edge"; vertices are 0..V-1.

    Fixes over the original: sys.maxint does not exist on Python 3 (the file
    already uses print() calls) -> sys.maxsize; the mangled "t" in the output
    strings is restored to a real tab; disconnected graphs no longer crash on
    an unbound min_index; dijkstra() now also returns the distance list
    (callers ignoring the previous None return are unaffected).
    """

    def __init__(self, vertices):
        self.V = vertices
        # V x V adjacency matrix, initially edge-free.
        self.graph = [[0 for column in range(vertices)]
                      for row in range(vertices)]

    def printSolution(self, dist):
        """Print each vertex and its distance from the source."""
        print("Vertex \tDistance from Source")
        for node in range(self.V):
            print(node, "\t", dist[node])

    # A utility function to find the vertex with minimum distance value,
    # from the set of vertices not yet included in shortest path tree.
    def minDistance(self, dist, sptSet):
        """Return the unfinalized vertex with the smallest tentative
        distance, or -1 when no unfinalized vertex is reachable."""
        min_value = sys.maxsize
        min_index = -1  # sentinel: nothing reachable remains
        for v in range(self.V):
            if dist[v] < min_value and sptSet[v] == False:
                min_value = dist[v]
                min_index = v
        return min_index

    def dijkstra(self, src):
        """Run Dijkstra from *src*; print and return the distance list.

        Vertices unreachable from src keep the sentinel sys.maxsize.
        """
        dist = [sys.maxsize] * self.V
        dist[src] = 0
        sptSet = [False] * self.V
        for _ in range(self.V):
            # Pick the closest vertex not yet in the shortest-path tree.
            u = self.minDistance(dist, sptSet)
            if u == -1:
                # Every remaining vertex is unreachable from src.
                break
            sptSet[u] = True
            # Relax edges out of u for vertices still outside the tree.
            for v in range(self.V):
                if (self.graph[u][v] > 0 and sptSet[v] == False
                        and dist[v] > dist[u] + self.graph[u][v]):
                    dist[v] = dist[u] + self.graph[u][v]
        self.printSolution(dist)
        return dist
# Driver program
g = Graph(9)
g.graph = [ [ 0, 4, 0, 0, 0, 0, 0, 8, 0 ],
[ 4, 0, 8, 0, 0, 0, 0, 11, 0 ],
[ 0, 8, 0, 7, 0, 4, 0, 0, 2 ],
[ 0, 0, 7, 0, 9, 14, 0, 0, 0 ],
[ 0, 0, 0, 9, 0, 10, 0, 0, 0 ],
[ 0, 0, 4, 14, 10, 0, 2, 0, 0 ],
[ 0, 0, 0, 0, 0, 2, 0, 1, 6 ],
[ 8, 11, 0, 0, 0, 0, 1, 0, 7 ],
[ 0, 0, 2, 0, 0, 0, 6, 7, 0 ]
]
g.dijkstra(0)
# This code is contributed by Divyanshu Mehta
| [
"satishkumary80@gmail.com"
] | satishkumary80@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.