Dataset schema (one row per source file):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
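A minimal sketch of consuming rows with this schema, assuming the table is stored as a local Parquet file readable with pandas (the file name and storage format are assumptions; this dump does not specify them):

```python
import pandas as pd

# Hypothetical file name; the dump does not name its source file.
df = pd.read_parquet("rows.parquet")

# Keep permissively licensed, human-written (non-vendored, non-generated)
# files, using the boolean and class columns described above.
mask = (
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
)
for _, row in df[mask].head(3).iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
```

The rows below follow this schema; each `content` cell holds the full text of one Python file.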
411d1b5d5d006f9c41b1c82bed003b39f7fba6ac
|
27acd9eeb0d2b9b6326cc0477e7dbb84341e265c
|
/test/vraag4/src/isbn/222.py
|
fd40dde79d0542bab2d8bd49e8cc487684633488
|
[] |
no_license
|
VerstraeteBert/algos-ds
|
e0fe35bc3c5b7d8276c07250f56d3719ecc617de
|
d9215f11cdfa1a12a3b19ade3b95fa73848a636c
|
refs/heads/master
| 2021-07-15T13:46:58.790446
| 2021-02-28T23:28:36
| 2021-02-28T23:28:36
| 240,883,220
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
def isISBN_13(code):
if len(code) != 13:
return False
if code[:3] != "978" and code[:3] != "979":
return False
even = code[::2]
oneven = code[1::2]
even_som = 0
oneven_som = 0
for i in range(6):
cijfer = int(even[i])
even_som += cijfer
cijfer = int(oneven[i])
oneven_som += cijfer
controle = (10 - (even_som + 3 * oneven_som) % 10) % 10
return controle == int(even[6])
def overzicht(codes):
types = ["Engelstalige landen", "Franstalige landen", "Duitstalige landen", "Japan", "Russischtalige landen",
"China", "Overige landen", "Fouten"]
lijst = {}
for soort in types:
lijst[soort] = 0
for code in codes:
if not isISBN_13(code):
lijst["Fouten"] += 1
else:
nr = code[3]
if nr == "0":
nr = "1"
elif nr in "689":
nr = "7"
elif nr == "7":
nr = "6"
soort = types[int(nr) - 1]
lijst[soort] += 1
for el in lijst:
print("{}: {}".format(el, lijst[el]))
|
[
"bertverstraete22@gmail.com"
] |
bertverstraete22@gmail.com
|
68166f1c54bc0727d4ea84555b656e8b4fc72753
|
a5200ba8b1d2b248c7c7bef5704c7e375efc1c2a
|
/exp_configs.py
|
c09ecd223c6f468e105e0d7348bd4b1cfa3bf410
|
[] |
no_license
|
hongyunnchen/sps
|
e0c958dadca2a60b0e8d797d8e786f88669cf5c7
|
4ddb3567f9a1893685ea161e2b1d7ba3cb3a1fe3
|
refs/heads/master
| 2023-02-26T06:36:41.462069
| 2021-02-09T12:22:09
| 2021-02-09T12:22:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,448
|
py
|
from haven import haven_utils as hu
import itertools
# datasets
kernel_datasets = ["mushrooms",
# "w8a", "ijcnn",
# "rcv1"
]
# define runs
run_list = [0]
# define optimizers
c_list = [0.2]
sps_list = []
for c, adapt_flag in itertools.product(c_list, ['smooth_iter']):
sps_list += [{'name':"sps", "c":c, 'adapt_flag':adapt_flag}]
opt_list = sps_list + [{'name': 'adam'}]
EXP_GROUPS = {}
# define interpolation exp groups
EXP_GROUPS['kernel'] = hu.cartesian_exp_group({"dataset":kernel_datasets,
"model":["linear"],
"loss_func": ['logistic_loss'],
"acc_func": ["logistic_accuracy"],
"opt": opt_list ,
"batch_size":[100],
"max_epoch":[35],
"runs":run_list})
EXP_GROUPS['mf'] = hu.cartesian_exp_group({"dataset":["matrix_fac"],
"model":["matrix_fac_1", "matrix_fac_4", "matrix_fac_10", "linear_fac"],
"loss_func": ["squared_loss"],
"opt": opt_list,
"acc_func":["mse"],
"batch_size":[100],
"max_epoch":[50],
"runs":run_list})
EXP_GROUPS['mnist'] = hu.cartesian_exp_group({"dataset":["mnist"],
"model":["mlp"],
"loss_func": ["softmax_loss"],
"opt":[{'name':"sps", "c":c,
'adapt_flag':'smooth_iter',
'centralize_grad':True}] + opt_list,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":run_list})
EXP_GROUPS['deep'] = (hu.cartesian_exp_group({"dataset":["cifar10"],
"model":["resnet34", "densenet121"],
"loss_func": ["softmax_loss"],
"opt": opt_list,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":run_list}) +
hu.cartesian_exp_group({"dataset":["cifar100"],
"model":["resnet34_100", "densenet121_100"],
"loss_func": ["softmax_loss"],
"opt": opt_list,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":run_list})
)
EXP_GROUPS['cifar'] = hu.cartesian_exp_group({"dataset":["cifar10"],
"model":["resnet34"],
"loss_func": ["softmax_loss"],
"opt": opt_list + [{'name':"sps", "c":c,
'adapt_flag':'smooth_iter',
'centralize_grad':True}] ,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":[0]})
# define non-interpolation exp groups
eta_max_list = [1, 5, 100]
c_list = [0.5]
sps_l2_list = []
for c, eta_max in itertools.product(c_list, eta_max_list):
sps_l2_list += [{'name':"sps", "c":c,
'fstar_flag':True, 'eps':0,
'adapt_flag':'constant',
'eta_max':eta_max}]
sps_list = []
for c, eta_max in itertools.product(c_list, eta_max_list):
sps_list += [{'name':"sps", "c":c,
'fstar_flag':False, 'eps':0,
'adapt_flag':'constant',
'eta_max':eta_max}]
sgd_list = [{'name':"sgd",
"lr":10.0},{'name':"sgd",
"lr":1.0}, {'name':"sgd",
"lr":1e-3}, {'name':"sgd",
"lr":1e-1}, {'name':"sgd",
"lr":1e-2}]
EXP_GROUPS['syn_l2'] = (hu.cartesian_exp_group({"dataset":['syn'],
"model":["logistic"],
"loss_func": [
'logistic_l2_loss',
],
"acc_func": ["logistic_accuracy"],
"opt": sps_l2_list + sgd_list,
"batch_size":[1],
"max_epoch":[50],
"runs":run_list}))
EXP_GROUPS['syn'] = (hu.cartesian_exp_group({"dataset":['syn'],
"model":["logistic"],
"loss_func": [
'logistic_loss',
],
"acc_func": ["logistic_accuracy"],
"opt": sps_list + sgd_list,
"batch_size":[1],
"max_epoch":[50],
"runs":run_list}))
|
[
"issam.laradji@gmail.com"
] |
issam.laradji@gmail.com
|
5fbdd4faeaa02752c91f94d6860761a2dfb07bac
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/datashare/v20181101preview/get_adls_gen2_file_system_data_set.py
|
6a6d525b551e637a119b660529a00a01bd625ef0
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,350
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetADLSGen2FileSystemDataSetResult',
'AwaitableGetADLSGen2FileSystemDataSetResult',
'get_adls_gen2_file_system_data_set',
'get_adls_gen2_file_system_data_set_output',
]
@pulumi.output_type
class GetADLSGen2FileSystemDataSetResult:
"""
An ADLS Gen 2 file system data set.
"""
def __init__(__self__, data_set_id=None, file_system=None, id=None, kind=None, name=None, resource_group=None, storage_account_name=None, subscription_id=None, type=None):
if data_set_id and not isinstance(data_set_id, str):
raise TypeError("Expected argument 'data_set_id' to be a str")
pulumi.set(__self__, "data_set_id", data_set_id)
if file_system and not isinstance(file_system, str):
raise TypeError("Expected argument 'file_system' to be a str")
pulumi.set(__self__, "file_system", file_system)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if storage_account_name and not isinstance(storage_account_name, str):
raise TypeError("Expected argument 'storage_account_name' to be a str")
pulumi.set(__self__, "storage_account_name", storage_account_name)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataSetId")
def data_set_id(self) -> str:
"""
Unique id for identifying a data set resource
"""
return pulumi.get(self, "data_set_id")
@property
@pulumi.getter(name="fileSystem")
def file_system(self) -> str:
"""
The file system name.
"""
return pulumi.get(self, "file_system")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id of the azure resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of data set.
Expected value is 'AdlsGen2FileSystem'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Resource group of storage account
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> str:
"""
Storage account name of the source data set
"""
return pulumi.get(self, "storage_account_name")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> str:
"""
Subscription id of storage account
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
class AwaitableGetADLSGen2FileSystemDataSetResult(GetADLSGen2FileSystemDataSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetADLSGen2FileSystemDataSetResult(
data_set_id=self.data_set_id,
file_system=self.file_system,
id=self.id,
kind=self.kind,
name=self.name,
resource_group=self.resource_group,
storage_account_name=self.storage_account_name,
subscription_id=self.subscription_id,
type=self.type)
def get_adls_gen2_file_system_data_set(account_name: Optional[str] = None,
data_set_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetADLSGen2FileSystemDataSetResult:
"""
An ADLS Gen 2 file system data set.
:param str account_name: The name of the share account.
:param str data_set_name: The name of the dataSet.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['dataSetName'] = data_set_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareName'] = share_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:datashare/v20181101preview:getADLSGen2FileSystemDataSet', __args__, opts=opts, typ=GetADLSGen2FileSystemDataSetResult).value
return AwaitableGetADLSGen2FileSystemDataSetResult(
data_set_id=__ret__.data_set_id,
file_system=__ret__.file_system,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
resource_group=__ret__.resource_group,
storage_account_name=__ret__.storage_account_name,
subscription_id=__ret__.subscription_id,
type=__ret__.type)
@_utilities.lift_output_func(get_adls_gen2_file_system_data_set)
def get_adls_gen2_file_system_data_set_output(account_name: Optional[pulumi.Input[str]] = None,
data_set_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetADLSGen2FileSystemDataSetResult]:
"""
An ADLS Gen 2 file system data set.
:param str account_name: The name of the share account.
:param str data_set_name: The name of the dataSet.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
"""
...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
1f9ca65ce07629f7f3f5c41490cfa08c638c7723
|
6ac3e509c9d848497a7cb0f79008ec1f395f3aad
|
/Phone-Numbers/freecarrierlookup/freecarrierlookup/__main__.py
|
e28a3cacc22b778073bda4d6b71388e5f2893fbf
|
[] |
no_license
|
WeilerWebServices/Scrapers
|
a87ca6c0fd719639be831623b2b55183932d8fba
|
206ea9adf48e9b882a2d62df691185609483f9d0
|
refs/heads/master
| 2022-11-30T10:46:09.731660
| 2020-08-04T16:07:19
| 2020-08-04T16:07:19
| 273,375,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,091
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import time
import csv
from sys import stderr, stdout
try:
import phonenumbers
except ImportError:
phonenumbers = None
from . import FreeCarrierLookup
########################################
# Parse arguments
p = argparse.ArgumentParser(description='Lookup carrier information using FreeCarrierLookup.com')
if phonenumbers:
p.add_argument('phone_number', nargs='+', type=str.strip, help='Phone number to lookup')
p.add_argument('--region', default='US', help='libphonenumbers dialing region (default %(default)r)')
x = p.add_mutually_exclusive_group()
x.add_argument('--cc', type=str.strip,
help='Default country code (if none, all numbers must be in E.164 format)')
x.add_argument('-E', '--assume-e164', action='store_true',
help="Assume E.164 format even if leading '+' not present")
else:
p.description += '''; phonenumbers module not available (https://github.com/daviddrysdale/python-phonenumbers), so country code must be explicitly specified.'''
p.add_argument('phone_number', nargs='+', type=str.strip,
help='Phone number to lookup (without country code)')
p.add_argument('--cc', type=str.strip, required=True,
help='Country code for all numbers')
p.add_argument('-o','--output', type=argparse.FileType('w'), default=stdout, help='Output file (default is stdout)')
p.add_argument('-c','--csv', action='store_true', help='Output results in CSV format')
p.add_argument('-u', '--user-agent', help="User-Agent string (default is none)")
p.add_argument('-r', '--rate-limit', type=int, help="Rate limit in seconds per query (default is none)")
p.add_argument('--proxy', help='HTTPS proxy (in any format accepted by python-requests, e.g. socks5://localhost:8080)')
args = p.parse_args()
fcl = FreeCarrierLookup(args.user_agent)
csvwr = None
if args.proxy:
fcl.session.proxies['https'] = args.proxy
# Lookup phone numbers' carriers
rate_allow = None
for pn in args.phone_number:
if phonenumbers:
# parse into country code and "national number" with phonenumbers
if not pn.startswith('+'):
if args.cc: pn = '+%s %s' % (args.cc, pn)
elif args.assume_e164: pn = '+' + pn
try:
obj = phonenumbers.parse(pn, region=args.region)
cc, phonenum = obj.country_code, ('0'*(obj.number_of_leading_zeros or obj.italian_leading_zero or 0)) + str(obj.national_number)
except phonenumbers.NumberParseException as e:
print("WARNING: Could not parse %r with phonenumbers: %s" % (pn, ' '.join(e.args)), file=stderr)
continue
else:
# use country code and phone number as-is
if pn.startswith('+'):
print("WARNING: Skipping %r, which has an E.164 country code prefix (can't parse without phonenumbers module)" % pn, file=stderr)
continue
cc, phonenum = args.cc, ''.join(filter(str.isdigit, pn))
# Request (web interface includes test=456 and sessionlogin=0, but they don't seem to be required)
if args.rate_limit:
now = time.time()
if rate_allow and now < rate_allow: time.sleep(rate_allow - now)
rate_allow = time.time() + args.rate_limit
retry = True
while retry:
retry = False
try:
im, prompt = fcl.get_captcha()
captcha = None
if prompt:
print("CAPTCHA prompt: %s" % prompt, file=stderr)
captcha = input("CAPTCHA response (leave blank to show image)? ")
else:
print("Couldn't parse CAPTCHA prompt, showing image", file=stderr)
if not captcha:
im.show()
captcha = input("CAPTCHA response? ")
results = fcl.lookup(cc, phonenum, captcha)
except RuntimeError as e:
status, strings = e.args
if status == 'error' and 'quota' in strings[0].lower():
p.error('exceeded quota')
elif status == 'error' and 'captcha' in strings[0].lower():
print('Incorrect CAPTCHA response. Retry with new CAPTCHA', file=stderr)
retry = True
else:
print('%s received for +%s %s: %s' % (status.title(), cc, phonenum, ' '.join(strings)), file=stderr)
except Exception as e:
p.error('\n'.join(map(str, e.args)))
else:
if args.csv:
if csvwr is None:
csvwr = csv.writer(args.output)
csvwr.writerow(('Country Code', 'Phone Number', 'Carrier', 'Is Wireless', 'SMS Gateway Address', 'MMS Gateway Address', 'Note', 'Extra'))
csvwr.writerow((cc, phonenum, results.pop('Carrier', None), results.pop('Is Wireless', None), results.pop('SMS Gateway Address',None), results.pop('MMS Gateway Address',None), results.pop('Note',None), results or None))
else:
print('+%s %s: %s' % (cc, phonenum, results), file=args.output)
p.exit()
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
50ffb4956388901f75c430e335b4c03a8493463b
|
d748710c6c5fa0f61b5bd6c2ec849d9250428811
|
/demo1/client_python/test/test_format.py
|
a03e8a252d7c7c1a5ae10dea8b78b8c22f086cd7
|
[] |
no_license
|
stefan2904/aries-experiments
|
9f4dab2d0711b76557e3d6ae8e5a27e532102685
|
46f31ee62cf951da2696e5ca4e6dc1d3d753743d
|
refs/heads/main
| 2023-03-23T00:06:06.362992
| 2021-03-18T12:56:58
| 2021-03-18T12:56:58
| 329,986,417
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
# coding: utf-8
"""
(Aries Agent REST Server) of VC4SM University.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.format import Format # noqa: E501
from swagger_client.rest import ApiException
class TestFormat(unittest.TestCase):
"""Format unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFormat(self):
"""Test Format"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.format.Format() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"dev@2904.cc"
] |
dev@2904.cc
|
4befa60f75f65fc7d117fd7196c46db4398c2c4c
|
f99cca94f74c69bc518e298c14140534e18eabd3
|
/OrcApi/start_report.py
|
efcbc2ca473ffe5c686fdeb3c906e7f559f6ecab
|
[] |
no_license
|
pubselenium/OrcTestToolsKit
|
d6d838d9937d2c4d86941e317cb3ff096b58e52d
|
f3ccbbceaed4f4996f6907a2f4880c2fd3f82bbb
|
refs/heads/master
| 2021-04-29T05:15:53.240714
| 2016-12-30T09:42:53
| 2016-12-30T09:42:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
# coding=utf-8
import sys
from flask import make_response
from OrcLib import init_log
from OrcLib import get_config
from OrcApi import app
from OrcApi import orc_api
from OrcApi.Run.ReportApi import ReportDetAPI
configer = get_config("server")
@app.after_request
def after_request(response):
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@orc_api.representation("text/html")
def out_html(data, code, headers=None):
resp = make_response(data, code)
resp.headers.extend(headers or {})
return resp
# Widget
orc_api.add_resource(ReportDetAPI, '/api/1.0/Report/<string:p_id>/<string:p_time>', endpoint='Report')
driver_host = configer.get_option("REPORT", "ip")
driver_port = configer.get_option("REPORT", "port")
if sys.version_info[0] == 2:
    reload(sys)  # Python 2 idiom; reload() is not a builtin on Python 3
init_log()
app.run(host=driver_host, port=driver_port)
|
[
"orange21cn@126.com"
] |
orange21cn@126.com
|
53cde0b836010d45228fa1c3b0df4ed331fc4563
|
0bde5f7f09aa537ed1f4828d4e5ebee66475918f
|
/h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_split_frame.py
|
7b6184fc5c776b24ad32cc3e4da703f2af126c3c
|
[
"Apache-2.0"
] |
permissive
|
Winfredemalx54/h2o-3
|
d69f1c07e1f5d2540cb0ce5e6073415fa0780d32
|
dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7
|
refs/heads/master
| 2022-12-14T08:59:04.109986
| 2020-09-23T08:36:59
| 2020-09-23T08:36:59
| 297,947,978
| 2
| 0
|
Apache-2.0
| 2020-09-23T11:28:54
| 2020-09-23T11:28:54
| null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import numpy as np
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_split_frame():
"""
Python API test: h2o.frame.H2OFrame.split_frame(ratios=None, destination_frames=None, seed=None)
"""
python_lists = np.random.uniform(-1,1, (10000,2))
h2oframe = h2o.H2OFrame(python_obj=python_lists)
newframe = h2oframe.split_frame(ratios=[0.5, 0.25], destination_frames=["f1", "f2", "f3"], seed=None)
assert_is_type(newframe, list)
assert_is_type(newframe[0], H2OFrame)
assert len(newframe)==3, "h2o.H2OFrame.split_frame() command is not working."
assert h2oframe.nrow==(newframe[0].nrow+newframe[1].nrow+newframe[2].nrow), "h2o.H2OFrame.split_frame() command " \
"is not working."
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2o_H2OFrame_split_frame)  # pass the test function itself, not its return value
else:
h2o_H2OFrame_split_frame()
|
[
"noreply@github.com"
] |
Winfredemalx54.noreply@github.com
|
5953b3b9c01500579a6f297e7f5b22fd87d779c5
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/playground/ebayer/c2/kernel/pae/drivers/module-pae-openafs/actions.py
|
5dcbf53a4428541a79d686435aaf1aa6a77d6da8
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
from pisi.actionsapi import kerneltools
KDIR = kerneltools.getKernelVersion()
WorkDir = "openafs-%s" % get.srcVERSION()
def setup():
autotools.configure("--with-linux-kernel-headers=/lib/modules/%s/build" % KDIR)
def build():
autotools.make("-j1 only_libafs")
def install():
for m in ("libafs.ko", "afspag.ko"):
pisitools.insinto("/lib/modules/%s/kernel/extra/openafs" % KDIR, "src/libafs/MODLOAD-%s-SP/%s" % (KDIR, m))
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
6e00615762e8df542d13ee65b1357bdf9cf232dc
|
a11984110d22e8231896c7e8bf2c6c2a96e46502
|
/Daily Challenges/2020/June/Coin Change 2.py
|
11def6bdf3936bba8f8e65cab5a71696240db825
|
[] |
no_license
|
Waqar-107/LeetCode
|
fbd323c89a5ea010b3322b0b35dd087a7744abc4
|
5f7dc48918c0367b20e733830e9807eb40840f77
|
refs/heads/master
| 2023-08-03T12:27:58.593051
| 2023-07-24T01:33:24
| 2023-07-24T01:33:24
| 220,239,559
| 8
| 7
| null | 2022-05-01T18:50:03
| 2019-11-07T13:08:48
|
Python
|
UTF-8
|
Python
| false
| false
| 837
|
py
|
from typing import List  # for the List[int] annotation below

class Solution:
def change(self, amount: int, coins: List[int]) -> int:
n = len(coins)
dp = [[0 for _ in range(n)] for _ in range(amount + 1)]
if amount == 0:
return 1
if n == 0:
return 0
for j in range(n):
dp[0][j] = 1
for i in range(1, amount + 1):
for j in range(n):
# include coin j
if i - coins[j] >= 0:
x = dp[i - coins[j]][j]
else:
x = 0
# do not include j
if j >= 1:
y = dp[i][j - 1]
else:
y = 0
dp[i][j] = x + y
return dp[amount][n - 1]
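# Hypothetical usage sketch, not part of the original file:
# Solution().change(5, [1, 2, 5])  # 4 ways: {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}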
|
[
"noreply@github.com"
] |
Waqar-107.noreply@github.com
|
0a6b8b51c8c6d0be55dbbec18662c723561424b8
|
44064ed79f173ddca96174913910c1610992b7cb
|
/Second_Processing_app/temboo/Library/SendGrid/WebAPI/Profile/__init__.py
|
6d92d7a013674c733657f66763ff162be16e38d5
|
[] |
no_license
|
dattasaurabh82/Final_thesis
|
440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5
|
8edaea62f5987db026adfffb6b52b59b119f6375
|
refs/heads/master
| 2021-01-20T22:25:48.999100
| 2014-10-14T18:58:00
| 2014-10-14T18:58:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
from UpdateContactProfileEmailAddress import *
from UpdateUsername import *
from ViewAccountProfile import *
from ResetPassword import *
from UpdateAccountProfile import *
|
[
"dattasaurabh82@gmail.com"
] |
dattasaurabh82@gmail.com
|
4ddb79704d7f95d929525eb9514d2329a0e2ae5f
|
9ce4292954000fd66bcdbd0797a280c306308d08
|
/quizzes/00.organize.me/Cracking the Coding Interview/10-5.py
|
b766c70b7b543f695e10a7a7269d68734ca8f968
|
[
"MIT"
] |
permissive
|
JiniousChoi/encyclopedia-in-code
|
0c786f2405bfc1d33291715d9574cae625ae45be
|
77bc551a03a2a3e3808e50016ece14adb5cfbd96
|
refs/heads/master
| 2021-06-27T07:50:10.789732
| 2020-05-29T12:50:46
| 2020-05-29T12:50:46
| 137,426,553
| 2
| 0
|
MIT
| 2020-10-13T08:56:12
| 2018-06-15T01:29:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
'''
10.5 - Given a sorted array of strings interspersed with empty strings,
write a method that finds the position of a given string.
'''
def search_arr_with_empty_string(arr, target):
assert arr
left = init_left(arr)
right = init_right(arr)
mid = get_mid(arr, left, right)
while mid>=0:
if arr[mid]==target:
return mid
if arr[mid]>target:
right=mid
elif arr[mid]<target:
left=mid
else:
assert False
mid = get_mid(arr, left, right)
return -1
def init_left(arr):
for i,e in enumerate(arr):
if e:
return i
raise Exception("주어진 배열이 빈문자열로만 차있습니다")
def init_right(arr):
for i in range(len(arr)-1, -1, -1):
if arr[i]:
return i
raise Exception("주어진 배열이 빈문자열로만 차있습니다")
def get_mid(arr, left, right):
assert left < right
mid = (left+right)//2
if arr[mid]:
return mid
for t in range(mid-1, left, -1):
if arr[t]:
return t
for t in range(mid+1, right):
if arr[t]:
return t
return -1
sample_arr = ["at","","","","ball","","","car","","","dad","",""]
idx = search_arr_with_empty_string(sample_arr, "ball")
print(idx)
|
[
"jinchoiseoul@gmail.com"
] |
jinchoiseoul@gmail.com
|
dcbab961672293df7685c2b68b386d61314c7e39
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_144/ch149_2020_04_13_20_40_50_766295.py
|
878ea953ff527f041b48cc86231d9d5b082aeda2
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
salario_bruto = float(input("Salario: "))  # float, and balanced parentheses; the brackets below use decimal limits
numero_dependentes = int(input("Dependentes: "))
contri_INSS = 0
if salario_bruto <= 1045:
contri_INSS = salario_bruto * 0.075
elif salario_bruto >= 1045.01 and salario_bruto <=2089.60:
contri_INSS = salario_bruto * 0.09
elif salario_bruto >= 2089.61 and salario_bruto <= 3134.40:
contri_INSS = salario_bruto * 0.12
elif salario_bruto >= 3134.41 and salario_bruto <=6101.06:
contri_INSS = salario_bruto * 0.14
else:
contri_INSS = 671.12
base = salario_bruto - contri_INSS - (numero_dependentes* 189.59)
aliquota = 0
deducao = 0
if base <= 1903.98:
aliquota = 0
deducao = 0
elif base >= 1903.99 and base <= 2826.65:
    aliquota = 0.075  # 7.5% bracket; 0.75 was a typo, given the 0.15/0.225/0.275 sequence below
deducao = 142.80
elif base >= 2826.66 and base <= 3751.05:
aliquota = 0.15
deducao = 354.80
elif base >= 3751.06 and base <= 4664.68:
aliquota = 0.225
deducao = 636.13
else:
aliquota = 0.275
deducao = 869.36
IRRF = base * aliquota - deducao
print(IRRF)
|
[
"you@example.com"
] |
you@example.com
|
c1825c451ebce3e5a90f216fa0ea0683c035ad0d
|
34f6d9a4c4becc057d1b01a0ed3e50f20a071b03
|
/main/migrations/0001_initial.py
|
55e4c7b7da9e0f6d8dea069a8d80d7bc81e61042
|
[] |
no_license
|
hitscanner/WUW
|
e6d59bb8eae3834cf115e50834a2a4af51c29b29
|
31a482afe3e4789c979696a70f5ded17488b7810
|
refs/heads/master
| 2022-12-10T06:01:01.862354
| 2019-08-11T11:31:01
| 2019-08-11T11:31:01
| 196,556,732
| 0
| 0
| null | 2022-07-06T20:13:05
| 2019-07-12T10:06:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
# Generated by Django 2.2 on 2019-07-16 08:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search_result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('poster', models.ImageField(blank=True, upload_to='')),
('heart', models.ImageField(blank=True, upload_to='')),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
],
),
]
|
[
"dmswl_0311@naver.com"
] |
dmswl_0311@naver.com
|
cab7d49da20714d35bcfe777d586c4c4b8e8bcb1
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/eqpt/spcmnblk.py
|
4a85bd179a0f4e21e2c358232362a472c0383c4d
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,084
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SpCmnBlk(Mo):
"""
A SPROM common block.
"""
meta = ClassMeta("cobra.model.eqpt.SpCmnBlk")
meta.moClassName = "eqptSpCmnBlk"
meta.rnFormat = "spcmn"
meta.category = MoCategory.REGULAR
meta.label = "Sprom Common Block"
meta.writeAccessMask = 0x80080000000001
meta.readAccessMask = 0x80080000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.eqpt.SpromFan")
meta.parentClasses.add("cobra.model.eqpt.SpromLc")
meta.parentClasses.add("cobra.model.eqpt.SpromSup")
meta.parentClasses.add("cobra.model.eqpt.SpromPsu")
meta.parentClasses.add("cobra.model.eqpt.SpromBP")
meta.superClasses.add("cobra.model.eqpt.SpBlkHdr")
meta.rnPrefixes = [
('spcmn', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cksum", "cksum", 3358, PropCategory.REGULAR)
prop.label = "Checksum"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cksum", prop)
prop = PropMeta("str", "clei", "clei", 3375, PropCategory.REGULAR)
prop.label = "CLEI Code"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("clei", prop)
prop = PropMeta("str", "count", "count", 3360, PropCategory.REGULAR)
prop.label = "Block Count"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("count", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "engBits", "engBits", 3372, PropCategory.REGULAR)
prop.label = "Engineering Bits"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("engBits", prop)
prop = PropMeta("str", "hwRevMaj", "hwRevMaj", 3369, PropCategory.REGULAR)
prop.label = "Hardware Revision Major Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("hwRevMaj", prop)
prop = PropMeta("str", "hwRevMin", "hwRevMin", 3370, PropCategory.REGULAR)
prop.label = "Hardware Revision Minor Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("hwRevMin", prop)
prop = PropMeta("str", "len", "len", 3357, PropCategory.REGULAR)
prop.label = "Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("len", prop)
prop = PropMeta("str", "major", "major", 3361, PropCategory.REGULAR)
prop.label = "FRU Major Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("major", prop)
prop = PropMeta("str", "mfgBits", "mfgBits", 3371, PropCategory.REGULAR)
prop.label = "Manufacturing Bits"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("mfgBits", prop)
prop = PropMeta("str", "mfgDev", "mfgDev", 3368, PropCategory.REGULAR)
prop.label = "Manufacturing Deviation"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("mfgDev", prop)
prop = PropMeta("str", "minor", "minor", 3362, PropCategory.REGULAR)
prop.label = "FRU Minor Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("minor", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "oem", "oem", 3363, PropCategory.REGULAR)
prop.label = "OEM"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("oem", prop)
prop = PropMeta("str", "pRev", "pRev", 3367, PropCategory.REGULAR)
prop.label = "Part Revision"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("pRev", prop)
prop = PropMeta("str", "pdNum", "pdNum", 3364, PropCategory.REGULAR)
prop.label = "Product Number"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("pdNum", prop)
prop = PropMeta("str", "prtNum", "prtNum", 3366, PropCategory.REGULAR)
prop.label = "Part Number"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("prtNum", prop)
prop = PropMeta("str", "pwrCon", "pwrCon", 3373, PropCategory.REGULAR)
prop.label = "Power Consumption"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("pwrCon", prop)
prop = PropMeta("str", "ramFl", "ramFl", 3374, PropCategory.REGULAR)
prop.label = "RMA Failure Code"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("ramFl", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "serNum", "serNum", 3365, PropCategory.REGULAR)
prop.label = "Serial Number"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("serNum", prop)
prop = PropMeta("str", "sig", "sig", 3355, PropCategory.REGULAR)
prop.label = "Signature"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("sig", prop)
prop = PropMeta("str", "size", "size", 3359, PropCategory.REGULAR)
prop.label = "Block Size"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("size", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "vdrId", "vdrId", 3376, PropCategory.REGULAR)
prop.label = "Vendor ID"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("vdrId", prop)
prop = PropMeta("str", "ver", "ver", 3356, PropCategory.REGULAR)
prop.label = "Version"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("ver", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("EqptSlotToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
877d1c526b3f20bd3188f83451ed138f7a56e486
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_sniggering.py
|
17cd2b9a2bb4518486b4ed491fb93307ec1d284d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# class header
class _SNIGGERING():
    def __init__(self):
        self.name = "SNIGGERING"
        self.definitions = ['snigger']  # quoted and listed; the bare name was a NameError
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['snigger']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ac7098eb210e84d502ecbef2178e0288d257fa61
|
010215c1421f5275a846e7154189b22cdd3c89bc
|
/MS/Two Pointer/backspace_compare.py
|
b930b074d11fe2ed02fd6c809b5f4f8223bf58ac
|
[] |
no_license
|
bsextion/CodingPractice_Py
|
ab54d5715298645a8fd7ab6945bf3b22d4e6a874
|
da2847a04705394c32a6fe1b5f6c6b64c24647a3
|
refs/heads/master
| 2023-08-16T17:14:47.643989
| 2021-09-28T19:23:40
| 2021-09-28T19:23:40
| 383,658,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
def backspace_compare(str1: str, str2: str):
    # two pointers: each '#' erases itself and, if present, the character before it
    ptr_one = 0
    while ptr_one < len(str1):
        if str1[ptr_one] == '#':
            temp = list(str1)
            temp[ptr_one] = ''
            if ptr_one > 0:
                temp[ptr_one - 1] = ''
            str1 = ''.join(temp)
            ptr_one = max(ptr_one - 1, 0)  # resume at the character that shifted into this spot
        else:
            ptr_one += 1
    ptr_two = 0
    while ptr_two < len(str2):
        if str2[ptr_two] == '#':
            temp = list(str2)
            temp[ptr_two] = ''
            if ptr_two > 0:
                temp[ptr_two - 1] = ''
            str2 = ''.join(temp)
            ptr_two = max(ptr_two - 1, 0)
        else:
            ptr_two += 1
    return str1 == str2

print(backspace_compare("xp#", "xyz##"))  # True: both reduce to "x"
|
[
"bsextion@gmail.com"
] |
bsextion@gmail.com
|
b6677daaa5433a1a5bff104dbd781005f9caa6ad
|
bdba52c756cc09f192b720ea318510c265665dcd
|
/swagger_client/models/get_characters_character_id_planets_planet_id_head.py
|
d6daf4f3b6cdc6f78e96cc5957b82e43bb1a5d74
|
[
"MIT"
] |
permissive
|
rseichter/bootini-star
|
6b38195890f383615cc2b422c365ac28c5b87292
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
refs/heads/master
| 2020-03-14T03:17:11.385048
| 2018-06-28T17:23:23
| 2018-06-28T17:23:23
| 131,416,504
| 0
| 0
|
MIT
| 2018-05-01T14:26:04
| 2018-04-28T14:28:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,555
|
py
|
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetCharactersCharacterIdPlanetsPlanetIdHead(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'head_id': 'int',
'latitude': 'float',
'longitude': 'float'
}
attribute_map = {
'head_id': 'head_id',
'latitude': 'latitude',
'longitude': 'longitude'
}
def __init__(self, head_id=None, latitude=None, longitude=None): # noqa: E501
"""GetCharactersCharacterIdPlanetsPlanetIdHead - a model defined in Swagger""" # noqa: E501
self._head_id = None
self._latitude = None
self._longitude = None
self.discriminator = None
self.head_id = head_id
self.latitude = latitude
self.longitude = longitude
@property
def head_id(self):
"""Gets the head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
head_id integer # noqa: E501
:return: The head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:rtype: int
"""
return self._head_id
@head_id.setter
def head_id(self, head_id):
"""Sets the head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead.
head_id integer # noqa: E501
:param head_id: The head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:type: int
"""
if head_id is None:
raise ValueError("Invalid value for `head_id`, must not be `None`") # noqa: E501
if head_id is not None and head_id > 9: # noqa: E501
raise ValueError("Invalid value for `head_id`, must be a value less than or equal to `9`") # noqa: E501
if head_id is not None and head_id < 0: # noqa: E501
raise ValueError("Invalid value for `head_id`, must be a value greater than or equal to `0`") # noqa: E501
self._head_id = head_id
@property
def latitude(self):
"""Gets the latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
latitude number # noqa: E501
:return: The latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:rtype: float
"""
return self._latitude
@latitude.setter
def latitude(self, latitude):
"""Sets the latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead.
latitude number # noqa: E501
:param latitude: The latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:type: float
"""
if latitude is None:
raise ValueError("Invalid value for `latitude`, must not be `None`") # noqa: E501
self._latitude = latitude
@property
def longitude(self):
"""Gets the longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
longitude number # noqa: E501
:return: The longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:rtype: float
"""
return self._longitude
@longitude.setter
def longitude(self, longitude):
"""Sets the longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead.
longitude number # noqa: E501
:param longitude: The longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:type: float
"""
if longitude is None:
raise ValueError("Invalid value for `longitude`, must not be `None`") # noqa: E501
self._longitude = longitude
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetCharactersCharacterIdPlanetsPlanetIdHead):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"github@seichter.de"
] |
github@seichter.de
|
c4f36d4b7fa6d6a4966f1a22288517df1842a6e4
|
480e33f95eec2e471c563d4c0661784c92396368
|
/Configuration/Generator/python/QCD_Pt-20toInf_MuEnrichedPt15_TuneCUETP8M1_13TeV_pythia8_cff.py
|
7ca22905e2e7cd1528d55e528353b0c20e2ccb2d
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420
| 2020-08-27T08:01:20
| 2020-08-27T08:01:20
| 102,867,729
| 7
| 14
|
Apache-2.0
| 2022-05-23T07:58:09
| 2017-09-08T14:03:57
|
C++
|
UTF-8
|
Python
| false
| false
| 2,277
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(0.00042),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(7.20648e+08),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:limitCylinder = on',
'ParticleDecays:xyMax = 2000',
'ParticleDecays:zMax = 4000',
'HardQCD:all = on',
'PhaseSpace:pTHatMin = 20',
'130:mayDecay = on',
'211:mayDecay = on',
'321:mayDecay = on'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
mugenfilter = cms.EDFilter("MCSmartSingleParticleFilter",
MinPt = cms.untracked.vdouble(15.,15.),
MinEta = cms.untracked.vdouble(-2.5,-2.5),
MaxEta = cms.untracked.vdouble(2.5,2.5),
ParticleID = cms.untracked.vint32(13,-13),
Status = cms.untracked.vint32(1,1),
# Decay cuts are in mm
MaxDecayRadius = cms.untracked.vdouble(2000.,2000.),
MinDecayZ = cms.untracked.vdouble(-4000.,-4000.),
MaxDecayZ = cms.untracked.vdouble(4000.,4000.)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('\$Revision$'),
name = cms.untracked.string('\$Source$'),
annotation = cms.untracked.string('QCD dijet production, pThat > 20 GeV, with INCLUSIVE muon preselection (pt(mu) > 15 GeV), 13 TeV, TuneCUETP8M1')
)
ProductionFilterSequence = cms.Sequence(generator*mugenfilter)
|
[
"kpedro88@gmail.com"
] |
kpedro88@gmail.com
|
ba2213f9f76fd58af80066c34ca0933cca61dfbe
|
8b2b497069ed3db150e15863559dc0e9a44dc8c1
|
/pure_protobuf/io/url.py
|
3e2004fb24b0f96f02a7ce5efe77d39b2a72b5a2
|
[
"MIT"
] |
permissive
|
eigenein/protobuf
|
2aec2c544cf9f6571b161b1e62ec3675a5b141eb
|
cf14bc702302c9334c7c9cc839b0b24334a725ef
|
refs/heads/master
| 2023-08-31T21:23:29.258800
| 2023-08-27T12:00:26
| 2023-08-28T12:36:25
| 1,890,285
| 216
| 20
|
MIT
| 2023-09-13T12:58:54
| 2011-06-13T18:26:55
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
"""Reading and writing parsed URLs."""
from typing import IO, Iterator
from urllib.parse import ParseResult, urlparse, urlunparse
from pure_protobuf.interfaces.read import Read
from pure_protobuf.interfaces.write import Write
from pure_protobuf.io.bytes_ import read_string, write_string
class ReadUrl(Read[ParseResult]):
__slots__ = ()
def __call__(self, io: IO[bytes]) -> Iterator[ParseResult]:
yield urlparse(read_string(io))
class WriteUrl(Write[ParseResult]):
__slots__ = ()
def __call__(self, value: ParseResult, io: IO[bytes]) -> None:
write_string(urlunparse(value), io)
|
[
"eigenein@gmail.com"
] |
eigenein@gmail.com
|
aee56c11569ff5461d903c1776810a2242a2f6ce
|
12c15c7ae150acaf8032f444db24440da2234b1a
|
/ComputerVision/Projects/cv20_proj1/lap.py
|
f91c55cde634d08ca8ca04d68cc2380417508879
|
[] |
no_license
|
Jimut123/rkmveri-labs
|
315ecd4607af72dd0851489e427a3ab09a8009ff
|
be19a453ea32460c454e3443798e3d8954fb084b
|
refs/heads/master
| 2023-02-02T17:11:23.641187
| 2020-12-13T18:35:20
| 2020-12-13T18:35:20
| 201,784,550
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
import numpy as np
import cv2
cutoff_frequency = 4
filter = cv2.getGaussianKernel(ksize=cutoff_frequency*4+1,
sigma=cutoff_frequency)
filter = np.dot(filter, filter.T)
def del2(M):
dx = 1
dy = 1
rows, cols = M.shape
dx = dx * np.ones ((1, cols - 1))
dy = dy * np.ones ((rows-1, 1))
mr, mc = M.shape
D = np.zeros ((mr, mc))
    if (mc >= 3):  # x direction needs at least 3 columns (the original checked mr)
## x direction
## left and right boundary
D[:, 0] = (M[:, 0] - 2 * M[:, 1] + M[:, 2]) / (dx[:,0] * dx[:,1])
D[:, mc-1] = (M[:, mc - 3] - 2 * M[:, mc - 2] + M[:, mc-1]) \
/ (dx[:,mc - 3] * dx[:,mc - 2])
## interior points
tmp1 = D[:, 1:mc - 1]
tmp2 = (M[:, 2:mc] - 2 * M[:, 1:mc - 1] + M[:, 0:mc - 2])
tmp3 = np.kron (dx[:,0:mc -2] * dx[:,1:mc - 1], np.ones ((mr, 1)))
D[:, 1:mc - 1] = tmp1 + tmp2 / tmp3
if (mr >= 3):
## y direction
## top and bottom boundary
D[0, :] = D[0,:] + \
(M[0, :] - 2 * M[1, :] + M[2, :] ) / (dy[0,:] * dy[1,:])
D[mr-1, :] = D[mr-1, :] \
+ (M[mr-3,:] - 2 * M[mr-2, :] + M[mr-1, :]) \
            / (dy[mr-3,:] * dy[mr-2,:])  # dy in the y direction; dx here was a typo
## interior points
tmp1 = D[1:mr-1, :]
tmp2 = (M[2:mr, :] - 2 * M[1:mr - 1, :] + M[0:mr-2, :])
tmp3 = np.kron (dy[0:mr-2,:] * dy[1:mr-1,:], np.ones ((1, mc)))
D[1:mr-1, :] = tmp1 + tmp2 / tmp3
return D / 4
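# Note: del2 above mirrors MATLAB's del2, returning the discrete Laplacian
# divided by 2*ndims (= 4 in 2D), with boundary values extrapolated from the
# nearest interior points.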
print(del2(filter))
|
[
"jimutbahanpal@yahoo.com"
] |
jimutbahanpal@yahoo.com
|
c89b9794cbf2b7f1b847fd4e611f0c42b5aa35fa
|
4771ca5cd2c7be8e6d0a50f1e0b1f85a17ec5efd
|
/todos/forms.py
|
51e119676bbd8af1823a81a48e4933eac9090377
|
[] |
no_license
|
luanfonceca/todomvc-django-over-the-wire
|
03aa2e57c04d465c56cf06e1c95b417c502bcbad
|
ae1b6e989c0c9edd7d4f8de2d9553bf57e4e1e38
|
refs/heads/main
| 2023-03-03T19:23:35.849691
| 2021-02-07T13:23:17
| 2021-02-07T13:23:17
| 334,795,276
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
from django import forms
from todos.models import ToDo
class ToDoForm(forms.ModelForm):
class Meta:
model = ToDo
fields = ('title',)
class CompleteToDoForm(forms.ModelForm):
class Meta:
model = ToDo
fields = ('is_completed',)
|
[
"luanfonceca@gmail.com"
] |
luanfonceca@gmail.com
|
2212a2b636017e168a6d9d41201b0c3c70163ac9
|
057d662a83ed85897e9906d72ea90fe5903dccc5
|
/Comprehension.py
|
68427686ee2e27abe1a3290558f7865fd4fd49bb
|
[] |
no_license
|
Karishma00/AnsiblePractice
|
19a4980b1f6cca7b251f2cbea3acf9803db6e016
|
932558d48869560a42ba5ba3fb72688696e1868a
|
refs/heads/master
| 2020-08-05T00:05:31.679220
| 2019-10-04T13:07:29
| 2019-10-04T13:07:29
| 212,324,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# list comprehension: squares of 0..9
squares = [x**2 for x in range(0, 10)]  # renamed from `list` to avoid shadowing the builtin
print(squares)
# another example: Celsius to Fahrenheit, F = C * 9/5 + 32 (the original 0/5 made every value 32)
celsius = [0, 10, 30, 90]
fahrenheit = [((9 / 5) * temp + 32) for temp in celsius]
print(fahrenheit)  # [32.0, 50.0, 86.0, 194.0]
|
[
"karishma11198@gmail.com"
] |
karishma11198@gmail.com
|
1d806f235836b0b15c4c2615bbf176dff8458479
|
82be2ebd50fef5b359cfbcacd21f38da4c383ffc
|
/tests/test_writer.py
|
a340f79117bfab8297d6e6b0fb63a8be472e2988
|
[
"BSD-3-Clause"
] |
permissive
|
isabella232/helium-commander
|
5eae81b89cccf2dae56a4163815d867777387288
|
58d1fe4064c51beccbff7a0d93bf037fffdac370
|
refs/heads/master
| 2021-06-15T15:16:00.139651
| 2017-02-28T23:22:36
| 2017-02-28T23:22:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
from helium_commander import Sensor, DataPoint
from itertools import islice
import pytest
def validate_format(output, client, sensors, capsys):
first_sensor = sensors[0]
# With sort
Sensor.display(client, sensors, format=output, sort='name')
out, err = capsys.readouterr()
assert first_sensor.short_id in out
Sensor.display(client, sensors, format=output, sort='name', reverse=True)
reversed, err = capsys.readouterr()
assert reversed != out
# Without sort
Sensor.display(client, sensors, format=output)
out, err = capsys.readouterr()
assert first_sensor.short_id in out
Sensor.display(client, sensors, format=output, reverse=True)
reversed, err = capsys.readouterr()
assert reversed != out
def test_formats(client, sensors, capsys):
for output in ['csv', 'tabular', 'json']:
validate_format(output, client, sensors, capsys)
with pytest.raises(AttributeError):
Sensor.display(client, sensors, format='xxx')
def test_timeseries(client, authorized_organization):
points = islice(authorized_organization.timeseries(), 10)
DataPoint.display(client, points, max_width=20)
|
[
"noreply@github.com"
] |
isabella232.noreply@github.com
|
102a94ec2318f2e1673fd0e494380451db909578
|
0e7aed5eef2e1d132a7e75dd8f439ae76c87639c
|
/python/652_find_duplicated_subtrees.py
|
ee4a673bdc3ecbf54bdd00a403e289703d72c886
|
[
"MIT"
] |
permissive
|
liaison/LeetCode
|
2a93df3b3ca46b34f922acdbc612a3bba2d34307
|
bf03743a3676ca9a8c107f92cf3858b6887d0308
|
refs/heads/master
| 2022-09-05T15:04:19.661298
| 2022-08-19T19:29:19
| 2022-08-19T19:29:19
| 52,914,957
| 17
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,429
|
py
|
from collections import defaultdict
from typing import List

# Definition for a binary tree node (uncommented so the annotations below resolve).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

class Solution:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
# set of all node strings
node_str_set = set()
duplicated_strs = set()
duplicated_nodes = list()
def node2str(node):
"""
this function accomplishes two tasks:
- index each node into a string
- search the duplicated nodes during the traversal
"""
nonlocal node_str_set
nonlocal duplicated_strs
nonlocal duplicated_nodes
if node is None:
return ""
left_str = node2str(node.left)
right_str = node2str(node.right)
node_str = str(node.val) + "(" + left_str + ")" + "(" + right_str + ")"
if node_str in node_str_set:
if node_str not in duplicated_strs:
duplicated_strs.add(node_str)
duplicated_nodes.append(node)
else:
node_str_set.add(node_str)
return node_str
node2str(root)
return duplicated_nodes
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class SolutionCount:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
# node_str -> count
node_str_count = defaultdict(int)
duplicated_nodes = list()
def node2str(node):
"""
this function accomplishes two tasks:
- index each node into a string
- search the duplicated nodes during the traversal
"""
nonlocal node_str_count
nonlocal duplicated_nodes
if node is None:
return ""
node_str = "{}({})({})".format(
node.val, node2str(node.left), node2str(node.right))
node_str_count[node_str] += 1
if node_str_count[node_str] == 2:
duplicated_nodes.append(node)
return node_str
node2str(root)
return duplicated_nodes
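# Hypothetical quick check, not part of the original file, using the classic
# example tree [1,2,3,4,null,2,4,null,null,4]:
# root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3, TreeNode(2, TreeNode(4)), TreeNode(4)))
# [n.val for n in Solution().findDuplicateSubtrees(root)]  # nodes with values 4 and 2 (order may vary)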
|
[
"lisong.guo@me.com"
] |
lisong.guo@me.com
|
15fd9952fee0476a4522d0e9c5220985962185cf
|
88abc8645e499a61e96e2979ae6092e98bfd09e7
|
/streamz/utils.py
|
4e1538da7c4506a9bf7fed145c07d2eb9fdde2bc
|
[
"BSD-3-Clause"
] |
permissive
|
vishalbelsare/streamz
|
5e2d6e112b6a2a90e396c4e3bc11cb1167d879e3
|
b73a8c4c5be35ff1dae220daaefbfd2bfa58e0a1
|
refs/heads/master
| 2022-12-24T17:28:40.600327
| 2022-11-22T16:40:35
| 2022-11-22T16:40:35
| 207,001,623
| 0
| 0
|
BSD-3-Clause
| 2022-12-10T04:20:03
| 2019-09-07T17:20:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
_method_cache = {}
class methodcaller(object):
"""
Return a callable object that calls the given method on its operand.
Unlike the builtin `operator.methodcaller`, instances of this class are
serializable
"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
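# Sketch of the serializability the methodcaller docstring mentions (these
# lines are not in the original file):
# import pickle
# f = methodcaller('upper')
# g = pickle.loads(pickle.dumps(f))
# assert g is f            # __reduce__ plus the cache return the same instance
# assert g('abc') == 'ABC'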
|
[
"noreply@github.com"
] |
vishalbelsare.noreply@github.com
|
3a084bb437dc7e9fbb08e486b1cc9993909d21bb
|
71d535545c4f3b2fc626cd04cfcee22805b67353
|
/copacity_app/migrations/0007_auto_20210613_1019.py
|
c0f52b9073d682a55e6c58ee766848aa894fabd7
|
[] |
no_license
|
mcnalj/copacity_django
|
01a018d32ee9cb9ba392e5dcd160d636ba0b5b74
|
48432cff7585af342599c06cac497947e4b68195
|
refs/heads/master
| 2023-07-04T14:27:50.736252
| 2021-08-10T16:53:59
| 2021-08-10T16:53:59
| 383,779,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,768
|
py
|
# Generated by Django 3.1.7 on 2021-06-13 10:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('copacity_app', '0006_checkin_owner'),
]
operations = [
migrations.CreateModel(
name='Circle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('createdBy', models.CharField(max_length=50)),
('createdOn', models.DateTimeField(auto_now_add=True)),
('adminId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CircleMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('circle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='copacity_app.circle')),
('inviter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='circle_invites', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='circle',
name='members',
field=models.ManyToManyField(related_name='circle_member', through='copacity_app.CircleMembership', to=settings.AUTH_USER_MODEL),
),
]
|
[
"example@example.com"
] |
example@example.com
|
ef954e4bc9bcc4a0ca428034f6427da6e1577c8f
|
07da31b260bf2949ffd9463ad4f777ca93b75d43
|
/sleekforum/src/sleekapps/threads/views/post/post.py
|
6f81800803801b5318b4dba53439f620da360d57
|
[] |
no_license
|
adepeter/sleek-docker
|
134fd7de12ade8c521ceb8e1b2b2611fa2224dde
|
dcf010c3da53093600101d970c6888c82360209f
|
refs/heads/master
| 2022-12-15T14:53:01.499098
| 2020-09-14T00:42:31
| 2020-09-14T00:42:31
| 282,499,689
| 0
| 0
| null | 2020-07-31T14:31:22
| 2020-07-25T18:12:19
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
from django.contrib import messages
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.utils.translation import gettext_lazy as _
from ...forms.post.post import PostEditForm, PostForm
from ...viewmixins.post import BasePostMixin
TEMPLATE_URL = 'threads/post'
class EditPost(BasePostMixin, UpdateView):
form_class = PostEditForm
template_name = f'{TEMPLATE_URL}/edit_post.html'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
def form_valid(self, form):
if not form.has_changed():
messages.success(self.request, _('No changes were made to your reply'))
else:
messages.success(self.request, _('Post was successfully edited.'))
return super().form_valid(form)
class DeletePost(BasePostMixin, DeleteView):
pass
class ReplyPost(BasePostMixin, CreateView):
form_class = PostForm
template_name = f'{TEMPLATE_URL}/reply_post.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['parent'] = self.get_object()
return context
def form_valid(self, form):
parent_object = self.get_object()
form.instance.thread = parent_object.thread
form.instance.parent = parent_object
form.instance.user = self.request.user
return super().form_valid(form)
|
[
"adepeter26@gmail.com"
] |
adepeter26@gmail.com
|
418ff5b81b82739dbb020083e568e2276627c16e
|
fdaba69f8d3ae3e645cb548a31111814b67f88bc
|
/credit/xgboost_Chunk_join.py
|
3d222a1d0c5720a80dc1aa63bfc2f49b7910ada3
|
[] |
no_license
|
curryli/pandasFlow
|
6c381a06843f353f3449666cc9aee3e3fc2c3620
|
891963e1d9acd8cdd23732180a3fd4b4633bc335
|
refs/heads/master
| 2020-12-07T15:24:36.500075
| 2018-07-01T09:01:09
| 2018-07-01T09:01:09
| 95,520,789
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,752
|
py
|
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.metrics import recall_score, precision_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
import datetime
from collections import Counter
from xgboost.sklearn import XGBClassifier
import numpy as np
start_time = datetime.datetime.now()
#################################################
#reader = pd.read_csv("new_FE_idx.csv", low_memory=False, iterator=True)
#reader = pd.read_csv("trans_small.csv", low_memory=False, iterator=True)
reader = pd.read_csv("cert_all_right.csv", low_memory=False, iterator=True)
loop = True
chunkSize = 100000
chunks = []
i = 0
while loop:
try:
chunk = reader.get_chunk(chunkSize)
chunks.append(chunk)
if (i%5)==0:
print i
i = i+1
except StopIteration:
loop = False
print "Iteration is stopped."
df_All = pd.concat(chunks, ignore_index=True)
print df_All.columns
#df_All = df_All.drop(["Trans_at","hist_fraud_cnt"], axis=1,inplace=False)
df_All = df_All[(df_All["label"] == 0) | (df_All["label"] == 1)]
df_All_stat = pd.read_csv("train_1108.csv", sep=',')
df_All_stat = df_All_stat[(df_All_stat["label"]==0) | (df_All_stat["label"]==1)]
df_All_stat= df_All_stat.drop( ["label"], axis=1,inplace=False)
df_All = pd.merge(left=df_All, right=df_All_stat, how='left', left_on='certid', right_on='certid')
df_All = shuffle(df_All)
df_All = df_All.fillna(-1)
df_X = df_All.drop(["label","certid","card_no"], axis=1,inplace=False)
df_y = df_All[["certid","label"]]
X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.2)
np.savetxt("X_train_cols.csv",np.array(X_train.columns),fmt="%s" )
###############################################
certid_test = y_test
y_train = y_train.drop(["certid"], axis=1,inplace=False)
y_test = y_test.drop(["certid"], axis=1,inplace=False)
clf = XGBClassifier(learning_rate =0.1,n_estimators=500,max_depth=5,gamma=0.05,subsample=0.8,colsample_bytree=0.8,objective= 'binary:logistic', reg_lambda=1,seed=27)
print "start training"
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
cm1=confusion_matrix(y_test,pred)
print cm1
print "For Trans:\n"
result = precision_recall_fscore_support(y_test,pred)
#print result
precision_0 = result[0][0]
recall_0 = result[1][0]
f1_0 = result[2][0]
precision_1 = result[0][1]
recall_1 = result[1][1]
f1_1 = result[2][1]
print "precision_0: ", precision_0," recall_0: ", recall_0, " f1_0: ", f1_0
#print "certid_test_ori\n",certid_test
certid_test.index = range(certid_test.shape[0])
#print "certid_test\n",certid_test
certid_pred = pd.DataFrame(pred,columns=["pred"])
#print "certid_pred\n", certid_pred
certid_DF = pd.concat([certid_test,certid_pred], axis=1, ignore_index=True)
certid_DF.columns = ["certid","label","pred"]
#print "certid_DF\n",certid_DF
print certid_DF.dtypes
certid_DF.to_csv("certid_DF_drop.csv")
certid_grouped = certid_DF.groupby([certid_DF['certid']])
#certid_grouped = certid_DF.groupby([certid_DF['certid']], as_index=False)
# def label_cnt(arr):  # the most frequent label for the same person
#     cnt_set = Counter(arr)
#     max_cnt_pair = cnt_set.most_common(1)[0]  # (maxitem,maxcount)
#     return max_cnt_pair[0]
def label_cnt(arr):  # per-person label: 0 if any prediction is 0, otherwise 1
cnt_0 = 0
arr_values = arr.values
for i in range(len(arr_values)):
if arr_values[i]==float(0):
cnt_0 = cnt_0+1
if(cnt_0>0):
return 0
else:
return 1
agg_dict = {}
agg_dict["pred"] = [label_cnt]
agg_stat_df = certid_grouped.agg(agg_dict)
agg_stat_df.columns = agg_stat_df.columns.map('{0[0]}-{0[1]}'.format)
#https://www.cnblogs.com/hhh5460/p/7067928.html
agg_stat_df.reset_index(level=0, inplace=True)
#print agg_stat_df
pred_label_DF = agg_stat_df[["certid", "pred-label_cnt"]]
true_label_DF = certid_test.drop_duplicates()
compare_df = pd.merge(left=true_label_DF, right=pred_label_DF, how='left', left_on='certid', right_on='certid')
y_test = compare_df["label"]
pred = compare_df["pred-label_cnt"]
cm2=confusion_matrix(y_test,pred)
print cm2
print "For Person:\n"
result = precision_recall_fscore_support(y_test,pred)
#print result
precision_0 = result[0][0]
recall_0 = result[1][0]
f1_0 = result[2][0]
precision_1 = result[0][1]
recall_1 = result[1][1]
f1_1 = result[2][1]
print "precision_0: ", precision_0," recall_0: ", recall_0, " f1_0: ", f1_0
end_time = datetime.datetime.now()
delta_time = str((end_time-start_time).total_seconds())
print "cost time",delta_time,"s"
|
[
"xurui.lee@msn.com"
] |
xurui.lee@msn.com
|
4b3187694d3f43ef8b7ee834c64e18fad6e4b5d3
|
2290eed5c494202beea0da1b9257a38b7a4403d2
|
/script/[662]二叉树最大宽度.py
|
382e8af49c05465745fefb5b155a1be322b7d57b
|
[] |
no_license
|
DSXiangLi/Leetcode_python
|
4b1c9848ea774955fb252b9bd796ba8d46ad728e
|
a2ef0ba5e86405dbf68dbc1ffeb086c7d864db1d
|
refs/heads/main
| 2022-09-01T04:34:04.260402
| 2022-08-20T01:12:27
| 2022-08-20T01:12:27
| 445,347,891
| 1
| 0
| null | 2022-07-23T06:32:14
| 2022-01-07T00:15:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,352
|
py
|
# Given a binary tree, write a function that returns the maximum width of the
# tree. The width of the tree is the maximum width over all levels. The binary
# tree has the same structure as a full binary tree, but some nodes are null.
#
# The width of a level is defined as the length between its two endpoints (the
# leftmost and the rightmost non-null nodes of that level; the null nodes
# between the endpoints are also counted in the length).
#
# Example 1:
#
# Input:
#
#            1
#          /   \
#         3     2
#        / \     \
#       5   3     9
#
# Output: 4
# Explanation: the maximum width appears at level 3, with width 4 (5,3,null,9).
#
# Example 2:
#
# Input:
#
#           1
#          /
#         3
#        / \
#       5   3
#
# Output: 2
# Explanation: the maximum width appears at level 3, with width 2 (5,3).
#
# Example 3:
#
# Input:
#
#           1
#          / \
#         3   2
#        /
#       5
#
# Output: 2
# Explanation: the maximum width appears at level 2, with width 2 (3,2).
#
# Example 4:
#
# Input:
#
#           1
#          / \
#         3   2
#        /     \
#       5       9
#      /         \
#     6           7
# Output: 8
# Explanation: the maximum width appears at level 4, with width 8
# (6,null,null,null,null,null,null,7).
#
# Note: the answer is within the range of a 32-bit signed integer.
# Related Topics Tree Depth-First Search Breadth-First Search Binary Tree 👍 384 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node (uncommented, with the typing import it
# needs, so the file also runs outside the LeetCode harness).
from typing import Optional
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:
maxw = 0
stack = [(root,0)]
while stack:
l = len(stack)
left = 0
for i in range(l):
node, pos = stack.pop(0)
if i==0:
left= pos
if node.left:
stack.append((node.left, pos*2))
if node.right:
stack.append((node.right, pos*2+1))
if i==l-1:
maxw = max(maxw, pos-left+1)
return maxw
# leetcode submit region end(Prohibit modification and deletion)
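
# A minimal usage sketch (not part of the original submission), using the
# tree from Example 1 above; expected output is 4.
if __name__ == '__main__':
    root = TreeNode(1,
                    TreeNode(3, TreeNode(5), TreeNode(3)),
                    TreeNode(2, None, TreeNode(9)))
    print(Solution().widthOfBinaryTree(root))  # 4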
|
[
"liningrui@xiaohongshu.com"
] |
liningrui@xiaohongshu.com
|
68a8b7bca0c433c9063cdb4726ee5fc8ce83c752
|
48aacf0425c5ab071972034c3fbd388feb036578
|
/node-7/site-packages/ceph_deploy/connection.py
|
381f6afa4c9bd0454eb2cda6a9881067950e2c18
|
[] |
no_license
|
wputra/MOS-centos
|
2b8ec0116bb3a28632c54d6052d322a42391439f
|
0a4f24dd4183d4d44e8c7beb27adce12e42f0201
|
refs/heads/master
| 2021-01-10T19:22:22.920342
| 2014-09-12T03:33:54
| 2014-09-12T03:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
from ceph_deploy.lib.remoto import Connection
from sudo_pushy import needs_sudo # TODO move this to utils once pushy is out
def get_connection(hostname, logger, threads=5):
"""
A very simple helper, meant to return a connection
that will know about the need to use sudo.
"""
try:
return Connection(
hostname,
logger=logger,
sudo=needs_sudo(),
threads=threads,
)
except Exception as error:
msg = "connecting to host: %s " % hostname
errors = "resulted in errors: %s %s" % (error.__class__.__name__, error)
raise RuntimeError(msg + errors)
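
# A minimal usage sketch (not from the original module): 'mon1' is a
# placeholder hostname, and the logger is plain stdlib logging; the call only
# succeeds in an environment where the host is reachable over SSH.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    conn = get_connection('mon1', logger=logging.getLogger('ceph_deploy'))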
|
[
"wasis.putra@gmail.com"
] |
wasis.putra@gmail.com
|
f54066fc82d29ccbcbb0f6fbc82e0b625fe67fb5
|
ad0857eaba945c75e705594a53c40dbdd40467fe
|
/leetCode/maximal_rectangle.py
|
98b030c3831e8cf2db830fb6e04f0209fe45fc5d
|
[
"MIT"
] |
permissive
|
yskang/AlgorithmPractice
|
c9964d463fbd0d61edce5ba8b45767785b0b5e17
|
3efa96710e97c8740d6fef69e4afe7a23bfca05f
|
refs/heads/master
| 2023-05-25T13:51:11.165687
| 2023-05-19T07:42:56
| 2023-05-19T07:42:56
| 67,045,852
| 0
| 0
| null | 2021-06-20T02:42:27
| 2016-08-31T14:40:10
|
Python
|
UTF-8
|
Python
| false
| false
| 927
|
py
|
class Solution(object):
def maximal_rectangle(self, matrix):
if not matrix or not matrix[0]:
return 0
width = len(matrix[0])
heights = [0] * (width+1)
ans = 0
for row in matrix:
for i in range(width):
heights[i] = heights[i] + 1 if row[i] == '1' else 0
stack = [-1]
for i in range(width+1):
while heights[i] < heights[stack[-1]]:
h = heights[stack.pop()]
w = i - 1 - stack[-1]
ans = max(ans, h * w)
stack.append(i)
return ans
if __name__ == "__main__":
sol = Solution()
print(sol.maximal_rectangle([["1", "0", "1", "0", "0"],
["1", "0", "1", "1", "1"],
["1", "1", "1", "1", "1"],
["1", "0", "1", "1", "0"]]))
|
[
"yongsung.kang@gmail.com"
] |
yongsung.kang@gmail.com
|
ac7a8bc1157a39c61201db61f88be40f9b180771
|
7cf52b987da6595ebc5f763b384b03e608ccb25f
|
/tests/index/test_mongodb_index.py
|
87a66da4180d9cc8e832f22eb992d389b7dee1c3
|
[] |
no_license
|
shaypal5/dinglebop
|
93fdfda48ec4d91c0a9485173a106d1edbcd1b29
|
a10473b4abecfd70a00cd9086aa8919a404959c9
|
refs/heads/master
| 2021-08-20T10:17:46.640046
| 2017-11-14T18:01:43
| 2017-11-14T18:01:43
| 109,569,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,710
|
py
|
"""Testing the implementation of MongoDB-based dingle indexes."""
import pytest
from dinglebop.index.mongodb import MongoDBIndex
from dinglebop.shared import get_dinglebop_cfg
SAMPLE_IDEN1 = 'school_data_2016'
SAMPLE_IDEN2 = 'school_data_2017'
SAMPLE_DOC1 = {'identifier': SAMPLE_IDEN1, 'version': 'v1.0',
'store': 'somestore', 'format_identifier': 'arrow'}
SAMPLE_DOC2 = {'identifier': SAMPLE_IDEN1, 'version': 'v1.1',
'store': 'somestore', 'format_identifier': 'csv'}
SAMPLE_DOC3 = {'identifier': SAMPLE_IDEN2, 'version': 'v0.03',
'store': 'somestore', 'format_identifier': 'csv'}
SAMPLE_DOC4 = {'identifier': SAMPLE_IDEN2, 'version': 'v0.23',
'store': 'somestore', 'format_identifier': 'csv'}
SAMPLE_DOCS = [SAMPLE_DOC1, SAMPLE_DOC2, SAMPLE_DOC3, SAMPLE_DOC4]
def _get_mongodb_idx_instance():
dcfg = get_dinglebop_cfg()
idx_cfg = dcfg['dingles']['dinglebop_test']['index'].copy()
assert idx_cfg.pop('type') == 'MongoDB'
return MongoDBIndex(**idx_cfg)
def _get_idx_collection():
return _get_mongodb_idx_instance()._get_collection()
@pytest.fixture(scope="session", autouse=True)
def reset_idx_collection():
idx_obj = _get_mongodb_idx_instance()
collection = idx_obj._get_collection()
if MongoDBIndex._INDEX_NAME in collection.index_information():
collection.drop_index(MongoDBIndex._INDEX_NAME)
collection.delete_many({})
collection.insert_many([d.copy() for d in SAMPLE_DOCS])
def test_mongodb_index_autocreation():
idx_collection = _get_idx_collection()
assert MongoDBIndex._INDEX_NAME in idx_collection.index_information()
def test_get_all_dataset_entries():
dingle_idx = _get_mongodb_idx_instance()
cursor = dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1)
docs = list(cursor)
assert len(docs) == 2
assert docs[0]['version'] == 'v1.1'
assert docs[1]['version'] == 'v1.0'
def test_get_latest_dataset_entry():
dingle_idx = _get_mongodb_idx_instance()
doc1 = dingle_idx.get_latest_dataset_entry(identifier=SAMPLE_IDEN1)
assert doc1['version'] == 'v1.1'
doc2 = dingle_idx.get_latest_dataset_entry(identifier=SAMPLE_IDEN2)
assert doc2['version'] == 'v0.23'
def test_get_dataset_entry_by_version():
dingle_idx = _get_mongodb_idx_instance()
doc = dingle_idx.get_dataset_entry_by_version(
identifier=SAMPLE_IDEN1, version='v1.0')
assert doc['format_identifier'] == 'arrow'
@pytest.fixture(scope='function')
def clear_all_idx_docs():
collection = _get_idx_collection()
collection.delete_many({})
def test_add_entry(clear_all_idx_docs):
dingle_idx = _get_mongodb_idx_instance()
dingle_idx.add_entry(**SAMPLE_DOC1)
dingle_idx.add_entry(**SAMPLE_DOC2)
docs = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1))
assert len(docs) == 2
@pytest.fixture(scope='function')
def add_all_idx_docs():
collection = _get_idx_collection()
collection.delete_many({})
collection.insert_many([d.copy() for d in SAMPLE_DOCS])
def test_remove_entries(add_all_idx_docs):
dingle_idx = _get_mongodb_idx_instance()
docs1 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1))
assert len(docs1) == 2
docs2 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN2))
assert len(docs2) == 2
dingle_idx.remove_entries(identifier=SAMPLE_IDEN1, version='v1.0')
docs1 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1))
assert len(docs1) == 1
dingle_idx.remove_entries(identifier=SAMPLE_IDEN2)
docs2 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN2))
assert len(docs2) == 0
|
[
"shaypal5@gmail.com"
] |
shaypal5@gmail.com
|
634e12c0e89842b519a5bae4fcff0bcc9f6bc466
|
6e19835f99efea46d7b7966144efa8e2302d5e4c
|
/tensorflow/python/autograph/utils/misc_test.py
|
c813e0f5c96386a0d0fbd078bd5b663c688b0327
|
[
"Apache-2.0"
] |
permissive
|
Cincan/tensorflow
|
415fba147ef4676901f424a839d751aa7d1c50f0
|
94c9acddd9f3fd73a5e4b5bc1fd7c9284a68ea75
|
refs/heads/master
| 2020-04-08T14:07:14.355697
| 2018-11-28T00:59:40
| 2018-11-28T00:59:40
| 159,422,705
| 1
| 0
|
Apache-2.0
| 2018-11-28T01:08:35
| 2018-11-28T01:08:35
| null |
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for misc module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils.misc import alias_tensors
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops.variables import Variable
from tensorflow.python.platform import test
class MiscTest(test.TestCase):
def test_alias_single_tensor(self):
a = constant(1)
new_a = alias_tensors(a)
self.assertFalse(new_a is a)
with self.cached_session() as sess:
self.assertEqual(1, self.evaluate(new_a))
def test_alias_tensors(self):
a = constant(1)
v = Variable(2)
s = 'a'
l = [1, 2, 3]
new_a, new_v, new_s, new_l = alias_tensors(a, v, s, l)
self.assertFalse(new_a is a)
self.assertTrue(new_v is v)
self.assertTrue(new_s is s)
self.assertTrue(new_l is l)
with self.cached_session() as sess:
self.assertEqual(1, self.evaluate(new_a))
if __name__ == '__main__':
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
d3bb18f8490dbe42f2945f71dc53ab3f6ba81073
|
012aadc12dc2a4560eabc04527414c3883e87e3d
|
/myvenv/bin/autopep8
|
b7dcecda5a6c5b3d815b73491659a8d14741a669
|
[] |
no_license
|
kosiannpann/my-first-blog
|
a0c17286256e0d16a90b40b6b2f9beddebe9b03e
|
e41f4966da20785cabb9402e02a4119fb981fee1
|
refs/heads/master
| 2023-06-09T04:19:02.276691
| 2021-07-04T02:52:22
| 2021-07-04T02:52:22
| 376,177,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
#!/Users/ootadaiki/djangogirls/myvenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from autopep8 import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"you@example.com"
] |
you@example.com
|
|
a1169142ea4526aa901d36823d53da96429542b2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02898/s602471427.py
|
314d28278090b7657613ada6c2c07aa32f78faee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
from sys import stdin
import sys
import math
from functools import reduce
import functools
import itertools
from collections import deque,Counter,defaultdict
from operator import mul
import copy
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
import heapq
sys.setrecursionlimit(10**6)
# INF = float("inf")
INF = 10**18
import bisect
import statistics
mod = 10**9+7
# mod = 998244353
N, K = map(int, input().split())
h = list(map(int, input().split()))
ans = 0
for i in range(N):
if h[i] >= K:
ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
df7b698115c3ffbcc37de296d1f45a03dd270d4e
|
78144baee82268a550400bbdb8c68de524adc68f
|
/Production/python/Autumn18/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8_cff.py
|
76c72b7101248b1ce6344a581378b039f338ce9b
|
[] |
no_license
|
tklijnsma/TreeMaker
|
e6989c03189b849aff2007bad22e2bfc6922a244
|
248f2c04cc690ef2e2202b452d6f52837c4c08e5
|
refs/heads/Run2_2017
| 2023-05-26T23:03:42.512963
| 2020-05-12T18:44:15
| 2020-05-12T18:44:15
| 263,960,056
| 1
| 2
| null | 2020-09-25T00:27:35
| 2020-05-14T15:57:20
| null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/38EB0BD1-6F82-A44F-BF83-86E69D8B150E.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/3A2A6249-6A8F-D24F-A36F-4C441E9A6DF1.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/4041B441-D1EF-534F-B6BB-C2C07AB51940.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/795C52C1-CEAD-7F44-9D3B-8737D8AC54DE.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/BF2BF1E5-ECC7-9042-A2A8-B906E018E1F2.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/FBE70B20-508A-984A-9CBF-95601BA7E965.root',
] )
|
[
"Chris_Madrid@Baylor.edu"
] |
Chris_Madrid@Baylor.edu
|
0bf8b725ddbfa47071048214793a8fd56f8a68d9
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV.py
|
2ae9510b0d691b63af6e04ec0a6f9d672271926b
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV.json')
def test_storage_encoding_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
8f05bd2092972f6b401e756e15c2117a31a5a4ba
|
ad69b52951c2f80d152b9ce2225b9a588f110deb
|
/fan_element_struct.py
|
66a7720d1a15cb86b7b4a0c70052200350ac8318
|
[] |
no_license
|
hailangzz/fan_health_program
|
47c70fe884ec8e28b20be63f99d5c3004bb2a261
|
137d8a1a2271a44c68fe5a5b2b4e367023c0efad
|
refs/heads/master
| 2020-03-19T16:03:13.442179
| 2018-06-09T07:47:34
| 2018-06-09T07:47:34
| 136,698,139
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,210
|
py
|
#coding=utf-8
import numpy as np
import copy
def fan_element_struct():
    # Turbine status codes are not fixed yet, so they are added dynamically later; used for the per-turbine status-code frequency distribution. 'stames_alive_list' is the alive list for the different turbine statuses.
stames_code={'stamescode_number':0,'stamescode_time':0,'reduce_power':0,'stames_alive_list':[]}
    # Turbine faults and their occurrence counts
error={'uiHubErr':{'HubErr_code':{},'starttime':[]},
'uiErrFir':{'ErrFir_code':{},'starttime':[]},
'uiConErr':{'ConErr_code':{},'starttime':[]},
'uiYawErr':{'YawErr_code':{},'starttime':[]},
'uiWarFir':{'WarFir_code':{},'starttime':[]}
}
#windspeed_array=[set_wind_cut for set_wind_cut in np.arange(3,20,0.1)]
    # Stores the turbine's normal power-curve data; from it one can derive the wind-condition probability distribution and the total energy produced under each wind condition.
normal_power_curve={}
windspeed_array=np.arange(3,20,0.2)
for wind_cut in windspeed_array:
if wind_cut not in normal_power_curve:
normal_power_curve[round(wind_cut,1)]={'total_power':0,'registe_number':0,'poweravg':0}
hzth_standard_wind_power={}
hzth_power_list=[123,142,164,189,213,239,268,300,331,366,398,434,470,514,552,593,630,661,707,742,806,843,893,953,1001,1049,1095,1147,1204,1248,1293,
1353,1398,1428,1465,1481,1493,1501,1514,1528,1540,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,
1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552
]
windspeed_array=np.arange(3,20,0.2)
for wind_cut_id in range(len(windspeed_array)):
if windspeed_array[wind_cut_id] not in hzth_standard_wind_power:
hzth_standard_wind_power[round(windspeed_array[wind_cut_id],1)]={'poweravg':0}
hzth_standard_wind_power[round(windspeed_array[wind_cut_id],1)]['poweravg']=hzth_power_list[wind_cut_id]
    # Frequency distribution of all turbine output-power values
power_status_distribute={}
power_status=np.arange(0,1800,10)
for power_cut in power_status:
if power_cut not in power_status_distribute:
power_status_distribute[power_cut]={'registe_number':0}
    # Frequency distribution of wind conditions at the turbine
wind_status_distribute={}
wind_array=np.arange(0,20,0.2)
for wind_cut in wind_array:
if wind_cut not in wind_status_distribute:
wind_status_distribute[round(wind_cut,1)]={'registe_number':0}
fChoGenTemAve_status_distribute={}
temperature1_cut=np.arange(0,200,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fChoGenTemAve_status_distribute:
fChoGenTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGeaBeaTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGeaBeaTemAve_status_distribute:
fGeaBeaTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGeaOilTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGeaOilTemAve_status_distribute:
fGeaOilTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGenTemAve_status_distribute={}
temperature1_cut=np.arange(0,200,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGenTemAve_status_distribute:
fGenTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGenBeaDriTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGenBeaDriTemAve_status_distribute:
fGenBeaDriTemAve_status_distribute[temp1_cut]={'registe_number':0}
fConGsclgbTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fConGsclgbTemAve_status_distribute:
fConGsclgbTemAve_status_distribute[temp1_cut]={'registe_number':0}
    tenminlog={'wind_status_distribute':{},        # wind-condition frequency distribution
               'power_status_distribute':{},       # normal-power frequency distribution
               'fChoGenTemAve_distribute':{},      # generator induction-coil temperature frequency distribution
               'fGeaBeaTemAve_distribute':{},      # gearbox temperature frequency distribution
               'fGeaOilTemAve_distribute':{},      # gearbox oil temperature frequency distribution
               'fGenTemAve_distribute':{},
               'fGenBeaDriTemAve_distribute':{},
               'fConGsclgbTemAve_distribute':{},
               'normal_power_splat':{'wind_list':[],'power_list':[]},      # wind/power scatter points at normal operation
               'all_power_splat':{'wind_list':[],'power_list':[]},         # wind/power scatter points for all samples
               'selflimite_power_splat':{'wind_list':[],'power_list':[]},  # scatter points during over-temperature power limiting
               'limite_power_splat':{'wind_list':[],'power_list':[]},
               'stop_power_splat':{'wind_list':[],'power_list':[]},
               # statistics of over-temperature power-limited data
               'over_temperature':{'fChoGenTemAve':{'number':0,'total_time':0},
                                   'fGeaBeaTemAve':{'number':0,'total_time':0},
                                   'fGeaOilTemAve':{'number':0,'total_time':0},
                                   'fGenTemAve':{'number':0,'total_time':0},
                                   'fGenBeaDriTemAve':{'number':0,'total_time':0},
                                   'fConGsclgbTemAve':{'number':0,'total_time':0}
                                   },
               'totalpower':0,          # total energy produced by the turbine
               'normal_totalpower':0,   # total energy produced during normal operation
               'selflimite_totaltime':0,
               'limite_totaltime':0,
               'stop_totaltime':0,
               'over_temperature_totaltime':0,
               'hzth_increase_totalpower':0,
               'selflimite_reducepower':0,  # energy lost to power limiting
               'limite_reducepower':0,
               'stop_reducepower':0,
               'fChoGenTemAve':{'registe_id':[],'temperature':[]},
               'fGeaBeaTemAve':{'registe_id':[],'temperature':[]},
               'fGeaOilTemAve':{'registe_id':[],'temperature':[]},
               'fGenTemAve':{'registe_id':[],'temperature':[]},
               'fGenBeaDriTemAve':{'registe_id':[],'temperature':[]},
               'fConGsclgbTemAve':{'registe_id':[],'temperature':[]}
               # probability-distribution statistics of component temperatures
               }
    # Initialize the 'tenminlog' structure
tenminlog['wind_status_distribute']=copy.deepcopy(wind_status_distribute)
tenminlog['power_status_distribute']=copy.deepcopy(power_status_distribute)
tenminlog['fChoGenTemAve_distribute']=copy.deepcopy(fChoGenTemAve_status_distribute)
tenminlog['fGeaBeaTemAve_distribute']=copy.deepcopy(fGeaBeaTemAve_status_distribute)
tenminlog['fGeaOilTemAve_distribute']=copy.deepcopy(fGeaOilTemAve_status_distribute)
tenminlog['fGenTemAve_distribute']=copy.deepcopy(fGenTemAve_status_distribute)
tenminlog['fGenBeaDriTemAve_distribute']=copy.deepcopy(fGenBeaDriTemAve_status_distribute)
tenminlog['fConGsclgbTemAve_distribute']=copy.deepcopy(fConGsclgbTemAve_status_distribute)
fan_element={'stames':{},'error':{},'tenminlog':{},'normal_power_curve':{},'fanset_information':{'fanid':0,'fanname':'','fanip':'','fantype':0,'plctype':0}}
fan_element['error']=copy.deepcopy(error)
fan_element['tenminlog']=copy.deepcopy(tenminlog)
fan_element['normal_power_curve']=copy.deepcopy(normal_power_curve)
fan_element['hzth_standard_wind_power']=copy.deepcopy(hzth_standard_wind_power)
fan_root_dict={}
return fan_root_dict,fan_element,stames_code
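
# A minimal usage sketch (not from the original module): build one turbine
# record under a placeholder id ('fan_001') and fill in its basic information.
if __name__ == '__main__':
    fan_root_dict, fan_element, stames_code = fan_element_struct()
    fan_root_dict['fan_001'] = copy.deepcopy(fan_element)
    fan_root_dict['fan_001']['fanset_information']['fanid'] = 1
    print(fan_root_dict['fan_001']['fanset_information'])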
|
[
"344267342@qq.com"
] |
344267342@qq.com
|
b7f51ac07e35b2adf6dab304ed1b86b064e9a447
|
29cc0a662b62078e553c461f05ef999c76c0f51f
|
/Lab_01/connection.py
|
7f6f22939e3081ad8120f3d5f4badfa55ace0957
|
[] |
no_license
|
fefeagus/Redes_Sistemas_Distribuidos_2015
|
bd2978f439389d8f50cbe55a9681cede2530de26
|
eee77359891d6c52083c2bd116c2ae65cf36af14
|
refs/heads/master
| 2023-04-14T13:46:13.935385
| 2017-09-12T03:37:50
| 2017-09-12T03:37:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,300
|
py
|
# encoding: utf-8
# Copyright 2014 Carlos Bederián
# $Id: connection.py 455 2011-05-01 00:32:09Z carlos $
import os
import socket
from constants import *
import server
class Connection(object):
"""
Conexión punto a punto entre el servidor y un cliente.
Se encarga de satisfacer los pedidos del cliente hasta
que termina la conexión.
"""
def __init__(self, socket, directory):
# Inicialización de conexión
self.sock = socket
self.dir = directory
self.buff_in = ''
self.buff_out = ''
self.connection_active = True
def es_nombre_valido(self, name_file):
"""
Devuelve True si el nombre ingresado contiene caracteres validos
o False en caso contrario.
"""
nombre = set(name_file) - VALID_CHARS
return nombre == set([])
def send_buffer(self):
"""
Envia datos para ser recibidos por el cliente.
"""
while self.buff_out:
cant_bytes = self.sock.send(self.buff_out)
assert cant_bytes > 0
self.buff_out = self.buff_out[cant_bytes:]
def unknown_command(self):
"""
Mensaje de comando inválido.
"""
self.buff_out += str(INVALID_COMMAND)
self.buff_out += space + error_messages[INVALID_COMMAND] + EOL
self.send_buffer()
def wrong_arg_q(self):
"""
Mensaje de argumentos inválidos.
"""
self.buff_out += str(INVALID_ARGUMENTS)
self.buff_out += space + error_messages[INVALID_ARGUMENTS] + EOL
self.send_buffer()
def file_not_found(self):
"""
Mensaje de archivo inexistente.
"""
self.buff_out += str(FILE_NOT_FOUND)
self.buff_out += space + error_messages[FILE_NOT_FOUND] + EOL
self.send_buffer()
def bad_offset(self):
"""
Mensaje de posicion inexistente en un archivo.
"""
self.buff_out += str(BAD_OFFSET)
self.buff_out += space + error_messages[BAD_OFFSET] + EOL
self.send_buffer()
def bad_eol(self):
"""
Mensaje de que se encontro un caracter r\n fuera de un terminador
de pedido EOL.
"""
self.buff_out += str(BAD_EOL)
self.buff_out += space + error_messages[BAD_EOL] + EOL
self.send_buffer()
def get_file_listing(self):
"""
Lista los archivos de un directorio.
"""
try:
lista = os.listdir(self.dir)
except:
print('INTERNAL SERVER ERROR')
raise INTERNAL_ERROR
else:
self.buff_out += "0 OK" + EOL
for x in lista:
self.buff_out += x
self.buff_out += EOL
self.buff_out += EOL
self.send_buffer()
def get_metadata(self, name_file):
"""
Devuelve el tamaño del archivo dado (en bytes).
"""
is_valid_name = self.es_nombre_valido(name_file)
file_exist = os.path.isfile(os.path.join(self.dir, name_file))
        if not is_valid_name:  # the file name contains invalid characters
            self.wrong_arg_q()
        elif not file_exist:
            self.file_not_found()
        # Internal server error
else:
try:
data = os.path.getsize(os.path.join(self.dir, name_file))
except:
print('INTERNAL SERVER ERROR')
raise INTERNAL_ERROR
else:
self.buff_out += "0 OK" + EOL + str(data) + EOL
self.send_buffer()
def get_slice(self, avl_file, offset, size):
"""
Leer y muestra los datos del archivo ingresado desde el OFFSET hasta
OFFSET + SIZE.
"""
file_exist = os.path.isfile(os.path.join(self.dir, avl_file))
if not file_exist:
self.file_not_found()
else:
try:
offset2 = int(offset)
size2 = int(size)
except ValueError:
self.wrong_arg_q()
else:
size_file = size2
start_read = offset2
len_file = os.path.getsize(os.path.join(self.dir, avl_file))
offset_plus = start_read > len_file
size_plus = (start_read + size_file) > len_file
if offset_plus or size_plus:
self.bad_offset()
else:
try:
file_open = open(os.path.join(self.dir, avl_file), 'r')
except IOError:
print("el archivo no se pudo abrir")
raise INTERNAL_ERROR
file_open.seek(start_read)
self.buff_out += "0 OK" + EOL
remain = size_file
while remain > 0:
last_part = min(remain, SIZE_READ)
bytes_read = file_open.read(last_part)
self.buff_out += str(len(bytes_read))
self.buff_out += space + bytes_read + EOL
remain -= len(bytes_read)
self.send_buffer()
self.buff_out += "0 " + EOL
self.send_buffer()
def quit(self):
"""
Cierra la conexion al cliente.
"""
self.buff_out += str(CODE_OK) + " Listo!" + EOL
self.send_buffer()
self.sock.close()
self.connection_active = False
def analizar(self, command):
"""
Analiza si el pedido esta bien escrito y si contiene la cantidad
de argumentos necesarios para cada método.
"""
c_tmp = command.split(space)
if c_tmp[0] == 'get_file_listing':
if len(c_tmp) == 1:
self.get_file_listing()
else:
self.wrong_arg_q()
elif c_tmp[0] == 'get_metadata':
if len(c_tmp) != 2 or c_tmp[1] == '':
self.wrong_arg_q()
else:
self.get_metadata(c_tmp[1])
elif c_tmp[0] == 'get_slice':
if len(c_tmp) == 4:
self.get_slice(c_tmp[1], c_tmp[2], c_tmp[3])
else:
self.wrong_arg_q()
elif c_tmp[0] == 'quit':
if len(c_tmp) == 1:
self.quit()
else:
self.wrong_arg_q()
else:
self.unknown_command()
def handle(self):
"""
Atiende eventos de la conexión hasta que termina.
"""
# Maneja recepciones y envíos hasta desconexión
while self.connection_active:
            # Receive data until an EOL arrives
while EOL not in self.buff_in:
rec = self.sock.recv(SIZE_READ)
self.buff_in += rec
            # Split the first request off the buffer
request, self.buff_in = self.buff_in.split(EOL, 1)
            # Check for a type-100 error (stray \r\n inside the request)
if new_line in request:
self.bad_eol()
            # Process the first received request
else:
self.analizar(request)
        # Close the socket on disconnection
self.sock.close()
|
[
"ferreyramario7@gmail.com"
] |
ferreyramario7@gmail.com
|
20413f0c344df7cbdad1bb7338a11aa39fc9861d
|
48460db1a6fdc6c09845c86cf5fa257f1a32f08a
|
/leetcode/medium/0949_Largest_Time_for_Given_Digits.py
|
0a85a9dd6fad4107f8c6a0e5a7d7bc8004502a85
|
[] |
no_license
|
MichalBrzozowski91/algorithms
|
9d0b085621ed94b1aff5473663fbdc686463cd8d
|
ae57535b574a800c6300eae7d55b21f2432c3baa
|
refs/heads/master
| 2022-12-20T08:00:59.385002
| 2020-09-30T16:32:33
| 2020-09-30T16:32:33
| 290,835,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
from typing import List


class Solution:
def largestTimeFromDigits(self, A: List[int]) -> str:
B = A.copy()
for firstDigitLimit in [2,1]:
A = B.copy()
result = ''
temp = [a for a in A if a in range(firstDigitLimit + 1)]
if not temp:
return ''
dig = max(temp)
result += str(dig)
A.remove(dig)
# Second digit
if dig == 2:
temp = [a for a in A if a in [0,1,2,3]]
else:
temp = A
if not temp:
continue
dig = max(temp)
result += str(dig)
A.remove(dig)
# Third digit
temp = [a for a in A if a in [0,1,2,3,4,5]]
if not temp:
continue
dig = max(temp)
result += ':' + str(dig)
A.remove(dig)
# Fourth digit
dig = A[0]
result += str(dig)
return result
return ''
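
# A minimal usage sketch (not part of the original submission).
if __name__ == '__main__':
    s = Solution()
    print(s.largestTimeFromDigits([1, 2, 3, 4]))  # 23:41
    print(s.largestTimeFromDigits([5, 5, 5, 5]))  # empty string: no valid time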
|
[
"noreply@github.com"
] |
MichalBrzozowski91.noreply@github.com
|
172e43d93c0b543dc370d654dd22753e9dd1cdfd
|
f7574ee7a679261e758ba461cb5a5a364fdb0ed1
|
/MergeSortedArray.py
|
25c884f75350c4b5cb98ff52b73b35e165289aaa
|
[] |
no_license
|
janewjy/Leetcode
|
807050548c0f45704f2f0f821a7fef40ffbda0ed
|
b4dccd3d1c59aa1e92f10ed5c4f7a3e1d08897d8
|
refs/heads/master
| 2021-01-10T19:20:22.858158
| 2016-02-26T16:03:19
| 2016-02-26T16:03:19
| 40,615,255
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
i = 0
j = 0
for j in range(n):
while i < m+j and nums1[i] < nums2[j]:
i += 1
nums1.insert(i,nums2[j])
i += 1
nums1[m+j+1:] = nums2[j+1:]
    # insert() slows the code down
def merge2(self, nums1, m, nums2, n):
l1, l2, end = m-1, n-1, m+n-1
while l1 >= 0 and l2 >= 0:
if nums1[l1] > nums2[l2]:
nums1[end] = nums1[l1]
l1 -= 1
else:
nums1[end] = nums2[l2]
l2 -= 1
end -= 1
if l1 < 0:
nums1[:l2+1] = nums2[:l2+1]
# 1-28
class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
i,j,cur = m-1,n-1,m+n-1
while i>=0 and j>=0:
if nums1[i] > nums2[j]:
nums1[cur] = nums1[i]
i-=1
else:
nums1[cur] = nums2[j]
j -= 1
cur -= 1
if i < 0:
nums1[:cur+1] = nums2[:j+1]
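
# A minimal usage sketch (not part of the original file); it exercises the
# second, in-place two-pointer version defined last above.
if __name__ == '__main__':
    nums1 = [1, 2, 3, 0, 0, 0]
    Solution().merge(nums1, 3, [2, 5, 6], 3)
    print(nums1)  # [1, 2, 2, 3, 5, 6]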
|
[
"janewjy87@gmail.com"
] |
janewjy87@gmail.com
|
74c5c8c7b320b2dfc6dc3ab53abcf9739fd64eaa
|
343bdaddfc66c6316e2cee490e9cedf150e3a5b7
|
/0001_0100/0076/0076.py
|
851fe3c3ab0a773579c4237f01aaebb9804a5a57
|
[] |
no_license
|
dm-alexi/acmp
|
af7f6b4484b78f5922f3b464406a0ba5dea0d738
|
3fa0016d132adfeab7937b3e8c9687a34642c93a
|
refs/heads/master
| 2021-07-09T15:14:25.857086
| 2020-10-20T19:08:54
| 2020-10-20T19:08:54
| 201,908,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
def timeint(s):
    # Convert an "HH:MM" string to minutes since midnight.
    return int(s[:2]) * 60 + int(s[3:])
with open("input.txt", "r") as f, open("output.txt", "w") as q:
m = 0
inlist, outlist = [], []
n = int(f.readline())
for i in range(n):
a, b = (timeint(x) for x in f.readline().split())
inlist.append(a)
outlist.append(b)
inlist.sort()
outlist.sort()
    # Sweep the sorted arrival and departure times, keeping a running count
    # of open intervals; m ends up as the maximum simultaneous overlap.
    i, j, c = 0, 0, 0
while i < n:
if inlist[i] <= outlist[j]:
i += 1
c += 1
if c > m:
m = c
else:
j += 1
c -= 1
q.write(str(m))
|
[
"dm2.alexi@gmail.com"
] |
dm2.alexi@gmail.com
|
24a2b2bd01037bb5984627af29d73e874afe85da
|
94ea21700381f12b72649a59d2c90ae32c7e04f0
|
/addons/hc_medication_administration/models/hc_res_medication_administration.py
|
43a24cc10010e8dd270762918187fcb536cf5171
|
[] |
no_license
|
messakali/odoo-fhir
|
c07e2d058763580de2929d4c84ebd4717ac15c43
|
1f5c28a3fdd788370696a5f75ab68a2acfe16d25
|
refs/heads/master
| 2021-01-10T22:35:55.158494
| 2016-09-28T17:21:56
| 2016-09-28T17:21:56
| 69,700,012
| 0
| 1
| null | 2016-09-30T20:30:57
| 2016-09-30T20:30:56
| null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
# class hc_medication_administration(models.Model):
# _name = 'hc_medication_administration.hc_medication_administration'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100
|
[
"lsison@moxylus.com"
] |
lsison@moxylus.com
|
f3a1b7d5b8f3c6718af758c89fae01723081f4ca
|
ca0757ab59d6420efae766dae80a539a3b692fbd
|
/apps/ippcdrupal/auth_backends.py
|
ba28716f1c21d575cec5c18d2e1d8708a507320f
|
[] |
no_license
|
hypertexthero/itwishlist
|
bc1cfe7f3542a395ab439ee5aa71c1991baaadff
|
148a085238ae86ee07255f94d3a48a92190ce5c5
|
refs/heads/master
| 2020-06-05T01:00:41.981168
| 2013-08-30T15:06:52
| 2013-08-30T15:06:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,052
|
py
|
# =todo: authenticate against drupal users db
# Looks like we'll need to upgrade to Django 1.4...
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth.models import User, check_password
from itwishlist.apps.ippcdrupal.models import DrupalUsers
# from itwishlist.apps.ippcdrupal.hashers import is_password_usable, get_hasher
# =todo: upgrade to django 1.4
# from django.contrib.auth.hashers import is_password_usable, get_hasher
from django.utils.encoding import smart_str
# http://stackoverflow.com/questions/16482531/django-registration-custom-backend
# class DrupalUserAuthBackend(object):
# """
# Authenticates against django.contrib.auth.models.User. with my modifications
# """
# supports_inactive_user = True
#
# """
# This function does not upgrade the user password hasher
# """
# def check_password(self, password, encoded):
# if not password or not is_password_usable(encoded):
# # is_password_usable is only available in Django 1.4
# # https://docs.djangoproject.com/en/1.4/topics/auth/#django.contrib.auth.hashers.is_password_usable
# # if not password:
# return False
#
# password = smart_str(password)
# encoded = smart_str(encoded)
#
# if encoded[0] == "$":
# encoded = encoded[1:] # make it compatible so that drupal 7 sha512 hasher can work properly
#
# if len(encoded) == 32 and '$' not in encoded:
# hasher = get_hasher('unsalted_md5')
# else:
# algorithm = encoded.split('$', 1)[0]
# hasher = get_hasher(algorithm)
#
# is_correct = hasher.verify(password, encoded)
#
# return is_correct
#
# def authenticate(self, username=None, password=None, db=None, **kwargs):
# try:
# user = DrupalUsers.objects.using(db).get(name=username) # name in ippcdrupal.models.DrupalUsers
# if self.check_password(password, user.pass_field):
# return user
# except DrupalUsers.DoesNotExist:
# return None
# # http://query7.com/django-authentication-backends
# http://djangosnippets.org/snippets/2729/
# from account.models import Account
# from itwishlist.apps.ippcdrupal.drupalhasher.DrupalPasswordHasher import verify
# from django.contrib.auth.models import User
#
# class DrupalUserAuthBackend(object):
#
# def authenticate(self, username, password):
#
# try:
# account = DrupalUsers.objects.using('drupaldb').get(username=username, sha_pass_hash=verify(username, password))
#
# try:
# user = User.objects.get(username=username)
#
# except User.DoesNotExist:
#
# user = User(username=account.username)
# user.is_staff = False
# user.is_superuser = False
# user.set_unusable_password()
# user.save()
#
# return user
#
# except Account.DoesNotExist:
#
# return None
#
# def get_user(self, id):
# try:
# return User.objects.get(id=id)
# except User.DoesNotExist:
# return None
class DrupalUserAuthBackend:
    """
    Authenticates against the Drupal users table
    (ippcdrupal.models.DrupalUsers).
    """
    supports_object_permissions = False
    supports_anonymous_user = False
    supports_inactive_user = False

    def authenticate(self, username=None, password=None):
        try:
            user = DrupalUsers.objects.using('drupaldb').get(name=username)
        except DrupalUsers.DoesNotExist:
            # A mirror Django user could be created here instead; its password
            # could be anything, because it would never be checked.
            # user = User(username=username, password='test')
            # user.is_staff = False
            # user.is_active = False
            # user.is_superuser = False
            # user.save()
            return None
        # pass_field holds Drupal's own hash format, so this check only
        # succeeds once a Drupal-compatible hasher is wired up (see the
        # commented-out DrupalPasswordHasher experiments above).
        if check_password(password, user.pass_field):
            return user
        return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
# class DrupalUserAuthBackend(object):
# """
# Authenticates against ippcdrupal.models.DrupalUsers
# """
#
# def authenticate(self, username=None, password=None, **kwargs):
# # UserModel = get_user_model()
# # if username is None:
# # username = kwargs.get(UserModel.USERNAME_FIELD)
# try:
# user = DrupalUsers.objects.using('drupaldb').get(name=username) # name in ippcdrupal.models.DrupalUsers
# # if check_password(password):
# if check_password(password):
# return user
# except DrupalUsers.DoesNotExist:
# return None
# class SettingsBackend(object):
# """
# Authenticate against the settings ADMIN_LOGIN and ADMIN_PASSWORD.
#
# Use the login name, and a hash of the password. For example:
#
# ADMIN_LOGIN = 'admin'
# ADMIN_PASSWORD = 'sha1$4e987$afbcf42e21bd417fb71db8c66b321e9fc33051de'
# """
#
# def DrupalUserAuth(self, username=None, password=None, db=None, **kwargs):
# login_valid = (settings.ADMIN_LOGIN == username)
# pwd_valid = check_password(password, settings.ADMIN_PASSWORD)
# if login_valid and pwd_valid:
# try:
# user = User.objects.using(db).get(username=name)
# if user.check_password(password):
# return user
# # user = User.objects.get(username=username)
# # except User.DoesNotExist:
# # # Create a new user. Note that we can set password
# # # to anything, because it won't be checked; the password
# # # from settings.py will.
# # user = User(username=username, password='get from settings.py')
# # user.is_staff = True
# # user.is_superuser = True
# # user.save()
# return user
# return None
#
# def get_user(self, user_id):
# try:
# return User.objects.get(pk=user_id)
# except User.DoesNotExist:
# return None
#
# from __future__ import unicode_literals
# from django.contrib.auth import get_user_model
# from django.contrib.auth.models import Permission
#
# class DrupalUserAuth(object):
# """
# Authenticates against django.contrib.auth.models.User.
# """
#
# def authenticate(self, username=None, password=None, db=None, **kwargs):
# UserModel = get_user_model()
# if username is None:
# username = kwargs.get(UserModel.USERNAME_FIELD)
# try:
# user = UserModel.objects.using(db).get(username=username)
# if user.check_password(password):
# return user
# except UserModel.DoesNotExist:
# return None
|
[
"simon@hypertexthero.com"
] |
simon@hypertexthero.com
|
b2e0397ffe57b93e5e6ae261bde6a10fee12cd3a
|
b213c8b10b831d5fdacfb65c145450f6af846a4f
|
/blog/blog.py
|
ce23082f7c014309cc37d87c9b6217fc56981450
|
[] |
no_license
|
tuomas56/random-python-stuff
|
1df260532abeb0a3da02560ed23ad1ee1995f5b2
|
12737127a31f1a3b84456021e8a5ac81545324da
|
refs/heads/master
| 2020-12-31T04:42:12.123345
| 2015-11-25T21:54:28
| 2015-11-25T21:54:28
| 46,889,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,649
|
py
|
from bottle import server_names, ServerAdapter, run, request, Bottle, redirect, response, abort, template
import markdown
import re
import os
import pickle
import uuid
import scrypt
import base64
from datetime import datetime, timedelta
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
from config import SSL_PRIV_KEY, PASS_DB, SALT_DB, HASH_TIME
SCRIPT_RE = re.compile(r"<script>(.*?)</script>")
# Parse the configured "HH:MM:SS" hash lifetime into a timedelta (the config
# value is assumed to be a string in that format).
_hash_time = datetime.strptime(HASH_TIME, "%H:%M:%S")
HASH_TIME = timedelta(hours=_hash_time.hour, minutes=_hash_time.minute,
                      seconds=_hash_time.second)
InvalidUserPass = RuntimeError("Invalid username or password.")
class SSLCherryPy(ServerAdapter):
def run(self, handler):
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
server.ssl_adapter = BuiltinSSLAdapter(SSL_PRIV_KEY, SSL_PRIV_KEY)
try:
server.start()
finally:
server.stop()
server_names['sslcherrypy'] = SSLCherryPy
def enable_cors(fn):
def _enable_cors(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
return fn(*args, **kwargs)
return _enable_cors
app = Bottle()
current_hashes = {}
with open(PASS_DB, "rb") as f:
pass_db = pickle.load(f)
with open(SALT_DB, "rb") as f:
salt_db = pickle.load(f)
class HashData:
def __init__(self, hash, expiry, user):
self.hash = hash
self.expiry = expiry
self.user = user
def expired(self):
return self.expiry < datetime.now()
def authenticated(fn):
def _authenticated(hash, *args, **kwargs):
if hash in current_hashes:
if not current_hashes[hash].expired():
return fn(current_hashes[hash], *args, **kwargs)
else:
del current_hashes[hash]
redirect('/login/expired')
else:
redirect('/login/expired')
return _authenticated
def action_login(user, passwd):
if user not in pass_db or pass_db[user] != passwd_hash(user, passwd):
raise InvalidUserPass
else:
return generate_hash(user)
def generate_hash(user):
    expiry = datetime.now() + HASH_TIME
    hash = uuid.uuid4().hex
    return HashData(hash, expiry, user)
def generate_salt():
return base64.b64encode(os.urandom(16)).decode()
def passwd_hash(user, passwd):
    # scrypt.hash returns bytes; prefix the user's salt so the stored value
    # is self-describing.
    return salt_db[user].encode() + scrypt.hash(passwd, salt_db[user])
@app.route("/do/login/<user>/<passwd>")
@enable_cors
def do_login(user, passwd):
    try:
        # Key the session by its hash so the `authenticated` lookup works.
        hash_data = action_login(user, passwd)
        current_hashes[hash_data.hash] = hash_data
        redirect('/home/%s' % hash_data.hash)
except RuntimeError:
redirect('/login/invalid')
@app.route("/login/<error>")
def login(error):
return template('pages/login.html.tpl', error=login_error(error))
def login_error(error):
    if error == 'invalid':
        return 'Invalid username or password.'
    elif error == 'expired':
        return 'Hash has expired; please login.'
    elif error == 'none':
        return ''
else:
raise RuntimeError("No such login error.")
class Article:
def __init__(self, author, date_written, tags, text):
self.author = author
self.date_written = date_written
self.tags = tags
self.text = text
class Comment:
def __init__(self, author, date_posted, parent, article, text):
self.author = author
self.date_posted = date_posted
self.parent = parent
self.article = article
self.text = text
def process_article(text):
lines = text.split("\n")
author, date_written, tags, *lines = lines
date_written = datetime.strptime(date_written, "%d/%m/%Y %H:%M")
tags = tags.split(",")
text = markdown.markdown('\n'.join(lines))
return Article(author, date_written, tags, text)
def process_comment(author, date_posted, parent, article, text):
    text = SCRIPT_RE.sub(r"<code>\1</code>", markdown.markdown(text))
    return Comment(author, datetime.strptime(date_posted, "%d/%m/%Y %H:%M"),
                   parent, article, text)
|
[
"pigworts2@gmail.com"
] |
pigworts2@gmail.com
|
b191a119c6debbe2643f12b03216b61002e09590
|
8f4c59e69cce2f6e932f55b3c65aae376b206a2c
|
/笨办法学python/ex47/skeleton/tests/ex47_tests.py
|
00d322ae6ea3a7f953674e7ad506bc4a1713fde2
|
[] |
no_license
|
zmjm4/python
|
ef7206292f1c3a3a5763b25527024999de5e8e79
|
44cf74c0f16891c351ce214762218ccf2d7353a0
|
refs/heads/master
| 2020-05-27T17:23:48.776167
| 2018-05-24T07:14:16
| 2018-05-24T07:14:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
# -*- coding: utf-8 -*-
from nose.tools import *
from ex47.game import Room
def test_room():
gold=Room("GoldRoom",
"""This room has gold in it you can grab. There's a
door to the north.""")
assert_equal(gold.name,"GoldRoom")
assert_equal(gold.paths,{})
def test_room_paths():
center = Room("Center", "Test room in the center.")
north = Room("North", "Test room in the north.")
south = Room("South", "Test room in the south.")
center.add_paths({'north':north,'south':south})
assert_equal(center.go('north'),north)
assert_equal(center.go('south'),south)
def test_map():
start = Room("Start", "You can go west and down a hole.")
west = Room("Trees", "There are trees here, you can go east.")
down = Room("Dungeon", "It's dark down here, you can go up.")
start.add_paths({'west':west,'down':down})
west.add_paths({'east':start})
down.add_paths({'up':start})
assert_equal(start.go('west'),west)
assert_equal(start.go('west').go('east'),start)
assert_equal(start.go('down').go('up'),start)
|
[
"715073608@qq.com"
] |
715073608@qq.com
|
58c46c9a110a1eb99789632d26ae3ae38b04e23d
|
9463d85666453fd8e57a0ce9e515e4765ae2b60a
|
/cwetsy/cwetsy/parser/browse_parser.py
|
a049cb5b8bc4542890ee7856ce7379b97e183bed
|
[
"MIT"
] |
permissive
|
trujunzhang/djzhang-targets
|
dc6c3086553a5450fb239cc1cef5330a51a02e1f
|
c2e327acde9d51f0455e7243f17d93d74b579501
|
refs/heads/master
| 2021-01-09T20:52:31.258826
| 2016-07-16T13:18:53
| 2016-07-16T13:18:53
| 60,747,429
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from cwetsy.parser.base_parser import BaseParser
class BrowseParser(BaseParser):
def __init__(self):
super(BrowseParser, self).__init__()
def parse(self, url, hxs):
return None
|
[
"trujunzhang@gmail.com"
] |
trujunzhang@gmail.com
|
68e4256f5b371f2525935ebc77355c859a1a2757
|
2993adb383fed317e6a83f2b8c2cacd640d19fb3
|
/bookmarks/account/authentication.py
|
2a9db5db7359fcc4a5a5ddcca0b1e3170ebbf911
|
[] |
no_license
|
Dyavathrocky/socialapp
|
0e811a957a224b30aa32e8a24e3253c1b49a25df
|
1dc071b69f9258c4f540211e25635ac277a6f6e4
|
refs/heads/master
| 2022-12-02T03:42:32.778466
| 2020-08-21T13:19:25
| 2020-08-21T13:19:25
| 286,060,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
from django.contrib.auth.models import User
class EmailAuthBackend(object):
"""
Authenticate using an e-mail address.
"""
def authenticate(self, request, username=None, password=None):
try:
user = User.objects.get(email=username)
if user.check_password(password):
return user
return None
except User.DoesNotExist:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
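
# A minimal settings sketch (not part of the original file): the backend only
# takes effect once it is listed in AUTHENTICATION_BACKENDS. The dotted path
# assumes this file lives at account/authentication.py, as in this repo.
#
# AUTHENTICATION_BACKENDS = [
#     'django.contrib.auth.backends.ModelBackend',
#     'account.authentication.EmailAuthBackend',
# ]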
|
[
"davathrak@gmail.com"
] |
davathrak@gmail.com
|
9478688498c1c1a485af4ce8894c0f2948b2b74b
|
6223dc2e5de7921696cb34fb62142fd4a4efe361
|
/.metadata/.plugins/org.eclipse.core.resources/.history/25/0083d7fa3b6a00141afa8a8ed49a3dc2
|
7b564846565a3335ffc0ed085fe8f0d38b42e923
|
[] |
no_license
|
Mushirahmed/python_workspace
|
5ef477b2688e8c25b1372f546752501ee53d93e5
|
46e2ed783b17450aba29e4e2df7b656522b2b03b
|
refs/heads/master
| 2021-03-12T19:24:50.598982
| 2015-05-25T10:23:54
| 2015-05-25T10:23:54
| 24,671,376
| 0
| 1
| null | 2015-02-06T09:27:40
| 2014-10-01T08:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,466
|
#!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
#from operator import add
#import copy
#from gnuradio import gr
import gras
class expo(gras.Block):
"""
docstring for block expo
"""
def __init__(self):
gras.Block.__init__(self,
name="expo",
in_sig=[numpy.float32],
out_sig=[numpy.float32])
def set_parameters(self,g,a,b):
self.gama=g
self.alpha=a
self.beta=b
def yield_times(self):
from datetime import date, time, datetime, timedelta
start = datetime.combine(date.today(), time(0, 0))
yield start.strftime("%S")
while True:
start += timedelta(seconds=0.5)
yield start.strftime("%S")
    def work(self, input_items, output_items):
        in0 = input_items[0]
        out = output_items[0]
        # Debug timestamps; generated but not used in the computation itself.
        gen = self.yield_times()
        tmrg = [gen.next() for _ in range(20)]
        o1, o2, o3, ans = [], [], [], []
        # Sample what appears to be the step response of a system with real
        # poles at -alpha and -beta: a constant term plus two decaying
        # exponentials, all scaled by gama.
        for i in range(0, 20):
            o1.append(self.gama / (self.alpha * self.beta))
            o2.append(self.gama * numpy.exp(-(self.alpha * in0[0] * i)) / (self.alpha * (self.beta - self.alpha)))
            o3.append(self.gama * numpy.exp(-(self.beta * in0[0] * i)) / (self.beta * (self.alpha - self.beta)))
            ans.append(o1[i] + o2[i] + o3[i])
        print "Final Ans : ", ans
        # Each assignment overwrites out[0], so only the last sample survives;
        # one item is consumed and produced per call.
        for value in ans:
            out[0] = value
        print "Output Sent : ", out
        self.consume(0, 1)
        self.produce(0, 1)
|
[
"imushir@gmail.com"
] |
imushir@gmail.com
|
|
d3e241d4b04a38c79e01d0b0348b62f60c6c72fa
|
b44ba1ca68154a37936ae3822ca016b5d9a99a2a
|
/Redis/redis_pipe.py
|
bfde6e48477131440764d56064521f1f1f917c54
|
[] |
no_license
|
liuxingrichu/advanced-network-program
|
6e17d30980e21b3397ac5ed5e404a282983a6869
|
3f84c4600a35af12a68a4c512afbe60ddf6347b1
|
refs/heads/master
| 2021-01-23T02:05:45.933255
| 2017-08-06T09:15:54
| 2017-08-06T09:15:54
| 85,964,385
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import redis
import time
'''
Use a pipeline so that a single request executes multiple commands.
'''
# db can be chosen from the range 0-15
pool = redis.ConnectionPool(host='localhost', port=6379, db=12)
r = redis.Redis(connection_pool=pool)
# pipe = r.pipeline(transaction=False)
pipe = r.pipeline(transaction=True)
pipe.set('name', 'Tom')
# Commands are buffered client-side; nothing is sent to the server until
# execute(), so this sleep only delays the client.
time.sleep(30)
pipe.set('role', 'teacher')
pipe.execute()
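# Follow-up sketch (not in the original file): execute() returns one result
# per buffered command, so reads batch the same way. Assumes the same local
# server as above is reachable.
#
# pipe = r.pipeline(transaction=True)
# pipe.get('name')
# pipe.get('role')
# name, role = pipe.execute()  # e.g. [b'Tom', b'teacher']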
|
[
"liuxingrichu@163.com"
] |
liuxingrichu@163.com
|
4c3c98dc139b2f8f584f48a9f1db91fb63471c18
|
5eea120356afc15cc3edb71f8864d6771ad865c6
|
/futures/var_model/__init__.py
|
e9af4c55745c0234467df114b12290c6b8f19f73
|
[
"MIT"
] |
permissive
|
ShubraChowdhury/Investment_Finance
|
469d5e5a200616eee830be18cb4a86d54319a30b
|
3da761d755278d3d2de8c201b56d4ff9cb23def4
|
refs/heads/master
| 2022-12-12T11:52:33.585329
| 2021-09-23T18:13:15
| 2021-09-23T18:13:15
| 153,317,318
| 2
| 0
| null | 2022-12-08T00:45:34
| 2018-10-16T16:22:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 312
|
py
|
"""
The __init__.py files are required to make Python treat the directories as
containing packages; this is done to prevent directories with a common name,
such as string, from unintentionally hiding valid modules that occur later
(deeper) on the module search path.
@author: ucaiado
Created on 09/05/2016
"""
|
[
"noreply@github.com"
] |
ShubraChowdhury.noreply@github.com
|
91f6f93546e8240aff32445f1e68c11ccfe19d83
|
4d2238210813c1581bf44f64d8a63196f75d2df4
|
/tem.py
|
ece18216c221c476bc14897a6b8a415a8a9197d1
|
[] |
no_license
|
wwtang/code02
|
b1600d34907404c81fa523cfdaa74db0021b8bb3
|
9f03dda7b339d8c310c8a735fc4f6d795b153801
|
refs/heads/master
| 2020-12-24T14:10:33.738734
| 2012-12-14T04:24:47
| 2012-12-14T04:24:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
color = raw_input('please select the color: ')
if color == "white" or color == "black":
print "the color was black or white"
elif color > "k" :
print "the color start with letter after the 'K' in alphabet"
|
[
"andytang1994@gmail.com"
] |
andytang1994@gmail.com
|
a4ce7faf8a9617e3a7dcffa89948c091bf32dc3f
|
1e11d6f9245c55e21edfb24f4340d52e3f7f327f
|
/dillo/migrations/0078_organizations.py
|
ecdc1eed4f69e90c13232e53b67ef2f646fc6389
|
[] |
no_license
|
armadillica/dillo
|
996e8462f4f76349ecc49ecb08cdd6c8c66e072b
|
960aed85f8438109bed9883321891305e1db8b10
|
refs/heads/main
| 2023-08-04T06:45:34.570071
| 2023-06-04T00:07:57
| 2023-06-04T00:07:57
| 30,461,275
| 79
| 18
| null | 2023-08-02T00:22:40
| 2015-02-07T16:17:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,925
|
py
|
# Generated by Django 3.2.13 on 2022-11-19 22:29
import dillo.models.mixins
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
def forwards_func(apps, schema_editor):
"""Set default cateogries."""
OrganizationCategory = apps.get_model('dillo', 'OrganizationCategory')
db_alias = schema_editor.connection.alias
for c in {'3D', '2D', 'Features', 'Shorts', 'Games'}:
OrganizationCategory.objects.using(db_alias).create(name=c)
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dillo', '0077_profile_job'),
]
operations = [
migrations.CreateModel(
name='OrganizationCategory',
fields=[
(
'id',
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
),
),
('name', models.CharField(max_length=128, unique=True)),
],
options={
'verbose_name_plural': 'Organization categories',
},
),
migrations.CreateModel(
name='Organization',
fields=[
(
'id',
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
),
),
(
'created_at',
models.DateTimeField(auto_now_add=True, verbose_name='date created'),
),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='date edited')),
('name', models.CharField(max_length=255, unique=True)),
(
'visibility',
models.CharField(
choices=[
('public', 'Public'),
('unlisted', 'Unlisted'),
('under_review', 'Under Review'),
],
default='under_review',
max_length=16,
),
),
(
'description',
models.TextField(
blank=True,
help_text='A description of the organization activities.',
null=True,
),
),
('website', models.URLField(max_length=120)),
(
'logo',
models.ImageField(
blank=True,
height_field='logo_height',
upload_to=dillo.models.mixins.get_upload_to_hashed_path,
width_field='logo_width',
help_text='A square picture, around 512x512.',
),
),
('logo_height', models.PositiveIntegerField(null=True)),
('logo_width', models.PositiveIntegerField(null=True)),
('city', models.CharField(blank=True, max_length=256, null=True)),
(
'country',
django_countries.fields.CountryField(blank=True, max_length=2, null=True),
),
(
'is_online',
models.BooleanField(
default=False, help_text='Operates fully online, with no physical HQ.'
),
),
('is_active', models.BooleanField(default=True)),
(
'categories',
models.ManyToManyField(
help_text='Keywords to identify this organization.',
null=True,
to='dillo.OrganizationCategory',
),
),
(
'city_ref',
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='organizations',
to='dillo.city',
),
),
(
'user',
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
'abstract': False,
},
),
migrations.RunPython(forwards_func, migrations.RunPython.noop),
]
|
[
"francesco.siddi@gmail.com"
] |
francesco.siddi@gmail.com
|
95482e1fc560e2c251c59b36d951f928ba1157ba
|
06292f96cba132ca57777672a447cfff7c5abee6
|
/week5/tut/submit/1.py
|
b099a6a693b68bb37a739181ac6b9f40fa36844d
|
[] |
no_license
|
kietteik/ppl
|
1746440b12affe71e67d6f958922b32b1fdaab5c
|
2ee60582e81595b8d8b5d0f8212d20151cfe9264
|
refs/heads/master
| 2023-03-01T00:24:36.969189
| 2021-01-31T05:15:13
| 2021-01-31T05:15:13
| 305,802,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
def double(lst):
'''1. a'''
return [i * 2 for i in lst]
def double(lst):
'''1. b'''
if not lst: return []
return [lst[0] * 2] + double(lst[1:])
def double(lst):
'''1. c'''
return list(map(lambda x: x * 2, lst))
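# Quick sanity check (not part of the original exercise): only the last
# definition of `double` is bound at run time, and all three variants agree.
assert double([1, 2, 3]) == [2, 4, 6]
assert double([]) == []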
|
[
"kietteikdoi@gmail.com"
] |
kietteikdoi@gmail.com
|
c469f6d0359884d8d16ed851a6af1e7f39b15f42
|
6f04b7f8fd55fffb54ce4c78049812655b8c176b
|
/chap03_GroupApply/lecture/step02_groupby_plot_선생님.py
|
a33aaf98830f6e875d4ec23df91b81e5c56e0c20
|
[] |
no_license
|
Elly-bang/Python-ll
|
71092507b719e1532675f8bab489be3f7366c1de
|
2658de214cc4a9dd68ad35d82202b59b3129e5af
|
refs/heads/master
| 2022-11-09T18:11:55.449732
| 2020-06-30T06:57:11
| 2020-06-30T06:57:11
| 276,021,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
# -*- coding: utf-8 -*-
"""
Analysis of data by grouping variable
 - create subsets
 - create group objects
 - visualization
"""
import pandas as pd
# 1. dataset load
wine = pd.read_csv('C:/ITWILL/4_Python-II/data/winequality-both.csv')
wine.info() # type, quality
# rename columns : replace spaces with '_'
wine.columns = wine.columns.str.replace(' ', '_')
wine.info()
# RangeIndex: 6497 entries, 0 to 6496
# Data columns (total 13 columns)
# check the grouping variables
wine['type'].unique() # ['red', 'white']
wine.quality.unique() # [5, 6, 7, 4, 8, 3, 9]
# 2. create subsets
# 1) type column : DataFrame (2-D)
red_wine = wine.loc[wine['type']=='red'] #[row, col]
red_wine.info()
# Int64Index: 1599 entries, 0 to 1598
# Data columns (total 13 columns):
red_wine.shape # (1599, 13)
# 2) type (rows) vs quality (column) : Series (1-D)
red_quality = wine.loc[wine['type']=='red', 'quality'] #[row, col]
type(red_quality) # pandas.core.series.Series
red_quality.shape # (1599,)
white_quality = wine.loc[wine['type']=='white', 'quality'] #[row, col]
type(white_quality) # pandas.core.series.Series
white_quality.shape # (4898,)
# 3. create a group object : 2 grouping variables -> the other 11 columns grouped
# form) DF.groupby(['col1', 'col2'])
wine_grp = wine.groupby(['type', 'quality'])
# frequency of each group
wine_grp.size()
'''
type quality
red 3 10
4 53
5 681
6 638
7 199
8 18
white 3 20
4 163
'''
# 1-D -> 2-D : cross tabulation
grp_2d = wine_grp.size().unstack()
grp_2d
'''
quality 3 4 5 6 7 8 9
type
red 10.0 53.0 681.0 638.0 199.0 18.0 NaN
white 20.0 163.0 1457.0 2198.0 880.0 175.0 5.0
'''
# cross tabulation
tab = pd.crosstab(wine['type'], wine['quality']) # (index=rows, columns=cols)
tab
'''
quality 3 4 5 6 7 8 9
type
red 10 53 681 638 199 18 0
white 20 163 1457 2198 880 175 5
'''
# 4. visualize the group object
import matplotlib.pyplot as plt
type(grp_2d) # pandas.core.frame.DataFrame
# stacked horizontal bar chart
grp_2d.plot(kind='barh',
title='type vs quality',
stacked=True)
plt.show()
# 5. wine type (grouping variable) vs alcohol (continuous) statistics
wine_grp = wine.groupby('type') # 1 grouping variable -> the other 12 columns grouped
# summary statistics of alcohol for each group
wine_grp['alcohol'].describe()
'''
count mean std min 25% 50% 75% max
type
red 1599.0 10.422983 1.065668 8.4 9.5 10.2 11.1 14.9
white 4898.0 10.514267 1.230621 8.0 9.5 10.4 11.4 14.2
'''
|
[
"noreply@github.com"
] |
Elly-bang.noreply@github.com
|
03ae5a477c8f067d8cb700f67401521690fd068d
|
eda9187adfd53c03f55207ad05d09d2d118baa4f
|
/algo/pca/pca_old.py
|
264da244de8c2b727e5ca60a969c58a436681e39
|
[] |
no_license
|
HuiZhaozh/python_tutorials
|
168761c9d21ad127a604512d7c6c6b38b4faa3c7
|
bde4245741081656875bcba2e4e4fcb6b711a3d9
|
refs/heads/master
| 2023-07-07T20:36:20.137647
| 2020-04-24T07:18:25
| 2020-04-24T07:18:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
# -*- coding:utf-8 -*-
# /usr/bin/python
'''
@Author: Yan Errol @Email:2681506@gmail.com
@Date: 2019-06-09 23:59
@Describe:
@Env:
'''
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
# A value we picked to always display the same results
# Feel free to change this to any value greater than 0 to view different random outcomes
seed = 9000
# We're using a seeded random state so we always get the same outcome
seeded_state = np.random.RandomState(seed=seed)
# Returns a random 150 points (x, y pairs) in a gaussian distribution,
# IE most of the points fall close to the average with a few outliers
rand_points = seeded_state.randn(150, 2)
# The @ operator performs matrix multiplication, and serves to bring
# our gaussian distribution points closer together
points = rand_points @ seeded_state.rand(2, 2)
x = points[:, 0]
y = points[:, 1]
# Now we have a sample dataset of 150 points to perform PCA on, so
# go ahead and display this in a plot.
plt.scatter(x, y, alpha=0.5)
plt.title("Sample Dataset")
print("Plotting our created dataset...\n")
print("Points:")
for p in points[:10, :]:
print("({:7.4f}, {:7.4f})".format(p[0], p[1]))
print("...\n")
plt.show()
# Find two principal components from our given dataset
pca = PCA(n_components = 2)
pca.fit(points)
# Once we are fitted, we have access to inner mean_, components_, and explained_variance_ variables
# Use these to add some arrows to our plot
plt.scatter(x, y, alpha=0.5)
plt.title("Sample Dataset with Principal Component Lines")
for var, component in zip(pca.explained_variance_, pca.components_):
    # Draw an arrow from the mean along each component, scaled by its
    # standard deviation (arrowprops is needed for a visible arrow).
    plt.annotate(
        "",
        component * np.sqrt(var) * 2 + pca.mean_,
        pca.mean_,
        arrowprops=dict(arrowstyle="->")
    )
print("Plotting our calculated principal components...\n")
plt.show()
# Reduce the dimensionality of our data using a PCA transformation
pca = PCA(n_components = 1)
transformed_points = pca.fit_transform(points)
# Note that all the inverse transformation does is transforms the data to its original space.
# In practice, this is unnecessary. For this example, all data would be along the x axis.
# We use it here for visualization purposes
inverse = pca.inverse_transform(transformed_points)
t_x = inverse[:, 0]
t_y = inverse[:, 1]
# Plot the original and transformed data sets
plt.scatter(x, y, alpha=0.3)
plt.scatter(t_x, t_y, alpha=0.7)
plt.title("Sample Dataset (Blue) and Transformed Dataset (Orange)")
print("Plotting our dataset with a dimensionality reduction...")
plt.show()
|
[
"2681506@gmail.com"
] |
2681506@gmail.com
|
c3029c19a6c3b697bb29649019096a2ef9384915
|
521efcd158f4c69a686ed1c63dd8e4b0b68cc011
|
/airflow/operators/datetime.py
|
47021c1730952719ea17c0bf05c4778c8d57ae5f
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
coutureai/RaWorkflowOrchestrator
|
33fd8e253bfea2f9a82bb122ca79e8cf9dffb003
|
cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f
|
refs/heads/main
| 2022-10-01T06:24:18.560652
| 2021-12-29T04:52:56
| 2021-12-29T04:52:56
| 184,547,783
| 5
| 12
|
Apache-2.0
| 2022-11-04T00:02:55
| 2019-05-02T08:38:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,632
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from typing import Iterable, Union
from airflow.exceptions import AirflowException
from airflow.operators.branch import BaseBranchOperator
from airflow.utils import timezone
from airflow.utils.context import Context
class BranchDateTimeOperator(BaseBranchOperator):
"""
Branches into one of two lists of tasks depending on the current datetime.
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BranchDateTimeOperator`
True branch will be returned when ``datetime.datetime.now()`` falls below
``target_upper`` and above ``target_lower``.
:param follow_task_ids_if_true: task id or task ids to follow if
``datetime.datetime.now()`` falls above target_lower and below ``target_upper``.
:type follow_task_ids_if_true: str or list[str]
:param follow_task_ids_if_false: task id or task ids to follow if
``datetime.datetime.now()`` falls below target_lower or above ``target_upper``.
:type follow_task_ids_if_false: str or list[str]
:param target_lower: target lower bound.
:type target_lower: Optional[datetime.datetime]
:param target_upper: target upper bound.
:type target_upper: Optional[datetime.datetime]
:param use_task_execution_date: If ``True``, uses task's execution day to compare with targets.
Execution date is useful for backfilling. If ``False``, uses system's date.
:type use_task_execution_date: bool
"""
def __init__(
self,
*,
follow_task_ids_if_true: Union[str, Iterable[str]],
follow_task_ids_if_false: Union[str, Iterable[str]],
target_lower: Union[datetime.datetime, datetime.time, None],
target_upper: Union[datetime.datetime, datetime.time, None],
use_task_execution_date: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
if target_lower is None and target_upper is None:
raise AirflowException(
"Both target_upper and target_lower are None. At least one "
"must be defined to be compared to the current datetime"
)
self.target_lower = target_lower
self.target_upper = target_upper
self.follow_task_ids_if_true = follow_task_ids_if_true
self.follow_task_ids_if_false = follow_task_ids_if_false
self.use_task_execution_date = use_task_execution_date
def choose_branch(self, context: Context) -> Union[str, Iterable[str]]:
if self.use_task_execution_date is True:
now = timezone.make_naive(context["logical_date"], self.dag.timezone)
else:
now = timezone.make_naive(timezone.utcnow(), self.dag.timezone)
lower, upper = target_times_as_dates(now, self.target_lower, self.target_upper)
if upper is not None and upper < now:
return self.follow_task_ids_if_false
if lower is not None and lower > now:
return self.follow_task_ids_if_false
return self.follow_task_ids_if_true
def target_times_as_dates(
base_date: datetime.datetime,
lower: Union[datetime.datetime, datetime.time, None],
upper: Union[datetime.datetime, datetime.time, None],
):
"""Ensures upper and lower time targets are datetimes by combining them with base_date"""
if isinstance(lower, datetime.datetime) and isinstance(upper, datetime.datetime):
return lower, upper
if lower is not None and isinstance(lower, datetime.time):
lower = datetime.datetime.combine(base_date, lower)
if upper is not None and isinstance(upper, datetime.time):
upper = datetime.datetime.combine(base_date, upper)
if lower is None or upper is None:
return lower, upper
if upper < lower:
upper += datetime.timedelta(days=1)
return lower, upper
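# Usage sketch (an assumption, not from this module): branch a DAG on the
# current wall-clock window; the task ids and the `dag` object are placeholders.
#
# from datetime import time
# from airflow.operators.datetime import BranchDateTimeOperator
#
# branch = BranchDateTimeOperator(
#     task_id="branch_on_datetime",
#     follow_task_ids_if_true=["in_window_task"],
#     follow_task_ids_if_false=["out_of_window_task"],
#     target_lower=time(9, 0),
#     target_upper=time(17, 0),
#     dag=dag,
# )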
|
[
"noreply@github.com"
] |
coutureai.noreply@github.com
|
734b6a332d6f0af9cd41c64282aff3d00bb8662f
|
461bffdd97ba507b29f1fbf6f9af1800f0e241f6
|
/pytext/metric_reporters/classification_metric_reporter.py
|
1e3ced78d285a9bff886349e7b06f32ac39129b1
|
[
"BSD-3-Clause"
] |
permissive
|
Erica-Liu/pytext
|
d347e1327254bbe746c491fd8002bcc2e29d82a9
|
0a77e34e555750311ede54514c3c85b133b258f3
|
refs/heads/master
| 2020-06-16T02:49:21.589774
| 2019-07-05T18:25:52
| 2019-07-05T18:33:55
| 195,459,270
| 0
| 0
|
NOASSERTION
| 2019-07-05T19:38:34
| 2019-07-05T19:38:34
| null |
UTF-8
|
Python
| false
| false
| 6,254
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from enum import Enum
from typing import List, Optional
from pytext.common.constants import Stage
from pytext.data import CommonMetadata
from pytext.metrics import (
LabelListPrediction,
LabelPrediction,
compute_classification_metrics,
compute_multi_label_classification_metrics,
)
from .channel import Channel, ConsoleChannel, FileChannel
from .metric_reporter import MetricReporter
META_LABEL_NAMES = "label_names"
class IntentModelChannel(FileChannel):
def get_title(self):
return ("predicted", "actual", "scores_str", "text")
def gen_content(self, metrics, loss, preds, targets, scores, contexts):
for i in range(len(preds)):
yield [
preds[i],
targets[i],
",".join([f"{s:.2f}" for s in scores[i]]),
contexts["utterance"][i],
]
class ComparableClassificationMetric(Enum):
ACCURACY = "accuracy"
ROC_AUC = "roc_auc"
MCC = "mcc"
MACRO_F1 = "macro_f1"
LABEL_F1 = "label_f1"
LABEL_AVG_PRECISION = "label_avg_precision"
LABEL_ROC_AUC = "label_roc_auc"
# use negative because the reporter's lower_is_better value is False
NEGATIVE_LOSS = "negative_loss"
class ClassificationMetricReporter(MetricReporter):
__EXPANSIBLE__ = True
class Config(MetricReporter.Config):
model_select_metric: ComparableClassificationMetric = (
ComparableClassificationMetric.ACCURACY
)
target_label: Optional[str] = None
#: These column names correspond to raw input data columns. Text in these
#: columns (usually just 1 column) will be concatenated and output in
#: the IntentModelChannel as an evaluation tsv.
text_column_names: List[str] = ["text"]
def __init__(
self,
label_names: List[str],
channels: List[Channel],
model_select_metric: ComparableClassificationMetric = (
ComparableClassificationMetric.ACCURACY
),
target_label: Optional[str] = None,
text_column_names: List[str] = Config.text_column_names,
) -> None:
super().__init__(channels)
self.label_names = label_names
self.model_select_metric = model_select_metric
self.target_label = target_label
self.text_column_names = text_column_names
@classmethod
def from_config(cls, config, meta: CommonMetadata = None, tensorizers=None):
# TODO: refactor metric reporting and remove this hack
if tensorizers:
labels = list(tensorizers["labels"].vocab)
else:
labels = meta.target.vocab.itos
return cls.from_config_and_label_names(config, labels)
@classmethod
def from_config_and_label_names(cls, config, label_names: List[str]):
if config.model_select_metric in (
ComparableClassificationMetric.LABEL_F1,
ComparableClassificationMetric.LABEL_AVG_PRECISION,
ComparableClassificationMetric.LABEL_ROC_AUC,
):
assert config.target_label is not None
assert config.target_label in label_names
if config.model_select_metric in (
ComparableClassificationMetric.ROC_AUC,
ComparableClassificationMetric.MCC,
):
assert len(label_names) == 2
return cls(
label_names,
[ConsoleChannel(), IntentModelChannel((Stage.TEST,), config.output_path)],
config.model_select_metric,
config.target_label,
config.text_column_names,
)
def batch_context(self, raw_batch, batch):
context = super().batch_context(raw_batch, batch)
context["utterance"] = [
" | ".join(str(row[column_name]) for column_name in self.text_column_names)
for row in raw_batch
]
return context
def calculate_metric(self):
return compute_classification_metrics(
[
LabelPrediction(scores, pred, expect)
for scores, pred, expect in zip(
self.all_scores, self.all_preds, self.all_targets
)
],
self.label_names,
self.calculate_loss(),
)
def get_meta(self):
return {META_LABEL_NAMES: self.label_names}
def get_model_select_metric(self, metrics):
if self.model_select_metric == ComparableClassificationMetric.ACCURACY:
metric = metrics.accuracy
elif self.model_select_metric == ComparableClassificationMetric.ROC_AUC:
metric = metrics.roc_auc
elif self.model_select_metric == ComparableClassificationMetric.MCC:
metric = metrics.mcc
elif self.model_select_metric == ComparableClassificationMetric.MACRO_F1:
metric = metrics.macro_prf1_metrics.macro_scores.f1
elif self.model_select_metric == ComparableClassificationMetric.LABEL_F1:
metric = metrics.macro_prf1_metrics.per_label_scores[self.target_label].f1
elif (
self.model_select_metric
== ComparableClassificationMetric.LABEL_AVG_PRECISION
):
metric = metrics.per_label_soft_scores[self.target_label].average_precision
elif self.model_select_metric == ComparableClassificationMetric.LABEL_ROC_AUC:
metric = metrics.per_label_soft_scores[self.target_label].roc_auc
elif self.model_select_metric == ComparableClassificationMetric.NEGATIVE_LOSS:
metric = -metrics.loss
else:
raise ValueError(f"unknown metric: {self.model_select_metric}")
assert metric is not None
return metric
class MultiLabelClassificationMetricReporter(ClassificationMetricReporter):
def calculate_metric(self):
return compute_multi_label_classification_metrics(
[
LabelListPrediction(scores, pred, expect)
for scores, pred, expect in zip(
self.all_scores, self.all_preds, self.all_targets
)
],
self.label_names,
self.calculate_loss(),
)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
8f6c010d69c13e262cdd609efe3ac4b6009f38d3
|
6dae31f10260e39feae9d268e3ebe6d23146575a
|
/spm/bin_deep_surveys/run_stellarpop_miles_deep2_kroupa
|
fc11bb5287636c2c426dae12945d749d5984c5b1
|
[
"CC0-1.0"
] |
permissive
|
JohanComparat/pySU
|
e55eba92f0660e733468bce618595a03dc25a3d2
|
4169e11414be661dc0c01c774e64fb8ce6242825
|
refs/heads/master
| 2021-12-25T11:06:04.315554
| 2021-10-11T12:03:22
| 2021-10-11T12:03:22
| 44,340,565
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,277
|
#! /usr/bin/env python
import sys
from os.path import join
import os
import time
import numpy as np
import glob
import astropy.cosmology as co
cosmo = co.Planck13
import astropy.io.fits as fits
# for one galaxy spectrum
import GalaxySpectrumFIREFLY as gs
import StellarPopulationModel as spm
catalog=fits.open(join(os.environ['DEEP2_DIR'], "catalogs", "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.fits"))[1].data
outputFolder = join( os.environ['DEEP2_DIR'], 'stellarpop-m11-kroupa-miles', 'stellarpop')
def runSpec(catalog_entry):
print catalog_entry['ZBEST'], catalog_entry['RA'], catalog_entry['DEC']
t0=time.time()
mask=str(catalog_entry['MASK'])
objno=str(catalog_entry['OBJNO'])
path_to_spectrum = glob.glob(join(os.environ['DEEP2_DIR'], 'spectra', mask, '*', '*' + objno + '*_fc_tc.dat'))
if len(path_to_spectrum)>=1:
try:
spec=gs.GalaxySpectrumFIREFLY("-", milky_way_reddening=True)
spec.openObservedDEEP2pectrum(catalog_entry)
ageMax = np.log10(cosmo.age(spec.redshift).value*1e9)
if spec.redshift>0.01 and spec.redshift < 1.7 :
model = spm.StellarPopulationModel(spec, join(outputFolder , 'spFly-deep2-'+mask+'-'+objno ), cosmo, models = 'm11', model_libs = ['MILES'], imfs = ['kr'], age_limits = [6,10], downgrade_models = True, data_wave_medium = 'air', Z_limits = [-3.,1.],suffix="-kr.fits", use_downgraded_models = True)
try :
model.fit_models_to_data()
#print( model.averages )
except (ValueError):
pass
print "time used =", time.time()-t0 ,"seconds"
except (IndexError):
pass
for catalog_entry in catalog[::-1]:
mask=str(catalog_entry['MASK'])
objno=str(catalog_entry['OBJNO'])
if os.path.isfile(join(outputFolder , 'spFly-deep2-'+mask+'-'+objno +"-kr.fits")):
print "pass", join(outputFolder , 'spFly-deep2-'+mask+'-'+objno +"-kr.fits")
else:
runSpec(catalog_entry)
sys.exit()
# Diagnostic pass over the catalog (unreachable while sys.exit() above remains):
n_fc_tc = np.zeros_like(catalog['ZBEST'])
for ii, catalog_entry in enumerate(catalog):
mask=str(catalog_entry['MASK'])
objno=str(catalog_entry['OBJNO'])
path_to_spectrum = glob.glob(join(os.environ['DEEP2_DIR'], 'spectra', mask, '*', '*' + objno + '*_fc_tc.dat'))
n_fc_tc[ii] = len(path_to_spectrum )
ok=(catalog['ZBEST']>0.01)&(catalog['ZBEST']<1.7)&(n_fc_tc>=1)
print len(catalog), len(catalog[ok])
|
[
"johan.comparat@gmail.com"
] |
johan.comparat@gmail.com
|
|
ff6f46df45a62d02b5d3eb10ff5fa6488d3aca62
|
ea01ed735850bf61101b869b1df618d3c09c2aa3
|
/python基础/network_programming/ftp_task/ftp/conf/settings.py
|
fe1097cb50a4b2bf3c4804ce40907ffed75bb71a
|
[] |
no_license
|
liuzhipeng17/python-common
|
867c49ac08719fabda371765d1f9e42f6dd289b9
|
fb44da203d4e3a8304d9fe6205e60c71d3a620d8
|
refs/heads/master
| 2021-09-27T10:39:45.178135
| 2018-11-08T01:49:33
| 2018-11-08T01:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
# -*- coding: utf-8 -*-
import os.path
_project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
USER_DB_PATH = os.path.join(_project_path, 'db', 'userdb.ini')
ENCODING = 'utf-8'
MAX_BUFFER_SIZE = 1024
USER_BASE_PATH = os.path.join(_project_path, 'dir', 'home')
BASE_DIR = os.path.join(_project_path, 'dir')
USER_DOWNLOAD_BASE_DIR = os.path.join(_project_path, 'dir', 'downloads')
USER_UPLOAD_BASE_DIR = os.path.join(_project_path, 'dir', 'uploads')
STATUS_CODE = {
200 : "Task finished",
250 : "Invalid cmd format, e.g: {'action':'get','filename':'tests.py','size':344}",
251 : "Invalid cmd ",
252 : "Invalid auth data",
253 : "Wrong username or password",
254 : "Passed authentication",
255 : "Filename doesn't provided",
256 : "File doesn't exist on server",
257 : "ready to send file",
258 : "md5 verification",
259 : "path doesn't exist on server",
260 : "path changed",
261 : "send File line",
262 : "File has exist on server",
263 : "Put empty file",
264 : "Put not null file",
265 : "Get empty file",
266 : "Path access permitted or Path not exist",
267 : "pwd invalid cmd arguments",
268 : "pwd pass",
269 : "permitted putting same-name file unless continue situation"
}
|
[
"liucpliu@sina.cn"
] |
liucpliu@sina.cn
|
e1550eadd9cc69970c6b6044d39bd284e1baef25
|
474525154a4e1d48ef5242d1f44164d05399b145
|
/spinoffs/oryx/oryx/experimental/nn/function.py
|
b9d20e453f86199f85885faeeef667bb5300a2ac
|
[
"Apache-2.0"
] |
permissive
|
svshivapuja/probability
|
9855737790f74a39169688fbfec9671deef804d9
|
af7ccb22d972329633530c3b754ed1f49472f6a7
|
refs/heads/main
| 2023-07-17T04:14:53.703622
| 2021-08-30T17:47:06
| 2021-08-30T17:47:06
| 400,983,015
| 1
| 0
|
Apache-2.0
| 2021-08-29T07:51:29
| 2021-08-29T07:51:29
| null |
UTF-8
|
Python
| false
| false
| 1,863
|
py
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Registers custom rules for neural networks in the stateful function API.
The Oryx state API supports custom unzip rules when `init`-ing a function. We
use this for neural networks to thread kwargs through the Jaxpr that is
created when unzipping a function. This module implements that by first
replacing instances of `layer_cau` with `FlatPrimitive`s, which avoids using a
call primitive; a call primitive would be difficult to pass new keyword
arguments into, whereas a regular primitive's behavior is easy to override.
"""
from jax import tree_util
from oryx.core import state
from oryx.experimental.nn import base
__all__ = [
]
def layer_cau_kwargs_rule(*flat_args, num_consts, in_tree, kwargs, **_):
"""Custom kwargs rule for layer_cau primitive."""
flat_args = flat_args[num_consts:]
layer, *args = tree_util.tree_unflatten(in_tree, flat_args)
kwargs = dict(kwargs)
has_rng = kwargs.pop('has_rng', False)
if has_rng:
rng, args = args[0], args[1:]
kwargs = dict(kwargs, rng=rng)
ans = layer.call_and_update(*args, **kwargs)
return tree_util.tree_leaves(ans)
state.kwargs_rules[base.layer_cau_p] = layer_cau_kwargs_rule
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
b55ecc78784e9edeb59f797fac7f6750b1ccd7e5
|
79c0358277a5f6ae231d89ee4476cb1facd00e50
|
/extra/desktop/gnome/addons/gnome-color-manager/actions.py
|
c37eddcb3525032b734864d2c7b456d5bc8496bb
|
[] |
no_license
|
mrust1/PisiLinux
|
a139dbc9f8d3d61ebec38d08f36dfa6eafff7107
|
a2014b6912df50ad22da5b2f3d21bf01cbd8e192
|
refs/heads/master
| 2020-12-11T03:42:50.309869
| 2014-10-05T14:05:17
| 2014-10-05T14:05:17
| 24,826,519
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
autotools.configure("--libexecdir=/usr/lib/gnome-color-manager")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.insinto("/usr/share/pixmaps", "data/icons/48x48/gnome-color-manager.png")
pisitools.dodoc("AUTHORS", "COPYING", "ChangeLog", "NEWS", "README")
|
[
"namso-01@hotmail.it"
] |
namso-01@hotmail.it
|
656b9a478e48b1c9114cb46915cfa1113d2c3a9e
|
651a296c8f45b5799781fd78a6b5329effe702a0
|
/polpak/bell_values.py
|
22a15e0802b2391a06bf53d6f330732079415995
|
[] |
no_license
|
pdhhiep/Computation_using_Python
|
095d14370fe1a01a192d7e44fcc81a52655f652b
|
407ed29fddc267950e9860b8bbd1e038f0387c97
|
refs/heads/master
| 2021-05-29T12:35:12.630232
| 2015-06-27T01:05:17
| 2015-06-27T01:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,625
|
py
|
#!/usr/bin/env python
#
def bell_values ( n_data ):
#*****************************************************************************80
#
## BELL_VALUES returns some values of the Bell numbers.
#
# Discussion:
#
# The Bell number B(N) is the number of restricted growth functions on N.
#
# Note that the Stirling numbers of the second kind, S^m_n, count the
# number of partitions of N objects into M classes, and so it is
# true that
#
# B(N) = S^1_N + S^2_N + ... + S^N_N.
#
# The Bell numbers were named for Eric Temple Bell.
#
# In Mathematica, the function can be evaluated by
#
# Sum[StirlingS2[n,m],{m,1,n}]
#
# The Bell number B(N) is defined as the number of partitions (of
# any size) of a set of N distinguishable objects.
#
# A partition of a set is a division of the objects of the set into
# subsets.
#
# Example:
#
# There are 15 partitions of a set of 4 objects:
#
# (1234),
# (123) (4),
# (124) (3),
# (12) (34),
# (12) (3) (4),
# (134) (2),
# (13) (24),
# (13) (2) (4),
# (14) (23),
# (1) (234),
# (1) (23) (4),
# (14) (2) (3),
# (1) (24) (3),
# (1) (2) (34),
# (1) (2) (3) (4).
#
# and so B(4) = 15.
#
# First values:
#
# N B(N)
# 0 1
# 1 1
# 2 2
# 3 5
# 4 15
# 5 52
# 6 203
# 7 877
# 8 4140
# 9 21147
# 10 115975
#
# Recursion:
#
# B(I) = sum ( 1 <= J <=I ) Binomial ( I-1, J-1 ) * B(I-J)
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 23 November 2014
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Milton Abramowitz and Irene Stegun,
# Handbook of Mathematical Functions,
# US Department of Commerce, 1964.
#
# Stephen Wolfram,
# The Mathematica Book,
# Fourth Edition,
# Wolfram Media / Cambridge University Press, 1999.
#
# Parameters:
#
# Input/output, integer N_DATA. The user sets N_DATA to 0 before the
# first call. On each call, the routine increments N_DATA by 1, and
# returns the corresponding data; when there is no more data, the
# output value of N_DATA will be 0 again.
#
# Output, integer N, the order of the Bell number.
#
# Output, integer C, the value of the Bell number.
#
import numpy as np
n_max = 11
c_vec = np.array ( ( 1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975 ) )
n_vec = np.array ( ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ) )
if ( n_data < 0 ):
n_data = 0
if ( n_max <= n_data ):
n_data = 0
n = 0
c = 0
else:
n = n_vec[n_data]
c = c_vec[n_data]
n_data = n_data + 1
return n_data, n, c
def bell_values_test ( ):
#*****************************************************************************80
#
## BELL_VALUES_TEST demonstrates the use of BELL_VALUES.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 23 November 2014
#
# Author:
#
# John Burkardt
#
print ''
print 'BELL_VALUES_TEST:'
print ' BELL_VALUES returns values of'
print ' the Bell numbers.'
print ''
print ' N BELL(N)'
print ''
n_data = 0
while ( True ):
n_data, n, c = bell_values ( n_data )
if ( n_data == 0 ):
break
print '%6d %10d' % ( n, c )
print ''
print 'BELL_VALUES_TEST:'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
bell_values_test ( )
timestamp ( )
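# Cross-check (not part of the original file): the recursion quoted above,
# B(i) = sum(binomial(i-1, j-1) * B(i-j) for j = 1..i), reproduces the
# tabulated values. Sketch for Python 3.8+ (math.comb), hence left commented
# out in this Python 2 file:
#
# from math import comb
# def bell(n):
#     b = [1]  # B(0) = 1
#     for i in range(1, n + 1):
#         b.append(sum(comb(i - 1, j - 1) * b[i - j] for j in range(1, i + 1)))
#     return b
# assert bell(10) == [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]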
|
[
"siplukabir@gmail.com"
] |
siplukabir@gmail.com
|
e636e89dc9a0a67ae30601cbdb6cdcf9947fef12
|
e4f2aba6cb66ac33c5fc439374e8ef39d0bb0e4a
|
/Week-2-format-string/Exercise-4.py
|
7d00cc9ba3f0f3a488faa705797ac2907d073325
|
[] |
no_license
|
AChen24562/Python-QCC
|
573f5b545239aa24b8047c74539ca6b3e997faa0
|
1da01b76e209eb9b0d08f0f205d635bc2a149dfd
|
refs/heads/master
| 2023-02-06T23:18:41.850377
| 2020-12-28T12:59:29
| 2020-12-28T12:59:29
| 289,614,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
width = 17
height = 12.0
delimiter = "."
print(width//2, type(width//2))
print(width/2.0, type(width/2.0))
print(height/3, type(height/3))
# delimiter * 5 = '.....', str
print(delimiter * 5, type(delimiter * 5))
|
[
"54772925+AChen24562@users.noreply.github.com"
] |
54772925+AChen24562@users.noreply.github.com
|
307a62915d6949a0d0da070e0c930329d1b02074
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/compute/v20180601/get_log_analytic_export_request_rate_by_interval.py
|
3eff60bb02dd42eaf3cbb6765a0555d41fb0c38f
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 3,956
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetLogAnalyticExportRequestRateByIntervalResult',
'AwaitableGetLogAnalyticExportRequestRateByIntervalResult',
'get_log_analytic_export_request_rate_by_interval',
]
@pulumi.output_type
class GetLogAnalyticExportRequestRateByIntervalResult:
"""
LogAnalytics operation status response
"""
def __init__(__self__, properties=None):
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def properties(self) -> 'outputs.LogAnalyticsOutputResponse':
"""
LogAnalyticsOutput
"""
return pulumi.get(self, "properties")
class AwaitableGetLogAnalyticExportRequestRateByIntervalResult(GetLogAnalyticExportRequestRateByIntervalResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLogAnalyticExportRequestRateByIntervalResult(
properties=self.properties)
def get_log_analytic_export_request_rate_by_interval(blob_container_sas_uri: Optional[str] = None,
from_time: Optional[str] = None,
group_by_operation_name: Optional[bool] = None,
group_by_resource_name: Optional[bool] = None,
group_by_throttle_policy: Optional[bool] = None,
interval_length: Optional['IntervalInMins'] = None,
location: Optional[str] = None,
to_time: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLogAnalyticExportRequestRateByIntervalResult:
"""
LogAnalytics operation status response
:param str blob_container_sas_uri: SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
:param str from_time: From time of the query
:param bool group_by_operation_name: Group query result by Operation Name.
:param bool group_by_resource_name: Group query result by Resource Name.
:param bool group_by_throttle_policy: Group query result by Throttle Policy applied.
:param 'IntervalInMins' interval_length: Interval value in minutes used to create LogAnalytics call rate logs.
:param str location: The location upon which virtual-machine-sizes is queried.
:param str to_time: To time of the query
"""
__args__ = dict()
__args__['blobContainerSasUri'] = blob_container_sas_uri
__args__['fromTime'] = from_time
__args__['groupByOperationName'] = group_by_operation_name
__args__['groupByResourceName'] = group_by_resource_name
__args__['groupByThrottlePolicy'] = group_by_throttle_policy
__args__['intervalLength'] = interval_length
__args__['location'] = location
__args__['toTime'] = to_time
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute/v20180601:getLogAnalyticExportRequestRateByInterval', __args__, opts=opts, typ=GetLogAnalyticExportRequestRateByIntervalResult).value
return AwaitableGetLogAnalyticExportRequestRateByIntervalResult(
properties=__ret__.properties)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
ae3b13b10359ae08b10f0782054445f49475fc90
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/maxProduct_20200731212441.py
|
c7bb8018626c41179870e4caa9d8418f760ec486
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
import sys
# brute force: O(n^3) time, O(1) extra space
def maxThree(arr):
if len(arr) < 3:
return -1
maxProduct = -(sys.maxsize -1)
print(maxProduct)
n = len(arr)
for i in range(0,n-2):
for j in range(i+1,n-1):
for k in range(j+1,n):
print('i',arr[i],'j',arr[j],'k',arr[k])
product = arr[i] * arr[j] * arr[k]
if product > maxProduct:
maxProduct = product
return maxProduct
# Sorting-based solution: O(n log n)
def maxOp(arr):
n = len(arr)
arr.sort()
first = arr[n-1] * arr[n-2] * arr[n-3]
second = arr[0] * arr[1] * arr[n-1]
return max(first,second)
print(maxOp([-5,-5,4,5]))
# O(n) time complexity
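import heapq
# Sketch of the O(n) idea noted above (not in the original file): the answer
# is either the product of the three largest values, or the product of the
# two smallest (possibly both negative) and the single largest value.
def maxLinear(arr):
    largest = heapq.nlargest(3, arr)    # three largest, descending
    smallest = heapq.nsmallest(2, arr)  # two smallest, ascending
    return max(largest[0] * largest[1] * largest[2],
               smallest[0] * smallest[1] * largest[0])

print(maxLinear([-5, -5, 4, 5]))  # 125, matching maxOp above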
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
1699d7d134745de10adce3f9435f31332bfe41fd
|
635cb7fb75048f9de7b95b48d1f59de68f9b3368
|
/R09/używanie_metaklas_do_kontrolowania_tworzenia_obiektów/example1.py
|
e76bc265bf850d4e8c8757ec5aaa9bafea6fbc7d
|
[] |
no_license
|
anpadoma/python_receptury3
|
9e889ac503e48eb62160050eecfdc4a64072c184
|
c761f2c36707785a8a70bdaccebd7533c76dee21
|
refs/heads/master
| 2021-01-22T14:38:34.718999
| 2014-01-31T22:09:44
| 2014-01-31T22:09:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# example1.py
#
# Direct instantiation of objects is not allowed
class NoInstances(type):
    def __call__(self, *args, **kwargs):
        raise TypeError("Cannot create an instance directly")
class Spam(metaclass=NoInstances):
@staticmethod
def grok(x):
print('Spam.grok')
if __name__ == '__main__':
try:
s = Spam()
except TypeError as e:
print(e)
Spam.grok(42)
|
[
"mtar@data.pl"
] |
mtar@data.pl
|
c6634f95a8581d8241a4d53c143fe96dbad59ea9
|
8516f0f456b91c0da6c016b64d68ff2c2cdaf68d
|
/src/array/intersect.py
|
1dff47db905e488c248ad540666fbb1ba5276a6f
|
[] |
no_license
|
huowolf/leetcode
|
b5bb67206fab6417055b0534c7627bc281a29eef
|
f34909f09c22d2164bbe21fc7da0361fcbd63fd6
|
refs/heads/master
| 2020-03-26T20:20:13.386017
| 2018-11-20T13:04:03
| 2018-11-20T13:04:03
| 145,317,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
#https://leetcode-cn.com/explore/interview/card/top-interview-questions-easy/1/array/26/
# Intersection of Two Arrays II
# Given two arrays, write a function to compute their intersection
#===============================================================================
# Input: nums1 = [1,2,2,1], nums2 = [2,2]
# Output: [2,2]
#===============================================================================
class Solution:
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
result=[]
        # Traverse one array; when an element also occurs in the other array,
        # append it to the result and remove that one occurrence from nums2
for i in nums1:
for j in nums2:
                # once matched and removed, break out of this inner scan
if i==j:
result.append(i)
nums2.remove(j)
break
return result
nums1 = [1,2,2,1]
nums2 = [2,2]
l=Solution().intersect(nums1, nums2)
print(l)
nums1 = [4,9,5]
nums2 = [9,4,9,8,4]
l=Solution().intersect(nums1, nums2)
print(l)
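# A linear-time alternative (not in the original file): count occurrences in
# one array with a multiset instead of the nested O(len(nums1) * len(nums2))
# scan used above.
from collections import Counter

def intersect_counter(nums1, nums2):
    counts = Counter(nums1)
    result = []
    for x in nums2:
        if counts[x] > 0:    # an unmatched copy of x remains
            result.append(x)
            counts[x] -= 1
    return result

print(intersect_counter([1, 2, 2, 1], [2, 2]))        # [2, 2]
print(intersect_counter([4, 9, 5], [9, 4, 9, 8, 4]))  # [9, 4]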
|
[
"274956285@qq.com"
] |
274956285@qq.com
|
0968ca412075e81decf4567daec9d3887be7f97a
|
0a2356bde96ebc9b6a82bd91a833bbe04ffb3b82
|
/myInstagram/migrations/0006_auto_20201019_1230.py
|
304e84e3452ec7887377156161bd1620a31e667a
|
[
"MIT"
] |
permissive
|
dancan-sandys/Instagram-clone
|
3c1ec335f0806ab714e8946fba94e87b17329c78
|
08cfe2a40f6e701beb6b5fc97a090b61d5e242f3
|
refs/heads/master
| 2023-01-01T09:20:52.186988
| 2020-10-20T11:19:39
| 2020-10-20T11:19:39
| 304,602,156
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-10-19 09:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myInstagram', '0005_remove_photo_profile'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='photo_url',
field=models.ImageField(upload_to='photos/'),
),
migrations.AlterField(
model_name='profile',
name='profile_photo',
field=models.ImageField(upload_to='profile/'),
),
]
|
[
"dancan.oruko96@gmail.com"
] |
dancan.oruko96@gmail.com
|
de6ce81cbeb176a956e3e8f4d930d20d3ee38341
|
1b9bd441c500e79042c48570035071dc20bfaf44
|
/sources/Yalkut Shimoni/set_alt_structs_nach.py
|
d8f9d28ee8faf0f6b3ed0e326dc0f06ecfc49d49
|
[] |
no_license
|
Sefaria/Sefaria-Data
|
ad2d1d38442fd68943535ebf79e2603be1d15b2b
|
25bf5a05bf52a344aae18075fba7d1d50eb0713a
|
refs/heads/master
| 2023-09-05T00:08:17.502329
| 2023-08-29T08:53:40
| 2023-08-29T08:53:40
| 5,502,765
| 51
| 52
| null | 2023-08-29T11:42:31
| 2012-08-22T00:18:38
| null |
UTF-8
|
Python
| false
| false
| 4,419
|
py
|
# -*- coding: utf-8 -*-
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
from bs4 import BeautifulSoup
import re
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, p)
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
def post_index(index):
url = SEFARIA_SERVER + '/api/v2/raw/index/Yalkut_Shimoni_on_Nach'
indexJSON = json.dumps(index)
values = {
'json': indexJSON,
'apikey': API_KEY
}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
try:
response = urllib2.urlopen(req)
print response.read()
except HTTPError, e:
print 'Error code: ', e.code
def convertIntoRef(line):
arr = line.split(",")
perek = arr[0]
remez = arr[1]
para = arr[2]
return (perek, Ref("Yalkut Shimoni on Torah."+remez+"."+para))
perakim = {}
perakim = { "nodes" : [] }
parshiot = { "nodes": [] }
title_eng = ["Joshua", "Judges", "I Samuel", "II Samuel", "I Kings", "II Kings", "Isaiah", "Jeremiah", "Ezekiel", "Hosea",
"Joel", "Amos", "Obadiah", "Jonah", "Micah", "Nahum", "Habakkuk", "Zephaniah", "Haggai", "Zechariah", "Malachi",
"Psalms", "Proverbs", "Job", "Song of Songs", "Ruth", "Lamentations", "Eccelesiastes", "Esther", "Daniel", "Ezra",
"Nehemiah", "I Chronicles", "II Chronicles"]
title_heb = [u"יהושע", u"שופתים", u"שמואל א", u"שמואל ב", u"מלכים א",
u"מלכים ב", u"ישעיהו", u"ירמיהו", u"יחזקאל", u"הושע", u"יואל", u"עמוס",
u"עובדיה", u"יונה", u"מיכה", u"נחום", u"חבקוק", u"צפניה", u"חגי",
u"זכריה", u"מלאכי", u"תהילים", u"משלי", u"איוב", u"שיר השירים",
u"רות", u"איכה", u"קהלת", u"אסתר", u"דניאל", u"עזרא", u"נחמיה",
u"דברי הימים א", u"דברי הימים ב"]
def getHebrewParsha(parsha):
for count, eng in enumerate(title_eng):
if eng==parsha:
return title_heb[count]
for count, title in enumerate(title_eng):
f=open("parsha_"+title+".txt", 'r')
while True:
line = f.readline()
if line == '':
break
parsha_name, start_ref = convertIntoRef(line)
line = f.readline()
parsha_name, end_ref = convertIntoRef(line)
wholeRef = start_ref.to(end_ref).normal()
parsha = ArrayMapNode()
parsha.add_title(parsha_name, "en", primary=True)
parsha.add_title(getHebrewParsha(parsha_name), "he", primary=True)
parsha.key = parsha_name
parsha.depth = 0
parsha.addressTypes = []
parsha.sectionNames = []
parsha.wholeRef = wholeRef
parsha.refs = []
parshiot["nodes"].append(parsha.serialize())
for count, title in enumerate(title_eng):
if title=='Devarim':
continue
f=open("perek_"+title+".txt", 'r')
line = "nothing"
first_one = ""
last_one = ""
refs_dict = {}
current = 0
while line != '':
prev_line = line
line = f.readline()
if line == '':
break
start_perek, start_ref = convertIntoRef(line)
if prev_line == "nothing":
first_one = (start_perek, start_ref)
line = f.readline()
end_perek, end_ref = convertIntoRef(line)
last_one = (end_perek, end_ref)
if start_perek == end_perek:
refs_dict[start_perek] = start_ref.to(end_ref).normal()
refs = []
for i in range(int(last_one[0])):
if str(i+1) in refs_dict:
refs.append(refs_dict[str(i+1)])
else:
refs.append("")
whole_ref = first_one[1].to(last_one[1]).normal()
chumash = ArrayMapNode()
chumash.add_title(title_heb[count], "he", primary=True)
chumash.add_title(title, "en", primary=True)
chumash.key = title
chumash.addressTypes = ["Integer"]
chumash.sectionNames = ["Chapter"]
chumash.depth = 1
chumash.wholeRef = whole_ref
chumash.refs = refs
chumash.validate()
perakim["nodes"].append(chumash.serialize())
f.close()
root = JaggedArrayNode()
root.key = "yalkut_on_nach"
root.add_title("Yalkut Shimoni on Nach", "en", primary=True)
root.add_title(u"""ילקות שמעוני על נ״ח""", "he", primary=True)
root.depth = 2
root.sectionNames = ["Remez", "Paragraph"]
root.heSectionNames = [u"רמז", u"פסקה"]
root.addressTypes = ["Integer", "Integer"]
index = {
"title": "Yalkut Shimoni on Nach",
"categories": ["Midrash"],
"alt_structs": {"Parsha": parshiot, "Chapters": perakim},
"default_struct": "Remez",
"schema": root.serialize()
}
post_index(index)
|
[
"skaplan@brandeis.edu"
] |
skaplan@brandeis.edu
|
5167d45c33d996d136141fa7758a62e1f72334c7
|
8a2f5982c90c205cfee8f8fdce264a930c1b3749
|
/acoustics/standards/iso_1996_1_2003.py
|
266bc5cfdd490432fa7c696a757a75053959407c
|
[
"BSD-3-Clause",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
AlanLeJie/python-acoustics
|
05dde3ba6b7cf07265f5186f3742bb22f01fa1bb
|
af72e7f88003f0bba06934ea38c98e8993c4a6c6
|
refs/heads/master
| 2023-08-28T18:41:08.924307
| 2020-08-18T21:37:14
| 2021-10-26T12:24:40
| 444,331,803
| 1
| 0
|
BSD-3-Clause
| 2022-01-04T07:52:28
| 2022-01-04T07:52:28
| null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
"""
ISO 1996-1:2003
===============
ISO 1996-1:2003 defines the basic quantities to be used for the description of
noise in community environments and describes basic assessment procedures. It
also specifies methods to assess environmental noise and gives guidance on
predicting the potential annoyance response of a community to long-term exposure
from various types of environmental noises. The sound sources can be separate or
in various combinations. Application of the method to predict annoyance response
is limited to areas where people reside and to related long-term land uses.
"""
import numpy as np
def composite_rating_level(levels, hours, adjustment):
"""Composite rating level.
:params levels: Level per period.
:params hours: Amount of hours per period.
:params adjustment: Adjustment per period.
Composite whole-day rating levels are calculated as
.. math:: L_R = 10 \\log{\\left[ \\sum_i \\frac{d_i}{24} 10^{(L_i+K_i)/10} \\right]}
where :math:`i` is a period. See equation 6 and 7 of the standard.
.. note:: Summation is done over the last axis.
"""
levels = np.asarray(levels)
hours = np.asarray(hours)
adjustment = np.asarray(adjustment)
return 10.0 * np.log10((hours / 24.0 * 10.0**((levels + adjustment) / 10.0)).sum(axis=-1))
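# Example (not part of the original module): a whole-day rating from
# day/evening/night levels with illustrative 0/5/10 dB adjustments.
#
# >>> composite_rating_level(levels=[65.0, 62.0, 58.0],
# ...                        hours=[12.0, 4.0, 8.0],
# ...                        adjustment=[0.0, 5.0, 10.0])
# 66.55...  # approximately 66.6 dB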
|
[
"fridh@fridh.nl"
] |
fridh@fridh.nl
|
c2506960988c3521a7ee66ffa6b3e0b8285d7800
|
741ee09b8b73187fab06ecc1f07f46a6ba77e85c
|
/AutonomousSourceCode/data/raw/sort/02e9654d-b2b6-4400-b461-b0237b1385b4__selection_sort.py
|
32b8579c493ef2d06cb99bcee9dee400f3b1157d
|
[] |
no_license
|
erickmiller/AutomatousSourceCode
|
fbe8c8fbf215430a87a8e80d0479eb9c8807accb
|
44ee2fb9ac970acf7389e5da35b930d076f2c530
|
refs/heads/master
| 2021-05-24T01:12:53.154621
| 2020-11-20T23:50:11
| 2020-11-20T23:50:11
| 60,889,742
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
from ds import arrays
import sys
from profile import profile
@profile
def sort(a):
selection_sort(a,0,len(a))
def selection_sort(a,start,length):
    # Despite its name, this routine is an insertion sort: each key is
    # shifted left into the already-sorted prefix a[start:i].
    for i in xrange(start+1,start+length):
        key = a[i]
        j = i
        while(j>start and a[j-1]>key):
            a[j] = a[j-1]
            j -= 1
        a[j] = key
def main():
a = arrays.make(sys.argv)
sort(a)
return a
if __name__=="__main__":
main()
########################################tests########################################
def assert_sorted(a,from_index,length):
selection_sort(a, from_index, length)
for i in xrange(from_index, from_index + length - 1):
assert a[i]<=a[i+1]
def should_partially_sort():
assert_sorted([30,20,10,5,3,2,4,1,-4,-5],3,5)
assert_sorted(arrays.array(50,False),10,20)
|
[
"erickmiller@gmail.com"
] |
erickmiller@gmail.com
|
74e26e09eea6b1c02032bef6283506cea981f66f
|
5b11be48f06b6779fe073deb90dde14659543367
|
/coloredlogs/tests.py
|
f160d525e80fffe8de65f5b542bdc121101ea1e4
|
[
"MIT"
] |
permissive
|
davliu/python-coloredlogs
|
03a612a6d6dd72255d7cee98386bf74f7ed5a204
|
549decbfde404b475cb772e86bdb5091f9f1baff
|
refs/heads/master
| 2020-05-20T18:31:58.680478
| 2015-06-02T19:03:23
| 2015-06-02T19:03:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,045
|
py
|
# Automated tests for the `coloredlogs' package.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: May 27, 2015
# URL: http://coloredlogs.readthedocs.org
# Standard library modules.
import logging
import random
import re
import string
import unittest
# External dependencies.
from humanfriendly.terminal import ansi_wrap
# The module we're testing.
import coloredlogs
import coloredlogs.converter
# External test dependency required to test support for custom log levels.
import verboselogs
# Compatibility with Python 2 and 3.
try:
# Python 2.
from StringIO import StringIO
except ImportError:
# Python 3.
from io import StringIO
# Compiled regular expression that matches a single line of output produced by
# ColoredStreamHandler (does not include matching of ANSI escape sequences).
PLAIN_TEXT_PATTERN = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
class ColoredLogsTestCase(unittest.TestCase):
def setUp(self):
"""Start each test from a known state."""
# Reset global state.
coloredlogs.install()
coloredlogs.set_level(logging.INFO)
# Reset local state.
self.stream = StringIO()
self.handler = coloredlogs.ColoredStreamHandler(stream=self.stream, isatty=False)
self.logger_name = ''.join(random.choice(string.ascii_letters) for i in range(25))
self.logger = verboselogs.VerboseLogger(self.logger_name)
self.logger.addHandler(self.handler)
def test_is_verbose(self):
"""Make sure is_verbose() does what it should :-)."""
assert coloredlogs.root_handler.level == logging.INFO
assert not coloredlogs.is_verbose()
coloredlogs.set_level(logging.VERBOSE)
assert coloredlogs.is_verbose()
def test_increase_verbosity(self):
"""Make sure increase_verbosity() respects default and custom levels."""
assert coloredlogs.root_handler.level == logging.INFO
coloredlogs.increase_verbosity()
assert coloredlogs.root_handler.level == logging.VERBOSE
coloredlogs.increase_verbosity()
assert coloredlogs.root_handler.level == logging.DEBUG
coloredlogs.increase_verbosity()
assert coloredlogs.root_handler.level == logging.NOTSET
coloredlogs.increase_verbosity()
assert coloredlogs.root_handler.level == logging.NOTSET
def test_decrease_verbosity(self):
"""Make sure decrease_verbosity() respects default and custom levels."""
assert coloredlogs.root_handler.level == logging.INFO
coloredlogs.decrease_verbosity()
assert coloredlogs.root_handler.level == logging.WARNING
coloredlogs.decrease_verbosity()
assert coloredlogs.root_handler.level == logging.ERROR
coloredlogs.decrease_verbosity()
assert coloredlogs.root_handler.level == logging.CRITICAL
coloredlogs.decrease_verbosity()
assert coloredlogs.root_handler.level == logging.CRITICAL
def test_level_discovery(self):
"""Make sure find_defined_levels() always reports the levels defined in Python's standard library."""
for number in (0, 10, 20, 30, 40, 50):
assert number in coloredlogs.find_defined_levels()
def test_missing_isatty_method(self):
"""Make sure ColoredStreamHandler() doesn't break because of a missing isatty() method."""
# This should not raise any exceptions in the constructor.
coloredlogs.ColoredStreamHandler(stream=object())
def test_non_string_messages(self):
"""Make sure ColoredStreamHandler() doesn't break because of non-string messages."""
# This should not raise any exceptions; all of these values can be cast to strings.
for value in (True, False, 0, 42, (), []):
self.logger.info(value)
def test_plain_text_output_format(self):
"""Inspect the plain text output of coloredlogs."""
# Test that filtering on severity works.
self.handler.level = logging.INFO
self.logger.debug("No one should see this message.")
assert len(self.stream.getvalue().strip()) == 0
# Test that the default output format looks okay in plain text.
self.handler.level = logging.DEBUG
for method, severity in ((self.logger.debug, 'DEBUG'),
(self.logger.info, 'INFO'),
(self.logger.verbose, 'VERBOSE'),
(self.logger.warning, 'WARN'),
(self.logger.error, 'ERROR'),
(self.logger.critical, 'CRITICAL')):
# Prepare the text.
text = "This is a message with severity %r." % severity.lower()
# Log the message with the given severity.
method(text)
# Get the line of output generated by the handler.
output = self.stream.getvalue()
lines = output.splitlines()
last_line = lines[-1]
assert text in last_line
assert severity in last_line
assert PLAIN_TEXT_PATTERN.match(last_line)
def test_html_conversion(self):
ansi_encoded_text = 'I like %s - www.eelstheband.com' % ansi_wrap('birds', bold=True, color='blue')
assert ansi_encoded_text == 'I like \x1b[1;34mbirds\x1b[0m - www.eelstheband.com'
html_encoded_text = coloredlogs.converter.convert(ansi_encoded_text)
assert html_encoded_text == 'I like <span style="font-weight: bold; color: blue;">birds</span> - <a href="http://www.eelstheband.com" style="color: inherit;">www.eelstheband.com</a>'
def test_output_interception(self):
expected_output = 'testing, 1, 2, 3 ..'
assert coloredlogs.converter.capture(['sh', '-c', 'echo -n %s' % expected_output]) == expected_output
|
[
"peter@peterodding.com"
] |
peter@peterodding.com
|
49a15a9cea2349452b2a8d4b6d3320a51ee31a06
|
cd4f28ff0efa74889c4db8e91fb2caaebe37d592
|
/data.py
|
4fe388fae73f36ff04095a522803be9981dd224a
|
[] |
no_license
|
quirell/CSOBO
|
fd59f4fac0f9f38c32afb7277d6a4c1ecc59a8ea
|
73a98374cbf21920cecc9b19f77aee1624339769
|
refs/heads/master
| 2021-01-13T02:06:14.489103
| 2015-05-24T00:39:16
| 2015-05-24T00:39:16
| 33,379,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
__author__ = 'quirell'
import os
import re
class TestCase:
"""
fullname - nazwa przypadku testowego
testname - nazwa grupy do ktorej przypadek testowy nalezy, wiecej tu: http://anjos.mgi.polymtl.ca/qaplib/inst.html
value - najlepsza (minimalna) wartosc rozwiazania
solution - permutacja dla ktorej rozwiazanie przyjmuje najmniejsza wartosc
distance, flow - wiadomo
"""
datapath = ""
solutionspath = ""
def __init__(self,name):
self.fullname = name
self.testname = re.match(r"([a-zA-Z]+).*",name).group(1)
self.value = self.flow = self.distance = self.solution = None
self.size = 0
def load(self):
with open(TestCase.datapath + "/" + self.fullname + ".dat") as f:
self.size = int(f.readline())
line = "\n"
while line == "\n":
line = f.readline()
flow = []
for _ in xrange(self.size):
flow.append([int(i) for i in line.split()])
while len(flow[-1]) != self.size:
line = f.readline()
flow[-1].extend([int(i) for i in line.split()])
line = f.readline()
# line = "\n"
while line == "\n":
line = f.readline()
distance = []
for _ in xrange(self.size):
distance.append([int(i) for i in line.split()])
while len(distance[-1]) != self.size:
line = f.readline()
distance[-1].extend([int(i) for i in line.split()])
line = f.readline()
solution = None
if os.path.isfile(TestCase.solutionspath + "/" + self.fullname + ".sln"):
with open(TestCase.solutionspath + "/" + self.fullname + ".sln") as f:
line = f.readline()
_, self.value = line.split()
self.value = int(self.value)
solution = []
for line in f:
if "," in line:
solution.extend([int(i.strip()) for i in line.split(",") if i.strip().isdigit()])
else:
solution.extend([int(i.strip()) for i in line.split()])
self.flow = flow
self.distance = distance
if solution:
self.solution = [i-1 for i in solution]
def solutionavailable(self):
return self.solution is not None
def __str__(self):
        return "{} size: {} value: {}".format(self.fullname, self.size, self.value)
class Data:
def __init__(self):
self.datapath = "data"
self.solutionspath = "solutions"
TestCase.datapath = self.datapath
TestCase.solutionspath = self.solutionspath
def gettestcases(self):
testcases = []
for filename in os.listdir(self.datapath):
testcases.append(TestCase(filename[:-4]))
return testcases
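# A minimal usage sketch (illustrative; assumes the data/ and solutions/
# directories described above are present):
# data = Data()
# for case in data.gettestcases():
#     case.load()
#     if case.solutionavailable():
#         print(case)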
|
[
"="
] |
=
|
746cf426cb0daad0ecbfe251adf903b1597644cb
|
01b8229a1adbc8149e4226c81e31e56e0598b87b
|
/python/emp.py
|
8904efc26c20d0c856d6e55dda26f62c425d2f7a
|
[] |
no_license
|
Amel294/amel
|
660f4cbd16f58d89a624bc90beeb791d239c6959
|
e54fd091a4bff436fe94a9901ca9b7c189b6824d
|
refs/heads/master
| 2023-05-10T16:56:54.039667
| 2023-05-08T16:57:18
| 2023-05-08T16:57:18
| 160,795,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
class employee:
def add(self):
self.name=raw_input('\nenter employee name:\t')
        self.number=input('\nemployee number:\t')
self.salary=input('\nenter salary:\t')
def show(self):
print('\nname=',self.name,'\n')
        print('e number=',self.number,'\n')
print('salary=',self.salary,'\n')
x=employee()
x.add()
x.show()
|
[
"amel294@gmail.com"
] |
amel294@gmail.com
|
bebb96c64a6feaa14c97cbda198522f712b111c1
|
d525935af3c80584fb2175623591a1fc86349db5
|
/Problems/Process integer input/task.py
|
929699ff539661399dfca3f35156baba73cba1c6
|
[] |
no_license
|
TonyNewbie/CoffeeMachine
|
63822ffdec8570166ebf44c0ffe51bfa14d33810
|
319c41189ede6a2e6e33bd15ae675101c3377b62
|
refs/heads/master
| 2022-04-22T13:23:47.904126
| 2020-04-26T07:25:44
| 2020-04-26T07:25:44
| 258,960,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
number = int(input())
while number < 101:
if number < 10:
number = int(input())
continue
print(number)
number = int(input())
|
[
"prostomaster90@gmail.com"
] |
prostomaster90@gmail.com
|
8bd8d6553e25d404d1919bb14a246ace10a097c8
|
a1a57977131ea917a3f3094dae4a3d18846103c0
|
/tests_auto/2d/quad4/axialdisp_gendb.py
|
d81640af303b4b9322f9a47bbcf8322f0d52db21
|
[
"MIT"
] |
permissive
|
rwalkerlewis/pylith
|
cef02d5543e99a3e778a1c530967e6b5f1d5dcba
|
8d0170324d3fcdc5e6c4281759c680faa5dd8d38
|
refs/heads/master
| 2023-08-24T18:27:30.877550
| 2020-08-05T16:37:28
| 2020-08-05T16:37:28
| 154,047,591
| 0
| 0
|
MIT
| 2018-10-21T20:05:59
| 2018-10-21T20:05:59
| null |
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file tests/2d/quad4/axialdisp_gendb.py
##
## @brief Python script to generate spatial database with displacement
## boundary conditions for the axial displacement test.
import numpy
class GenerateDB(object):
"""
Python object to generate spatial database with displacement
boundary conditions for the axial displacement test.
"""
def __init__(self):
"""
Constructor.
"""
return
def run(self):
"""
Generate the database.
"""
# Domain
x = numpy.arange(-4000.0, 4000.1, 1000.0)
y = numpy.arange(-4000.0, 4000.1, 1000.0)
npts = x.shape[0]
xx = x * numpy.ones( (npts, 1), dtype=numpy.float64)
yy = y * numpy.ones( (npts, 1), dtype=numpy.float64)
xy = numpy.zeros( (npts**2, 2), dtype=numpy.float64)
xy[:,0] = numpy.ravel(xx)
xy[:,1] = numpy.ravel(numpy.transpose(yy))
from axialdisp_soln import AnalyticalSoln
soln = AnalyticalSoln()
disp = soln.displacement(xy)
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
data = {'points': xy,
'coordsys': cs,
'data_dim': 2,
'values': [{'name': "displacement-x",
'units': "m",
'data': numpy.ravel(disp[0,:,0])},
{'name': "displacement-y",
'units': "m",
'data': numpy.ravel(disp[0,:,1])}]}
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
io = SimpleIOAscii()
io.inventory.filename = "axial_disp.spatialdb"
io._configure()
io.write(data)
return
# ======================================================================
if __name__ == "__main__":
app = GenerateDB()
app.run()
# End of file
|
[
"baagaard@usgs.gov"
] |
baagaard@usgs.gov
|
1e175f5fbd461cacef895e6d82085207e7b62938
|
02ea99ea65d4768781b59ac97082fa7e1763711c
|
/neural_structured_learning/estimator/adversarial_regularization.py
|
a63a7ad858f2c30b8260beef2ba03ee89b88acf5
|
[
"Apache-2.0"
] |
permissive
|
Nzteb/neural-structured-learning
|
cad8a94b7b7d0d77eb0d0dca584398d749ff4ca6
|
0d50227f01556b1cebbd841496f5d1c9c4ef4a61
|
refs/heads/master
| 2022-11-15T22:10:32.112476
| 2020-07-09T00:13:09
| 2020-07-09T00:13:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,326
|
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper function to enable adversarial regularization to an Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import neural_structured_learning.configs as nsl_configs
import neural_structured_learning.lib as nsl_lib
import tensorflow as tf
def add_adversarial_regularization(estimator,
optimizer_fn=None,
adv_config=None):
"""Adds adversarial regularization to a `tf.estimator.Estimator`.
The returned estimator will include the adversarial loss as a regularization
term in its training objective, and will be trained using the optimizer
provided by `optimizer_fn`. `optimizer_fn` (along with the hyperparameters)
should be set to the same one used in the base `estimator`.
If `optimizer_fn` is not set, a default optimizer `tf.train.AdagradOptimizer`
with `learning_rate=0.05` will be used.
Args:
estimator: A `tf.estimator.Estimator` object, the base model.
optimizer_fn: A function that accepts no arguments and returns an instance
of `tf.train.Optimizer`. This optimizer (instead of the one used in
`estimator`) will be used to train the model. If not specified, default to
`tf.train.AdagradOptimizer` with `learning_rate=0.05`.
adv_config: An instance of `nsl.configs.AdvRegConfig` that specifies various
hyperparameters for adversarial regularization.
Returns:
A modified `tf.estimator.Estimator` object with adversarial regularization
incorporated into its loss.
"""
if not adv_config:
adv_config = nsl_configs.AdvRegConfig()
base_model_fn = estimator._model_fn # pylint: disable=protected-access
try:
base_model_fn_args = inspect.signature(base_model_fn).parameters.keys()
except AttributeError: # For Python 2 compatibility
base_model_fn_args = inspect.getargspec(base_model_fn).args # pylint: disable=deprecated-method
def adv_model_fn(features, labels, mode, params=None, config=None):
"""The adversarial-regularized model_fn.
Args:
features: This is the first item returned from the `input_fn` passed to
`train`, `evaluate`, and `predict`. This should be a single `tf.Tensor`
or `dict` of same.
labels: This is the second item returned from the `input_fn` passed to
`train`, `evaluate`, and `predict`. This should be a single `tf.Tensor`
or dict of same (for multi-head models). If mode is
`tf.estimator.ModeKeys.PREDICT`, `labels=None` will be passed. If the
`model_fn`'s signature does not accept `mode`, the `model_fn` must still
be able to handle `labels=None`.
mode: Optional. Specifies if this is training, evaluation, or prediction.
See `tf.estimator.ModeKeys`.
params: Optional `dict` of hyperparameters. Will receive what is passed to
Estimator in the `params` parameter. This allows users to configure
Estimators from hyper parameter tuning.
config: Optional `estimator.RunConfig` object. Will receive what is passed
to Estimator as its `config` parameter, or a default value. Allows
setting up things in the model_fn based on configuration such as
`num_ps_replicas`, or `model_dir`. Unused currently.
Returns:
A `tf.estimator.EstimatorSpec` with adversarial regularization.
"""
# Parameters 'params' and 'config' are optional. If they are not passed,
# then it is possible for base_model_fn not to accept these arguments.
# See documentation for tf.estimator.Estimator for additional context.
kwargs = {'mode': mode}
if 'params' in base_model_fn_args:
kwargs['params'] = params
if 'config' in base_model_fn_args:
kwargs['config'] = config
base_fn = functools.partial(base_model_fn, **kwargs)
# Uses the same variable scope for calculating the original objective and
# adversarial regularization.
with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope(),
reuse=tf.compat.v1.AUTO_REUSE,
auxiliary_name_scope=False):
original_spec = base_fn(features, labels)
# Adversarial regularization only happens in training.
if mode != tf.estimator.ModeKeys.TRAIN:
return original_spec
adv_neighbor, _ = nsl_lib.gen_adv_neighbor(
features,
original_spec.loss,
adv_config.adv_neighbor_config,
# The pgd_model_fn is a dummy identity function since loss is
# directly available from spec_fn.
pgd_model_fn=lambda features: features,
pgd_loss_fn=lambda labels, features: base_fn(features, labels).loss,
pgd_labels=labels)
# Runs the base model again to compute loss on adv_neighbor.
adv_spec = base_fn(adv_neighbor, labels)
final_loss = original_spec.loss + adv_config.multiplier * adv_spec.loss
if not optimizer_fn:
# Default to the Adagrad optimizer, the same as canned DNNEstimator.
optimizer = tf.train.AdagradOptimizer(learning_rate=0.05)
else:
optimizer = optimizer_fn()
train_op = optimizer.minimize(
loss=final_loss, global_step=tf.compat.v1.train.get_global_step())
update_ops = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.UPDATE_OPS)
if update_ops:
train_op = tf.group(train_op, *update_ops)
return original_spec._replace(loss=final_loss, train_op=train_op)
# Replaces the model_fn while keeps other fields/methods in the estimator.
estimator._model_fn = adv_model_fn # pylint: disable=protected-access
return estimator
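# A minimal usage sketch (illustrative; the base estimator, feature columns
# and input_fn are assumed to exist elsewhere):
# base = tf.estimator.DNNClassifier(hidden_units=[16], feature_columns=cols)
# adv_estimator = add_adversarial_regularization(
#     base,
#     optimizer_fn=lambda: tf.train.AdagradOptimizer(learning_rate=0.05),
#     adv_config=nsl_configs.AdvRegConfig())
# adv_estimator.train(input_fn=train_input_fn, steps=1000)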
|
[
"tensorflow.copybara@gmail.com"
] |
tensorflow.copybara@gmail.com
|
6f0e144c9f2150d6ed2b247269a15962705f55d8
|
f98c9dea0e212be5c7bc3161499e5633383bd4d7
|
/zmq/zmq_client.py
|
a27214a3bfe54b37c5d0b7c49f3ab8e11e63d9e0
|
[
"MIT"
] |
permissive
|
ysoftman/test_code
|
dddb5bee3420977bfa335320a09d66e5984403f5
|
0bf6307073081eeb1d654a1eb5efde44a0bdfe1e
|
refs/heads/master
| 2023-08-17T05:45:49.716829
| 2023-08-16T05:00:09
| 2023-08-16T05:00:09
| 108,200,568
| 4
| 0
|
MIT
| 2023-03-15T04:23:10
| 2017-10-25T00:49:26
|
C++
|
UTF-8
|
Python
| false
| false
| 713
|
py
|
#-*- coding: utf-8 -*-
# 20160422 ysoftman
# pyzmq (python3.x) client
import zmq
import sys
def send_req(ip, port):
context = zmq.Context()
socket = context.socket(zmq.REQ)
# socket.setsockopt(zmq.REQ, b'')
socket.connect("tcp://%s:%s" % (ip, port))
    # In Python 3 the default string type is Unicode.
    # The send function cannot take a Unicode string, so build the data as bytes.
data = b'hello'
for i in range(10):
socket.send(data)
print("send %s to server. [%d]" % (data, i))
reply = socket.recv()
print("reply %s from server.[%d]" % (reply, i))
if __name__ == "__main__":
print("start testing...")
send_req("127.0.0.1", "55555")
|
[
"ysoftman@gmail.com"
] |
ysoftman@gmail.com
|
7f0537ded18f3ab59f3f0455f9f3074597b23440
|
377e3a552fb807febc18ce036af77edbce93ca19
|
/binary trees/inorder_traversal_DFS.py
|
47831f96111d76a9cebf1a030c565db26ac4a083
|
[] |
no_license
|
souravs17031999/100dayscodingchallenge
|
940eb9b6d6037be4fc0dd5605f9f808614085bd9
|
d05966f3e6875a5ec5a8870b9d2627be570d18d9
|
refs/heads/master
| 2022-10-29T11:05:46.762554
| 2022-09-28T13:04:32
| 2022-09-28T13:04:32
| 215,993,823
| 44
| 12
| null | 2022-08-18T14:58:50
| 2019-10-18T09:55:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
# Program for inorder traversal for a binary tree
# --------------------------------
# As we know inorder traversal means, Left-Node-Right
# We can take the example of the following tree and visualize the stack calls :
# 1
# / \
# 2 3
# / \
# 4 5
#
# RECURSIVE APPROACH
# --------------------------------
# TIME : O(N), SPACE : NOT CONSTANT, DUE TO RECURSIVE CALLS.
# WE should also try to write iterative solution, because there might
# be some case where stack recursion depth limit is exceeded due to not
# enough memory available or due to system limit on recursion calls.
# ---------------------------------------
# ITERATIVE SOLUTION :
# 1) Push the current node to S and set current = current->left until current is NULL
# 2) If current is NULL and the stack is not empty then
#     * Pop the top item from the stack.
#     * Print the popped item, set current = popped_item->right
#     * Go to step 1.
# 3) If current is NULL and the stack is empty then we are done.
# ---------------------------------------------
# TIME : O(N), SPACE : O(N) WHERE N IS THE NUMBER OF NODES IN THE TREE.
# ----------------------------------------------
# we can also optimized more on space complexity part by not using any
# stack or recursion, named as "MORRIS TRAVERSAL" which is described in
# MORRIS_traversal.py in a separate program.
# ----------------------------------------------
class Node:
def __init__(self, val):
self.data = val
self.left = None
self.right = None
# inorder recursive
def inorder_rec(root):
if root == None:
return
inorder_rec(root.left)
print(root.data, end = " ")
inorder_rec(root.right)
# ITERATIVE SOLUTION :
from collections import deque
def inorder_itr(root):
if root == None:
return
stack = deque([])
ptr = root
while True:
# this will be true everytimee until ptr.left becomes None,
# that means all the left ones will be on the stack firstly.
if ptr:
stack.append(ptr)
ptr = ptr.left
# now when above fails, then we need to pop the top of stack
# and print it, also make current ptr to ptr.right to traverse
# for right subtree
elif stack:
ptr = stack.pop()
print(ptr.data, end = " ")
ptr = ptr.right
# now if current ptr is also None and stack is also empty,
# then we need to move out of loop.
else:
break
# driver test function
if __name__ == '__main__':
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
#inorder_rec(root)
inorder_itr(root)
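    # expected inorder output for the tree above: 4 2 5 1 3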
|
[
"souravs_1999@rediffmail.com"
] |
souravs_1999@rediffmail.com
|
da5ef56bfccc88c74e51c8cf0376e38e3a3ca319
|
7626a8371c7a847f93bdae5e1d6e03ee9667c3ba
|
/print/users/migrations/0001_initial.py
|
08083d447c75319b2a232ce671351aac48b5e516
|
[] |
no_license
|
zzyzx4/sp
|
52c815fd115b4605942baa73687838f64cd41864
|
90c7a90b3de27af674422e2c8892bad5ba7891e8
|
refs/heads/master
| 2020-05-23T21:20:28.166932
| 2019-07-19T11:56:49
| 2019-07-19T11:56:49
| 186,950,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
# Generated by Django 2.2.2 on 2019-06-24 15:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"you@example.com"
] |
you@example.com
|
946f6acb38c734fdff7ed61463b00a60f7de3399
|
f0316e656767cf505b32c83eef4df13bb9f6b60c
|
/Kattis/qaly.py
|
86b1b75314997d2f525dc10e31ebf5f2bd66f855
|
[] |
no_license
|
AkshdeepSharma/Classroom
|
70ec46b35fab5fc4a9d2eac430659d7dafba93da
|
4e55799466c101c736de6c7e07d716ff147deb83
|
refs/heads/master
| 2022-06-13T18:14:03.236503
| 2022-05-17T20:16:28
| 2022-05-17T20:16:28
| 94,828,359
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
N = int(input())
qaly = 0
for i in range(N):
nums = input().split(" ")
qaly += float(nums[0]) * float(nums[1])
print(round(qaly, 3))
|
[
"akshdeep.sharma1@gmail.com"
] |
akshdeep.sharma1@gmail.com
|
a332e92186cd5002d1095263b0a5abaae4af5d37
|
3c27b86f0165ab24e6b04d505e8471e032594f0b
|
/pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GLES1/OES/texture_cube_map.py
|
1f616ca628a9790612c0acccc4ae8f17355fac0e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"MIT"
] |
permissive
|
alexus37/AugmentedRealityChess
|
8b9ccdfffc8aee93a86a44b8ef53c034ec6a10d1
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
refs/heads/master
| 2020-12-24T13:29:21.967833
| 2020-02-27T09:38:50
| 2020-02-27T09:38:50
| 31,264,034
| 1
| 1
|
MIT
| 2020-02-27T09:38:52
| 2015-02-24T14:36:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,394
|
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES1 import _types as _cs
# End users want this...
from OpenGL.raw.GLES1._types import *
from OpenGL.raw.GLES1 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES1_OES_texture_cube_map'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES1,'GLES1_OES_texture_cube_map',error_checker=_errors._error_checker)
GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES=_C('GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES',0x851C)
GL_NORMAL_MAP_OES=_C('GL_NORMAL_MAP_OES',0x8511)
GL_REFLECTION_MAP_OES=_C('GL_REFLECTION_MAP_OES',0x8512)
GL_TEXTURE_BINDING_CUBE_MAP_OES=_C('GL_TEXTURE_BINDING_CUBE_MAP_OES',0x8514)
GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES=_C('GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES',0x8516)
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES=_C('GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES',0x8518)
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES=_C('GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES',0x851A)
GL_TEXTURE_CUBE_MAP_OES=_C('GL_TEXTURE_CUBE_MAP_OES',0x8513)
GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES=_C('GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES',0x8515)
GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES=_C('GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES',0x8517)
GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES=_C('GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES',0x8519)
GL_TEXTURE_GEN_MODE_OES=_C('GL_TEXTURE_GEN_MODE_OES',0x2500)
GL_TEXTURE_GEN_STR_OES=_C('GL_TEXTURE_GEN_STR_OES',0x8D60)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetTexGenfvOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetTexGenivOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,ctypes.POINTER(_cs.GLfixed))
def glGetTexGenxvOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLfloat)
def glTexGenfOES(coord,pname,param):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glTexGenfvOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLint)
def glTexGeniOES(coord,pname,param):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glTexGenivOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLfixed)
def glTexGenxOES(coord,pname,param):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,ctypes.POINTER(_cs.GLfixed))
def glTexGenxvOES(coord,pname,params):pass
|
[
"alexlelidis@gmx.de"
] |
alexlelidis@gmx.de
|
fc32d33d92acf211a927ee9591a9c2e2c794716f
|
faa83048d2bb62c27f030942f3f038f87637c293
|
/indico/core/oauth/protector.py
|
29693821d8a79e3bc2dcaeb25d629331dcedddbf
|
[
"MIT"
] |
permissive
|
janschill/indico
|
f79536db43afaf631449fef5119069af2938e76d
|
068a947446eb624308d6264e34a4061807e6ff12
|
refs/heads/master
| 2023-06-08T07:32:33.007683
| 2021-06-18T12:42:03
| 2021-06-18T12:42:03
| 339,700,154
| 0
| 0
|
MIT
| 2021-06-18T12:42:04
| 2021-02-17T11:22:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,105
|
py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import flask
from authlib.integrations.flask_oauth2 import ResourceProtector
from authlib.oauth2.rfc6750.validator import BearerTokenValidator
from flask import after_this_request, jsonify
from werkzeug.exceptions import HTTPException
from indico.core.db import db
from indico.core.oauth.models.applications import SystemAppType
from indico.core.oauth.models.tokens import OAuthToken
from indico.core.oauth.util import query_token
from indico.util.date_time import now_utc
class IndicoAuthlibHTTPError(HTTPException):
def __init__(self, status_code, payload, headers):
super().__init__(payload.get('error_description') or payload['error'])
resp = jsonify(payload)
resp.headers.update(headers)
resp.status_code = status_code
self.response = resp
class IndicoResourceProtector(ResourceProtector):
def raise_error_response(self, error):
payload = dict(error.get_body())
headers = error.get_headers()
raise IndicoAuthlibHTTPError(error.status_code, payload, headers)
def parse_request_authorization(self, request):
access_token_querystring = flask.request.args.get('access_token')
if access_token_querystring and not request.headers.get('Authorization', '').lower().startswith('bearer '):
validator = self.get_token_validator('legacy_qs')
return validator, access_token_querystring
return super().parse_request_authorization(request)
class IndicoBearerTokenValidator(BearerTokenValidator):
def authenticate_token(self, token_string):
return query_token(token_string)
def validate_token(self, token, scopes):
super().validate_token(token, scopes)
# if we get here, the token is valid so we can mark it as used at the end of the request
# XXX: should we wait or do it just now? even if the request failed for some reason, the
# token could be considered used, since it was valid and most likely used by a client who
# expected to do something with it...
token_id = token.id # avoid DetachedInstanceError in the callback
@after_this_request
def _update_last_use(response):
with db.tmp_session() as sess:
# do not modify `token` directly, it's attached to a different session!
sess.query(OAuthToken).filter_by(id=token_id).update({OAuthToken.last_used_dt: now_utc()})
sess.commit()
return response
class IndicoLegacyQueryStringBearerTokenValidator(IndicoBearerTokenValidator):
TOKEN_TYPE = 'legacy_qs'
def authenticate_token(self, token_string):
token = super().authenticate_token(token_string)
if token and token.application.system_app_type == SystemAppType.checkin:
# Only the checkin app is allowed to pass tokens insecurely via query string
return token
|
[
"adrian.moennich@cern.ch"
] |
adrian.moennich@cern.ch
|
78b44ed568709f7982622043bd51601faeed8ab8
|
4791bde7bb7275fc25480fdf0cd81d1a9450a50c
|
/accounts/views.py
|
2cd377f5f3cbd97f9432c0c428c386f474093f90
|
[] |
no_license
|
VinneyJ/RU-I-tech-app
|
9692f0681dd704ce52c621b3d080d1a90fbe501b
|
dd15335f26a35d8e32477e7dd384f3a80351d25d
|
refs/heads/master
| 2020-05-02T21:04:17.773867
| 2019-03-28T13:25:18
| 2019-03-28T13:25:18
| 178,209,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,070
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, logout
from accounts.form import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
from .models import Profile
# Create your views here.
def signup_view(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
user = form.save()
#log user in
login(request, user)
return redirect('articles:list')
else:
form=UserRegisterForm()
return render(request, 'accounts/signup.html', {'form':form})
def login_view(request):
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request,user)
#log user in
if 'next' in request.POST:
return redirect(request.POST.get('next'))
else:
return redirect('articles:list')
else:
form = AuthenticationForm()
return render(request, 'accounts/login.html', {'form':form})
@login_required
def profile_view(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
#ENTER MESSAGES HERE
return redirect('accounts:profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
return render(request, 'accounts/profile.html', {'u_form' : u_form, 'p_form' : p_form})
def logout_view(request):
if request.method =='POST':
logout(request)
return redirect('articles:list')
|
[
"vincentjayden49@gmail.com"
] |
vincentjayden49@gmail.com
|
86c0cb10d29d06dcf7aa7a311986ac1f5d219e7e
|
2420eab92b5d1ec2225d2eeb128a41e3c7b1ce38
|
/11/JackTonenizer.py
|
b7a45cc64a958be663d46d4e26cc25dc8127e138
|
[] |
no_license
|
hokiepete/building-a-modern-computer-from-first-principles
|
f98549a5d32ff23d4eab3b089324e61dac22841a
|
bba0d437b10ba5b5f6861067b3a0ba4ac14ef447
|
refs/heads/master
| 2023-08-17T21:38:44.589836
| 2021-10-24T01:41:56
| 2021-10-24T01:41:56
| 385,073,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,155
|
py
|
# Main program that sets up and invokes the other modules
import os
KEYWORDS = set([
'class','constructor','function','method','field','static','var','int','char','boolean',
'void','true','false','null','this','let','do','if','else','while','return'
])
SYMBOL = set([
'{','}','(',')','[',']','.',',',';','+','-','*','/','&','|','<','>','=','~'
])
SUBS = {'<':'<', '>': '>', '\'': '"', '\"': '"', '&': '&'}
class JackTokenizer:
def __init__(self, input_string):
self.raw_string = input_string
self.tokens = []
self.tagged_tokens = []
self.clean_lines()
self.tokenize()
self.tag_tokens()
    def clean_lines(self):
        # Strip comments: '//' runs to the end of the line, while '/* ... */'
        # blocks may span several lines. Whichever marker appears first on a
        # line takes precedence.
        lines = self.raw_string.split('\n')
        cleaned = []
        IN_COMMENT = False
        for line in lines:
            if IN_COMMENT:
                if "*/" in line:
                    IN_COMMENT = False
                    cleaned_line = line.split('*/', 1)[1].strip()
                else:
                    continue
            else:
                idx_line = line.find('//')
                idx_block = line.find('/*')
                if idx_line != -1 and (idx_block == -1 or idx_line < idx_block):
                    # Line comment: drop the remainder of the line.
                    cleaned_line = line[:idx_line].strip()
                elif idx_block != -1:
                    if '*/' in line[idx_block:]:
                        # Block comment opens and closes on the same line.
                        pref = line[:idx_block]
                        suff = line[idx_block:].split('*/', 1)[1]
                        cleaned_line = pref.strip() + ' ' + suff.strip()
                    else:
                        IN_COMMENT = True
                        cleaned_line = line[:idx_block].strip()
                else:
                    cleaned_line = line.strip()
            if cleaned_line and (not cleaned_line.isspace()):
                cleaned.append(cleaned_line)
        self.cleaned_string = ' '.join(cleaned)
def tokenize(self):
while self.cleaned_string:
token = self.get_next_token()
if token:
self.tokens.append(token)
def get_next_token(self):
token = ''
literal = False
for i, char in enumerate(self.cleaned_string):
if char in ['\'', "\""]:
if literal:
literal = False
else:
literal = True
if not literal:
if char == ' ':
self.cleaned_string = self.cleaned_string[i+1:]
return token
if char in SYMBOL:
if token:
self.cleaned_string = self.cleaned_string[i:]
return token
else:
self.cleaned_string = self.cleaned_string[i+1:]
return char
if token.isnumeric() and not char.isnumeric():
raise ValueError(
f"Variable names cannot start with a numeric character. Please fix token beginning with {token + char}"
)
token += char
return token
def tag_tokens(self):
self.tagged_tokens.append('<tokens>')
for token in self.tokens:
if token in KEYWORDS:
self.tagged_tokens.append(f"<keyword> {token} </keyword>")
elif token in SUBS:
self.tagged_tokens.append(f"<symbol> {SUBS[token]} </symbol>")
elif token in SYMBOL:
self.tagged_tokens.append(f"<symbol> {token} </symbol>")
elif token[0] in ['\'', '\"']:
self.tagged_tokens.append(f"<stringConstant> {token[1:-1]} </stringConstant>")
elif token.isnumeric():
self.tagged_tokens.append(f"<integerConstant> {token} </integerConstant>")
else:
self.tagged_tokens.append(f"<identifier> {token} </identifier>")
self.tagged_tokens.append('</tokens>')
if __name__ == '__main__':
    srcpath = 'ArrayTest\\Main.jack'
if os.path.isdir(srcpath):
# read and parse the system file
# with open(srcpath + '\\Sys.vm', 'r') as file:
# text = file.read()
# get all the files in the directory minus the system file
# and parse the files
files = os.listdir(srcpath)
for file in files:
if file.endswith('.jack'):
with open(srcpath + f'\\{file}', 'r') as f:
text = f.read()
analyzer = JackTokenizer(text)
destfile = f'{srcpath}\\{file.replace(".jack", "T.xml")}'
with open(destfile, 'w') as f:
f.write('\n'.join(analyzer.tagged_tokens)+'\n')
else:
with open(srcpath, 'r') as file:
text = file.read()
analyzer = JackTokenizer(text)
destfile = f'{srcpath.replace(".jack", "T.xml")}'
with open(destfile, 'w') as f:
f.write('\n'.join(analyzer.tagged_tokens)+'\n')
|
[
"pnolan1986@gmail.com"
] |
pnolan1986@gmail.com
|
6a8158e71b678a31bafc2805c7a170059a1636e0
|
8528604d3231d86feada09465170220b892c1c35
|
/landlab/grid/warnings.py
|
f1b23dd50399188b1024ed1c35288a9fb622b4ba
|
[
"MIT"
] |
permissive
|
ChristinaB/landlab
|
9780acbd5753741cd91c40dbc5d683b66f2481a6
|
82fd45d059dbe58728b074b024e46a1a10ce1e5c
|
refs/heads/master
| 2021-04-26T22:45:44.007416
| 2020-12-02T22:57:27
| 2020-12-02T22:57:27
| 124,138,852
| 0
| 1
|
MIT
| 2020-12-02T22:57:28
| 2018-03-06T21:08:56
|
Python
|
UTF-8
|
Python
| false
| false
| 988
|
py
|
import os
from ..core.messages import deprecation_message
class DeprecatedSignature(DeprecationWarning):
msg = "You are using a deprecated calling signature."
def __init__(self, name, old=None, new=None):
self._name = name
self._old = old
self._new = new
if old:
self._old = self._construct_call(name, self._old[0], self._old[1])
if new:
self._new = self._construct_call(name, self._new[0], self._new[1])
@staticmethod
def _construct_call(name, args, kwds):
signature = ", ".join(
[repr(arg) for arg in args]
+ ["{k}={v}".format(k=k, v=repr(v)) for k, v in kwds.items()]
)
return "{name}({signature})".format(name=name, signature=signature)
def __str__(self):
if self._new:
use = ">>> grid = {call}".format(call=self._new)
else:
use = None
return os.linesep + deprecation_message(self.msg, use=use)
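# A minimal usage sketch (illustrative; the grid name and both signatures are
# made up for the example):
# import warnings
# warnings.warn(DeprecatedSignature('RasterModelGrid',
#                                   old=((4, 5), {'spacing': 2.0}),
#                                   new=(((4, 5),), {'xy_spacing': 2.0})))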
|
[
"mcflugen@gmail.com"
] |
mcflugen@gmail.com
|
99a5ca707f3d8c2a1b8bca69fffae0e09c4debbb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03007/s124226483.py
|
6a9358f63822c404d9005c20865e9e13e95ae0d6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
def c_successive_subtraction(N, A):
A.sort()
maximum = A.pop()
minimum = A.pop(0)
    # Set aside the maximum and the minimum elements of A.
    # For every other element a_k:
    # if a_k is non-negative, subtracting a_k from the minimum pushes it further negative;
    # if a_k is negative, subtracting a_k from the maximum pushes it further positive.
    # Finally computing maximum - minimum maximizes the last remaining integer.
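    # e.g. A = [1, -1, 2]: max = 2, min = -1; subtracting 1 from the minimum
    # gives -2, and the final 2 - (-2) = 4 is the best attainable value.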
operation = []
for a in A:
if a >= 0:
operation.append('{} {}'.format(minimum, a))
minimum -= a
else:
operation.append('{} {}'.format(maximum, a))
maximum -= a
operation.append('{} {}'.format(maximum, minimum))
return str(maximum - minimum) + '\n' + '\n'.join(operation)
N = int(input())
A = [int(i) for i in input().split()]
print(c_successive_subtraction(N, A))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2c9a70b0cecef465d4b201e73b956ba6ebe213bf
|
d1969a22afb7adda791caa4edf464dad02b684c0
|
/apps/edu/urls.py
|
b7d0eac3b362ac6a5aee738851c571d6a27a6507
|
[
"MIT"
] |
permissive
|
local-host-club/cms
|
11d4ea1105dabc6d0a60b935484b5f9eb2ec1da9
|
136fb075f11011ea77672b3468f69262a43eb500
|
refs/heads/master
| 2020-07-02T17:09:36.135855
| 2016-11-28T13:31:11
| 2016-11-28T13:31:11
| 74,293,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
"""cms URL Configuration
"""
from django.conf.urls import url
from apps.edu import views
urlpatterns = [
url(r'^curriculo/$', views.CompetenciaAreaList.as_view(), name='curriculo'),
url(r'^competencia_area/add$', views.CompetenciaAreaCreateView.as_view(), name='competencia_area_add'),
url(r'^competencia_area/(?P<pk>\d+)/$', views.CompetenciaAreaDetail.as_view(), name='competencia_area_detail'),
url(r'^competencia/add$', views.CompetenciaCreateView.as_view(), name='competencia_add'),
url(r'^indicador/add$', views.IndicadorCreateView.as_view(), name='indicador_add'),
url(r'^nivel/add$', views.NivelCreateView.as_view(), name='nivel_add'),
url(r'^nota/(?P<pk>\d+)/add$', views.NotaCreateView.as_view(), name='nota_add'),
url(r'^evaluacion/add$', views.EvaluacionCreateView.as_view(), name='evaluacion_add'),
url(r'^evaluacion/list$', views.EvaluacionListView.as_view(), name='evaluacion_list'),
url(r'^evaluacion/(?P<pk>\d+)$', views.EvaluacionDetail.as_view(), name='evaluacion_detail'), ]
|
[
"jinchuika@gmail.com"
] |
jinchuika@gmail.com
|
6c2852e49e135f9302519afe6ba267c820f0331f
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=53/sched.py
|
4782c407067e60157e4904c483d2dc99938f2852
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
-X FMLP -Q 0 -L 2 70 300
-X FMLP -Q 0 -L 2 60 200
-X FMLP -Q 1 -L 2 58 175
-X FMLP -Q 2 -L 2 55 175
-X FMLP -Q 3 -L 1 33 200
30 400
30 400
26 100
20 125
15 175
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
f9799d5d427629227541426eb2f76d39b9c7be55
|
06e2dd0aa78241edbe647a8b5ef075a90ee4a8b6
|
/97/holidays.py
|
719a5166b67dda66aa92ab9dfadacfa9422f242d
|
[] |
no_license
|
StefanKaeser/pybites
|
a6a78b51039ab4792deb285dc799c6abf7bea6d5
|
9f839af4ef400786b7c28701c2241f310bb4422c
|
refs/heads/master
| 2020-08-23T11:28:03.172666
| 2020-06-15T15:37:14
| 2020-06-15T15:37:14
| 216,606,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
from collections import defaultdict
import os
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
# prep data
# tmp = os.getenv("TMP", "/tmp")
tmp = os.path.curdir
page = "us_holidays.html"
holidays_page = os.path.join(tmp, page)
urlretrieve(f"https://bites-data.s3.us-east-2.amazonaws.com/{page}", holidays_page)
with open(holidays_page) as f:
content = f.read()
def get_us_bank_holidays(content=content):
"""Receive scraped html output, make a BS object, parse the bank
holiday table (css class = list-table), and return a dict of
keys -> months and values -> list of bank holidays"""
soup = BeautifulSoup(content, "html.parser")
holiday_table = soup.find("table", {"class": "list-table"})
months = [tag.string.split("-")[1] for tag in holiday_table.find_all("time")]
holiday_names = [tag.string.strip() for tag in holiday_table.find_all("a")]
holidays = defaultdict(list)
for month, name in zip(months, holiday_names):
holidays[month].append(name)
return holidays
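# A minimal usage sketch (illustrative):
# holidays = get_us_bank_holidays()
# for month, names in sorted(holidays.items()):
#     print(month, names)  # month keys come from the page's <time> tags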
|
[
"stefan.kaeser7@gmail.com"
] |
stefan.kaeser7@gmail.com
|
0381c9188731c7b9a643d7d35757c09a22da7724
|
f2ebfb99b0e6a07afba7e583f820737511a1a98e
|
/code/models/listener.py
|
53325f3801b8e3ba46209f53072932ee06991b75
|
[
"MIT"
] |
permissive
|
jayelm/emergent-generalization
|
211c065d4829322792396ad6605dc51024e913cd
|
35b1d97a940826008cde13498aa75c233a7f454a
|
refs/heads/master
| 2023-08-27T00:25:32.988514
| 2021-09-29T04:47:22
| 2021-09-29T04:47:22
| 373,944,198
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,517
|
py
|
"""
Listener models
"""
import torch
import torch.nn as nn
from . import rnn
class CopyListener(nn.Module):
def __init__(self, feat_model, message_size=100, dropout=0.2):
super().__init__()
self.feat_model = feat_model
self.feat_size = feat_model.final_feat_dim
self.dropout = nn.Dropout(p=dropout)
self.message_size = message_size
if self.message_size is None:
self.bilinear = nn.Linear(self.feat_size, 1, bias=False)
else:
self.bilinear = nn.Linear(self.message_size, self.feat_size, bias=False)
def embed_features(self, feats):
batch_size = feats.shape[0]
n_obj = feats.shape[1]
rest = feats.shape[2:]
feats_flat = feats.view(batch_size * n_obj, *rest)
feats_emb_flat = self.feat_model(feats_flat)
feats_emb = feats_emb_flat.unsqueeze(1).view(batch_size, n_obj, -1)
feats_emb = self.dropout(feats_emb)
return feats_emb
def compare(self, feats_emb, message_enc):
"""
Compute dot products
"""
scores = torch.einsum("ijh,ih->ij", (feats_emb, message_enc))
return scores
def forward(self, feats, message):
# Embed features
feats_emb = self.embed_features(feats)
# Embed message
if self.message_size is None:
return self.bilinear(feats_emb).squeeze(2)
else:
message_bilinear = self.bilinear(message)
return self.compare(feats_emb, message_bilinear)
def reset_parameters(self):
self.feat_model.reset_parameters()
self.bilinear.reset_parameters()
class Listener(CopyListener):
def __init__(self, feat_model, embedding_module, **kwargs):
super().__init__(feat_model, **kwargs)
self.embedding = embedding_module
self.lang_model = rnn.RNNEncoder(self.embedding, hidden_size=self.message_size)
self.vocab_size = embedding_module.num_embeddings
def forward(self, feats, lang, lang_length):
# Embed features
feats_emb = self.embed_features(feats)
# Embed language
lang_emb = self.lang_model(lang, lang_length)
# Bilinear term: lang embedding space -> feature embedding space
lang_bilinear = self.bilinear(lang_emb)
return self.compare(feats_emb, lang_bilinear)
def reset_parameters(self):
super().reset_parameters()
self.embedding.reset_parameters()
self.lang_model.reset_parameters()
|
[
"6245103+jayelm@users.noreply.github.com"
] |
6245103+jayelm@users.noreply.github.com
|
de99f451e82714b1c4644394330ee7044c740365
|
62cbf8dcd921feb309d79ad66767405ea27623ba
|
/python/boj/2667_boj_danji.py
|
34b79935b3fb77392d1a17aad2eb8c53ba322477
|
[] |
no_license
|
woodg1207/APS
|
1d20f8b7c6d7a7f41e9920ec41ad0c435a881519
|
e49a6fb01e1a51213963cd58f13a1364833482f8
|
refs/heads/master
| 2023-06-13T21:41:28.114299
| 2021-06-21T04:17:19
| 2021-06-21T04:17:19
| 288,982,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
import sys; sys.stdin=open('s2667.txt','r')
from collections import deque
N = int(input())
arr = [list(map(int, input())) for _ in range(N)]
dr = [1,0,-1,0]
dc = [0,1,0,-1]
danji_list = []
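# BFS flood fill: each unvisited 1 seeds a new apartment block ("danji");
# visited cells are zeroed so that no cell is counted twice.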
for i in range(N):
for j in range(N):
if arr[i][j]:
arr[i][j] = 0
c = 1
q = deque()
q.append([i,j])
while q:
p = q.popleft()
for d in range(4):
nr, nc = p[0]+dr[d], p[1]+dc[d]
if 0<=nr<N and 0<=nc<N:
if arr[nr][nc]:
q.append([nr,nc])
arr[nr][nc] = 0
c += 1
danji_list.append(c)
print(len(danji_list))
for i in sorted(danji_list):
print(i)
|
[
"woodg1207@gmail.com"
] |
woodg1207@gmail.com
|
77ceff54bf05ef0ec310abe67194e4c272925c48
|
a7b66311c2ce113789933ec3162f1128b2862f13
|
/app/closeLoop/ForcastDiffFactor.py
|
08170df61a83a596a2f558df9d51de995411210f
|
[
"MIT"
] |
permissive
|
ChanJeunlam/geolearn
|
214b2c42359ea1164b39117fad2d7470adeb6d35
|
791caa54eb70920823ea7d46714dc8a3e7fa7445
|
refs/heads/master
| 2023-07-16T04:13:15.526364
| 2021-08-16T05:24:18
| 2021-08-16T05:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,847
|
py
|
from hydroDL import pathSMAP, master, utils
from hydroDL.master import default
from hydroDL.post import plot, stat
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import pandas as pd
doLst = list()
# doLst.append('train')
doLst.append('test')
doLst.append('post')
saveDir = os.path.join(pathSMAP['dirResult'], 'DA')
# test
if 'test' in doLst:
torch.cuda.set_device(2)
subset = 'CONUSv2f1'
tRange = [20150402, 20180401]
yrStrLst = ['2015', '2016', '2017']
yfLst = list()
ypLst = list()
for yrStr in yrStrLst:
out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_DA' + yrStr)
df, yf, obs = master.test(
out, tRange=tRange, subset=subset, batchSize=100)
out = os.path.join(pathSMAP['Out_L3_NA'], 'DA',
'CONUSv2f1_LSTM' + yrStr)
df, yp, obs = master.test(out, tRange=tRange, subset=subset)
yf = yf.squeeze()
yp = yp.squeeze()
yfLst.append(yf)
ypLst.append(yp)
obs = obs.squeeze()
# figure out how many days observation lead
maskObs = 1 * ~np.isnan(obs.squeeze())
maskDay = np.zeros(maskObs.shape).astype(int)
ngrid, nt = maskObs.shape
for j in range(ngrid):
temp = 0
for i in range(nt):
maskDay[j, i] = temp
if maskObs[j, i] == 1:
temp = 1
else:
if temp != 0:
temp = temp + 1
ind = np.random.randint(0, ngrid)
maskObsDay = maskObs * maskDay
unique, counts = np.unique(maskObsDay, return_counts=True)
maskF = (maskDay >= 1) & (maskDay <= 3)
statPLst = list()
statFLst = list()
for k in range(3):
statP = stat.statError(
utils.fillNan(ypLst[k], maskF), utils.fillNan(obs, maskF))
statF = stat.statError(
utils.fillNan(yfLst[k], maskF), utils.fillNan(obs, maskF))
statPLst.append(statP)
statFLst.append(statF)
cropFile = r'/mnt/sdb/Data/Crop/cropRate_CONUSv2f1.csv'
cropRate = pd.read_csv(cropFile, dtype=np.float, header=None).values
# croprate - 0 corn, 4 soybean, 22 spring wheat, 23 winter wheat
dataGrid = [(statPLst[0]['RMSE'] - statFLst[0]['RMSE']) / statPLst[0]['RMSE'],
(statPLst[1]['RMSE'] - statFLst[1]['RMSE']) / statPLst[1]['RMSE'],
(statPLst[2]['RMSE'] - statFLst[2]['RMSE']) / statPLst[2]['RMSE'],
]
prcp = df.getDataTs('APCP_FORA').squeeze()
dataTs = [[obs, ypLst[0], yfLst[0]], [obs, ypLst[1], yfLst[1]],
[obs, ypLst[2], yfLst[2]], [prcp]]
crd = df.getGeo()
t = df.getT()
mapNameLst = ['dRMSE 2015', 'dRMSE 2016', 'dRMSE 2017']
tsNameLst = ['obs', 'prj', 'fore']
tBar = [utils.time.t2dt(20160401), utils.time.t2dt(20170401)]
#plt.tight_layout()
plot.plotTsMap(
dataGrid,
dataTs,
lat=crd[0],
lon=crd[1],
t=t,
mapNameLst=mapNameLst,
isGrid=True,
multiTS=True,
linewidth=1,
figsize=(10, 10),
tBar=tBar)
# see result for different seasons
tRangeLst = [[20180101, 20180201], [20180201, 20180301], [20180301, 20180401],
[20160401, 20160501], [20160501, 20160601], [20160601, 20160701],
[20160701, 20160801], [20160801, 20160901], [20160901, 20161001],
[20161001, 20161101], [20161101, 20161201], [20161201, 20170101],
[20170101, 20170201], [20170201, 20170301], [20170301, 20170401],
[20170401, 20170501], [20170501, 20170601], [20170601, 20170701],
[20170701, 20170801], [20170801, 20170901], [20170901, 20171001],
[20171001, 20171101], [20171101, 20171201], [20171201, 20180101]]
tAllR = [20150402, 20180401]
tAllA = utils.time.tRange2Array(tAllR)
statPLst = list()
statFLst = list()
for k in range(12):
tRLst = [tRangeLst[k], tRangeLst[k + 12]]
temp = list()
for tR in tRLst:
tA = utils.time.tRange2Array(tR)
ind0 = np.array(range(nt))
ind1, ind2 = utils.time.intersect(tAllA, tA)
temp.append(ind1)
indT = np.concatenate(temp)
yfTemp = utils.fillNan(yf, maskF)[:, indT]
ypTemp = utils.fillNan(yp, maskF)[:, indT]
obsTemp = utils.fillNan(obs, maskF)[:, indT]
statPLst.append(stat.statError(ypTemp, obsTemp))
statFLst.append(stat.statError(yfTemp, obsTemp))
import matplotlib
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
labCrop = ['Corn', 'Spring wheat', 'Winter wheat']
indCrop = [0, 22, 23]
cropFile = r'/mnt/sdb/Data/Crop/cropRate_CONUSv2f1.csv'
cropRate = pd.read_csv(cropFile, dtype=np.float, header=None).values
key = 'RMSE'
[lat, lon] = df.getGeo()
fig, axes = plt.subplots(1, 3, figsize=[12, 5])
for k in range(3):
grid, uy, ux = utils.grid.array2grid(
cropRate[:, indCrop[k]], lat=lat, lon=lon)
plot.plotMap(
grid, ax=axes[k], lat=uy, lon=ux, title=labCrop[k] + ' percentage')
plt.tight_layout()
fig.show()
import matplotlib
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
indLst = [cropRate[:, 0] > 30, cropRate[:, 22] > 5, cropRate[:, 23] > 10]
labMonth = [
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
'Nov', 'Dec'
]
labCrop = ['Corn', 'Spring wheat', 'Winter wheat']
cLst = 'rgb'
dataBox = list()
for iC in range(len(indLst)):
dataBox = list()
for k in range(12):
data = statPLst[k]['RMSE'][indLst[iC]] - statFLst[k]['RMSE'][
indLst[iC]]
if len(data[~np.isnan(data)]) < 20:
data = []
dataBox.append(data)
fig = plot.plotBoxFig(
dataBox,
label1=labMonth,
label2=[labCrop[iC]],
sharey=True,
figsize=[8, 3],
colorLst=cLst[iC])
plt.subplots_adjust(wspace=0, hspace=0)
plt.ylim(-0.02, 0.04)
fig.show()
|
[
"geofkwai@gmail.com"
] |
geofkwai@gmail.com
|
0a606a1b05d67e2bbd7b110945393561282c0ba4
|
430db754af2a7481358df2dcd7f74919c4ecddcf
|
/prob_tools/tools.py
|
6acebfcfbb108e282202e948521f5579880f9c75
|
[
"MIT"
] |
permissive
|
arruda/exercicios_probabilidade
|
567eb318ff137bcce155142d3a951cf6b1c40515
|
dca3503a0b4d982e63795b775bf30b9b95440bcd
|
refs/heads/master
| 2021-01-19T19:35:24.552403
| 2014-10-22T18:31:42
| 2014-10-22T18:31:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import math
def combinacao(n, p):
"""
Combinação de N, P à P
"""
n_fat = math.factorial(n)
p_fat = math.factorial(p)
n_menos_p_fat = math.factorial(n-p)
return n_fat / (p_fat * n_menos_p_fat)
def bernuille():
pass
def distribuicao_binomial(n, p, X):
    """
    Binomial distribution:
    n = total number of trials
    p = probability of success
    X = random variable (number of successes)
    """
    return combinacao(n, X) * (p ** X) * ((1 - p) ** (n - X))
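# Quick sanity checks (illustrative):
# combinacao(5, 2) == 10.0
# distribuicao_binomial(10, 0.5, 5) == 0.24609375  # C(10, 5) / 2**10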
|
[
"felipe.arruda.pontes@gmail.com"
] |
felipe.arruda.pontes@gmail.com
|
6060cea90a85849c7a6e237732ba3d0a8e87983d
|
ec0e1779383bec96de803ba893de5096c563158f
|
/tensorflow/python/estimator/inputs/pandas_io.py
|
a1e418f487c5b7da6907fa945fba0165334432cf
|
[] |
no_license
|
DengZhuangSouthRd/simple_tensorflow
|
45d8fc7c2ef9da947f11f876aff7c1e169dc457c
|
83d742219c4a04c61822935487626890bc735301
|
refs/heads/master
| 2021-01-18T19:05:36.414639
| 2017-04-01T15:06:16
| 2017-04-01T15:06:16
| 86,887,616
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,137
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: int, number of threads used for reading and enqueueing.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
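# A minimal usage sketch (illustrative; the DataFrame below is made up):
# import pandas as pd
# x = pd.DataFrame({'age': [25., 32.], 'height': [1.70, 1.80]})
# y = pd.Series([0, 1])
# input_fn = pandas_input_fn(x, y, batch_size=2, shuffle=False, num_epochs=1)
# features, target = input_fn()  # tensors to feed into an Estimator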
|
[
"liuguiyangnwpu@163.com"
] |
liuguiyangnwpu@163.com
|
b2c8ee7450114fe41b23728d52ab158c5be37155
|
37c38ef8ead53739b3128147da9a24c44cfccccb
|
/froide/helper/search.py
|
096eeb8474bc34da53009caf07af0861a887d59b
|
[
"MIT"
] |
permissive
|
zlodej/pekel
|
ff102dc1c05180dfcff6a30bd944252d128e0fb5
|
b1114618ef032503ab49476e738e90952c4da71a
|
refs/heads/master
| 2021-05-06T15:38:28.301853
| 2017-12-02T11:33:19
| 2017-12-02T11:33:19
| 102,724,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
from haystack.fields import NgramField
from haystack.exceptions import MissingDependency
class SuggestField(NgramField):
pass
try:
from haystack.backends.elasticsearch_backend import (
ElasticsearchSearchEngine, ElasticsearchSearchBackend, FIELD_MAPPINGS
)
except (ImportError, MissingDependency):
pass
else:
class SuggestField(NgramField): # noqa
field_type = 'suggest'
FIELD_MAPPINGS['suggest'] = {'type': 'string', 'analyzer': 'suggest_analyzer'}
class FroideElasticsearchSearchBackend(ElasticsearchSearchBackend):
# Settings to add an custom suggest analyzer
DEFAULT_SETTINGS = {
'settings': {
"analysis": {
"analyzer": {
"ngram_analyzer": {
"type": "custom",
"tokenizer": "standard",
"filter": ["haystack_ngram", "lowercase"]
},
"edgengram_analyzer": {
"type": "custom",
"tokenizer": "standard",
"filter": ["haystack_edgengram", "lowercase"]
},
"suggest_analyzer": {
"filter": ["lowercase", "asciifolding"],
"type": "custom",
"tokenizer": "froide_autocomplete_ngram"
}
},
"tokenizer": {
"haystack_ngram_tokenizer": {
"type": "nGram",
"min_gram": 3,
"max_gram": 15,
},
"haystack_edgengram_tokenizer": {
"type": "edgeNGram",
"min_gram": 2,
"max_gram": 15,
"side": "front"
},
"froide_autocomplete_ngram": {
"type": "edgeNGram",
"min_gram": 1,
"max_gram": 15,
"token_chars": ["letter", "digit"]
}
},
"filter": {
"haystack_ngram": {
"type": "nGram",
"min_gram": 3,
"max_gram": 15
},
"haystack_edgengram": {
"type": "edgeNGram",
"min_gram": 2,
"max_gram": 15
}
}
}
}
}
class FroideElasticsearchSearchEngine(ElasticsearchSearchEngine):
backend = FroideElasticsearchSearchBackend
class SearchQuerySetWrapper(object):
"""
Decorates a SearchQuerySet object using a generator for efficient iteration
"""
def __init__(self, qs, model):
self.qs = qs
self.model = model
def count(self):
return self.qs.count()
def __iter__(self):
for result in self.qs:
yield result.object
def __getitem__(self, key):
        if isinstance(key, int) and 0 <= key < self.count():
            # return the object at the specified position
            return self.qs[key].object
# Pass the slice/range on to the delegate
return SearchQuerySetWrapper(self.qs[key], self.model)
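# --- Editor's usage sketch (not part of the original module) ---
# Hypothetical view code, assuming a configured haystack index; `FoiRequest`
# and the query string are illustrative only. The wrapper lets pagination and
# templates treat search hits as plain model instances.
def _example_search(FoiRequest):
    from haystack.query import SearchQuerySet
    sqs = SearchQuerySet().models(FoiRequest).filter(content='environment')
    results = SearchQuerySetWrapper(sqs, FoiRequest)
    total = results.count()   # delegated to the underlying SearchQuerySet
    first = results[0]        # a FoiRequest model instance
    page = results[0:10]      # slicing returns another SearchQuerySetWrapper
    return total, first, list(page)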
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
a23d5a870a4d32a4c8c889089b00f6e56ee3dd50
|
73c2ec3edf0f6eaea4ce1f73e910f02592119a42
|
/mmdet/models/utils/fpn_utils.py
|
ab7558ff1ecd6d699cd8724dd5260b276affb28f
|
[
"Apache-2.0"
] |
permissive
|
violet998/video_class_agnostic_segmentation
|
ab9b496415857678979a70890cb68e92fa014061
|
c4614fe675e8a5352012f603c15bc24fb43d690c
|
refs/heads/main
| 2023-04-20T15:31:37.699645
| 2021-04-22T16:41:26
| 2021-04-22T16:41:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
import torch
import torch.nn.functional as F
def merge_fpn(x, average=True):
    """Merge a list of FPN feature maps into a single tensor."""
    # The first (highest-resolution) level sets the target spatial size.
    max_size = x[0].shape
    merged_fpn = []
    for i, _ in enumerate(x):
        # Upsample every level to the target spatial size.
        merged_fpn.append(F.interpolate(x[i], max_size[-2:]))
    if average:
        # Element-wise mean over the levels: (N, C, H, W).
        return torch.stack(merged_fpn).mean(dim=0)
    else:
        # Concatenate the levels along the channel axis: (N, L * C, H, W).
        concat = torch.stack(merged_fpn)
        return concat.permute(1, 0, 2, 3, 4).reshape(concat.shape[1], -1, *concat.shape[-2:])
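# --- Editor's usage sketch (not part of the original module) ---
# Shape check on random tensors: four FPN levels whose spatial sizes halve per
# level are merged to the resolution of the first level. The batch size,
# channel count, and strides are illustrative only.
if __name__ == '__main__':
    feats = [torch.randn(2, 256, 64 // s, 64 // s) for s in (1, 2, 4, 8)]
    assert merge_fpn(feats).shape == (2, 256, 64, 64)                  # averaged
    assert merge_fpn(feats, average=False).shape == (2, 1024, 64, 64)  # stacked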
|
[
"mennatul@ualberta.ca"
] |
mennatul@ualberta.ca
|
7099ddfcc80ff50780eeb6bea1817b88a9fa94f2
|
70d929497cbd70bb40ed939f3aa0ce56c3f7d816
|
/pandas/core/groupby/ops.pyi
|
58a34d0c2081a8e25b9bb1f4bab161d88906e3f0
|
[
"MIT"
] |
permissive
|
matangover/python-type-stubs
|
abd4bc46f9841d0a2c44e1597055019d21f9ee70
|
15285c6b88dc684d9de9bfdaf8b72d4eb7c3e257
|
refs/heads/main
| 2023-07-13T04:19:03.481222
| 2021-08-05T20:26:18
| 2021-08-05T20:26:18
| 399,435,272
| 0
| 0
|
MIT
| 2021-08-24T11:11:57
| 2021-08-24T11:11:56
| null |
UTF-8
|
Python
| false
| false
| 2,824
|
pyi
|
import numpy as np
from pandas._typing import FrameOrSeries as FrameOrSeries
from pandas.core.groupby import grouper as grouper
from pandas.core.indexes.api import Index as Index
from pandas.core.series import Series as Series
from typing import List, Optional, Sequence, Tuple
class BaseGrouper:
axis = ...
sort = ...
group_keys = ...
mutated = ...
indexer = ...
    def __init__(self, axis: Index, groupings: Sequence[grouper.Grouping], sort: bool = ..., group_keys: bool = ..., mutated: bool = ..., indexer: Optional[np.ndarray] = ...) -> None: ...
@property
def groupings(self) -> List[grouper.Grouping]: ...
@property
def shape(self): ...
    def __iter__(self): ...
@property
def nkeys(self) -> int: ...
    def get_iterator(self, data: FrameOrSeries, axis: int = ...): ...
    def apply(self, f, data: FrameOrSeries, axis: int = ...): ...
def indices(self): ...
@property
def codes(self) -> List[np.ndarray]: ...
@property
def levels(self) -> List[Index]: ...
@property
def names(self): ...
def size(self) -> Series: ...
def groups(self): ...
def is_monotonic(self) -> bool: ...
def group_info(self): ...
def codes_info(self) -> np.ndarray: ...
def ngroups(self) -> int: ...
@property
def reconstructed_codes(self) -> List[np.ndarray]: ...
def result_index(self) -> Index: ...
def get_group_levels(self): ...
    def aggregate(self, values, how: str, axis: int = ..., min_count: int = ...) -> Tuple[np.ndarray, Optional[List[str]]]: ...
    def transform(self, values, how: str, axis: int = ..., **kwargs): ...
    def agg_series(self, obj: Series, func): ...
class BinGrouper(BaseGrouper):
bins = ...
binlabels = ...
mutated = ...
indexer = ...
    def __init__(self, bins, binlabels, filter_empty: bool = ..., mutated: bool = ..., indexer=...) -> None: ...
def groups(self): ...
@property
def nkeys(self) -> int: ...
    def get_iterator(self, data: FrameOrSeries, axis: int = ...): ...
def indices(self): ...
def group_info(self): ...
def reconstructed_codes(self) -> List[np.ndarray]: ...
def result_index(self): ...
@property
def levels(self): ...
@property
def names(self): ...
@property
def groupings(self) -> List[grouper.Grouping]: ...
    def agg_series(self, obj: Series, func): ...
class DataSplitter:
data = ...
labels = ...
ngroups = ...
axis = ...
    def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = ...) -> None: ...
def slabels(self): ...
def sort_idx(self): ...
    def __iter__(self): ...
class SeriesSplitter(DataSplitter): ...
class FrameSplitter(DataSplitter):
def fast_apply(self, f, names): ...
def get_splitter(data: FrameOrSeries, *args, **kwargs) -> DataSplitter: ...
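# --- Editor's note (not part of the original stub) ---
# A hedged illustration of what these declarations describe (the DataFrame
# below is hypothetical; attribute availability depends on the installed
# pandas version):
#
#   import pandas as pd
#   ops = pd.DataFrame({'k': [1, 1, 2], 'v': [3., 4., 5.]}).groupby('k').grouper
#   # `ops` is a BaseGrouper; a type checker resolves its API via this stub,
#   # e.g. ops.shape, ops.names, ops.get_iterator(...).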
|
[
"gram@geekraver.com"
] |
gram@geekraver.com
|
f3ce2b0bddb87af3a6913e654894d0f19c5a9fe7
|
436166fda7a671805b8fa6fc06e68bf7d42f9be6
|
/test/boost.py
|
f977bb0860e75e9f0cee74eff9cad1e18a818acf
|
[] |
no_license
|
VitalyVorobyev/libLinAl
|
1e4872feb0e5deb70d9c3acc2e31e676e27818c2
|
b815efeee109922ffd28d97b9253f636403aa999
|
refs/heads/master
| 2021-01-22T08:39:29.897025
| 2020-08-28T04:06:52
| 2020-08-28T04:06:52
| 81,914,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
""" Boost """
import numpy as np
import matplotlib.pyplot as plt
from liblinal import vect, lvect
def boost_test():
""" Boost unit test """
boost_list = np.linspace(0.0001, 0.9, 500)
txprime = np.array([lvect(1., 0, 0, 0).boost(vect(bx, 0, 0)).as_list[:2]
for bx in boost_list])
tprime, xprime = txprime[:, 0], txprime[:, 1]
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('font', size=22)
plt.style.use('seaborn-white')
label_size = 28
plt.figure(num=1, figsize=(6, 4), dpi=100)
plt.plot(boost_list, tprime, 'b-', markersize=12)
plt.ylabel(r'$t^{\prime}$', fontsize=label_size)
plt.xlabel(r'$\beta$', fontsize=label_size)
plt.tight_layout(pad=.2)
plt.figure(num=2, figsize=(6, 4), dpi=100)
# plt.semilogy(boost_list, xprime, 'b-', markersize=12)
# plt.loglog(boost_list, xprime, 'b-', markersize=12)
plt.plot(boost_list, xprime, 'b-', markersize=12)
plt.ylabel(r'$x^{\prime}$', fontsize=label_size)
plt.xlabel(r'$\beta$', fontsize=label_size)
plt.tight_layout(pad=.2)
plt.show()
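# --- Editor's note (not part of the original script) ---
# For the unit time-like four-vector (1, 0, 0, 0) boosted along x with speed
# beta, special relativity gives t' = gamma = 1 / sqrt(1 - beta**2) and
# |x'| = gamma * beta (the sign depends on liblinal's active/passive boost
# convention), so both curves diverge as beta -> 1. Spot check at beta = 0.6:
# gamma = 1.25, so tprime should be ~1.25 and |xprime| ~0.75 there.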
if __name__ == '__main__':
    boost_test()
|
[
"vit.vorobiev@gmail.com"
] |
vit.vorobiev@gmail.com
|