blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cda5a4031d2648d83bfb86e146b7e8729e3f2bec | 8c1fc3dec9d6f3982e307cb0805ea20cc237c317 | /hashcrack/autoconfig/shadow.py | d86ef630a2a8c50b00bcddf56f8c898689acf9cb | [] | no_license | bannsec/hashcrack | a6a759a553552a1c9f53116b050e893f1860b8b7 | 88b651dc98347bec8acf297cea25aa22fdc55f9b | refs/heads/master | 2020-12-10T02:46:28.967850 | 2020-01-22T02:06:52 | 2020-01-22T02:06:52 | 233,484,652 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py |
import logging
from ..config import config
from .. import types
from prompt_toolkit import print_formatted_text as print, HTML
def run():
hashtype = None
for line in config['hashes'].strip().split(b"\n"):
tokens = line.split(b":")
# Not a shadow file
if len(tokens) != 9:
return
hash = tokens[1].strip(b"*!")
# Not shadow
if hash != b"" and not hash.startswith(b"$"):
return
if hash.startswith(b"$6$"):
# sha512crypt $6$, SHA512 (Unix)
# Since they may change the text name of this
hashtype = next(name for name in types.hashcat if types.hashcat[name] == '1800')
elif hash.startswith(b"$5$"):
# sha256crypt $5$, SHA256 (Unix)
# Since they may change the text name of this
hashtype = next(name for name in types.hashcat if types.hashcat[name] == '7400')
# If we made it this far, this should be a strong match. Go with it.
if hashtype is not None:
config['hash_type'] = hashtype
print(HTML("<ansigreen>Autoconfigured shadow file</ansigreen>"))
LOGGER = logging.getLogger(__name__)
| [
"whootandahalf@gmail.com"
] | whootandahalf@gmail.com |
4ba201a19176dc8ba93096d858b815f8b260eb74 | 8644a2174c3cb7ccfe211a5e49edffbcc3a74a46 | /hashcode/contest2021/hash_code.py | 348eb62abc65e6ddfd9415877e1b9bc528b06d02 | [] | no_license | bhavya2403/Learning-Python | 9e7cc9dee21172321fb217cae27c8072357f71ce | 3898211b357fbab320010a82a4811b68611d0422 | refs/heads/main | 2023-03-24T03:19:49.989965 | 2021-03-22T20:11:04 | 2021-03-22T20:11:04 | 315,962,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | def solve():
adjList = {}
for key in street:
start, end, L = street[key]
end = int(end)
if end not in adjList:
adjList[end] = [1, key]
else:
adjList[end][0] += 1
adjList[end].append(key)
streetsUsed = set()
for path in carPaths:
streetsUsed = streetsUsed.union(set(path[1:]))
output.write(str(totIntersection) + '\n')
for inter in adjList:
count = 0
notUsed = set()
for i in range(1, adjList[inter][0] + 1):
if adjList[inter][i] not in streetsUsed:
count += 1
notUsed.add(adjList[inter][i])
output.write(str(inter) + '\n')
if not adjList[inter][0]-count:
output.write(str(1) + '\n')
output.write(str(adjList[inter][1]) + ' ' + str(1) + '\n')
else:
output.write(str(adjList[inter][0]-count) + '\n')
for i in range(1, adjList[inter][0]+1):
if adjList[inter][i] not in notUsed:
output.write(str(adjList[inter][i]) + ' ' + str(1) + '\n')
input = open("f.txt", "r")
SimulationTime, totIntersection, totStreets, totCars, bonusPoints = map(int, input.readline().split())
street = {}
for _ in range(totStreets):
start, end, streetName, L = input.readline().split()
street[streetName] = (start, end, L)
carPaths = []
for _ in range(totCars):
path = list(input.readline().split())
carPaths.append(path)
carPaths.sort(key=lambda a:int(a[0]))
input.close()
output = open("fOutput", "w")
solve()
output.close() | [
"noreply@github.com"
] | bhavya2403.noreply@github.com |
ef6c7204f335001e4a416d5712d8eff6af8abf83 | b09a8df80c35e3ccca43cd74cec6e1a14db76ad7 | /blocks/forms.py | 7da0e469bffae4aa7eeb2899c3ef8df356c5a8e4 | [
"MIT"
] | permissive | ofa/everyvoter | 79fd6cecb78759f5e9c35ba660c3a5be99336556 | 3af6bc9f3ff4e5dfdbb118209e877379428bc06c | refs/heads/master | 2021-06-24T19:38:25.256578 | 2019-07-02T10:40:57 | 2019-07-02T10:40:57 | 86,486,195 | 7 | 3 | MIT | 2018-12-03T19:52:20 | 2017-03-28T17:07:15 | Python | UTF-8 | Python | false | false | 722 | py | """Forms for account app"""
from django import forms
from blocks.models import Block
from election.models import Election, LegislativeDistrict
class BlockModelForm(forms.ModelForm):
"""Model form for blocks"""
weight = forms.ChoiceField(choices=[(x, x) for x in range(1, 100)])
class Meta(object):
"""Meta options for form"""
model = Block
fields = ['name', 'geodataset', 'weight', 'categories', 'code']
class BlockPreviewForm(forms.Form):
"""Preview form"""
election = forms.ModelChoiceField(queryset=Election.objects.all())
district = forms.ModelChoiceField(
queryset=LegislativeDistrict.objects.all())
block = forms.CharField(widget=forms.HiddenInput())
| [
"nickcatal@gmail.com"
] | nickcatal@gmail.com |
a39e31cefdc46526f203635269c06d872405a9db | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/network/v20200501/get_ip_allocation.py | 1b0cabaf141ea140a23a362799f9df7f8521a40c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 7,547 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIpAllocationResult',
'AwaitableGetIpAllocationResult',
'get_ip_allocation',
]
@pulumi.output_type
class GetIpAllocationResult:
"""
IpAllocation resource.
"""
def __init__(__self__, allocation_tags=None, etag=None, ipam_allocation_id=None, location=None, name=None, prefix=None, prefix_length=None, prefix_type=None, subnet=None, tags=None, type=None, virtual_network=None):
if allocation_tags and not isinstance(allocation_tags, dict):
raise TypeError("Expected argument 'allocation_tags' to be a dict")
pulumi.set(__self__, "allocation_tags", allocation_tags)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if ipam_allocation_id and not isinstance(ipam_allocation_id, str):
raise TypeError("Expected argument 'ipam_allocation_id' to be a str")
pulumi.set(__self__, "ipam_allocation_id", ipam_allocation_id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if prefix and not isinstance(prefix, str):
raise TypeError("Expected argument 'prefix' to be a str")
pulumi.set(__self__, "prefix", prefix)
if prefix_length and not isinstance(prefix_length, int):
raise TypeError("Expected argument 'prefix_length' to be a int")
pulumi.set(__self__, "prefix_length", prefix_length)
if prefix_type and not isinstance(prefix_type, str):
raise TypeError("Expected argument 'prefix_type' to be a str")
pulumi.set(__self__, "prefix_type", prefix_type)
if subnet and not isinstance(subnet, dict):
raise TypeError("Expected argument 'subnet' to be a dict")
pulumi.set(__self__, "subnet", subnet)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network and not isinstance(virtual_network, dict):
raise TypeError("Expected argument 'virtual_network' to be a dict")
pulumi.set(__self__, "virtual_network", virtual_network)
@property
@pulumi.getter(name="allocationTags")
def allocation_tags(self) -> Optional[Mapping[str, str]]:
"""
IpAllocation tags.
"""
return pulumi.get(self, "allocation_tags")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipamAllocationId")
def ipam_allocation_id(self) -> Optional[str]:
"""
The IPAM allocation ID.
"""
return pulumi.get(self, "ipam_allocation_id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def prefix(self) -> Optional[str]:
"""
The address prefix for the IpAllocation.
"""
return pulumi.get(self, "prefix")
@property
@pulumi.getter(name="prefixLength")
def prefix_length(self) -> Optional[int]:
"""
The address prefix length for the IpAllocation.
"""
return pulumi.get(self, "prefix_length")
@property
@pulumi.getter(name="prefixType")
def prefix_type(self) -> Optional[str]:
"""
The address prefix Type for the IpAllocation.
"""
return pulumi.get(self, "prefix_type")
@property
@pulumi.getter
def subnet(self) -> 'outputs.SubResourceResponse':
"""
The Subnet that using the prefix of this IpAllocation resource.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetwork")
def virtual_network(self) -> 'outputs.SubResourceResponse':
"""
The VirtualNetwork that using the prefix of this IpAllocation resource.
"""
return pulumi.get(self, "virtual_network")
class AwaitableGetIpAllocationResult(GetIpAllocationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIpAllocationResult(
allocation_tags=self.allocation_tags,
etag=self.etag,
ipam_allocation_id=self.ipam_allocation_id,
location=self.location,
name=self.name,
prefix=self.prefix,
prefix_length=self.prefix_length,
prefix_type=self.prefix_type,
subnet=self.subnet,
tags=self.tags,
type=self.type,
virtual_network=self.virtual_network)
def get_ip_allocation(expand: Optional[str] = None,
ip_allocation_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIpAllocationResult:
"""
Use this data source to access information about an existing resource.
:param str expand: Expands referenced resources.
:param str ip_allocation_name: The name of the IpAllocation.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['ipAllocationName'] = ip_allocation_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200501:getIpAllocation', __args__, opts=opts, typ=GetIpAllocationResult).value
return AwaitableGetIpAllocationResult(
allocation_tags=__ret__.allocation_tags,
etag=__ret__.etag,
ipam_allocation_id=__ret__.ipam_allocation_id,
location=__ret__.location,
name=__ret__.name,
prefix=__ret__.prefix,
prefix_length=__ret__.prefix_length,
prefix_type=__ret__.prefix_type,
subnet=__ret__.subnet,
tags=__ret__.tags,
type=__ret__.type,
virtual_network=__ret__.virtual_network)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
0a9069262dcd7f1e236e03cb138351c395dad067 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/splom/marker/colorbar/_showticksuffix.py | 80b850d3cb4f83181bfce76a44d5751d71c84e68 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 534 | py | import _plotly_utils.basevalidators
class ShowticksuffixValidator(
_plotly_utils.basevalidators.EnumeratedValidator
):
def __init__(
self,
plotly_name='showticksuffix',
parent_name='splom.marker.colorbar',
**kwargs
):
super(ShowticksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='style',
values=['all', 'first', 'last', 'none'],
**kwargs
)
| [
"jon.mease@gmail.com"
] | jon.mease@gmail.com |
9e6823d1c327a49f0d484cae4b19d90a619c5311 | eaa781cc52a1a8686cd50424132744a300ce2fda | /shell/essay.py | 349342a9af55273a8978fd7add6d96451404e286 | [] | no_license | hustmonk/kdd2014 | 2d7f1d7393cccfaff95f8fc399a213ba588c7f02 | bf4f41af61e5ab08eb0b08aee990c04a7afbef0d | refs/heads/master | 2021-01-02T23:07:03.632621 | 2014-08-13T00:02:45 | 2014-08-13T00:02:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Last modified:
#申请论文信息
#essay info
"""docstring
"""
import csv
__revision__ = '0.1'
class Essay:
def _normal(self, list):
new_list = []
for x in list:
x = x.lower()
newx = []
for ch in x:
if ('a' <= ch and ch <= 'z') or ('0' <= ch and ch <= '9') or ch == ' ':
newx.append(ch)
else:
newx.append(" ")
new_list.append("".join(newx))
return new_list
def __init__(self, debug = False):
data_dir = '../data/'
filename = "essays.csv"
reader = csv.reader(file(data_dir + filename, 'rb'))
reader.next()
self.resources_feature = {}
idx = 0
for line in reader:
pid = line[0]
self.resources_feature[pid] = " ".join(self._normal(line[2:])).decode("utf-8")
if debug:
if idx > 1000:
break
idx = idx + 1
if __name__ == '__main__':
essay = Essay()
| [
"liujingminghust@163.com"
] | liujingminghust@163.com |
23a05b1cda180f9c05216f1af7274c13c0307cd6 | 223c3cf4281427e41ce6dc8d2501e157e05d8f25 | /results/run_check_epochs/0x1.5426135e0ddf4p+30/script.py | c013f937d02bf76521ab27987b3b27a8ab2b8650 | [
"BSD-2-Clause"
] | permissive | chri4354/meg_perceptual_decision_symbols | 94160d81035be283a4ade13dc8f488447b6773f6 | 34ed840c95d6fbedbf911c1b1506da383da77eb9 | refs/heads/master | 2021-01-15T11:46:02.968578 | 2015-03-29T19:12:40 | 2015-03-29T19:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,711 | py | import sys
import mkl
import os.path as op
import mne
from meeg_preprocessing.utils import setup_provenance
from ambiguity.conditions import get_events
import matplotlib.pyplot as plt
import warnings
import numpy as np
from scripts.config import (
data_path,
subjects,
results_dir,
open_browser
)
report, run_id, results_dir, logger = setup_provenance(
script=__file__, results_dir=results_dir)
#
# if len(sys.argv) > 1:
# subjects = [sys.argv[1]]
# mkl.set_num_threads(1)
for subject in subjects:
# STIM =====================================================================
epochs_fname = op.join(data_path, 'MEG', subject,
'stim_lock-{}-epo.fif'.format(subject))
bhv_fname = op.join(data_path, 'behavior',
'{}_behaviorMEG.mat'.format(subject[-2:]))
# Read Mat file
events = get_events(bhv_fname, 'stim_lock')
mat = np.array(events['trigger_value'].astype(list))
# Read events from epochs
epochs = mne.read_epochs(epochs_fname)
fiff = epochs.events[:,2]
# Checkup procedure
if len(mat) > len(fiff):
# XXX Here need procedure to correct issue
raise(error)
warnings.warn('too many events in mat as compared to fiff')
mat = mat[0:len(fiff)]
if len(mat) < len(fiff):
raise(error)
warnings.warn('too many events in fiff as compared to mat')
fiff = fiff[0:len(mat)]
if np.any(mat != fiff):
index = np.where((mat - fiff) != 0.)[0][0]
warnings.warn('{}: Problem with trigger {}.'.format(subject, index))
# Report
fig, (ax1, ax2) = plt.subplots(2, 1, sharey=True)
ax1.plot(mat)
ax1.plot(fiff + np.max(mat) + 1.0)
ax2.set_title('triggers from mat & fiff')
ax2.plot(mat - fiff)
ax2.set_title('mat - fiff')
report.add_figs_to_section(fig, 'Stim triggers', subject)
# plt.show()
# MOTOR ====================================================================
epochs_fname = op.join(data_path, 'MEG', subject,
'motor_lock-{}-epo.fif'.format(subject))
# Read Mat file
events = get_events(bhv_fname, 'motor_lock')
mat = np.array(events['motor_side'].astype(list))
# Read events from epochs
epochs = mne.read_epochs(epochs_fname)
fiff = 1 + (epochs.events[:,2] < 2 ** 14)
if len(mat) > len(fiff):
# XXX Here need procedure to correct issue
raise(error)
warnings.warn('too many events in mat as compared to fiff')
mat = mat[0:len(fiff)]
if len(mat) < len(fiff):
raise(error)
warnings.warn('too many events in fiff as compared to mat')
fiff = fiff[0:len(mat)]
# rm = list()
# index = np.where((mat - fiff[0:len(mat)]) != 0.)[0]
# while (len(index) > 0) and ((len(mat) + len(rm)) <= len(fiff)):
# print(rm)
# rm.append(index[0] + len(rm))
# sel = [i for i in range(0,len(mat)+len(rm)) if i not in rm]
# index = np.where((mat - fiff[sel]) != 0.)[0]
# epochs = epochs[sel]
# warnings.warn('Found {} unwanted epochs. Correcting and resaving {} epochs...'.format(len(rm), subject))
# fiff = 1 + (epochs.events[:,2] < 2 ** 14)
# epochs.save(op.join(data_path, 'MEG', subject, 'stim_lock-{}-epo.fif'.format(subject)))
fig, (ax1, ax2) = plt.subplots(2, 1, sharey=True)
ax1.plot(mat)
ax1.plot(fiff + np.max(mat) + 1.0)
ax2.set_title('triggers from mat & fiff')
ax2.plot(mat - fiff)
ax2.set_title('mat - fiff')
report.add_figs_to_section(fig, 'Motor triggers', subject)
# plt.show()
report.save(open_browser=open_browser)
| [
"jeanremi.king+github@gmail.com"
] | jeanremi.king+github@gmail.com |
a25456d0b47ab6ab15ab64b06f610459fed2cc9e | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/long/Schema+Instance/NISTXML-SV-IV-atomic-long-whiteSpace-1-2.py | edbc3af912f4107c5b5261ff2d179988d496fe2f | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 267 | py | from output.models.nist_data.atomic.long.schema_instance.nistschema_sv_iv_atomic_long_white_space_1_xsd.nistschema_sv_iv_atomic_long_white_space_1 import NistschemaSvIvAtomicLongWhiteSpace1
obj = NistschemaSvIvAtomicLongWhiteSpace1(
value=-141811240377451630
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
79f0bc35dead901f383ef6d61b759a98596cf035 | 13f6df9b74df10c7054cbf826e6e3538012493c9 | /ajenti/plugins/supervisor/client.py | 5edf2c37045040caf22f82a43dc36f22e470eb02 | [] | no_license | zennro/ajenti | 6906bf285a72b35a485555d5a2296f04717d4bae | 0d40cbb38117a2018607d21a138a83bf6581d729 | refs/heads/master | 2021-01-18T20:15:10.615198 | 2014-03-29T17:39:35 | 2014-03-29T17:39:35 | 18,260,276 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import subprocess
from ajenti.api import *
from ajenti.plugins.services.api import Service, ServiceManager
@plugin
class SupervisorServiceManager (ServiceManager):
def test(self):
return subprocess.call(['supervisorctl', 'status']) == 0
def run(self, *cmds):
return subprocess.check_output(['supervisorctl'] + list(cmds))
def get_all(self):
r = []
try:
lines = self.run('status').splitlines()
except:
return []
for l in lines:
if l:
l = l.split(None, 2)
s = SupervisorService()
s.name = l[0]
s.running = len(l) > 2 and l[1] == 'RUNNING'
s.status = l[2] if len(l) > 2 else ''
r.append(s)
return r
def fill(self, programs):
for p in programs:
p.status = ''
p.icon = ''
for s in self.get_all():
for p in programs:
if p.name == s.name:
p.running = s.running
p.status = s.status
p.icon = 'play' if p.running else None
class SupervisorService (Service):
source = 'supervisord'
def __init__(self):
self.name = None
self.running = False
def run(self, *cmds):
return subprocess.check_output(['supervisorctl'] + list(cmds))
@property
def icon(self):
return 'play' if self.running else None
def start(self):
self.run('start', self.name)
def stop(self):
self.run('stop', self.name)
def restart(self):
self.run('restart', self.name)
def tail(self, id):
return self.run('tail', self.name)
| [
"e@ajenti.org"
] | e@ajenti.org |
cbc2ed0f38928c2471f097b3803e8dddb7b91602 | 0809ea2739d901b095d896e01baa9672f3138825 | /without_restapim/testApp/utils.py | 93e40d370280d08463e8ec4a6eabb6bf09a80774 | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | import json
def is_json(data):
try:
p_data=json.loads(data)
valid=True
except ValueError:
valid=False
return valid | [
"djangopython1988@gmail.com"
] | djangopython1988@gmail.com |
449428cd24acaff229144d38164a47472ec94748 | f207164511f0dfe3f01f6e0c21fd7548e626397f | /toolkit/toolkit.mozbuild | ebdca4a1aed9c6fad7483c3e0a71d58cff634e1d | [] | no_license | PortableApps/palemoon27 | 24dbac1a4b6fe620611f4fb6800a29ae6f008d37 | 3d7e107cc639bc714906baad262a3492372e05d7 | refs/heads/master | 2023-08-15T12:32:23.822300 | 2021-10-11T01:54:45 | 2021-10-11T01:54:45 | 416,058,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,400 | mozbuild | # vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
if CONFIG['LIBXUL_SDK']:
error('toolkit.mozbuild is not compatible with --enable-libxul-sdk=')
if CONFIG['MOZ_SANDBOX']:
DIRS += ['/security/sandbox']
DIRS += [
# Depends on NSS and NSPR, and must be built after sandbox or else B2G emulator
# builds fail.
'/security/certverifier',
# Depends on certverifier
'/security/apps',
]
# the signing related bits of libmar depend on nss
if CONFIG['MOZ_UPDATER']:
DIRS += ['/modules/libmar']
DIRS += [
'/config/external/freetype2',
'/xpcom',
'/modules/libpref',
'/intl',
'/netwerk',
]
if CONFIG['MOZ_AUTH_EXTENSION']:
DIRS += ['/extensions/auth']
if CONFIG['MOZ_UPDATER']:
DIRS += ['/other-licenses/bsdiff']
# Gecko/Core components.
DIRS += [
'/ipc',
'/js/ipc',
'/hal',
'/js/xpconnect',
'/intl/chardet',
'/media/libyuv',
'/modules/libjar',
'/storage',
]
if CONFIG['MOZ_PERMISSIONS']:
DIRS += [
'/extensions/cookie',
'/extensions/permissions',
]
DIRS += [
'/rdf',
]
if CONFIG['MOZ_WEBRTC']:
DIRS += [
'/media/webrtc',
'/media/mtransport',
]
if CONFIG['MOZ_OMX_PLUGIN']:
DIRS += [
'/media/omx-plugin/lib/ics/libutils',
'/media/omx-plugin/lib/ics/libstagefright',
'/media/omx-plugin/lib/ics/libvideoeditorplayer',
'/media/omx-plugin/lib/gb/libutils',
'/media/omx-plugin/lib/gb/libstagefright',
'/media/omx-plugin/lib/gb/libstagefright_color_conversion',
'/media/omx-plugin/lib/gb235/libstagefright',
'/media/omx-plugin',
'/media/omx-plugin/gb',
'/media/omx-plugin/gb235',
'/media/omx-plugin/lib/hc/libstagefright',
'/media/omx-plugin/hc',
'/media/omx-plugin/kk',
]
if CONFIG['ENABLE_TESTS']:
DIRS += ['/testing/specialpowers']
DIRS += [
'/testing/gtest',
'/uriloader',
'/caps',
'/parser',
'/gfx',
'/image',
'/dom',
'/view',
'/widget',
'/content',
'/editor',
'/layout',
'/docshell',
'/embedding',
'/xpfe/appshell'
]
# This needs to be built after the gfx/ directory
# to ensure all dependencies for skia (e.g. mozalloc, xpcom)
# have been built
if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
DIRS += ['/other-licenses/skia-npapi']
if CONFIG['MOZ_UNIVERSALCHARDET']:
DIRS += ['/extensions/universalchardet']
if CONFIG['ACCESSIBILITY']:
DIRS += ['/accessible']
else:
DIRS += ['/accessible/ipc']
# toolkit
DIRS += ['/profile']
# This must precede xpfe.
if CONFIG['MOZ_JPROF']:
DIRS += ['/tools/jprof']
DIRS += [
'/tools/profiler',
'/xpfe/components',
]
if CONFIG['MOZ_ENABLE_XREMOTE']:
DIRS += ['/widget/xremoteclient']
if CONFIG['MOZ_SPELLCHECK']:
DIRS += ['/extensions/spellcheck']
DIRS += [
'/security/manager',
'/toolkit',
]
if CONFIG['MOZ_PREF_EXTENSIONS']:
DIRS += ['/extensions/pref']
DIRS += [
'/services',
'/startupcache',
'/js/ductwork/debugger',
'/other-licenses/snappy',
]
if CONFIG['MOZ_GIO_COMPONENT']:
DIRS += ['/extensions/gio']
DIRS += [
'/toolkit/library/StaticXULComponentsEnd',
'/toolkit/library',
]
if CONFIG['MOZ_ENABLE_GNOME_COMPONENT']:
DIRS += ['/toolkit/system/gnome']
# if QtNetwork is present, it will do its own network monitoring
if not CONFIG['MOZ_ENABLE_QTNETWORK'] and CONFIG['MOZ_ENABLE_DBUS']:
DIRS += ['/toolkit/system/dbus']
if CONFIG['ENABLE_MARIONETTE'] or CONFIG['MOZ_WIDGET_TOOLKIT'] not in ('gonk', 'android'):
DIRS += ['/testing/marionette']
DIRS += [
'/tools/quitter',
'/media/gmp-clearkey/0.1',
]
if CONFIG['ENABLE_TESTS']:
DIRS += [
'/testing/mochitest',
'/testing/xpcshell',
'/testing/tools/screenshot',
'/testing/profiles',
'/testing/mozbase',
'/testing/modules',
'/testing/runtimes',
'/testing/web-platform',
]
if CONFIG['MOZ_WEBRTC'] and CONFIG['MOZ_WIDGET_TOOLKIT'] != 'gonk':
DIRS += [
'/media/webrtc/signaling/test',
'/media/webrtc/signaling/test/standalone',
'/media/mtransport/test',
]
| [
"roytam@gmail.com"
] | roytam@gmail.com |
57eb78a0554396f29509498063511bd588bb9f1f | f2d99d437f29af7bd2d4a5ea8af57f49e3d30871 | /test/assignment/module_attribute.py | f003021d6d9dd2f1123f626046504736d9312929 | [] | no_license | terrence2/millipede | 6d68a09773f83e304702d72a38475946590e3079 | 3e1d63899f54f1154cb6a0bc3634815623803246 | refs/heads/master | 2020-04-01T14:28:06.810699 | 2011-05-30T20:15:30 | 2011-05-30T20:15:30 | 839,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | class A: pass
a = A()
a.b = A()
a.b.c = A()
a.b.c.d = A()
print(a.__class__.__name__)
print(a.b.__class__.__name__)
print(a.b.c.__class__.__name__)
print(a.b.c.d.__class__.__name__)
#out: A
#out: A
#out: A
#out: A
| [
"terrence@zettabytestorage.com"
] | terrence@zettabytestorage.com |
a6d78dcee4b7d9d0b627e8613bc50a7083969f3b | 92abae05cb2c31f4f4f228844ff8ecfc5439e098 | /Generators/GEN_GLCM_PREPROCESS.py | c141ca99775a719a86f5ce463c86bcbea82ed86a | [] | no_license | lukkascost/py_Crosswalk | 1530133e0c808be433bdb00cbcda810b689df6ce | 287db1583bf21696cd9f307498a5fa2e1cb69c1b | refs/heads/master | 2021-06-03T16:33:38.139919 | 2020-01-29T16:12:39 | 2020-01-29T16:12:39 | 140,041,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | import cv2
import numpy as np
from MachineLearn.Classes.Extractors.GLCM import GLCM
MIN_BITS = 8
MAX_BITS = 8
MIN_DECIMATION = 20
MAX_DECIMATION = 100
PATH_TO_IMAGES_FOLDER = '../database-Crosswalk/Preprocessed/'
PATH_TO_SAVE_FEATURES = 'GLCM_FILES/EXP_01/'
for nbits in range(MIN_BITS, MAX_BITS + 1):
for k in range(MIN_DECIMATION, MAX_DECIMATION + 1):
listGLCM = []
for quantity in [[1, 50], [2, 50], [3, 50], [4, 150]]:
for image in range(1, quantity[1] + 1):
img = cv2.imread(PATH_TO_IMAGES_FOLDER+"c{:d}_p1_{:d}.JPG".format(quantity[0], image), 0)
""" DECIMATION """
klist = [x for x in range(0, img.shape[0], k)]
klist2 = [x for x in range(0, img.shape[1], k)]
img = img[klist]
img = img[:, klist2]
""" CHANGING IMAGE TO VALUES BETWEEN 0 AND 2**NBITS"""
img = img / 2 ** (8 - nbits)
""" GENERATING FEATURES FOR GLCM """
oGlcm = GLCM(img, nbits)
oGlcm.generateCoOccurenceHorizontal()
oGlcm.normalizeCoOccurence()
oGlcm.calculateAttributes()
""" ADDING FEATURES IN ARRAY FOR SAVE IN FILE """
listGLCM.append(oGlcm.exportToClassfier("Class " + str(quantity[0])))
print nbits, k, quantity[0], image
listGLCM = np.array(listGLCM)
""" SAVE FILE WITH FEATURES, DECIMATION WITH STEP = k AND CORRELATION MATRIX WITH nbits BITS. """
np.savetxt(PATH_TO_SAVE_FEATURES+"FEATURES_M{}_CM{}b.txt".format(k, nbits), listGLCM, fmt="%s", delimiter=',')
| [
"lucas.costa@lit.ifce.edu.br"
] | lucas.costa@lit.ifce.edu.br |
0bba2d08714c610787e7f98c5cdf1edbf1dfeff9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/438/usersdata/314/99574/submittedfiles/pico.py | 2fe6a6464d97f450c55ab04953a46384b1186e82 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # -*- coding: utf-8 -*-
def pico(lista):
maior=lista[0]
meio=lista.index(lista[i])
for i in range(0,len(lista)-1,1):
if lista[i]>maior:
maior=lista[i]
crescente=True
if meio>lista[i]:
decrescente=True
else:
decrescente=False
crescente=False
n=int(input('Digite n: '))
lista=[]
for i in range(0,n,1):
lista.append(int(input('Digite a quantidade de elementos da lista: ')))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
bfe7efa037054787430565e8e7dd7e67369deef0 | 9eb35d6df7b0490d556623f84dba12bb05f30ee2 | /comp_stat_little_tricks/welfords_variance.py | 879fe04be37135f1677925987a51fe5785db7a74 | [
"MIT"
] | permissive | FelSiq/statistics-related | 0b4442bd19338c5b0da7dcf5ecd53eb304dcd3f8 | ee050202717fc368a3793b195dea03687026eb1f | refs/heads/master | 2021-11-24T12:31:08.660652 | 2021-11-03T23:42:39 | 2021-11-03T23:42:39 | 211,089,869 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | import typing as t
def welfords_var(vals: t.Sequence[t.Union[int, float]]) -> float:
"""Powerful one-pass method for computing array variance."""
M, S = 0, 0
for k, x in enumerate(vals, 1):
oldM = M
M += (x - M) / k
S += (x - M) * (x - oldM)
return S / (len(vals) - 1)
if __name__ == "__main__":
import numpy as np
np.random.seed(1444)
for i in range(500):
vals = (np.random.randint(-999999, 999999, size=1000) +
2.0 * np.random.random(size=1000) - 1.0)
var_wf = welfords_var(vals)
var_np = vals.var(ddof=1)
assert np.allclose(var_wf, var_np)
for i in range(500):
vals = 2.0 * np.random.random(size=1000) - 1.0
var_wf = welfords_var(vals)
var_np = vals.var(ddof=1)
assert np.allclose(var_wf, var_np)
| [
"felipe.siqueira@usp.br"
] | felipe.siqueira@usp.br |
c95eafdcb80903dfb6648b489d3951c8372de95c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02775/s052208557.py | 8fef45c5cc9e573263a2006aaadb8f57669aedf5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | import sys
def main():
    """Read an integer N from stdin and print the minimum total number of
    banknotes exchanged when paying exactly N with notes of 1, 10, 100, ...

    Digit DP over the decimal string, most significant digit first:
    state ``no_carry`` assumes no carry is produced into this position by
    the change, state ``carry`` assumes one is.
    """
    digits = sys.stdin.readline().strip()
    # Base case: paying "nothing" via a carry costs one extra note.
    no_carry, carry = 0, 1
    for ch in digits:
        d = int(ch)
        # Either hand over d notes at this position, or hand over 10 - d
        # as change after over-paying in the carry state.
        nxt_no_carry = min(no_carry + d, carry + 10 - d)
        nxt_carry = min(no_carry + d + 1, carry + 9 - d)
        no_carry, carry = nxt_no_carry, nxt_carry
    print(no_carry)
if __name__=="__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
be521724694bccb38164c968ce2d4e190a3ce385 | cea03b578f0f6207afe5056611090848ab76bd23 | /model/verify_huisu.py | e987f6282e8d3cd45c0c5d6d7e1b57863d2ca583 | [] | no_license | swq90/stock | fa295f4fa0bf6a4d8afe8a71c02cc54fc7442bcd | a49ae395de82ecdfa38220f4fdbcaf4da6a39719 | refs/heads/master | 2021-07-14T03:42:21.950897 | 2020-10-14T11:52:24 | 2020-10-14T11:52:24 | 215,325,863 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | import datetime
from sqlalchemy import create_engine
import pandas as pd
import stock.util.stockfilter as sfilter
import stock.util.sheep as sheep
# Shared SQLAlchemy engine for the local "stock" PostgreSQL database.
# NOTE(review): credentials are hard-coded in source -- move them to
# configuration or environment variables before sharing this file.
engine = create_engine('postgresql://nezha:nezha@10.0.0.5:5432/stock', echo=False)
def get_data(start_date, end_date):
    """Load daily quotes merged with price-limit data for a date range.

    Rows whose ticker matches the exclusion filter (ST/*ST names and the
    STAR market, per ``StockFilter.stock_basic``) are dropped before the
    merge on ``ts_code``/``trade_date``.

    NOTE(review): relies on the module-level ``engine`` connection and on
    the ``daily`` / ``stk_limit`` tables exposing ``ts_code`` and
    ``trade_date`` columns -- confirm against the schema.
    """
    # Tickers to exclude from the result.
    excluded = sfilter.StockFilter().stock_basic(end_date, name="st|ST", market="科创板")
    t1 = datetime.datetime.now()
    raw_data = pd.read_sql_query(
        'select * from daily where (trade_date>=%(start)s and trade_date<=%(end)s)',
        params={'start': start_date, 'end': end_date}, con=engine)
    stk_limit = pd.read_sql_query(
        'select * from stk_limit where (trade_date>=%(start)s and trade_date<=%(end)s)',
        params={'start': start_date, 'end': end_date}, con=engine)
    # Log how long the two queries took.
    print(datetime.datetime.now() - t1)
    raw_data.drop_duplicates(inplace=True)
    stk_limit.drop_duplicates(inplace=True)
    print('交易数据%s,包含%s个交易日,涨停数据%s' % (raw_data.shape, raw_data['trade_date'].unique().shape, stk_limit.shape))
    # Keep only rows whose ticker is NOT excluded (idiomatic ~mask instead
    # of the original "== False" comparison; behavior is identical).
    raw_data = raw_data[~raw_data["ts_code"].isin(excluded['ts_code'])]
    df = raw_data.merge(stk_limit, on=['ts_code', 'trade_date'])
    return df
def verify(up, data):
    """Run both ``sheep.wool`` implementations on the same input and dump
    their results to ``s1.csv`` / ``s2.csv`` so they can be diffed by hand.

    NOTE(review): the exact semantics of *up* and *data* come from the
    ``stock.util.sheep`` module -- confirm there.
    """
    # Removed dead instrumentation: three datetime.now() timestamps were
    # captured here but never reported.
    s1 = sheep.wool(up, data)
    s2 = sheep.wool2(up, data)
    s1.to_csv('s1.csv')
    s2.to_csv('s2.csv')
    # Trailing blank line keeps console output readable between runs.
    print()
# Ad-hoc driver: pull ~2 months of quotes, keep rows that closed at the
# daily up-limit, dump per-day limit-up counts to count.csv, then
# cross-check the two sheep.wool implementations on that subset.
t=get_data('20190220','20190430')
s=t[t['close']==t['up_limit']]
pd.DataFrame(s.groupby('trade_date')['ts_code'].count()).to_csv('count.csv')
verify(s,t)
| [
"shaowenqin620@163.com"
] | shaowenqin620@163.com |
c99a1bdd8fc9c1068b09146888230fdd761ab4bb | 6f6465903edbb0587a43fbef9c3a6776c948d9b3 | /IntroducaoAoKNN/KNNComPandasESKTlearn.py | 3441ee1659085dbe72e658130ac2c775726d6418 | [] | no_license | borin98/CursoDeMachineLearning | 5871beccd2e09f2fc5d51e40370c11c730f056c1 | 6ead1db41c27009207a32658089bc7a790a06be8 | refs/heads/master | 2020-03-25T09:04:13.436480 | 2018-08-30T02:52:32 | 2018-08-30T02:52:32 | 143,645,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from math import sqrt
"""
Função que mexe com um arquivo CSV sobre sapatos que funciona da seguinte forma :
Coluna 0 : tamanho do sapato
Coluna 1 : Peso da pessoa
Coluna 2 : Tipo de pessoa ( Senior ou fourth - primario )
"""
def main ( ) :
    """Train a K-NN classifier on the shoe-size/height data and report
    the number of correct test predictions and the accuracy score."""
    # Load the training and test sets.
    dadosTreino = pd.read_csv ( "train.csv" )
    dadosTeste = pd.read_csv ( "test.csv" )
    # Feature columns and the label column.
    # NOTE(review): DataFrame.as_matrix was removed in pandas 1.0; use
    # .to_numpy() if this file is ever moved to a modern pandas.
    col1 = ["shoe size", "height"]
    col2 = ["class"]
    xTreino = dadosTreino.as_matrix ( col1 )
    yTreino = dadosTreino.as_matrix ( col2 )
    xTeste = dadosTeste.as_matrix ( col1 )
    yTeste = dadosTeste.as_matrix ( col2 )
    # Common heuristic: k = sqrt(total number of samples).
    k = int ( sqrt ( len ( dadosTreino ) + len ( dadosTeste ) ) )
    knn = KNeighborsClassifier ( n_neighbors = k, weights = "distance" )
    knn.fit ( xTreino, yTreino.ravel() )
    predicao = knn.predict ( xTeste )
    # BUG FIX: predicao is 1-D while yTeste has shape (n, 1); comparing
    # them broadcast to an (n, n) matrix, so np.sum counted pairwise
    # matches instead of per-row hits. Flatten yTeste for an element-wise
    # comparison.
    acertos = np.sum ( predicao == yTeste.ravel() )
    porcentagemAcertos = knn.score ( xTeste, yTeste )
    print("Quantidade de dados acertados : {} acertos de 11329 dados \n\n".format ( acertos ) )
    print("Porcentagem de acertos : {} %\n\n".format ( porcentagemAcertos*100 ) )
# Script entry point -- note this also runs when the module is imported.
main()
| [
"borinmacedo@gmail.com"
] | borinmacedo@gmail.com |
0d4b8f8cacf0f2d8d6475dca0953ff0640291571 | 79c67ec1a5bececc030c222d7469e73c0dc775eb | /life/migrations/0004_auto_20151017_1106.py | 65632b5077c0f0249461420b9d73d05740b1ea54 | [] | no_license | evz/awildlife | 64b29ddcc6644514cd6536b5c1f67dca7d5114db | 39de34006ece41119efef006a73974ce020ae360 | refs/heads/master | 2021-01-10T11:50:59.810504 | 2015-11-07T18:50:57 | 2015-11-07T18:50:57 | 44,484,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable ``image_height``/``image_width`` integer columns to
    the ``Event`` model.

    Auto-generated by Django's makemigrations; both fields allow NULL so
    existing rows need no backfill.
    """

    dependencies = [
        # Must run after the migration that added Event.image.
        ('life', '0003_event_image'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='image_height',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='event',
            name='image_width',
            field=models.IntegerField(null=True),
        ),
    ]
| [
"eric.vanzanten@gmail.com"
] | eric.vanzanten@gmail.com |
c09e9222d65ec6663cd9d3ff0e6ef114d5147ac7 | 9ba0771dcef17191483798b4d18f78dbb7b4c27e | /camacq/plugins/leica/helper.py | d3f6d5c7f229b25f26bc63a1e8c5f792be4cd92a | [
"Apache-2.0"
] | permissive | CellProfiling/cam_acq | fd035831bbdd7d6e6bfc2e85fea1b838829eacb8 | 8cf99cb738353c052b93e7ff1dbd5951f65808c2 | refs/heads/master | 2023-08-31T01:46:58.390960 | 2023-08-28T07:12:05 | 2023-08-28T07:12:05 | 35,374,025 | 5 | 1 | Apache-2.0 | 2023-09-13T21:44:38 | 2015-05-10T14:23:34 | Python | UTF-8 | Python | false | false | 1,929 | py | """Helper functions for Leica api."""
from pathlib import Path, PureWindowsPath
from leicaimage import experiment
def find_image_path(relpath, root):
    """Resolve a Windows-style relative path against *root*.

    The server reports image paths with Windows separators; split the
    relative path into its components and rebuild it below *root* using
    the local platform's path conventions.

    Parameters
    ----------
    relpath : str
        A relative path to the image.
    root : str
        Path to directory where path should start.

    Returns
    -------
    str
        Return path to image.
    """
    components = PureWindowsPath(relpath).parts
    result = Path(root)
    for component in components:
        result = result / component
    return str(result)
def get_field(path):
    """Get the field directory that contains an image.

    Parameters
    ----------
    path : string
        Path to image.

    Returns
    -------
    str
        Return path to field directory of image.
    """
    # pylint: disable=no-member
    field_dir = experiment.Experiment(path).dirname
    return field_dir
def get_well(path):
    """Get the well directory that contains an image.

    The well directory is the parent of the image's field directory.

    Parameters
    ----------
    path : string
        Path to image.

    Returns
    -------
    str
        Return path to well directory of image.
    """
    field_dir = get_field(path)
    # pylint: disable=no-member
    return experiment.Experiment(field_dir).dirname
def get_imgs(path, img_type="tif", search=""):
    """Get all images below path.

    Parameters
    ----------
    path : string
        Path to directory where to search for images.
    img_type : string
        A string representing the image file type extension.
    search : string
        A string matched against the start of the image file names
        (a ``*`` wildcard is appended automatically).

    Returns
    -------
    list
        Return paths of all images found.
    """
    root = Path(path)
    _path = Path("")
    if search:
        search = f"{search}*"
    # Only add the directory levels that *path* does not already include.
    patterns = ["slide", "chamber--", "field--", "image--"]
    for pattern in patterns:
        if pattern not in path:
            _path = _path / f"{pattern}*"
    # NOTE(review): when *path* already contains every pattern, str(_path)
    # is "." so the glob becomes ".<search>.<ext>" -- confirm intended.
    return list(root.glob(f"{_path}{search}.{img_type}"))
| [
"noreply@github.com"
] | CellProfiling.noreply@github.com |
9e6e6ebc0d8bceed5f15987173cad13b3f0b7933 | 7d1d49560328f9b5588197abf2c623c304c0d95a | /src/datasets/mpii.py | f844b380ffa8e5ac8b779a29186730dd9c064817 | [] | no_license | peternara/adversarial-pose-pytorch | 1dc6ed10281844c59a827b77505f2ab55d906c16 | 12570ea03f3f2e8ecca7208997c99eb88da47824 | refs/heads/master | 2021-09-05T05:01:01.110501 | 2018-01-24T08:16:29 | 2018-01-24T08:16:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | import os
import numpy as np
import h5py
import skimage as skim
import skimage.io as skio
import skimage.transform as sktf
import torch
import torch.utils.data
from .utils import rand, rnd, crop, fliplr_coords, transform, create_label
class MPII_Dataset(torch.utils.data.Dataset):
    """MPII human-pose dataset.

    Loads images and 16-joint annotations from
    ``<data_root>/mpii/<split>.h5`` and yields
    ``(image, heatmaps[, small_image][, meta])`` tuples. Training samples
    are augmented with random scale, rotation, horizontal flip and
    per-channel color jitter.
    """

    def __init__(self, data_root, split,
                 inp_res=256, out_res=64, sigma=1,
                 scale_factor=0.25, rot_factor=30, return_meta=False, small_image=True):
        self.data_root = data_root
        self.split = split
        self.inp_res = inp_res              # network input resolution (pixels)
        self.out_res = out_res              # heatmap resolution (pixels)
        self.sigma = sigma                  # Gaussian radius for joint heatmaps
        self.scale_factor = scale_factor    # train-time random scale range
        self.rot_factor = rot_factor        # train-time random rotation (degrees)
        self.return_meta = return_meta
        self.small_image = small_image

        self.nJoints = 16
        self.accIdxs = [0, 1, 2, 3, 4, 5, 10, 11, 14, 15]  # joint idxs for accuracy calculation
        # Left/right joint index pairs swapped when an image is mirrored.
        self.flipRef = [[0, 5], [1, 4], [2, 3],  # noqa
                        [10, 15], [11, 14], [12, 13]]

        # Materialize all annotation arrays in memory, then close the file.
        self.annot = {}
        tags = ['imgname', 'part', 'center', 'scale']
        f = h5py.File('{}/mpii/{}.h5'.format(data_root, split), 'r')
        for tag in tags:
            self.annot[tag] = np.asarray(f[tag]).copy()
        f.close()

    def _getPartInfo(self, index):
        """Return (joints, center, scale) copies for sample *index*."""
        # get a COPY so augmentation never mutates the cached annotations
        pts = self.annot['part'][index].copy()
        c = self.annot['center'][index].copy()
        s = self.annot['scale'][index].copy()
        # Small adjustment so cropping is less likely to take feet out
        c[1] = c[1] + 15 * s
        s = s * 1.25
        return pts, c, s

    def _loadImage(self, index):
        """Read the image for sample *index* as a float array in [0, 1]."""
        impath = os.path.join(self.data_root, 'mpii/images', self.annot['imgname'][index].decode('utf-8'))
        im = skim.img_as_float(skio.imread(impath))
        return im

    def __getitem__(self, index):
        """Return the augmented image, joint heatmaps and optional extras."""
        im = self._loadImage(index)
        pts, c, s = self._getPartInfo(index)
        r = 0
        if self.split == 'train':
            # scale and rotation
            s = s * (2 ** rnd(self.scale_factor))
            r = 0 if rand() < 0.6 else rnd(self.rot_factor)
            # flip LR
            # NOTE(review): this indexing assumes a 3-channel image; a
            # grayscale image would fail here, before the ndim == 2 branch
            # below -- confirm whether grayscale inputs can occur in train.
            if rand() < 0.5:
                im = im[:, ::-1, :]
                pts = fliplr_coords(pts, width=im.shape[1], matchedParts=self.flipRef)
                c[0] = im.shape[1] - c[0]  # flip center point also
            # Color jitter: one random gain per channel.
            im = np.clip(im * np.random.uniform(0.6, 1.4, size=3), 0, 1)
        # Prepare image
        im = crop(im, c, s, r, self.inp_res)
        if im.ndim == 2:
            # BUG FIX: np.tile(im, [1, 1, 3]) on a 2-D array yields shape
            # (1, h, 3*w). Add a channel axis first so a grayscale crop
            # becomes a proper (h, w, 3) image.
            im = np.tile(im[:, :, np.newaxis], [1, 1, 3])
        if self.small_image:
            # small size image, matching the heatmap resolution
            im_s = sktf.resize(im, [self.out_res, self.out_res], preserve_range=True)
        # (h, w, c) to (c, h, w)
        im = np.transpose(im, [2, 0, 1])
        if self.small_image:
            im_s = np.transpose(im_s, [2, 0, 1])
        # Prepare label: one Gaussian heatmap per joint; joints with
        # non-positive x are treated as missing and left all-zero.
        labels = np.zeros((self.nJoints, self.out_res, self.out_res))
        new_pts = transform(pts.T, c, s, r, self.out_res).T
        for i in range(self.nJoints):
            if pts[i, 0] > 0:
                labels[i] = create_label(
                    labels.shape[1:],
                    new_pts[i],
                    self.sigma)
        ret_list = [im.astype(np.float32), labels.astype(np.float32)]
        if self.small_image:
            ret_list.append(im_s)
        if self.return_meta:
            meta = [pts, c, s, r]
            ret_list.append(meta)
        return tuple(ret_list)

    def __len__(self):
        return len(self.annot['imgname'])
| [
"roytseng.tw@gmail.com"
] | roytseng.tw@gmail.com |
a17fb5bb9c1a3b110d7a6567ee80dd59963ffcfe | 9795e787a54d15f2f249a17b616fec3df67d4559 | /exception/custom_exceptions.py | 9fad21e41c70df88234ff44398cf49bea43a46cf | [] | no_license | gebbz03/PythonProject | 377b6ccf5eafa37dd157012ce499138370ba882f | c12f939cf194a4c541ee77e1f614ba9867ef7090 | refs/heads/master | 2020-04-02T22:16:11.082863 | 2018-10-30T05:49:22 | 2018-10-30T05:49:22 | 154,827,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | class VowelNotAccepted(Exception):
def __init__(self,message,status):
self.message=message
self.status=status
def check_chars(word):
    """Return *word* unchanged, raising VowelNotAccepted (status 101) if
    it contains any vowel (case-insensitive)."""
    vowels = 'aeiou'
    for letter in word:
        if letter.lower() in vowels:
            raise VowelNotAccepted('Vowel is not accepted', 101)
    return word
try:
print(check_chars("love"))
except Exception as e:
print("Error reason: ",e.message) | [
"gebb.freelancer@gmail.com"
] | gebb.freelancer@gmail.com |
d09996202930b38b56df754c8e5dd034958f4031 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pfnet_chainer/chainer-master/cupy/testing/__init__.py | 7399762e4c91f5d702050e4014d3363e1b55177d | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,934 | py | from cupy.testing import array
from cupy.testing import attr
from cupy.testing import helper
from cupy.testing import parameterized
assert_allclose = array.assert_allclose
assert_array_almost_equal = array.assert_array_almost_equal
assert_array_almost_equal_nulp = array.assert_array_almost_equal_nulp
assert_array_max_ulp = array.assert_array_max_ulp
assert_array_equal = array.assert_array_equal
assert_array_list_equal = array.assert_array_list_equal
assert_array_less = array.assert_array_less
numpy_cupy_allclose = helper.numpy_cupy_allclose
numpy_cupy_array_almost_equal = helper.numpy_cupy_array_almost_equal
numpy_cupy_array_almost_equal_nulp = \
helper.numpy_cupy_array_almost_equal_nulp
numpy_cupy_array_max_ulp = helper.numpy_cupy_array_max_ulp
numpy_cupy_array_equal = helper.numpy_cupy_array_equal
numpy_cupy_array_list_equal = helper.numpy_cupy_array_list_equal
numpy_cupy_array_less = helper.numpy_cupy_array_less
numpy_cupy_raises = helper.numpy_cupy_raises
for_dtypes = helper.for_dtypes
for_all_dtypes = helper.for_all_dtypes
for_float_dtypes = helper.for_float_dtypes
for_signed_dtypes = helper.for_signed_dtypes
for_unsigned_dtypes = helper.for_unsigned_dtypes
for_int_dtypes = helper.for_int_dtypes
for_dtypes_combination = helper.for_dtypes_combination
for_all_dtypes_combination = helper.for_all_dtypes_combination
for_signed_dtypes_combination = helper.for_signed_dtypes_combination
for_unsigned_dtypes_combination = helper.for_unsigned_dtypes_combination
for_int_dtypes_combination = helper.for_int_dtypes_combination
for_orders = helper.for_orders
for_CF_orders = helper.for_CF_orders
with_requires = helper.with_requires
parameterize = parameterized.parameterize
product = parameterized.product
shaped_arange = helper.shaped_arange
shaped_reverse_arange = helper.shaped_reverse_arange
shaped_random = helper.shaped_random
NumpyError = helper.NumpyError
gpu = attr.gpu
multi_gpu = attr.multi_gpu
| [
"659338505@qq.com"
] | 659338505@qq.com |
a015cd16852be7d4367b84a80f44d6eb6db18e83 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/third_party/google/cloud/pubsublite/cloudpubsub/internal/ack_set_tracker_impl.py | 3222994e53e33b5ba1cad9c761ad3e563cb45561 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 2,672 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from typing import Optional
from google.api_core.exceptions import FailedPrecondition
from google.cloud.pubsublite.cloudpubsub.internal.sorted_list import SortedList
from google.cloud.pubsublite.cloudpubsub.internal.ack_set_tracker import AckSetTracker
from google.cloud.pubsublite.internal.wire.committer import Committer
from google.cloud.pubsublite_v1 import Cursor
class AckSetTrackerImpl(AckSetTracker):
_committer: Committer
_receipts: "deque[int]"
_acks: SortedList[int]
def __init__(self, committer: Committer):
super().__init__()
self._committer = committer
self._receipts = deque()
self._acks = SortedList()
def track(self, offset: int):
if len(self._receipts) > 0:
last = self._receipts[0]
if last >= offset:
raise FailedPrecondition(
f"Tried to track message {offset} which is before last tracked message {last}."
)
self._receipts.append(offset)
def ack(self, offset: int):
self._acks.push(offset)
prefix_acked_offset: Optional[int] = None
while len(self._receipts) != 0 and not self._acks.empty():
receipt = self._receipts.popleft()
ack = self._acks.peek()
if receipt == ack:
prefix_acked_offset = receipt
self._acks.pop()
continue
self._receipts.appendleft(receipt)
break
if prefix_acked_offset is None:
return
# Convert from last acked to first unacked.
cursor = Cursor()
cursor._pb.offset = prefix_acked_offset + 1
self._committer.commit(cursor)
async def clear_and_commit(self):
self._receipts.clear()
self._acks = SortedList()
await self._committer.wait_until_empty()
async def __aenter__(self):
await self._committer.__aenter__()
async def __aexit__(self, exc_type, exc_value, traceback):
await self._committer.__aexit__(exc_type, exc_value, traceback)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
d8244c4d2a0e627a721f9626fcb9edc7c3ef3b0e | 3cd4e2aae2a3ee3f9002fea903a6695f9fd5d373 | /bigml/tests/read_configuration_steps.py | 4d14340f31677c9017ce140619a82c926a491344 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | jaykamau7/python | 1c2daf7222f12909563005701b02308b8b80c732 | faf718173e4a108ae8d500e82a6b4197fabbecb4 | refs/heads/master | 2023-02-28T13:29:59.759663 | 2021-02-07T14:10:20 | 2021-02-07T14:10:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from .world import world
from nose.tools import eq_
from bigml.api import HTTP_OK
#@step(r'I get the configuration "(.*)"')
def i_get_the_configuration(step, configuration):
resource = world.api.get_configuration(configuration)
world.status = resource['code']
eq_(world.status, HTTP_OK)
world.configuration = resource['object']
| [
"merce@bigml.com"
] | merce@bigml.com |
0a77fbe43f734f778b70dd19d7af6633f82c5acc | 4c534dc33d548acf07edc3e7b826f4bfb9207030 | /lexicon/management/commands/populatetags.py | 247a11e8fedbee41aad11ff4954dd17c4615e3be | [] | no_license | Typecraft/valex-backend | a1fdbcdf0f761eca51f7762ec20e57a2feec234b | 7c7ca74851bf595e811ffba5b0f5b09fbd5ac19a | refs/heads/develop | 2022-12-18T08:22:47.545094 | 2017-10-24T09:00:58 | 2017-10-24T09:00:58 | 102,861,803 | 0 | 0 | null | 2022-12-08T00:38:08 | 2017-09-08T13:00:09 | Python | UTF-8 | Python | false | false | 3,141 | py | from functools import reduce
from django.core.management import BaseCommand
from lexicon.models import ValenceFrame
tags = [
"EXPL",
"EXPL+[INF:rais"
"EXPL+adpos",
"EXPL+adpos+PP[S",
"EXPL+adpos+S",
"EXPL+APpred+INF",
"EXPL+APpred+PP",
"EXPL+APpred+S",
"EXPL+INF",
"EXPL+INF:equiOBJ",
"EXPL+INF:equiSBJ",
"EXPL+NP",
"EXPL+NP+adpos",
"EXPL+NP+INF",
"EXPL+NP+INF:equiSBJ",
"EXPL+NP+NP",
"EXPL+NP+NP+INF",
"EXPL+NP+NP+S",
"EXPL+NP+S",
"EXPL+NPpred+INF",
"EXPL+NPpred+S",
"EXPL+PARTPpred+S",
"EXPL+PP",
"EXPL+PP+INF",
"EXPL+PP[INF",
"EXPL+PP[S",
"EXPL+PPpred+S",
"EXPL+S",
"INF",
"INF+APpred",
"INF+INF",
"INF+NP",
"INF+NP+NP",
"INF+NP+NPgen",
"INF+NP+PP",
"INF+NPdat+NP",
"INF+NPpred",
"INF+PARTPpred",
"INF+PPpred",
"inherentCompNP+NP+NP",
"NP",
"NP+adpos",
"NP+adpos+INF:equiSBJ",
"NP+adpos+INF:equiSBJ",
"NP+adpos+NP",
"NP+adpos+PARTPpred",
"NP+adpos+PP",
"NP+adpos+PP[INF:equiSBJ",
"NP+adpos+PP[INF:raisSBJ",
"NP+adpos+S",
"NP+ADVPpred",
"NP+APpred",
"NP+APpred+adpos",
"NP+EXPL+APpred+S",
"NP+EXPL+INF",
"NP+EXPL+S",
"NP+INF",
"NP+INF",
"NP+INF:equiSBJ",
"NP+INF:raisingOBJ",
"NP+INF:raisingSBJ",
"NP+NP",
"NP+NP++ADVPpredSBJ",
"NP+NP+ADVPpred",
"NP+NP+APpred",
"NP+NP+INF:equi:OBJ",
"NP+NP+INF:equi:SBJ",
"NP+NP+INF:rais:OBJ",
"NP+NP+INF:rais:SBJ",
"NP+NP+NP",
"NP+NP+NP+PP",
"NP+NP+NPpred",
"NP+NP+PP",
"NP+NP+PP[INF",
"NP+NP+PP[INF:equiOBJ",
"NP+NP+PP[INF:equiSBJ",
"NP+NP+PP[INF:raisOBJ",
"NP+NP+PP[S",
"NP+NP+PPpred",
"NP+NP+PRTP[INF:raisOBJ",
"NP+NP+PRTPpred",
"NP+NP+PRTPpred",
"NP+NP+PRTPpredSBJ",
"NP+NP+S",
"NP+NPdat+NP",
"NP+NPdat+NP+PP",
"NP+NPpred",
"NP+NPpred",
"NP+NPrefl",
"NP+NPrefl+adpos",
"NP+NPrefl+ADVpred",
"NP+NPrefl+APpred",
"Np+NPrefl+APpred+adpos",
"NP+NPrefl+INF",
"NP+NPrefl+INF:equi:OBJ",
"NP+NPrefl+INF:equiSBJ",
"NP+NPrefl+INF:raisSBJ",
"NP+NPrefl+NP",
"NP+NPrefl+NPpred",
"NP+NPrefl+PP",
"NP+NPrefl+PP[INF",
"NP+NPrefl+PP[INF:equiSBJ",
"NP+NPrefl+PP[INF:raisOBJ",
"NP+NPrefl+PPpred",
"NP+NPrefl+PRTP[INF:raisOBJ",
"NP+NPrefl+PRTPpred",
"NP+NPrefl+S",
"NP+phrasalVerb",
"NP+PP",
"NP+PP[INF",
"NP+PP[INF:equiSBJ",
"NP+PP[INF:raisSBJ",
"NP+PP[S",
"NP+PPpred",
"NP+PRTPpred",
"NP+PTCP",
"NP+S",
"NPdat",
"S",
"S+APpred",
"S+idiomatic",
"S+NP",
"S+NP+APpred",
"S+NP+NP",
"S+NP+S",
"S+NPpred",
"S+PARTPpred",
"S+PP",
"S+PP+NP",
"S+PPpred",
]
class Command(BaseCommand):
help = 'Populates the ValenceFrame table with tags'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
valence_frames = []
for tag in tags:
valence_frames.append(ValenceFrame(name=tag))
ValenceFrame.objects.bulk_create(valence_frames)
| [
"tormod.haugland@gmail.com"
] | tormod.haugland@gmail.com |
0f4cae1c82065decfb1458cf930ea6fa0d20184f | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1480487_0/Python/TV4Fun/CodePython.py | d48048e4b9227440502b06435ae41e03eaa57447 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | filename = raw_input("Enter the name of the input file: ")
f = open(filename, 'r')
outfile = open("outfile.txt", 'w')
t = int(f.readline())
for case in range(t):
x = [int(i) for i in f.readline().split()]
n = x.pop(0)
xsum = sum(x)
outfile.write('Case #' + str(case + 1) + ':')
minscore = 2.0 * xsum / n
changed = True
maylose = x[:]
while changed:
changed = False
for i in maylose:
if i > minscore:
maylose.remove(i)
if len(maylose) > 1:
minscore = (float(sum(maylose)) + xsum) / len(maylose)
changed = True
print minscore
for i in x:
if i >= minscore:
outfile.write(' 0.0')
else:
outfile.write(' ' + str(100.0 * float(minscore - i) / xsum))
outfile.write('\n')
outfile.close()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
9185725abebca98dc093dd4d562ae61a619fb4cc | 29ed133feb870455ca619c9fa2ce9b7eb1dcc470 | /URIs/URI2968.py | aac803dbfada4346f0a947ba958b8c61eff6808a | [] | no_license | jrantunes/URIs-Python-3 | c5e676686a979b6bbfd10b8e7168a6d35fb8f6a2 | 4692f3fba4a1c9a0f51322a13e9e267d8b07ea3e | refs/heads/master | 2022-04-17T10:56:52.468275 | 2020-03-28T17:07:46 | 2020-03-28T17:07:46 | 250,395,664 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #Hour for a Run
import math

# URI 2968 "Hour for a Run": read v and n from a single input line.
# NOTE(review): presumably v is a speed/pace and n a count from the judge's
# problem statement -- confirm against the original description.
v, n = input().split()
v = int(v)
n = int(n)
x = []
# Cumulative value at each tenth of the total (10% .. 90%), rounded up.
for i in range(1, 10):
    r = math.ceil((n * v * i) / 10.0)
    x.append(r)
print(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8]) | [
"noreply@github.com"
] | jrantunes.noreply@github.com |
2e1f9b8a34984b316be5df9a700ccee570f03474 | 89cd8b77ad5171c336cc60b2133fe6468a6cb53f | /Module02/06-正则表达式/05-FilterLagou.py | 7a68143d021baee871dc9c51bfd6f9e213906972 | [
"MIT"
] | permissive | fenglihanxiao/Python | 75178f6b6b0c53345e1ed54226ea645216572d6c | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | refs/heads/master | 2021-05-23T18:49:20.656433 | 2020-04-29T01:06:21 | 2020-04-29T01:06:21 | 253,199,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | import re
html_str = """
<div class="job_at">
<p>岗位职责:</p>
<p>完成推荐算法、数据统计、接口、后台等服务器端相关工作</p>
<p><br></p>
<p>必备要求:</p>
<p>良好的自我驱动力和职业素养,工作积极主动、结果导向</p>
<p> <br></p>
<p>技术要求:</p>
<p>1、一年以上 Python 开发经验,掌握面向对象分析和设计,了解设计模式</p>
<p>2、掌握HTTP协议,熟悉MVC、MVVM等概念以及相关WEB开发框架</p>
<p>3、掌握关系数据库开发设计,掌握 SQL,熟练使用 MySQL/PostgreSQL 中的一种<br></p>
<p>4、掌握NoSQL、MQ,熟练使用对应技术解决方案</p>
<p>5、熟悉 Javascript/CSS/HTML5,JQuery、React、Vue.js</p>
<p> <br></p>
<p>加分项:</p>
<p>大数据,数理统计,机器学习,sklearn,高性能,大并发。</p>
</div>
"""
ret = re.sub(r"<[^>]*>| ", " ", html_str)
print(ret) | [
"fenglihanxiao@qq.com"
] | fenglihanxiao@qq.com |
6297beb8ba52f92cb896761b185e0a4502499949 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_02_01/operations/_private_link_resources_operations.py | b8989a164064339b3ceb0b30e1c860798d4401ef | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 6,861 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-02-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-02-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateLinkResourcesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_02_01.ContainerServiceClient`'s
:attr:`private_link_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.PrivateLinkResourcesListResult:
"""Gets a list of private link resources in the specified managed cluster.
To learn more about private clusters, see:
https://docs.microsoft.com/azure/aks/private-clusters.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourcesListResult or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_02_01.models.PrivateLinkResourcesListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-02-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-02-01"))
cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None)
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources"
}
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
8ea4e4fe4fcab2f4511431a6d1a91c8e94bcd4cc | 029aa4fa6217dbb239037dec8f2e64f5b94795d0 | /数据结构练习/Python算法指南数据结构/123_删除链表中倒数第n个节点.py | e5895f57b7dca317281edff54280a7e8d0b3517a | [] | no_license | tonyyo/algorithm | 5a3f0bd4395a75703f9ee84b01e42a74283a5de9 | 60dd5281e7ce4dfb603b795aa194a67ff867caf6 | refs/heads/master | 2022-12-14T16:04:46.723771 | 2020-09-23T06:59:33 | 2020-09-23T06:59:33 | 270,216,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | class ListNode(object):
    def __init__(self, val, next=None):
        # Singly linked list node: `val` is the payload, `next` is the
        # following node (or None at the tail).
        self.val = val
        self.next = next
class Solution(object):
    """Delete the n-th node counted from the end of a singly linked list."""

    def removeNthFromEnd(self, head, n):
        """Two-pointer pass behind a dummy head.

        A runner is advanced n nodes ahead of a trailing pointer; when the
        runner walks off the end, the trailing pointer sits just before the
        node to unlink.  Returns the (possibly new) head.
        """
        dummy = ListNode(0)
        dummy.next = head
        runner, trail = head, dummy
        for _ in range(n):
            runner = runner.next
        while runner is not None:
            runner = runner.next
            trail = trail.next
        trail.next = trail.next.next
        return dummy.next

    def removeNthFromEnd2(self, head, n):
        """Same idea with an explicit predecessor pointer.

        `scout` leads `target` by n-1 nodes, so when `scout` reaches the
        last node, `target` is the node to delete and `before` is its
        predecessor.
        """
        dummy = ListNode(0)
        dummy.next = head
        before, target, scout = dummy, head, head
        for _ in range(n - 1):
            scout = scout.next
        while scout.next:
            scout = scout.next
            before = before.next
            target = target.next
        before.next = target.next
        return dummy.next
# Entry point: build the list 1->2->3->4->5, delete the n-th node from the
# end (n = 1), and print the list before and after.
if __name__ == "__main__":
    nodes = [ListNode(v) for v in (1, 2, 3, 4, 5)]
    for first, second in zip(nodes, nodes[1:]):
        first.next = second
    n = 1
    # Create the solver object.
    solution = Solution()
    print("初始链表是:", [node.val for node in nodes])
    cursor = solution.removeNthFromEnd2(nodes[0], n)
    list1 = []
    while cursor:
        list1.append(cursor.val)
        cursor = cursor.next
    print("最终链表是:", list1)
"1325338208@qq.com"
] | 1325338208@qq.com |
1d30a93b7b0fdf10bb8df12a279bd831e5c4df12 | 50f57af6bc95c755597074138ebef2200c0e0022 | /第二天作业/9.将首尾反转,自己写算法实现.py | c44f033e04040dda41193c2a113ad331fe4f9a76 | [] | no_license | qiaoy9377/python-base | f18666c1851f5d5f214b72723e04298471e55d63 | 6c7e4acf96d3582b85464efb8f9d5a6d6d8c2271 | refs/heads/main | 2023-04-05T05:42:40.021651 | 2021-04-20T12:53:10 | 2021-04-20T12:53:10 | 359,812,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | #有一堆字符串,‘abcdef’,将首尾反转,结果:fedcba,不能使用现有的函数或方法,自己写算法实现
# Exercise: reverse the string 'abcdef' into 'fedcba' by hand, without
# relying on ready-made reversal helpers.

# The source string.
str1 = 'abcdef'
# Accumulator for the reversed characters.
list1 = []

# Approach 1: a while loop walking the indices from the last one down to 0.
l = len(str1)
i = l - 1
while i >= 0:
    list1 += str1[i]
    i -= 1
print(''.join(list1))

# Approach 2: a for loop over the reversed slice.
list2 = []
for i in str1[::-1]:
    list2.append(i)
print(''.join(list2))
"18335909377@163.com"
] | 18335909377@163.com |
e81498d14e8c062e1e3e7a026d2372dea587c945 | a4844ab94268c60ccb3e58e3006bed8e187f4f9c | /decorator_eample.py | cad7c75d3b59eb3bbff7f455b05b056aa56d9c88 | [] | no_license | namitha89/python_works | 2d9eacaf0c73dcea5695c3446b0923610e963795 | d130f4f4f32bf40616fdb3e9eef518d58d2c6a51 | refs/heads/master | 2022-03-31T11:09:35.840452 | 2020-01-14T20:40:22 | 2020-01-14T20:40:22 | 115,792,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import time
def log(filename):
    """Decorator factory: time each call of the wrapped function and write
    ``[start, end, duration]`` (seconds since the epoch) to *filename*.

    Note: the file is opened in "w" mode, so only the timing of the most
    recent call is kept.
    """
    # Local import keeps this block self-contained; functools is stdlib.
    from functools import wraps

    def inner(func):
        @wraps(func)  # preserve the wrapped function's __name__/__doc__
        def innermost(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            end = time.time()
            with open(filename, "w") as f:
                # Same record format as before: str of [start, end, diff].
                f.write(str([start, end, end - start]) + '\n')
            return result
        return innermost
    return inner
@log('myfile1.txt')
def view1(param):
    # Print the given value; call timing is written to myfile1.txt by @log.
    print(param)
@log('myfile2.txt')
def view2(param_one, times=8):
    # Print *param_one* the requested number of times; call timing is
    # written to myfile2.txt by @log.
    for _ in range(times):
        print(param_one)

# Demo invocation (also produces myfile2.txt).
view2('hi am doing good', times=3)
"namigowda51@gmail.com"
] | namigowda51@gmail.com |
4fee9d06c23ab38f3284349a68a3ea91b4348d7f | e70a17e8a37847a961f19b136f3bbe74393fa2af | /RPI/src/video_stream_opencv/cfg/VideoStream.cfg | ed9b908047679eaf1c59bef03a222c023b12a809 | [
"MIT"
] | permissive | Mondiegus/ROS-4x4-CAR-AI | 1413ead6f46a8b16005abeea3e0b215caa45f27e | 124efe39168ce96eec13d57e644f4ddb6dfe2364 | refs/heads/Master | 2023-07-14T23:56:53.519082 | 2021-03-27T17:28:45 | 2021-03-27T17:28:45 | 334,233,839 | 0 | 0 | MIT | 2021-02-02T13:00:30 | 2021-01-29T18:46:16 | Makefile | UTF-8 | Python | false | false | 2,012 | cfg | #!/usr/bin/env python
# ROS dynamic_reconfigure definition for the video_stream_opencv node: each
# gen.add() call declares one runtime-reconfigurable parameter.
from dynamic_reconfigure.parameter_generator_catkin import *
PKG = "video_stream_opencv"
gen = ParameterGenerator()

class LEVEL:
    # Reconfigure "level" flags passed to gen.add(); the names suggest
    # NORMAL-time vs while-RUNNING parameters -- semantics are defined by
    # the consuming node (TODO confirm).
    NORMAL = 0
    RUNNING = 1

#       name                     type      level          description                               default  min  max
gen.add("camera_name", str_t, LEVEL.NORMAL, "Camera name", "camera")
gen.add("set_camera_fps", double_t, LEVEL.RUNNING, "Image Publish Rate", 30.0, 0.0, 1000.0)
gen.add("buffer_queue_size", int_t, LEVEL.NORMAL, "Buffer size for capturing frames", 100, 1, 1000)
gen.add("fps", double_t, LEVEL.RUNNING, "Image Publish Rate", 240.0, 0.0, 1000.0)
gen.add("frame_id", str_t, LEVEL.RUNNING, "Camera FrameID", "camera")
gen.add("camera_info_url", str_t, LEVEL.RUNNING, "Camera info URL", "")
gen.add("flip_horizontal", bool_t, LEVEL.NORMAL, "Flip image horizontally", False)
gen.add("flip_vertical", bool_t, LEVEL.NORMAL, "Flip image vertically", False)
# Width/height of 0 presumably means "keep the capture size" -- confirm.
gen.add("width", int_t, LEVEL.RUNNING, "Target width", 0, 0, 10000)
gen.add("height", int_t, LEVEL.RUNNING, "Target height", 0, 0, 10000)
gen.add("brightness", double_t, LEVEL.RUNNING, "Target brightness", 0.5019607843137255, 0.0, 1.0)
gen.add("contrast", double_t, LEVEL.RUNNING, "Target contrast", 0.12549019607843137, 0.0, 1.0)
gen.add("hue", double_t, LEVEL.RUNNING, "Target hue", 0.5, 0.0, 1.0)
gen.add("saturation", double_t, LEVEL.RUNNING, "Target saturation", 0.64, 0.0, 1.0)
gen.add("auto_exposure", bool_t, LEVEL.RUNNING, "Target auto exposure", True)
gen.add("exposure", double_t, LEVEL.RUNNING, "Target exposure", 0.5, 0.0, 1.0)
gen.add("loop_videofile", bool_t, LEVEL.RUNNING, "Loop videofile", False)
gen.add("reopen_on_read_failure", bool_t, LEVEL.RUNNING, "Re-open camera device on read failure", False)
gen.add("output_encoding", str_t, LEVEL.NORMAL, "Output encoding", "bgr8")
gen.add("start_frame", int_t, LEVEL.RUNNING, "Start frame of the video ", 0, 0)
gen.add("stop_frame", int_t, LEVEL.RUNNING, "Stop frame of the video", -1, -1)
# Generate the config support code; dynamic_reconfigure cfg scripts end
# with exit(gen.generate(...)).
exit(gen.generate(PKG, PKG, "VideoStream"))
| [
"Mondiegus9@gmail.com"
] | Mondiegus9@gmail.com |
4c2077d7acf3d765cf747f4fb6fbcdf1dffc6276 | e36225e61d95adfabfd4ac3111ec7631d9efadb7 | /problems/CR/auto/problem9_CR.py | 9ab4523e3e2e108161cae160b7e968f1641b787e | [
"BSD-3-Clause"
] | permissive | sunandita/ICAPS_Summer_School_RAE_2020 | d2ab6be94ac508e227624040283e8cc6a37651f1 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | refs/heads/main | 2023-01-01T02:06:40.848068 | 2020-10-15T17:25:01 | 2020-10-15T17:25:01 | 301,263,711 | 5 | 2 | BSD-3-Clause | 2020-10-15T17:25:03 | 2020-10-05T01:24:08 | Python | UTF-8 | Python | false | false | 1,111 | py | __author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
# Action durations keyed by action name (consumed via the `timer` import).
DURATION.TIME = {
    'put': 2,
    'take': 2,
    'perceive': 3,
    'charge': 5,
    'move': 10,
    'moveToEmergency': 5,
    'moveCharger': 15,
    'addressEmergency': 10,
    'wait': 5,
}
# Counter-based durations; identical values to DURATION.TIME in this problem.
DURATION.COUNTER = {
    'put': 2,
    'take': 2,
    'perceive': 3,
    'charge': 5,
    'move': 10,
    'moveToEmergency': 5,
    'moveCharger': 15,
    'addressEmergency': 10,
    'wait': 5,
}
# World description: 8 locations; EDGES lists each location's neighbours
# (locations 7 and 8 act as hubs connecting the others).
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7, 8]
rv.EDGES = {1: [7], 2: [8], 3: [8], 4: [8], 5: [7], 6: [7], 7: [1, 5, 6, 8], 8: [2, 3, 4, 7]}
rv.OBJECTS=['o1']
rv.ROBOTS=['r1']
def ResetState():
    """Reset the shared world state to this problem's initial configuration."""
    state.loc = {'r1': 2}
    state.charge = {'r1': 2}
    # NIL comes from the starred domain import: the robot starts unloaded.
    state.load = {'r1': NIL}
    state.pos = {'c1': 1, 'o1': 1}
    state.containers = {1: ['o1'], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: []}
    state.emergencyHandling = {'r1': False, 'r2': False}
    # No location has been viewed yet.
    state.view = {l: False for l in rv.LOCATIONS}
# Scheduled tasks, keyed by their release time step.
tasks = {
    3: [['fetch', 'r1', 'o1']],
    5: [['emergency', 'r1', 2, 1]],
}

# No exogenous environment events in this problem.
eventsEnv = {}
"sunandita.patra@gmail.com"
] | sunandita.patra@gmail.com |
b453edd28f0abe75121ce1976ba9a9c00c0b850a | c20f811f26afd1310dc0f75cb00992e237fdcfbd | /202-happy-number.py | f7b8c53ffaef5cd4e475b3ce7753353ed5dae8fe | [
"MIT"
] | permissive | dchentech/leetcode | 4cfd371fe4a320ab3e95925f1b5e00eed43b38b8 | 3111199beeaefbb3a74173e783ed21c9e53ab203 | refs/heads/master | 2022-10-21T09:59:08.300532 | 2016-01-04T03:21:16 | 2016-01-04T03:21:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | """
Question:
Happy Number
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process: Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
Example: 19 is a happy number
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1
Credits:
Special thanks to @mithmatt and @ts for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 34378 Total Submissions: 104779 Difficulty: Easy
2. Your runtime beats 55.38% of python submissions.
"""
class Solution(object):
    """Decide whether a number is "happy": repeatedly replacing it by the
    sum of the squares of its digits eventually reaches 1."""

    def isHappy(self, n):
        """
        :type n: int
        :rtype: bool
        """
        return not self.compute(n)["is_endless"]

    def compute(self, n):
        """Iterate the digit-square-sum map starting from n.

        Stops when the value hits 1 or when a value repeats (a cycle that
        never reaches 1).  Returns {"num": final value,
        "is_endless": whether a cycle was detected}.
        """
        seen = set()
        num = n
        while num != 1:
            num = sum(int(digit) ** 2 for digit in str(num))
            if num in seen:
                return {"num": num, "is_endless": True}
            seen.add(num)
        return {"num": num, "is_endless": False}
# Inline smoke tests -- executed at import time.
assert Solution().compute(19)["num"] == 1
assert Solution().compute(0)["num"] == 0
assert Solution().compute(1)["num"] == 1
assert Solution().isHappy(19) is True
assert Solution().isHappy(0) is False
assert Solution().isHappy(1) is True
| [
"mvjome@gmail.com"
] | mvjome@gmail.com |
2120159042ebf409524843722f2a78301223752c | d42a9128898d504a9831f1afee3198c4677236c9 | /Level_2/기능개발.py | 012a8d1f581b77ca300f381fa928db03d99a1447 | [] | no_license | ketkat001/Programmers-coding | 6848a9c8cffd97b792cfc8856ec135b72af5d688 | 799baba8d66a9971b43233d231cecbf262b4ea27 | refs/heads/master | 2023-09-02T23:07:25.614820 | 2021-10-17T18:12:02 | 2021-10-17T18:12:02 | 235,016,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | def solution(progresses, speeds):
    # Tasks complete front-to-back: fast-forward to the day the front task
    # finishes, then everything already at 100% ships in one release batch.
    answer = []
    while progresses:
        # Days until the front task reaches 100 (ceiling division).
        time = 0
        if (100 - progresses[0]) % speeds[0] != 0:
            time += 1
        time += (100 - progresses[0]) // speeds[0]
        # Advance every remaining task by that many days.
        for i in range(len(progresses)):
            progresses[i] += speeds[i] * time
        # Count the consecutive completed tasks at the front; they release
        # together with the front one.
        temp, idx = 1, 1
        while idx < len(progresses):
            if progresses[idx] >= 100:
                temp += 1
                idx += 1
            else:
                break
        answer.append(temp)
        # Drop the released tasks and continue with the remainder.
        progresses = progresses[idx:]
        speeds = speeds[idx:]
    return answer
# Demo: five tasks at 99% with speed 3 all finish after one day -> [5].
print(solution([99, 99, 99, 99, 99], [3, 3, 3, 3, 3]))
"ketkat001@gmail.com"
] | ketkat001@gmail.com |
9f12561ad304b6686343604c8f927cda626574b8 | 26e91aead18d0fad6f5ce8fc4adf7d8e05a2f07f | /tests/util/datetime/calc/test_calculate_days_util.py | 53cf0307f8290841c7e426a19284122c18f21258 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | leathe/byceps | 40c1f8a1aab3521fcac45d88eab6364d448d4e67 | cd0c618af63fed1cd7006bb67da46eac0ddbb1c7 | refs/heads/master | 2020-12-02T09:02:51.087511 | 2019-12-14T17:00:22 | 2019-12-14T17:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | """
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import date
import pytest
from byceps.util.datetime.calc import calculate_days_until
# Fixed reference date the countdown is computed towards.
SOME_DATE = date(1994, 3, 18)

@pytest.mark.parametrize('today, expected', [
    (date(2014, 3, 16), 2),
    (date(2014, 3, 17), 1),
    (date(2014, 3, 18), 0),
    # Day after the anniversary: a full (non-leap) year remains.
    (date(2014, 3, 19), 364),
])
def test_calculate_days_until(today, expected):
    # The expectations imply the function counts days until the next
    # *anniversary* of SOME_DATE (the year differs), not the literal date.
    actual = calculate_days_until(SOME_DATE, today)
    assert actual == expected
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
f0f826009c7d7edec9d20ab836fbd2f002481a5f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03088/s886343007.py | cb0bffa8d09a57e497014975538a18cc8134c4f7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | N = int(input())
mod = 10 ** 9 + 7
# In the style of Chokudai's editorial.
# dp[length][i][j][k] = number of valid strings of this length whose last
# three characters are (i, j, k); digits 0-3 stand for the four letters and
# 3 doubles as the initial "no character yet" sentinel.  N is read from
# stdin above.
# NOTE(review): the skipped (i, j, k, d) combinations encode the forbidden
# last-four-character patterns of this counting problem -- confirm the
# digit-to-letter mapping against the original problem statement.
dp = [[[[0] * 4 for _ in range(4)] for _ in range(4)] for _ in range(N + 1)]
dp[0][3][3][3] = 1
for Length in range(N):
    for i in range(4):
        for j in range(4):
            for k in range(4):
                if dp[Length][i][j][k] == 0:
                    continue
                for d in range(4):  # character to append
                    # Reject transitions that would complete a forbidden
                    # pattern within the last four characters.
                    if d == 1 and j == 0 and k == 2:
                        continue
                    if d == 2:
                        if j == 0 and k == 1:
                            continue
                        if j == 1 and k == 0:
                            continue
                        if i == 0 and k == 1:
                            continue
                        if i == 0 and j == 1:
                            continue
                    dp[Length + 1][j][k][d] += dp[Length][i][j][k]
                    dp[Length + 1][j][k][d] %= mod
# Sum over every possible last-three-character state of full-length strings.
ans = 0
for i in range(4):
    for j in range(4):
        for k in range(4):
            ans = (ans + dp[N][i][j][k]) % mod
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a35a19b9d67715a726dacdfc4e9dc71bcf0d3f70 | 13f5984be7be77852e4de29ab98d5494a7fc6767 | /100Cases/51-100/num52按位或.py | 7b1d3c7885a6b8ca4068d3022d343b79d8057e73 | [] | no_license | YuanXianguo/Python-Interview-Master | 4252514763fc3f563d9b94e751aa873de1719f91 | 2f73786e8c51dbd248341559de171e18f67f9bf2 | refs/heads/master | 2020-11-26T18:14:50.190812 | 2019-12-20T02:18:03 | 2019-12-20T02:18:03 | 229,169,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | """程序分析:0|0=0; 0|1=1; 1|0=1; 1|1=1"""
# Bitwise OR walkthrough (0|0=0, 0|1=1, 1|0=1, 1|1=1); the *_d and *_b
# names restate the same values in decimal and binary for comparison.
a = 0o77        # octal 77 == decimal 63
a_d = 63
a_b = 0b111111
b = 3
b_b = 0b11
num = a | b     # 0b111111 | 0b000011 -> 0b111111 (63)
num_b = 0b111111
print(num_b,num)
d = 7
d_b = 0b111
num_ = num | d  # OR-ing in bits that are already set changes nothing
num_b_ = 0b111111
print(num_b_,num_)
| [
"736913978@qq.com"
] | 736913978@qq.com |
072ffb1a0289a7bf669e19f91d43bfb644b269de | d3c518b69525c04022ff76c583b4c31aae1e4295 | /tests/func/grammar/test_special_grammars.py | b04bfc98a47e74b30dfbc9f3986e1c5b5948384c | [
"Python-2.0",
"MIT"
] | permissive | boriel/parglare | 6714ed8c9c52b174f8c7fdf0bb986446ad2d55d9 | 74a6d98b6e510ae3c814c517924796c5dccefae0 | refs/heads/master | 2023-01-28T09:15:44.402669 | 2020-12-08T10:47:22 | 2020-12-08T10:47:22 | 103,997,403 | 0 | 0 | MIT | 2020-12-08T10:47:23 | 2017-09-18T22:13:24 | Python | UTF-8 | Python | false | false | 7,118 | py | # -*- coding: utf-8 -*-
"""
Test non-deterministic parsing.
"""
import pytest # noqa
import sys
from parglare import Parser, GLRParser, Grammar, SLR, LALR
from parglare.exceptions import ParseError, SRConflicts, RRConflicts
def test_lr_1_grammar():
    """From the Knuth's 1965 paper: On the Translation of Languages from Left to
    Right
    """
    grammar = """
S: 'a' A 'd' | 'b' A 'd';
A: 'c' A | 'c';
"""
    g = Grammar.from_string(grammar)
    # A deterministic LR parser accepts both sentence forms.
    parser = Parser(g)
    parser.parse("acccccccccd")
    parser.parse("bcccccccccd")
    # GLR returns all derivations; an unambiguous grammar has exactly one.
    parser = GLRParser(g)
    assert len(parser.parse("accccccccd")) == 1
    assert len(parser.parse("bccccccccd")) == 1

def test_slr_conflict():
    """
    Unambiguous grammar which is not SLR(1).
    From the Dragon Book.
    This grammar has a S/R conflict if SLR tables are used.
    """
    grammar = """
S: L '=' R | R;
L: '*' R | 'id';
R: L;
"""
    grammar = Grammar.from_string(grammar)
    # SLR table construction must report the shift/reduce conflict...
    with pytest.raises(SRConflicts):
        Parser(grammar, tables=SLR, prefer_shifts=False)
    # ...while LALR tables are built without conflicts.
    Parser(grammar, tables=LALR, prefer_shifts=False)
def test_lalr_reduce_reduce_conflict():
    """
    Naive merging of states can lead to R/R conflict as shown in this grammar
    from the Dragon Book.
    But the extended LALR state compression algorithm used in parglare doesn't
    exhibit this problem.
    """
    grammar = """
S: 'a' A 'd' | 'b' B 'd' | 'a' B 'e' | 'b' A 'e';
A: C;
B: C;
C: 'c';
"""
    grammar = Grammar.from_string(grammar)
    # Table construction must succeed without reduce/reduce conflicts.
    Parser(grammar)

def test_nondeterministic_LR_raise_error():
    """Language of even length palindromes.
    This is a non-deterministic grammar and the language is non-ambiguous.
    If the string is an even length palindrome the parser should reduce EMPTY
    at the middle of the string and start to reduce by A and B.
    LR parsing is deterministic so this grammar can't parse the input as the
    EMPTY reduction will be tried only after consuming all the input by
    implicit disambiguation strategy of favouring shifts over empty reductions.
    OTOH, GLR parser can handle this by forking parser at each step and trying
    both empty reductions and shifts. Only the parser that has reduced empty at
    the middle of the input will succeed.
    """
    grammar = """
S: A | B | EMPTY;
A: '1' S '1';
B: '0' S '0';
"""
    g = Grammar.from_string(grammar)
    # The deterministic parser cannot find the middle -> parse error.
    with pytest.raises(ParseError):
        p = Parser(g)
        p.parse('0101000110001010')
    # GLR forks on the empty reduction and finds the single valid parse.
    p = GLRParser(g)
    results = p.parse('0101000110001010')
    assert len(results) == 1
def test_cyclic_grammar_1():
    """
    From the paper: "GLR Parsing for e-Grammers" by Rahman Nozohoor-Farshi
    """
    grammar = """
S: A;
A: S;
A: 'x';
"""
    g = Grammar.from_string(grammar)
    # The cycle S -> A -> S produces a shift/reduce conflict for plain LR.
    with pytest.raises(SRConflicts):
        Parser(g, prefer_shifts=False)
    p = GLRParser(g)
    results = p.parse('x')
    # x -> A -> S
    assert len(results) == 1

@pytest.mark.skipif(sys.version_info < (3, 6),
                    reason="list comparison doesn't work "
                           "correctly in pytest 4.1")
def test_cyclic_grammar_2():
    """
    From the paper: "GLR Parsing for e-Grammers" by Rahman Nozohoor-Farshi
    """
    grammar = """
S: S S;
S: 'x';
S: EMPTY;
"""
    g = Grammar.from_string(grammar)
    with pytest.raises(SRConflicts):
        Parser(g, prefer_shifts=False)
    p = GLRParser(g)
    results = p.parse('xx')
    # We have 11 valid solutions
    assert len(results) == 11
    # Each solution nests 'x' leaves and [] (EMPTY) reductions differently.
    expected = [
        ['x', 'x'],
        [[[], 'x'], 'x'],
        [[[], [[], 'x']], 'x'],
        ['x', [[], 'x']],
        [[[], 'x'], [[], 'x']],
        [[], ['x', 'x']],
        [[], [[], ['x', 'x']]],
        ['x', [[], 'x']],
        [[[], 'x'], [[], 'x']],
        [[[], [[], 'x']], [[], 'x']],
        [[], [[[], 'x'], 'x']]
    ]
    assert expected == results
@pytest.mark.skipif(sys.version_info < (3, 6),
                    reason="list comparison doesn't work "
                           "correctly in pytest 4.1")
def test_cyclic_grammar_3():
    """
    Grammar with indirect cycle.
    r:EMPTY->A ; r:A->S; r:EMPTY->A; r:SA->S; r:EMPTY->A; r:SA->S;...
    """
    grammar = """
S: S A | A;
A: "a" | EMPTY;
"""
    g = Grammar.from_string(grammar)
    # Deterministic table construction succeeds for this grammar.
    Parser(g)
    p = GLRParser(g)
    results = p.parse('aa')
    assert len(results) == 2
    expected = [
        ['a', 'a'],
        [[[], 'a'], 'a']
    ]
    assert results == expected

def test_highly_ambiguous_grammar():
    """
    This grammar has both Shift/Reduce and Reduce/Reduce conflicts and
    thus can't be parsed by a deterministic LR parsing.
    Shift/Reduce can be resolved by prefer_shifts strategy.
    """
    grammar = """
S: "b" | S S | S S S;
"""
    g = Grammar.from_string(grammar)
    with pytest.raises(SRConflicts):
        Parser(g, prefer_shifts=False)
    # S/R are resolved by selecting prefer_shifts strategy.
    # But R/R conflicts remain.
    with pytest.raises(RRConflicts):
        Parser(g, prefer_shifts=True)
    # GLR parser handles this fine.
    p = GLRParser(g, build_tree=True)
    # For three tokens we have 3 valid derivations/trees.
    results = p.parse("bbb")
    assert len(results) == 3
    # For 4 tokens we have 10 valid derivations.
    results = p.parse("bbbb")
    assert len(results) == 10
def test_indirect_left_recursive():
    """Grammar with indirect/hidden left recursion.
    parglare LR parser will handle this using implicit disambiguation by
    preferring shifts over empty reductions. It will greedily match "b" tokens
    and then reduce EMPTY before "a" and start to reduce by 'B="b" B'
    production.
    """
    grammar = """
S: B "a";
B: "b" B | EMPTY;
"""
    g = Grammar.from_string(grammar)
    p = Parser(g)
    p.parse("bbbbbbbbbbbba")
    # GLR finds the same single derivation.
    p = GLRParser(g)
    results = p.parse("bbbbbbbbbbbba")
    assert len(results) == 1

def test_reduce_enough_empty():
    """
    In this unambiguous grammar parser must reduce as many empty A productions
    as there are "b" tokens ahead to be able to finish successfully, thus it
    needs unlimited lookahead
    Language is: xb^n, n>=0
    References:
    Nozohoor-Farshi, Rahman: "GLR Parsing for ε-Grammers", Generalized LR
    parsing, Springer, 1991.
    Rekers, Joan Gerard: "Parser generation for interactive environments",
    phD thesis, Universiteit van Amsterdam, 1992.
    """
    grammar = """
S: A S "b";
S: "x";
A: EMPTY;
"""
    g = Grammar.from_string(grammar)
    p = GLRParser(g)
    results = p.parse("xbbb")
    assert len(results) == 1

def test_reduce_enough_many_empty():
    """
    This is a generalization of the previous grammar where parser must reduce
    enough A B pairs to succeed.
    The language is the same: xb^n, n>=0
    """
    grammar = """
S: A B S "b";
S: "x";
A: EMPTY;
B: EMPTY;
"""
    g = Grammar.from_string(grammar)
    p = GLRParser(g)
    results = p.parse("xbbb")
    assert len(results) == 1
| [
"igor.dejanovic@gmail.com"
] | igor.dejanovic@gmail.com |
861c1152d96532898f9e8f77ce6d44266c661726 | 8500de86f864e60856f4af17cfc7f620fd0e0ec9 | /test/counts_table/initialize.py | e8408b6b3e86675fc8984e895ded9060e1ef6026 | [
"MIT"
] | permissive | kaladharprajapati/singlet | c813ba7aa3d078659ef7868299b7093df207c6fc | 314ba68cbecebbda2806afdc97fdf6ac6f6c672e | refs/heads/master | 2020-05-18T06:58:04.697593 | 2019-02-15T15:50:59 | 2019-02-15T15:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | #!/usr/bin/env python
# vim: fdm=indent
'''
author: Fabio Zanini
date: 15/08/17
content: Test CountsTable class.
'''
# Script
if __name__ == '__main__':
    # NOTE: an env variable for the config file needs to be set when
    # calling this script
    # Smoke test: construct a CountsTable from the bundled example table.
    # The import is done inside the guard -- presumably so the config env
    # var can be set before singlet is imported (confirm).
    print('Instantiating CountsTable')
    from singlet.counts_table import CountsTable
    ct = CountsTable.from_tablename('example_table_tsv')
    print('Done!')
| [
"fabio.zanini@fastmail.fm"
] | fabio.zanini@fastmail.fm |
88bdbcb1399726e566d59184082756a59e84d1fb | 609d5408f302c9188b723998762c2c1f7b883af9 | /.closet/jython.configurator.efr32/1.0.0.201606231656-435/pyradioconfig/parts/jumbo/profiles/__init__.py | 7450887a42996decd085e0a7530498b61cf25be7 | [] | no_license | acvilla/Sundial-Beta | 6ea4fd44cbf7c2df8100128aff5c39b6faf24a82 | 9f84e3b5a1397998dfea5287949fa5b1f4c209a6 | refs/heads/master | 2021-01-15T15:36:19.394640 | 2016-08-31T20:15:16 | 2016-08-31T20:15:16 | 63,294,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | """
Dumbo specific Profiles
How to add a new Profile:
--------------------------
* Add a new Python file in this directory (example: Profile_[Name].py)
* Inside the Py file, create a class that implements IProfile:
>>> class Profile_Base(IProfile):
* Implement/override buildProfileModel() function. This function builds the profile inputs, forced, outputs into modem model.
Example:
>>> def buildProfileModel(self, model):
>>> # Build profile
>>> profile = self._makeProfile(model)
>>>
>>> profile.inputs.append(ModelInput(model.vars.xtal_frequency_hz, "crystal", input_type=ModelInputType.REQUIRED, readable_name="Crystal Frequency", value_limit_min=38000000, value_limit_max=40000000))
>>> profile.inputs.append(ModelInput(model.vars.rx_xtal_error_ppm, "crystal", input_type=ModelInputType.REQUIRED, readable_name="RX Crystal Accuracy", value_limit_min=0, value_limit_max=200))
>>>
>>> # Intermediate values
>>> self.make_linked_input_output(profile, model.vars.timing_detection_threshold , 'Advanced', readable_name='Timing Detection Threshold', value_limit_min=0, value_limit_max=255)
>>>
>>> # Output fields
>>> profile.outputs.append(ModelOutput(model.vars.SYNTH_CTRL_PRSMUX1 , '', ModelOutputType.SVD_REG_FIELD, readable_name='SYNTH.CTRL.PRSMUX1' ))
>>>
>>> return profile
"""
import os
import glob

# Collect the modules in this package so ``from ... import *`` exposes them.
# Fall back to compiled .pyc files when no sources are present (e.g. a
# bytecode-only distribution).
modules = glob.glob(os.path.dirname(__file__) + "/*.py")
if len(modules) == 0:
    modules = glob.glob(os.path.dirname(__file__) + "/*.pyc")
# Strip the directory and the extension to get importable module names.
# (The previous ``basename(f)[:-3]`` assumed a 3-character ".py" suffix and
# left a trailing dot for ".pyc" files; splitext handles both.)
__all__ = [os.path.splitext(os.path.basename(f))[0] for f in modules]
"acvilla@bu.edu"
] | acvilla@bu.edu |
dbd6e34e75a7e79edfad1c60ea67094198683509 | 15e818aada2b18047fa895690bc1c2afda6d7273 | /gs/monitor2/apps/plugins/layouts/gs_layout.py | becc28275206704a061906649bfea056979cc494 | [
"Apache-2.0"
] | permissive | ghomsy/makani | 4ee34c4248fb0ac355f65aaed35718b1f5eabecf | 818ae8b7119b200a28af6b3669a3045f30e0dc64 | refs/heads/master | 2023-01-11T18:46:21.939471 | 2020-11-10T00:23:31 | 2020-11-10T00:23:31 | 301,863,147 | 0 | 0 | Apache-2.0 | 2020-11-10T00:23:32 | 2020-10-06T21:51:21 | null | UTF-8 | Python | false | false | 2,790 | py | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layout to monitor ground station status."""
from makani.gs.monitor2.apps.layout import base
from makani.gs.monitor2.apps.plugins.indicators import aio_comms
from makani.gs.monitor2.apps.plugins.indicators import control
from makani.gs.monitor2.apps.plugins.indicators import gps
from makani.gs.monitor2.apps.plugins.indicators import ground_station
class GsLayout(base.BaseLayout):
  """The ground station layout."""

  _NAME = 'Ground Station'
  _DESIRED_VIEW_COLS = 2
  # Node name handed to the GPS indicators below.
  _GPS_NODE = 'GpsBaseStation'
  _ORDER_HORIZONTALLY = False

  def Initialize(self):
    # Each _AddIndicators() call adds one titled group of indicators;
    # _AddBreak() presumably separates layout sections (see BaseLayout).
    self._AddIndicators('AIO Update', [
        aio_comms.GsCoreSwitchAioUpdateIndicator(),
        aio_comms.GsGpsAioUpdateIndicator(),
        aio_comms.PlatformSensorsAioUpdateIndicator(),
    ])
    self._AddIndicators('Wind', [
        ground_station.WindIndicator(),
        ground_station.WindSensorSpeedIndicator(),
        ground_station.WindSensorStatusIndicator(),
        control.WindStateEstIndicator(),
        ground_station.WeatherSensorIndicator(),
        ground_station.AirDensityIndicator(),
    ])
    self._AddIndicators('GPS', [
        gps.NovAtelNumSatsIndicator(self._GPS_NODE),
        gps.NovAtelCn0Indicator(self._GPS_NODE),
        gps.NovAtelSigmasIndicator(self._GPS_NODE),
        gps.CompassHeadingIndicator(self._GPS_NODE),
    ])
    self._AddIndicators('Winch PLC', [
        ground_station.PerchAzimuthIndicator(),
        ground_station.GsgAzimuthIndicator(['A']),
        ground_station.GsgElevationIndicator(),
        ground_station.PlcStatusIndicator(),
        # TODO: The following indicators will be removed in the future
        # when we test the top head for China Lake.
        ground_station.LevelwindElevationIndicator(),
        ground_station.WinchArmedIndicator(),
        ground_station.DrumStateIndicator(),
        ground_station.WinchProximityIndicator(),
    ])
    self._AddBreak()
    self._AddIndicators('PLC', [
        ground_station.DetwistArmedIndicator(),
        ground_station.DetwistStatusIndicator(),
        ground_station.DetwistTemperatureIndicator(),
        ground_station.DetwistStatusInfoIndicator(),
        ground_station.Ground480VIndicator(),
    ])
| [
"luislarco@google.com"
] | luislarco@google.com |
b38ea4d4abae4e15f95f4f20b561b5ece41b2d6a | 108034973f9046a7603d5fe3f26c59b20a7e68da | /lab/lab05/lab05.py | d7a06219e929cbbf620509d6269d6ca247e50d07 | [] | no_license | paulhzq/cs61a | b1b1387cefbaaf1823c02d535891db7d085f3b04 | 9eee13df9ad113591dc55d106561951cea34abc5 | refs/heads/master | 2020-05-23T08:16:14.193086 | 2017-01-15T02:06:18 | 2017-01-15T02:06:18 | 70,255,875 | 8 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,704 | py | ## Lab 5: Mutable Sequences and Trees ##
# Sequences
def map(fn, seq):
    """Applies fn onto each element in seq and returns a list.

    >>> map(lambda x: x*x, [1, 2, 3])
    [1, 4, 9]
    """
    mapped = []
    for item in seq:
        mapped.append(fn(item))
    return mapped
def filter(pred, seq):
    """Keeps elements in seq only if they satisfy pred.

    >>> filter(lambda x: x % 2 == 0, [1, 2, 3, 4])
    [2, 4]
    """
    kept = []
    for item in seq:
        if pred(item):
            kept.append(item)
    return kept
def reduce(combiner, seq):
    """Combines elements in seq using combiner.

    >>> reduce(lambda x, y: x + y, [1, 2, 3, 4])
    10
    >>> reduce(lambda x, y: x * y, [1, 2, 3, 4])
    24
    >>> reduce(lambda x, y: x * y, [4])
    4
    """
    # Fold left-to-right, seeded with the first element (IndexError on an
    # empty sequence, matching seq[0]).
    accumulated = seq[0]
    for position in range(1, len(seq)):
        accumulated = combiner(accumulated, seq[position])
    return accumulated
# pyTunes
def make_pytunes(username):
    """Return a pyTunes tree with USERNAME as the value of the root.

    The tree contains two genre branches:
    pop -> justin bieber -> single -> 'what do you mean?', plus the
    '2015 pop mashup' leaf; and trance -> darude -> 'sandstorm'.
    """
    single = tree('single', [tree('what do you mean?')])
    bieber = tree('justin bieber', [single])
    pop = tree('pop', [bieber, tree('2015 pop mashup')])
    darude = tree('darude', [tree('sandstorm')])
    return tree(username, [pop, tree('trance', [darude])])
def num_songs(t):
    """Return the number of songs in the pyTunes tree, t.

    Songs are exactly the leaves of the tree.

    >>> pytunes = make_pytunes('i_love_music')
    >>> num_songs(pytunes)
    3
    """
    if is_leaf(t):
        return 1
    total = 0
    for subtree in branches(t):
        total += num_songs(subtree)
    return total
def add_song(t, song, category):
    """Returns a new tree with SONG added as a leaf under CATEGORY.

    Assumes CATEGORY already exists somewhere in the tree; the input tree
    is not mutated.
    """
    if root(t) == category:
        # Found the category node: append the song as a new branch.
        return tree(root(t), branches(t) + [tree(song)])
    rebuilt = [add_song(subtree, song, category) for subtree in branches(t)]
    return tree(root(t), rebuilt)
# Tree ADT
def tree(root, branches=[]):
    """Construct a tree: a list whose first item is the root label and
    whose remaining items are the branch subtrees."""
    for subtree in branches:
        assert is_tree(subtree), 'branches must be trees'
    return [root] + list(branches)
def root(tree):
    # The root label is stored first.  (The parameter shadows the tree()
    # constructor inside this helper.)
    return tree[0]

def branches(tree):
    # Everything after the label is the list of branch subtrees.
    return tree[1:]
def is_tree(tree):
    """Return True if tree is a well-formed tree: a non-empty list whose
    branches are themselves well-formed trees."""
    # Exact list check (not isinstance) -- matches the original ADT contract.
    if type(tree) != list or len(tree) < 1:
        return False
    return all(is_tree(subtree) for subtree in branches(tree))
def is_leaf(tree):
    # A leaf is a tree with no branches.
    return not branches(tree)
def print_tree(t, indent=0):
    """Print a representation of this tree in which each node is indented
    according to its depth from the entry.

    >>> print_tree(tree(1))
    1
    """
    print(' ' * indent + str(root(t)))
    for subtree in branches(t):
        print_tree(subtree, indent + 1)
def copy_tree(t):
    """Returns a deep copy of t. Only for testing purposes.

    >>> t = tree(5)
    >>> copy = copy_tree(t)
    >>> t = tree(6)
    >>> print_tree(copy)
    5
    """
    copied_branches = [copy_tree(subtree) for subtree in branches(t)]
    return tree(root(t), copied_branches)
"paul_hzq@hotmail.com"
] | paul_hzq@hotmail.com |
c14629b1a6e08472ce43288bee91fa595f6c20f5 | 32735726b6b6416f66e8f6d719382f3ffe3d1177 | /or2yw_examples/NYPL/script/ConfigTool.py | 0457fd07bb42522d033fa75db24e08329696f2b1 | [] | no_license | yixuan21/OR2YWTool | 5489d25a2a15d6d2513f909508fd5df97ee6d69b | eada19dccc6d03e80f0795efef949bc19f757fa0 | refs/heads/master | 2023-02-12T17:27:58.034974 | 2021-01-07T06:56:16 | 2021-01-07T06:56:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | import argparse
import LinearOriginalOR2YW as Linear
import SPOriginalOR2YW as SP
def main():
    """Parse the command-line flags and generate the requested YW model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-L', '--Linear', help="Generate Linear YW model", action="store_true")
    parser.add_argument('-SP', '--SerialParallel', help="Generate Serial-Parallel YW model", action="store_true")
    args = parser.parse_args()
    # Dispatch to the matching generator module; -L wins if both are given.
    # NOTE(review): with neither flag, nothing happens -- confirm whether
    # printing usage would be preferable.
    if args.Linear:
        Linear.main()
    elif args.SerialParallel:
        SP.main()

if __name__ == '__main__':
    main()
| [
"lilan.scut@gmail.com"
] | lilan.scut@gmail.com |
0b2d82c28a801219156d6a83facb2da9e5069808 | d47956c25f4f7ce0ae4a39cde40ee8f958f6b8bd | /products/migrations/0009_auto_20161018_0947.py | 79ad2a25f0bc70671a7e4a9b1e670c066e1ab9dc | [] | no_license | vaerjngiar/eShopper | de6e6bc4aece91fd766fed35a0654feaf0285163 | 5c43bc8867e484e03cd18e1a7ed4faf64cad4b7a | refs/heads/master | 2020-06-14T09:41:58.021570 | 2016-12-31T13:25:47 | 2016-12-31T13:25:47 | 77,740,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-18 06:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration; edit with care.

    dependencies = [
        ('products', '0008_product_size'),
    ]

    operations = [
        # Default ordering and human-readable names for CatalogCategory.
        migrations.AlterModelOptions(
            name='catalogcategory',
            options={'ordering': ('name',), 'verbose_name': 'category', 'verbose_name_plural': 'categories'},
        ),
        # Rename Product.size to Product.code.
        migrations.RenameField(
            model_name='product',
            old_name='size',
            new_name='code',
        ),
        migrations.AddField(
            model_name='product',
            name='available',
            field=models.BooleanField(default=True),
        ),
        # Add a database index on the product name.
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(db_index=True, max_length=255),
        ),
        migrations.AlterField(
            model_name='product',
            name='slug',
            field=models.SlugField(max_length=255),
        ),
    ]
| [
"vaerjngiar@gmail.com"
] | vaerjngiar@gmail.com |
3663318075fd771c2027ed016c8bd9ecab76c0e4 | 3996539eae965e8e3cf9bd194123989741825525 | /RecoEgamma/EgammaHLTProducers/hltEgammaHLTNxNClusterProducer_cfi.py | 57de83bc38ca0be104c1d2bdd00e2e8a2c4304b3 | [] | no_license | cms-sw/cmssw-cfipython | 01990ea8fcb97a57f0b0cc44a8bf5cde59af2d98 | 25ee4c810103c4a507ca1b949109399a23a524c5 | refs/heads/CMSSW_11_2_X | 2023-09-01T16:56:00.658845 | 2022-06-20T22:49:19 | 2022-06-20T22:49:19 | 136,184,115 | 1 | 0 | null | 2022-10-19T14:04:01 | 2018-06-05T13:47:28 | Python | UTF-8 | Python | false | false | 1,133 | py | import FWCore.ParameterSet.Config as cms
# CMSSW HLT module: builds fixed-size NxN ECAL clusters (3x3 per the
# clusEtaSize/clusPhiSize settings) from the regional pi0/eta rechits.
hltEgammaHLTNxNClusterProducer = cms.EDProducer('EgammaHLTNxNClusterProducer',
    doBarrel = cms.bool(True),
    doEndcaps = cms.bool(True),
    barrelHitProducer = cms.InputTag('hltEcalRegionalPi0EtaRecHit', 'EcalRecHitsEB'),
    endcapHitProducer = cms.InputTag('hltEcalRegionalPi0EtaRecHit', 'EcalRecHitsEE'),
    # Cluster window size in crystals.
    clusEtaSize = cms.int32(3),
    clusPhiSize = cms.int32(3),
    barrelClusterCollection = cms.string('Simple3x3ClustersBarrel'),
    endcapClusterCollection = cms.string('Simple3x3ClustersEndcap'),
    # Seed thresholds for barrel/endcap (units presumably GeV -- confirm).
    clusSeedThr = cms.double(0.5),
    clusSeedThrEndCap = cms.double(1),
    # Rechit quality selection: via reco flags or via DB channel status.
    useRecoFlag = cms.bool(False),
    flagLevelRecHitsToUse = cms.int32(1),
    useDBStatus = cms.bool(True),
    statusLevelRecHitsToUse = cms.int32(1),
    # Log-weighted cluster position calculation parameters.
    posCalcParameters = cms.PSet(
        T0_barl = cms.double(7.4),
        T0_endc = cms.double(3.1),
        T0_endcPresh = cms.double(1.2),
        W0 = cms.double(4.2),
        X0 = cms.double(0.89),
        LogWeighted = cms.bool(True)
    ),
    maxNumberofSeeds = cms.int32(1000),
    maxNumberofClusters = cms.int32(200),
    debugLevel = cms.int32(0),
    mightGet = cms.optional.untracked.vstring
)
| [
"cmsbuild@cern.ch"
] | cmsbuild@cern.ch |
88c2969816720474e934b4cd7f2263875bbd96ae | 5935025f9c6e83aa006cd64755f71644009dedb5 | /core/utils.py | 75e2c316b8726d87611f4ba2bf628ecd43c882d9 | [] | no_license | zdYng/MyQuantification | 24bd546ce1b7ed94115a1eb94b2e11598df95f6b | 2fa874be4c8707e9c10bd7620cec2796946badcc | refs/heads/master | 2020-09-14T02:10:59.194409 | 2019-11-21T03:17:18 | 2019-11-21T03:17:18 | 222,980,387 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import datetime as dt
class Timer():
    """Simple wall-clock stopwatch: call start(), then stop() to print elapsed time."""

    def __init__(self):
        # Timestamp recorded by start(); None until the timer has been started.
        self.start_dt = None

    def start(self):
        """Record the current time as the starting point."""
        self.start_dt = dt.datetime.now()

    def stop(self):
        """Print the time elapsed since the last start() call.

        Raises:
            ValueError: if called before start() (the original code raised an
                opaque TypeError on the None subtraction instead).
        """
        if self.start_dt is None:
            raise ValueError('Timer.stop() called before Timer.start()')
        end_dt = dt.datetime.now()
        # Bug fix: original message read "Time token"; corrected to "Time taken".
        print('Time taken: %s' % (end_dt - self.start_dt))
| [
"qianzhongdao@163.com"
] | qianzhongdao@163.com |
14ec0e21f7e22754fe439ab3ac53c8b332534207 | d308fffe3db53b034132fb1ea6242a509f966630 | /pirates/world/ModularAreaBuilder.py | 7a52e5645c421e21103107fa08fc0966c217c0e6 | [
"BSD-3-Clause"
] | permissive | rasheelprogrammer/pirates | 83caac204965b77a1b9c630426588faa01a13391 | 6ca1e7d571c670b0d976f65e608235707b5737e3 | refs/heads/master | 2020-03-18T20:03:28.687123 | 2018-05-28T18:05:25 | 2018-05-28T18:05:25 | 135,193,362 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,335 | py | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.world.ModularAreaBuilder
from pandac.PandaModules import *
from otp.otpbase import OTPRender
from pirates.world.SectionAreaBuilder import SectionAreaBuilder
from pirates.leveleditor import EditorGlobals
class ModularAreaBuilder(SectionAreaBuilder):
    """Area builder for modular (piece-assembled) interiors such as caves.
    Extends SectionAreaBuilder with per-section "sub lights": each visibility
    zone is lit by its own section's lights plus those of adjacent sections,
    and modular pieces may override the linear fog via node tags.
    (Decompiled source -- structure preserved as-is.)
    """
    __module__ = __name__
    def __init__(self, master):
        SectionAreaBuilder.__init__(self, master)
        self.subLights = {}
        self.adjTable = {}
        self.subLights = {}  # NOTE(review): duplicate of the assignment two lines up (decompiler artifact?)
        self.areaLights = {}
    def _postLoadStep(self):
        """Post-load hook: collect tagged sub-lights, then light everything."""
        SectionAreaBuilder._postLoadStep(self)
        # NOTE(review): local adjTable is fetched but unused below -- verify
        # against the original (non-decompiled) source.
        adjTable = base.worldCreator.uidAdjTables.get(self.master.uniqueId, {})
        for light in self.areaGeometry.findAllMatches('**/=SubLight;+s'):
            # The 'SubLight' tag names the vis zone the light belongs to.
            zone = self.sectionsToParent.get(light.getTag('SubLight'))
            if zone:
                self.addSubLight(zone, light.find('**/+Light').node())
        self.lightObjects()
    def addChildObj(self, levelObj):
        """Add a child level object; tag modular cave pieces for lighting/fog."""
        root = SectionAreaBuilder.addChildObj(self, levelObj)
        if levelObj['Type'] == 'Cave_Pieces':
            root.setTag('modular', '1')
            if levelObj.get('OverrideFog', False):
                # Fog overrides are read back as strings in triggerEffects().
                root.setTag('fog-onset', str(levelObj.get('FogOnSet', 0)))
                root.setTag('fog-peak', str(levelObj.get('FogPeak', 100)))
    def lightObjects(self):
        """Apply the per-zone light sets to sections, large objects and portals."""
        self.generateAdjLightSets()
        for zone in self.sections:
            parent = self.sectionsToParent[zone]
            lightAttrib = self.areaLights.get(parent)
            if not lightAttrib:
                continue
            self.sections[zone].setAttrib(lightAttrib)
        for uid, obj in self.largeObjects.iteritems():
            visZone = obj.getTag('visZone')
            modular = obj.getTag('modular')
            if modular:
                # Modular pieces carry their own light set keyed by their uid.
                self.largeObjects[uid].setAttrib(self.areaLights[uid])
            elif visZone:
                self.largeObjects[uid].setAttrib(self.areaLights[self.sectionsToParent[visZone]])
        for node in self.areaGeometry.findAllMatches('**/=PortalVis'):
            visZone = node.getTag('PortalVis')
            node.setAttrib(self.areaLights[self.sectionsToParent[visZone]])
    def generateAdjLightSets(self):
        """Build one LightAttrib per zone from its own and adjacent sub-lights."""
        for zone in self.adjTable:
            lightAttrib = LightAttrib.make()
            group = self.subLights.get(zone, [])
            for light in group:
                lightAttrib = lightAttrib.addLight(light)
            for adjZone in self.adjTable[zone]:
                adjGroup = self.subLights.get(adjZone, [])
                for light in adjGroup:
                    lightAttrib = lightAttrib.addLight(light)
            self.areaLights[zone] = lightAttrib
    def addSubLight(self, zone, light):
        """Register *light* under *zone*, creating the group on first use."""
        subLightGroup = self.subLights.get(zone)
        if not subLightGroup:
            subLightGroup = self.subLights[zone] = []
        subLightGroup.append(light)
    def makeLight(self, levelObj):
        """Create a modular light node for *levelObj* and tag it with its vis zone."""
        light = EditorGlobals.LightModular(levelObj, self.areaGeometry, drawIcon=False)
        if levelObj.get('VisZone'):
            if light:
                light.setTag('SubLight', levelObj.get('VisZone'))
        # Exclude the light node from reflection rendering.
        OTPRender.renderReflection(False, light, 'p_light', None)
        return light
    def handleLighting(self, obj, visZone):
        """Apply the light set of *visZone*'s parent section to *obj*."""
        parent = self.sectionsToParent.get(visZone)
        if parent and self.areaLights.has_key(parent):
            obj.setAttrib(self.areaLights[parent])
        SectionAreaBuilder.handleLighting(self, obj, visZone)
    def localAvLeaving(self):
        """Strip interior lighting from the local avatar when it leaves."""
        localAvatar.clearAttrib(LightAttrib.getClassType())
    def disableDynamicLights(self):
        # Intentionally a no-op: modular areas keep their static sub-lights.
        pass
    def addSectionObj(self, obj, visZone, logError=0):
        """Attach *obj* to *visZone* and light it; optionally AI-log a missing parent."""
        SectionAreaBuilder.addSectionObj(self, obj, visZone)
        parent = self.sectionsToParent.get(visZone)
        if parent:
            # Short-circuit form: setAttrib runs only when a light set exists.
            self.areaLights.has_key(parent) and obj.setAttrib(self.areaLights[parent])
        else:
            if logError:
                errorMessage = 'Chest missing parent visZone %s location %s position %s' % (visZone, localAvatar.getLocation(), localAvatar.getPos())
                localAvatar.sendAILog(errorMessage)
            else:
                if __dev__:
                    # NOTE(review): set_trace is not imported in this file and
                    # would raise NameError if this dev-only branch is reached.
                    set_trace()
    def arrived(self):
        """On entering the area: clip geometry beyond the far cull plane."""
        render.setClipPlane(base.farCull)
    def left(self):
        """On leaving the area: remove the far-cull clip plane."""
        render.clearClipPlane()
    def triggerEffects(self, visZone):
        """Apply/restore fog overrides for the modular piece owning *visZone*."""
        SectionAreaBuilder.triggerEffects(self, visZone)
        parent = self.sectionsToParent.get(visZone)
        if parent:
            module = self.largeObjects.get(parent)
            if module:
                if module.getTag('modular'):
                    onset = module.getTag('fog-onset')
                    peak = module.getTag('fog-peak')
                    # Tags are strings; empty string means "no onset override".
                    onset = onset and float(onset)
                    peak = float(peak)
                    base.cr.timeOfDayManager.lerpLinearFog(onset, peak)
                else:
                    base.cr.timeOfDayManager.restoreLinearFog()
    def unloadObjects(self):
        """Drop cached lighting state when the area unloads."""
        self.areaLights = {}
        self.subLights = {}
        self.adjTable = {}
SectionAreaBuilder.unloadObjects(self) | [
"33942724+itsyaboyrocket@users.noreply.github.com"
] | 33942724+itsyaboyrocket@users.noreply.github.com |
3be9d317ad09dd674355187358e491d131b899a5 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayAcquireCreateandpayResponse.py | c14ee8bde981d6a5568c67d3c189308b17bf169e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,910 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayAcquireCreateandpayResponse(AlipayResponse):
    """Response model for the alipay.acquire.createandpay API call.

    Exposes the gateway's payment-result fields as read/write properties
    and fills them from a parsed response payload.
    """

    # Field names shared by the initialiser, the generated properties and
    # parse_response_content().
    _FIELD_NAMES = (
        'buyer_logon_id',
        'buyer_user_id',
        'detail_error_code',
        'detail_error_des',
        'extend_info',
        'out_trade_no',
        'result_code',
        'trade_no',
    )

    def __init__(self):
        super(AlipayAcquireCreateandpayResponse, self).__init__()
        # Backing attributes start out as None until a response is parsed.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    def _make_field(name):
        # Build a read/write property backed by the '_<name>' attribute.
        def _get(self):
            return getattr(self, '_' + name)

        def _set(self, value):
            setattr(self, '_' + name, value)

        return property(_get, _set)

    buyer_logon_id = _make_field('buyer_logon_id')
    buyer_user_id = _make_field('buyer_user_id')
    detail_error_code = _make_field('detail_error_code')
    detail_error_des = _make_field('detail_error_des')
    extend_info = _make_field('extend_info')
    out_trade_no = _make_field('out_trade_no')
    result_code = _make_field('result_code')
    trade_no = _make_field('trade_no')
    del _make_field  # helper only needed while the class body executes

    def parse_response_content(self, response_content):
        """Populate the field properties from the raw *response_content*."""
        response = super(AlipayAcquireCreateandpayResponse, self).parse_response_content(response_content)
        for name in self._FIELD_NAMES:
            if name in response:
                setattr(self, name, response[name])
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
ab60bdb394634c3f7bf7a2949170c5796910f7d6 | ed06ef44c944707276a2fca16d61e7820596f51c | /Python/create-target-array-in-the-given-order.py | 79a41b9196316ba78a9aa7642dbd5d9bc4c51130 | [] | no_license | sm2774us/leetcode_interview_prep_2021 | 15842bef80637c6ff43542ed7988ec4b2d03e82c | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | refs/heads/master | 2023-05-29T14:14:49.074939 | 2021-06-12T19:52:07 | 2021-06-12T19:52:07 | 374,725,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # Time: O(n^2)
# Space: O(1)
class Solution(object):
    def createTargetArray(self, nums, index):
        """Build the target array by inserting nums[i] at position index[i].

        :type nums: List[int]
        :type index: List[int]
        :rtype: List[int]
        """
        # Insert each value at its requested position; later inserts shift
        # earlier elements right, which is exactly the problem's semantics.
        # Bug fix vs. the original: the caller's `index` list is no longer
        # mutated.  Still O(n^2) overall (each list.insert is O(n)).
        target = []
        for value, pos in zip(nums, index):
            target.insert(pos, value)
        return target
# Time: O(n^2)
# Space: O(1)
import itertools
class Solution2(object):
    def createTargetArray(self, nums, index):
        """Build the target array by inserting nums[i] at position index[i].

        :type nums: List[int]
        :type index: List[int]
        :rtype: List[int]
        """
        # Bug fix: the original called itertools.zip(), which does not exist
        # (Python 3 has the builtin zip; Python 2 had itertools.izip), so the
        # method raised AttributeError on every call.
        result = []
        for i, x in zip(index, nums):
            result.insert(i, x)
        return result
| [
"sm2774us@gmail.com"
] | sm2774us@gmail.com |
7d944e3db94712dc9662468f900911a794ff785a | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /plugins/module_utils/network/slxos/slxos.py | 32f64d815194641c0d66b6c029f0e8f8720df836 | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,455 | py | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import json
from ansible_collections.notstdlib.moveitallout.plugins.module_utils._text import to_text
from ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible_collections.notstdlib.moveitallout.plugins.module_utils.connection import Connection
def get_connection(module):
    """Get switch connection
    Creates reusable SSH connection to the switch described in a given module.
    Args:
        module: A valid AnsibleModule instance.
    Returns:
        An instance of `ansible.module_utils.connection.Connection` with a
        connection to the switch described in the provided module.
    Raises:
        AnsibleConnectionFailure: An error occurred connecting to the device
    """
    if hasattr(module, 'slxos_connection'):
        # Reuse the connection cached on the module by an earlier call.
        return module.slxos_connection
    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        # Cache the new CLI connection on the module object for reuse.
        module.slxos_connection = Connection(module._socket_path)
    else:
        # fail_json() normally terminates the module run; the return below
        # is only reached if it does not.
        module.fail_json(msg='Invalid connection type %s' % network_api)
    return module.slxos_connection
def get_capabilities(module):
    """Get switch capabilities
    Collects and returns a python object with the switch capabilities.
    Args:
        module: A valid AnsibleModule instance.
    Returns:
        A dictionary containing the switch capabilities.
    """
    if not hasattr(module, 'slxos_capabilities'):
        # First call for this module: fetch, parse and cache the capabilities.
        raw = Connection(module._socket_path).get_capabilities()
        module.slxos_capabilities = json.loads(raw)
    return module.slxos_capabilities
def run_commands(module, commands):
    """Run command list against connection.
    Get new or previously used connection and send commands to it one at a time,
    collecting response.
    Args:
        module: A valid AnsibleModule instance.
        commands: Iterable of command strings.
    Returns:
        A list of output strings.
    """
    connection = get_connection(module)
    responses = []
    for cmd in to_list(commands):
        # A command may be a bare string or a dict carrying an interactive
        # prompt/answer pair.
        if isinstance(cmd, dict):
            command, prompt, answer = cmd['command'], cmd['prompt'], cmd['answer']
        else:
            command, prompt, answer = cmd, None, None
        output = connection.get(command, prompt, answer)
        try:
            output = to_text(output, errors='surrogate_or_strict')
        except UnicodeError:
            module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(output)))
        responses.append(output)
    return responses
def get_config(module):
    """Get switch configuration
    Gets the described device's current configuration. If a configuration has
    already been retrieved it will return the previously obtained configuration.
    Args:
        module: A valid AnsibleModule instance.
    Returns:
        A string containing the configuration.
    """
    if not hasattr(module, 'device_configs'):
        # Placeholder marking "not fetched yet"; replaced by the config string below.
        module.device_configs = {}
    elif module.device_configs != {}:
        # Cache hit: a previous call already stored the configuration string.
        return module.device_configs
    connection = get_connection(module)
    out = connection.get_config()
    cfg = to_text(out, errors='surrogate_then_replace').strip()
    # Cache the decoded configuration on the module object for later calls.
    module.device_configs = cfg
    return cfg
def load_config(module, commands):
    """Apply a list of commands to a device.
    Given a list of commands apply them to the device to modify the
    configuration in bulk.
    Args:
        module: A valid AnsibleModule instance.
        commands: Iterable of command strings.
    Returns:
        None
    """
    # Push the whole batch through the cached CLI connection in one call.
    get_connection(module).edit_config(commands)
| [
"wk@sydorenko.org.ua"
] | wk@sydorenko.org.ua |
e3a6d3a7c141c5ff1c7cc9d862462fb3c5c9136b | c8a6246f1695521c9acb0eb1ba8552c7f1917ce7 | /provider/manga.py | 84d1514624cb497c704deed06f0e64e1137d7b1a | [
"Apache-2.0",
"MIT",
"CC-BY-4.0"
] | permissive | wafle/ebedke | 5a391c78e94f056a7aa8f7dda40a5a1bbad3c96b | 94d6a3431b674aafc00d88826307dcb74bde3943 | refs/heads/master | 2020-04-06T11:17:44.297396 | 2019-01-27T21:08:42 | 2019-01-27T21:08:42 | 157,411,406 | 0 | 0 | NOASSERTION | 2018-11-13T16:31:05 | 2018-11-13T16:31:05 | null | UTF-8 | Python | false | false | 644 | py | from datetime import datetime as dt, timedelta
from provider.utils import get_dom, on_workdays
URL = "http://mangacowboy.hu/"
@on_workdays
def getMenu(today):
    """Scrape today's menu lines from the Manga weekly-menu page.

    Returns an empty list when no entry matches today's date.
    """
    date = today.strftime("%Y. %m. %d.")
    selector = (
        f'//section[@id="weekly_menu"]/ul/li[.//time[contains(text(), "{date}")]]'
        '//div[@class="weeklyMenuPreview-content"]'
    )
    matches = get_dom(URL).xpath(selector)
    if not matches:
        return []
    return list(matches[0].xpath("./p/text()"))
# Provider descriptor consumed by the ebedke menu framework.
menu = {
    'name': 'Manga',
    'id': 'mc',
    'url': URL,
    'get': getMenu,
    'ttl': timedelta(hours=23),  # cache lifetime: re-fetch roughly once a day
    'cards': ['szep', 'erzs']  # accepted meal-card ids -- presumably SZEP/Erzsebet; verify
}
"ijanos@gmail.com"
] | ijanos@gmail.com |
a55e1d3310d8edf5e2ef9a5d7119e535b1785777 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_bookstores.py | c045d55bc6dc153405204c67df564094fb1eb46b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _BOOKSTORES():
def __init__(self,):
self.name = "BOOKSTORES"
self.definitions = bookstore
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bookstore']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9a1ab659558877fc943ccdb9a204d0b3ea6f68aa | 683417cb26b5a4d5b17b437eb49aa4fdbc77de72 | /src/nameless/cli.py | 591c249cbfaa60b8431e5246476eba4deed5a2fd | [
"BSD-2-Clause"
] | permissive | admdev8/python-nameless | 1c36286b0a2f308c4fdc43e05abf5ec403f91c97 | 838e1f0d90c9a33abba820ffd05beb39b2ef6763 | refs/heads/master | 2023-04-15T18:19:59.065808 | 2020-08-06T10:57:15 | 2020-08-06T10:57:15 | 292,904,530 | 0 | 0 | BSD-2-Clause | 2023-04-04T01:54:02 | 2020-09-04T17:10:41 | null | UTF-8 | Python | false | false | 917 | py | """
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mnameless` python will execute
``__main__.py`` as a script. That means there won't be any
``nameless.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``nameless.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import argparse

parser = argparse.ArgumentParser(description='Command description.')
parser.add_argument(
    'names',
    metavar='NAME',
    nargs=argparse.ZERO_OR_MORE,
    help="A name of something.",
)


def main(args=None):
    """Parse *args* (default: sys.argv) and print the collected names."""
    namespace = parser.parse_args(args=args)
    print(namespace.names)
| [
"contact@ionelmc.ro"
] | contact@ionelmc.ro |
0609114ce349631e21e78619f2e13aaeae8803f9 | 06933693ff601156402dc1c0a424cf292bf0c6ed | /home/management/commands/load_initial_data.py | 9cb5d99213107dee3c487d2b72e77805191def94 | [] | no_license | crowdbotics-apps/onboard-5295 | 77f6daad336d7a14c296cf7015147d711d412505 | 45cd684a02613a2527e52faadf96a38991b76987 | refs/heads/master | 2022-12-14T23:39:31.154518 | 2019-06-27T23:42:07 | 2019-06-27T23:42:07 | 194,176,736 | 0 | 0 | null | 2022-12-08T20:50:16 | 2019-06-27T23:41:28 | Python | UTF-8 | Python | false | false | 719 | py |
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
    # HTML body for the seeded HomePage record (rendered as-is by the template).
    homepage_body = """
    <h1 class="display-4 text-center">OnBoard.</h1>
    <p class="lead">
    This is the sample application created and deployed from the crowdbotics slack app. You can
    view list of packages selected for this application below
    </p>"""
    customtext_title = 'OnBoard.'
    # Seed one CustomText row and one HomePage row; assumes empty tables --
    # repeated runs create duplicate rows (TODO confirm intended usage).
    CustomText.objects.create(title=customtext_title)
    HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
    # Management command entry point: `python manage.py load_initial_data`.
    can_import_settings = True
    help = 'Load initial data to db'
    def handle(self, *args, **options):
        # Invoked by Django's management framework; delegates to the seeder above.
        load_initial_data()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ac8854b98b82c5621e509614b257be316cedf096 | 066ee4df594a5dc90335d271b9d5a1b1e2a4d34c | /src/lib/wtforms/ext/i18n/form.py | 60544824f90cb1b77f8f92b83f9b91d28f78b627 | [] | permissive | ychen820/microblog | a2d82447525325ec58285c2e5db58b79cceaca1b | d379afa2db3582d5c3be652165f0e9e2e0c154c6 | refs/heads/master | 2021-01-20T05:58:48.424357 | 2015-04-28T22:03:09 | 2015-04-28T22:03:09 | 32,948,331 | 0 | 2 | BSD-3-Clause | 2020-07-25T05:04:35 | 2015-03-26T19:45:07 | Python | UTF-8 | Python | false | false | 1,259 | py | from wtforms import form
from wtforms.ext.i18n.utils import get_translations
translations_cache = {}
class Form(form.Form):
    """
    Base class for a simple localized WTForms form.

    Translations are looked up with the stdlib gettext machinery; when no
    languages are configured, the locale information from the environment
    is used instead.

    Languages may be given in priority order either as a class variable::

        LANGUAGES = ['en_GB', 'en']

    or by passing ``LANGUAGES=`` to the form constructor.

    Translations objects are cached per language tuple so that repeated
    instantiations with the same languages reuse the same object.
    """
    LANGUAGES = None

    def __init__(self, *args, **kwargs):
        if 'LANGUAGES' in kwargs:
            # Per-instance override of the class-level language list.
            self.LANGUAGES = kwargs.pop('LANGUAGES')
        super(Form, self).__init__(*args, **kwargs)

    def _get_translations(self):
        # Normalize to a hashable cache key (None = "use the environment").
        languages = tuple(self.LANGUAGES) if self.LANGUAGES else None
        try:
            return translations_cache[languages]
        except KeyError:
            translations = get_translations(languages)
            translations_cache[languages] = translations
            return translations
| [
"ychen207@binghamton.edu"
] | ychen207@binghamton.edu |
29254a283d32280f056293e7391c7261bde9d15c | 3655215852ee2fb1864dbfa1ce924290a2c4f4b9 | /Tuple_operation.py | 43fe5822ba8f491fdff046af229963119819a470 | [] | no_license | shubhamkanade/Niyander-Python | 8b318df2b8ae33b943dcf83eb01c00577914ca59 | 2b6a4780707e26852aa950d7c3e54be1f3b2080b | refs/heads/main | 2023-06-16T03:44:07.833619 | 2021-07-06T03:09:16 | 2021-07-06T03:09:16 | 383,021,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py |
# NOTE: Python 2 syntax (print statements) -- this script does not run on Python 3.
Tuple=('a',)  # single-element tuple: the trailing comma is required
Tuple1="a","b","c","d"  # parentheses are optional when packing a tuple
Tuple2=(1,2,3,4,5)
print Tuple1+Tuple2  # '+' concatenates into a new tuple ('a','b','c','d',1,2,3,4,5)
print Tuple
del Tuple  # unbinds the name only; tuples themselves are immutable
print Tuple # deliberately raises NameError: name 'Tuple' is not defined (deleted above)
| [
"shubhamkanade98@gmail.com"
] | shubhamkanade98@gmail.com |
e623bae8fc46dc8fb0e597e972905d9563a89e96 | fcd965c9333ee328ec51bc41f5bc0300cc06dc33 | /LeetCode/Blind 75/Dynamic Programming/1D Dynamic/91_decode_ways.py | d0549868e32f709b896aa8002f90fd411e175c0e | [] | no_license | henrylin2008/Coding_Problems | 699bb345481c14dc3faa8bab439776c7070a1cb0 | 281067e872f73a27f76ae10ab0f1564916bddd28 | refs/heads/master | 2023-01-11T11:55:47.936163 | 2022-12-24T07:50:17 | 2022-12-24T07:50:17 | 170,151,972 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,074 | py | # 91. Decode Ways
# Link: https://leetcode.com/problems/decode-ways/
# Medium
# A message containing letters from A-Z can be encoded into numbers using the following mapping:
#
# 'A' -> "1"
# 'B' -> "2"
# ...
# 'Z' -> "26"
# To decode an encoded message, all the digits must be grouped then mapped back into letters using the reverse of the
# mapping above (there may be multiple ways). For example, "11106" can be mapped into:
#
# "AAJF" with the grouping (1 1 10 6)
# "KJF" with the grouping (11 10 6)
# Note that the grouping (1 11 06) is invalid because "06" cannot be mapped into 'F' since "6" is different from "06".
#
# Given a string s containing only digits, return the number of ways to decode it.
#
# The test cases are generated so that the answer fits in a 32-bit integer.
#
#
#
# Example 1:
# Input: s = "12"
# Output: 2
# Explanation: "12" could be decoded as "AB" (1 2) or "L" (12).
#
# Example 2:
# Input: s = "226"
# Output: 3
# Explanation: "226" could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2 2 6).
#
# Example 3:
# Input: s = "06"
# Output: 0
# Explanation: "06" cannot be mapped to "F" because of the leading zero ("6" is different from "06").
#
#
# Constraints:
#
# 1 <= s.length <= 100
# s contains only digits and may contain leading zero(s).
# Note: can cur char be decoded in one or two ways? Recursion -> cache -> iterative dp solution, a lot of edge cases
# to determine, 52, 31, 29, 10, 20 only decoded one way, 11, 26 decoded two ways
class Solution:
    # Iterative bottom-up DP, equivalent to the memoised recursion:
    #   ways(i) = ways(i+1)            [decode one digit, if s[i] != '0']
    #           + ways(i+2)            [decode two digits, if 10 <= s[i:i+2] <= 26]
    # Time: O(n)
    # Space: O(1) -- only two rolling values instead of a cache
    def numDecodings(self, s: str) -> int:
        """
        :type s: str
        :rtype: int
        """
        ways_next = 1   # ways(i + 1); ways(len(s)) == 1 for the empty suffix
        ways_after = 0  # ways(i + 2)
        for i in range(len(s) - 1, -1, -1):
            if s[i] == "0":
                # A leading zero cannot start any letter code.
                current = 0
            else:
                current = ways_next
                two_digit_ok = i + 1 < len(s) and (
                    s[i] == "1" or (s[i] == "2" and s[i + 1] in "0123456")
                )
                if two_digit_ok:
                    current += ways_after
            ways_next, ways_after = current, ways_next
        return ways_next
| [
"henrylin2008@yahoo.com"
] | henrylin2008@yahoo.com |
31e19091980ddd7815391fd7ccbb4d6062a92ac5 | 571a89f94f3ebd9ec8e6b618cddb7d05811e0d62 | /abc177/c/main.py | 0341cb88173c4b0336bda72891d1c6cad1f960a8 | [] | no_license | ryu19-1/atcoder_python | 57de9e1db8ff13a107b5861f8f6a231e40366313 | cc24b3c2895aad71d40cefbb8e2893dc397b8f4f | refs/heads/master | 2023-05-10T05:32:16.507207 | 2021-05-19T17:48:10 | 2021-05-19T17:48:10 | 368,954,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | #!/usr/bin/env python3
import sys
from collections import deque, Counter
from heapq import heappop, heappush
from bisect import bisect_right
from itertools import accumulate
sys.setrecursionlimit(10**6)
INF = 10**12
m = 10**9 + 7
def main():
    # Computes sum over all pairs i<j of A_i*A_j, modulo m = 10^9+7, using
    # the identity  2 * sum_{i<j} A_i A_j = (sum A_i)^2 - sum A_i^2.
    N = int(input())
    A = list(map(int, input().split()))
    ans = 0
    for i in range(N):
        ans += A[i]
        ans %= m
    ans = ans**2
    ans %= m
    for i in range(N):
        ans -= pow(A[i],2,m)
        ans %= m
    # Division by 2 modulo the prime m via Fermat's little theorem:
    # 2^(m-2) == 2^-1 (mod m).
    ans = ans * pow(2,m-2,m) % m
    print(ans)
if __name__ == "__main__":
    main()
"ryu1007kami@gmail.com"
] | ryu1007kami@gmail.com |
75a8e3640a339ab44e8583e808b41f364ce368d4 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_plat_shop_map_vo.py | 8fd6e86688a996ef5f7a879a93e7551fba632853 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 656 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.subshopopenapi.model.plat_shop_map_vo import PlatShopMapVo
class TestPlatShopMapVo(unittest.TestCase):
    """PlatShopMapVo unit test stubs"""
    # Auto-generated stub (OpenAPI Generator); flesh out testPlatShopMapVo once
    # the model's mandatory attributes are known.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testPlatShopMapVo(self):
        """Test PlatShopMapVo"""
        # FIXME: construct object with mandatory attributes with example values
        # model = PlatShopMapVo() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"yangxp@YangxpdeMacBook-Pro.local"
] | yangxp@YangxpdeMacBook-Pro.local |
4bc7a4f38cde5846e34d476537c242652d64db22 | c71fc2c91b119eeac9eafe2038b0e42ef11098f7 | /new/trucks/migrations/0005_auto_20190124_1843.py | 1af8d75d9619bbc06040522809a2a6f4308001c1 | [] | no_license | abhinavsharma629/Reminder-System | c3848fda4947506bcd35759c2966392b34e8ef6a | fb95e3b8ae8ce6eee116fb0a0aac9187189d515d | refs/heads/master | 2020-04-20T08:47:40.068485 | 2019-02-01T19:36:20 | 2019-02-01T19:36:20 | 168,749,751 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | # Generated by Django 2.1.5 on 2019-01-24 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (makemigrations); avoid hand-editing
    # once it has been applied to any database.
    dependencies = [
        ('trucks', '0004_remove_notifications_boolean1'),
    ]
    operations = [
        migrations.AddField(
            model_name='notifications',
            name='fitness_id',
            # NOTE(review): integer default (-1) on a CharField -- Django will
            # store the string "-1"; confirm this is intended.
            field=models.CharField(default=-1, max_length=30),
        ),
        migrations.AddField(
            model_name='notifications',
            name='insurance_id',
            field=models.PositiveIntegerField(default=1),
        ),
        migrations.AlterField(
            model_name='notifications',
            name='truck_number',
            field=models.CharField(max_length=14),
        ),
    ]
| [
"abhinavsharma629@gmail.com"
] | abhinavsharma629@gmail.com |
d753f4c532b52b9485026a9353250a6782eec47b | 1d6abe27a802d53f7fbd6eb5e59949044cbb3b98 | /tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py | 4bf15da435814ac8a2af97f42807594c54a819ee | [
"Apache-2.0"
] | permissive | STSjeerasak/tensorflow | 6bc8bf27fb74fd51a71150f25dc1127129f70222 | b57499d4ec0c24adc3a840a8e7e82bd4ce0d09ed | refs/heads/master | 2022-12-20T20:32:15.855563 | 2020-09-29T21:22:35 | 2020-09-29T21:29:31 | 299,743,927 | 5 | 1 | Apache-2.0 | 2020-09-29T21:38:19 | 2020-09-29T21:38:18 | null | UTF-8 | Python | false | false | 5,270 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers.preprocessing import normalization
from tensorflow.python.keras.layers.preprocessing import normalization_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.platform import test
def get_layer_class():
  """Return the Normalization layer class matching the execution mode."""
  eager = context.executing_eagerly()
  return normalization.Normalization if eager else normalization_v1.Normalization
def _get_layer_computation_test_cases():
  """Build parameterized cases: (adapt data, axis, test input, expected output),
  each crossed with use_dataset in (True, False)."""
  test_cases = ({
      "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
      "axis": -1,
      "test_data": np.array([[1.], [2.], [3.]], np.float32),
      "expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
      "testcase_name": "2d_single_element"
  }, {
      "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
      "axis": None,
      "test_data": np.array([[1.], [2.], [3.]], np.float32),
      "expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
      "testcase_name": "2d_single_element_none_axis"
  }, {
      "adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32),
      "axis": None,
      "test_data": np.array([[1.], [2.], [3.]], np.float32),
      "expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
      "testcase_name": "2d_single_element_none_axis_flat_data"
  }, {
      "adapt_data":
          np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
                   np.float32),
      "axis":
          1,
      "test_data":
          np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
                   np.float32),
      "expected":
          np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]],
                    [[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]],
                   np.float32),
      "testcase_name":
          "3d_internal_axis"
  }, {
      "adapt_data":
          np.array(
              [[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]],
              np.float32),
      "axis": (1, 2),
      "test_data":
          np.array(
              [[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]],
              np.float32),
      "expected":
          np.array(
              [[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]],
              np.float32),
      "testcase_name":
          "3d_multiple_axis"
  })
  crossed_test_cases = []
  # Cross above test cases with use_dataset in (True, False)
  for use_dataset in (True, False):
    for case in test_cases:
      # Copy so the "_with_dataset" rename does not leak into the other cross.
      case = case.copy()
      if use_dataset:
        case["testcase_name"] = case["testcase_name"] + "_with_dataset"
      case["use_dataset"] = use_dataset
      crossed_test_cases.append(case)
  return crossed_test_cases
# Each generated test runs under every distribution strategy crossed with
# eager/graph mode and every layer-computation case defined above.
@ds_combinations.generate(
    combinations.times(
        combinations.combine(
            distribution=strategy_combinations.all_strategies,
            mode=["eager", "graph"]), _get_layer_computation_test_cases()))
class NormalizationTest(keras_parameterized.TestCase,
                        preprocessing_test_utils.PreprocessingLayerTest):
  def test_layer_computation(self, distribution, adapt_data, axis, test_data,
                             use_dataset, expected):
    """Adapt a Normalization layer under `distribution`; check predicted output."""
    # Leave the batch dimension unknown; remaining dims come from the test data.
    input_shape = tuple([None for _ in range(test_data.ndim - 1)])
    if use_dataset:
      # Keras APIs expect batched datasets
      adapt_data = dataset_ops.Dataset.from_tensor_slices(adapt_data).batch(
          test_data.shape[0] // 2)
      test_data = dataset_ops.Dataset.from_tensor_slices(test_data).batch(
          test_data.shape[0] // 2)
    with distribution.scope():
      input_data = keras.Input(shape=input_shape)
      layer = get_layer_class()(axis=axis)
      layer.adapt(adapt_data)
      output = layer(input_data)
      model = keras.Model(input_data, output)
      output_data = model.predict(test_data)
      self.assertAllClose(expected, output_data)
if __name__ == "__main__":
  test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
ef358f189d6d0f154253c9a6be092c1bde2adf0f | 4085874e861c40f94b0ba82fdbff273762c26eb2 | /test/functional/p2p_permissions.py | 058d216bc61d9623ec3c1c5d214f10d8cf5c2b01 | [
"MIT"
] | permissive | zortcoin/zortcoin | 82632c9ae46d57ef77ea7e79ab000f2e002dfaf3 | 379a1d01cc40c5c8ea50bdc41eded0dfbd1724f1 | refs/heads/master | 2022-07-27T20:33:34.543723 | 2021-08-25T16:25:17 | 2021-08-25T16:25:17 | 380,851,757 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,160 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Zortcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p permission message.
Test that permissions are correctly calculated and applied
"""
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.messages import (
CTransaction,
CTxInWitness,
FromHex,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.test_node import ErrorMatch
from test_framework.test_framework import ZortcoinTestFramework
from test_framework.util import (
assert_equal,
p2p_port,
)
class P2PPermissionsTests(ZortcoinTestFramework):
    def set_test_params(self):
        # Two nodes on a fresh chain: node0 mines/relays, node1 is restarted
        # repeatedly with different -whitelist/-whitebind permission flags.
        self.num_nodes = 2
        self.setup_clean_chain = True
    def run_test(self):
        self.check_tx_relay()
        self.checkpermission(
            # default permissions (no specific permissions)
            ["-whitelist=127.0.0.1"],
            # Make sure the default values in the command line documentation match the ones here
            ["relay", "noban", "mempool", "download"],
            True)
        self.checkpermission(
            # check without deprecatedrpc=whitelisted
            ["-whitelist=127.0.0.1"],
            # Make sure the default values in the command line documentation match the ones here
            ["relay", "noban", "mempool", "download"],
            None)
        self.checkpermission(
            # no permission (even with forcerelay)
            ["-whitelist=@127.0.0.1", "-whitelistforcerelay=1"],
            [],
            False)
        self.checkpermission(
            # relay permission removed (no specific permissions)
            ["-whitelist=127.0.0.1", "-whitelistrelay=0"],
            ["noban", "mempool", "download"],
            True)
        self.checkpermission(
            # forcerelay and relay permission added
            # Legacy parameter interaction which set whitelistrelay to true
            # if whitelistforcerelay is true
            ["-whitelist=127.0.0.1", "-whitelistforcerelay"],
            ["forcerelay", "relay", "noban", "mempool", "download"],
            True)
        # Let's make sure permissions are merged correctly
        # For this, we need to use whitebind instead of bind
        # by modifying the configuration file.
        ip_port = "127.0.0.1:{}".format(p2p_port(1))
        self.replaceinconfig(1, "bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)
        self.checkpermission(
            ["-whitelist=noban@127.0.0.1"],
            # Check parameter interaction forcerelay should activate relay
            ["noban", "bloomfilter", "forcerelay", "relay", "download"],
            False)
        self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")
        self.checkpermission(
            # legacy whitelistrelay should be ignored
            ["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"],
            ["noban", "mempool", "download"],
            False)
        self.checkpermission(
            # check without deprecatedrpc=whitelisted
            ["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"],
            ["noban", "mempool", "download"],
            None)
        self.checkpermission(
            # legacy whitelistforcerelay should be ignored
            ["-whitelist=noban,mempool@127.0.0.1", "-whitelistforcerelay"],
            ["noban", "mempool", "download"],
            False)
        self.checkpermission(
            # missing mempool permission to be considered legacy whitelisted
            ["-whitelist=noban@127.0.0.1"],
            ["noban", "download"],
            False)
        self.checkpermission(
            # all permission added
            ["-whitelist=all@127.0.0.1"],
            ["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download", "addr"],
            False)
        # Malformed permission flags must abort node startup with a clear error.
        self.stop_node(1)
        self.nodes[1].assert_start_raises_init_error(["-whitelist=oopsie@127.0.0.1"], "Invalid P2P permission", match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)
    def check_tx_relay(self):
        """Verify forcerelay behaviour: node1 force-relays a transaction that is
        already in its mempool when it comes from a forcerelay peer, but never
        relays an invalid (non-mempool) transaction from that peer."""
        block_op_true = self.nodes[0].getblock(self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2WSH_OP_TRUE)[0])
        self.sync_all()
        self.log.debug("Create a connection from a forcerelay peer that rebroadcasts raw txs")
        # A test framework p2p connection is needed to send the raw transaction directly. If a full node was used, it could only
        # rebroadcast via the inv-getdata mechanism. However, even for forcerelay connections, a full node would
        # currently not request a txid that is already in the mempool.
        self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
        p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())
        self.log.debug("Send a tx from the wallet initially")
        tx = FromHex(
            CTransaction(),
            self.nodes[0].createrawtransaction(
                inputs=[{
                    'txid': block_op_true['tx'][0],
                    'vout': 0,
                }], outputs=[{
                    ADDRESS_BCRT1_P2WSH_OP_TRUE: 5,
                }]),
        )
        # Spend the anyone-can-spend coinbase by supplying an OP_TRUE witness.
        tx.wit.vtxinwit = [CTxInWitness()]
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        txid = tx.rehash()
        self.log.debug("Wait until tx is in node[1]'s mempool")
        p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
        self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool")
        self.connect_nodes(1, 0)
        with self.nodes[1].assert_debug_log(["Force relaying tx {} from peer=0".format(txid)]):
            p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
            self.wait_until(lambda: txid in self.nodes[0].getrawmempool())
        self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
        # Bumping the output value invalidates the tx (conflicts with the
        # mempool version of the same input).
        tx.vout[0].nValue += 1
        txid = tx.rehash()
        # Send the transaction twice. The first time, it'll be rejected by ATMP because it conflicts
        # with a mempool transaction. The second time, it'll be in the recentRejects filter.
        p2p_rebroadcast_wallet.send_txs_and_test(
            [tx],
            self.nodes[1],
            success=False,
            reject_reason='{} from peer=0 was not accepted: txn-mempool-conflict'.format(txid)
        )
        p2p_rebroadcast_wallet.send_txs_and_test(
            [tx],
            self.nodes[1],
            success=False,
            reject_reason='Not relaying non-mempool transaction {} from forcerelay peer=0'.format(txid)
        )
    def checkpermission(self, args, expectedPermissions, whitelisted):
        """Restart node1 with `args` and assert its inbound peer reports exactly
        `expectedPermissions`.  `whitelisted` is the expected value of the
        legacy 'whitelisted' getpeerinfo field; None means the field must be
        absent (no -deprecatedrpc=whitelisted)."""
        if whitelisted is not None:
            args = [*args, '-deprecatedrpc=whitelisted']
        self.restart_node(1, args)
        self.connect_nodes(0, 1)
        peerinfo = self.nodes[1].getpeerinfo()[0]
        if whitelisted is None:
            assert 'whitelisted' not in peerinfo
        else:
            assert_equal(peerinfo['whitelisted'], whitelisted)
        assert_equal(len(expectedPermissions), len(peerinfo['permissions']))
        for p in expectedPermissions:
            if not p in peerinfo['permissions']:
                raise AssertionError("Expected permissions %r is not granted." % p)
    def replaceinconfig(self, nodeid, old, new):
        """Rewrite node `nodeid`'s config file, replacing substring `old` with `new`."""
        with open(self.nodes[nodeid].zortcoinconf, encoding="utf8") as f:
            newText = f.read().replace(old, new)
        with open(self.nodes[nodeid].zortcoinconf, 'w', encoding="utf8") as f:
            f.write(newText)
if __name__ == '__main__':
P2PPermissionsTests().main()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
f1d0ba5bd2156ebedefeb9b9ed9f945bc33c6384 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-config/aliyunsdkconfig/request/v20200907/UpdateConfigRuleRequest.py | 675e72a0b4aec9655860d7318e001d677ada76f5 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 4,698 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkconfig.endpoint import endpoint_data
class UpdateConfigRuleRequest(RpcRequest):
	"""RPC request object for the Cloud Config ``UpdateConfigRule`` action
	(API version 2020-09-07).  Auto-generated SDK code: each rule attribute
	is exposed through a get_/set_ accessor pair that reads/writes a POST
	body parameter of the same name."""
	def __init__(self):
		RpcRequest.__init__(self, 'Config', '2020-09-07', 'UpdateConfigRule')
		self.set_method('POST')
		# Endpoint routing data is only attached when the installed core
		# library defines these attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_ConfigRuleId(self): # String
		return self.get_body_params().get('ConfigRuleId')
	def set_ConfigRuleId(self, ConfigRuleId): # String
		self.add_body_params('ConfigRuleId', ConfigRuleId)
	def get_TagKeyScope(self): # String
		return self.get_body_params().get('TagKeyScope')
	def set_TagKeyScope(self, TagKeyScope): # String
		self.add_body_params('TagKeyScope', TagKeyScope)
	def get_ClientToken(self): # String
		return self.get_body_params().get('ClientToken')
	def set_ClientToken(self, ClientToken): # String
		self.add_body_params('ClientToken', ClientToken)
	def get_ResourceTypesScope(self): # Array
		return self.get_body_params().get('ResourceTypesScope')
	def set_ResourceTypesScope(self, ResourceTypesScope): # Array
		# Array parameters are flattened to ResourceTypesScope.1, .2, ...
		for index1, value1 in enumerate(ResourceTypesScope):
			self.add_body_params('ResourceTypesScope.' + str(index1 + 1), value1)
	def get_Description(self): # String
		return self.get_body_params().get('Description')
	def set_Description(self, Description): # String
		self.add_body_params('Description', Description)
	def get_ConfigRuleTriggerTypes(self): # String
		return self.get_body_params().get('ConfigRuleTriggerTypes')
	def set_ConfigRuleTriggerTypes(self, ConfigRuleTriggerTypes): # String
		self.add_body_params('ConfigRuleTriggerTypes', ConfigRuleTriggerTypes)
	def get_TagValueScope(self): # String
		return self.get_body_params().get('TagValueScope')
	def set_TagValueScope(self, TagValueScope): # String
		self.add_body_params('TagValueScope', TagValueScope)
	def get_RegionIdsScope(self): # String
		return self.get_body_params().get('RegionIdsScope')
	def set_RegionIdsScope(self, RegionIdsScope): # String
		self.add_body_params('RegionIdsScope', RegionIdsScope)
	def get_RiskLevel(self): # Integer
		return self.get_body_params().get('RiskLevel')
	def set_RiskLevel(self, RiskLevel): # Integer
		self.add_body_params('RiskLevel', RiskLevel)
	def get_ResourceGroupIdsScope(self): # String
		return self.get_body_params().get('ResourceGroupIdsScope')
	def set_ResourceGroupIdsScope(self, ResourceGroupIdsScope): # String
		self.add_body_params('ResourceGroupIdsScope', ResourceGroupIdsScope)
	def get_InputParameters(self): # String
		return self.get_body_params().get('InputParameters')
	def set_InputParameters(self, InputParameters): # String
		self.add_body_params('InputParameters', InputParameters)
	def get_ConfigRuleName(self): # String
		return self.get_body_params().get('ConfigRuleName')
	def set_ConfigRuleName(self, ConfigRuleName): # String
		self.add_body_params('ConfigRuleName', ConfigRuleName)
	def get_TagKeyLogicScope(self): # String
		return self.get_body_params().get('TagKeyLogicScope')
	def set_TagKeyLogicScope(self, TagKeyLogicScope): # String
		self.add_body_params('TagKeyLogicScope', TagKeyLogicScope)
	def get_MaximumExecutionFrequency(self): # String
		return self.get_body_params().get('MaximumExecutionFrequency')
	def set_MaximumExecutionFrequency(self, MaximumExecutionFrequency): # String
		self.add_body_params('MaximumExecutionFrequency', MaximumExecutionFrequency)
	def get_ExcludeResourceIdsScope(self): # String
		return self.get_body_params().get('ExcludeResourceIdsScope')
	def set_ExcludeResourceIdsScope(self, ExcludeResourceIdsScope): # String
		self.add_body_params('ExcludeResourceIdsScope', ExcludeResourceIdsScope)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
b859d46b4f18efbfc0b5398705cd8d08f1294d73 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03738/s549313659.py | 12a3cd23e0f77bb5e3c9bf8518cad064d24634fd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | a = int(input())
b = int(input())
# Report how the first integer compares to the second.
comparisons = ((a > b, 'GREATER'), (a < b, 'LESS'), (True, 'EQUAL'))
verdict = next(label for holds, label in comparisons if holds)
print(verdict)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
728ca7506be2deabb34816b5fe10cfbb388e53ee | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_thoughts.py | ce645c20272a682102eb7f3134c14f62de160bfa | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
# class header
class _THOUGHTS():
def __init__(self,):
self.name = "THOUGHTS"
self.definitions = thought
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['thought']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2c716b8c2b1ae34ef0a61b9c3da1bd67c44ace48 | de27e6d143f40d5948244597b861d522a9a272f6 | /fjord/heartbeat/migrations/0002_auto_20150213_0947.py | a4f74d469a7dc0c38c5f0fcaed78e8aba1a95b7d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mozilla/fjord | 7f31af6dd80869ca856f8a02ff10e72c81685368 | 0fcb81e6a5edaf42c00c64faf001fc43b24e11c0 | refs/heads/master | 2023-07-03T18:20:01.651759 | 2017-01-10T20:12:33 | 2017-01-10T20:12:33 | 5,197,539 | 18 | 22 | null | 2016-08-22T14:56:11 | 2012-07-26T21:25:00 | Python | UTF-8 | Python | false | false | 784 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the heartbeat app: updates the
    help_text of Survey.description and makes Survey.name unique with
    updated help_text.  Do not edit by hand."""
    dependencies = [
        ('heartbeat', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='survey',
            name='description',
            field=models.TextField(default=b'', help_text='Informal description of the survey so we can tell them apart', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='survey',
            name='name',
            field=models.CharField(help_text='Unique name for the survey. e.g. heartbeat-question-1', unique=True, max_length=100),
            preserve_default=True,
        ),
    ]
| [
"willkg@mozilla.com"
] | willkg@mozilla.com |
08ca57f7bfb8b2b56205a27f3306087d7ff3dfbb | 63bf6161532eefa72aa3be8b01cde601b08507dc | /python-mapping-example/fhir_model_generator/model/namingsystem.py | e7edf545c950a359c609ec28b6f86a71fd632773 | [
"Apache-2.0"
] | permissive | Healthedata1/mFHIR | 4ef370b87e03e973918e5683977d32fe262655bc | 1b4ea441cfa08b661416a3badedf7e90f2809163 | refs/heads/master | 2022-12-10T21:07:03.948406 | 2021-06-18T01:58:23 | 2021-06-18T01:58:23 | 129,964,251 | 9 | 5 | null | 2022-12-09T05:23:54 | 2018-04-17T20:57:15 | HTML | UTF-8 | Python | false | false | 1,832 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/NamingSystem) on 2020-02-10.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .contactdetail import ContactDetail
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .period import Period
from .usagecontext import UsageContext
@dataclass
class NamingSystemUniqueId(BackboneElement):
    """ Unique identifiers used for system.
    Indicates how the system may be identified when referenced in electronic
    exchange.
    """
    resource_type: ClassVar[str] = "NamingSystemUniqueId"
    # NOTE(review): `type` and `value` default to None although the generator
    # marks them required (non-Optional annotation) — presumably validated
    # elsewhere by the FHIR model machinery; confirm before relying on it.
    type: str = None
    value: str = None
    preferred: Optional[bool] = None
    comment: Optional[str] = None
    period: Optional[Period] = None  # when this identifier is/was valid
@dataclass
class NamingSystem(DomainResource):
    """ System of unique identification.
    A curated namespace that issues unique symbols within that namespace for
    the identification of concepts, people, devices, etc. Represents a
    "System" used within the Identifier and Coding data types.
    """
    resource_type: ClassVar[str] = "NamingSystem"
    # NOTE(review): the first four fields default to None despite required
    # (non-Optional) annotations — generated code; validation presumably
    # happens in the FHIR base classes.
    name: str = None
    status: str = None
    kind: str = None
    date: FHIRDate = None
    publisher: Optional[str] = None
    contact: Optional[List[ContactDetail]] = None
    responsible: Optional[str] = None
    type: Optional[CodeableConcept] = None
    description: Optional[str] = None
    useContext: Optional[List[UsageContext]] = None
    jurisdiction: Optional[List[CodeableConcept]] = None
    usage: Optional[str] = None
uniqueId: List[NamingSystemUniqueId] = field(default_factory=list) | [
"ehaas@healthedatainc.com"
] | ehaas@healthedatainc.com |
fe6c635055b56eee3c9d2f141ce391f88f17af89 | 87119ec9cea61be175f2a1f16f0e37d060cde9af | /django/myproject/guestbook/views.py | 10b4c58f64dd2648adb10abbea42488375fd7b7c | [] | no_license | atkins126/sample_nullpobug | bce9c1bf2a31921ac665a18dc2a62be3bdef493e | b2ba65f42f717f0ceb2cf14fe28e90c460bfde87 | refs/heads/master | 2023-02-16T11:37:05.290069 | 2021-01-18T14:43:40 | 2021-01-18T14:43:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # coding: utf-8
from django.views.generic import CreateView
from django.core.urlresolvers import reverse
from guestbook.models import Greeting
from guestbook.forms import GreetingForm
class IndexView(CreateView):
    u"""
    Class-based generic view that renders the guestbook form from a
    template and saves a submitted entry to the Greeting model.
    (Translated from the original Japanese docstring.)
    """
    model = Greeting
    form_class = GreetingForm
    def get_success_url(self):
        # Redirect back to the guestbook index after a successful post.
        return reverse('guestbook:index')
    def get_context_data(self, **kwargs):
        # NOTE(review): builds the context from kwargs only, without calling
        # super().get_context_data() — presumably intentional, but confirm
        # that the template needs nothing from the parent mixins.
        context = kwargs
        context['greeting_list'] = Greeting.objects.all()
        return context
| [
"tokibito@gmail.com"
] | tokibito@gmail.com |
e795fe5a43ef8191430bc02b8bfe80d6b2af1e33 | d60ee49abaee6c74c5b777f8f112a7f75f71f029 | /transcriptome/variants/combine_indels/merge_callers.py | 4ad773516e591cc4ea2af75aac5c24231071fc5b | [] | no_license | ak352/melanomics | 41530f623b4bfdbd5c7b952debcb47622d1a8e88 | fc5e6fdb1499616fb25a8dc05259add8a65aeca0 | refs/heads/master | 2020-12-24T16:14:42.271416 | 2015-08-06T12:48:52 | 2015-08-06T12:48:52 | 18,439,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | import sys
for line in open(sys.argv[1]):
if line.startswith("##"):
print line.rstrip("\n")
elif line.startswith("#CHROM"):
line = line.rstrip("\n").split("\t")
line = line[0:9]
line.append(sys.argv[2])
print "\t".join(line)
else:
line = line.rstrip("\n").split("\t")
attribs = line[8].split(":")
num_callers_has_indel = 0
max_alternate_alleles = 0
for x in range(len(attribs)):
if attribs[x]=="GT":
#Since all positions are covered by some reads, all ./. are homozygous reference (0/0)
for sample in line[9:]:
sample = sample.split(":")
if sample[x]!="./.":
num_callers_has_indel += 1
alternate_alleles = set(sample[x].split("/"))
if "0" in alternate_alleles:
alternate_alleles.remove("0")
#Choose the merged genotype to be the one with the maximum different types of alleles - a sensitive strategy
if len(alternate_alleles) > max_alternate_alleles:
max_alternate_alleles = len(alternate_alleles)
consensus = ":".join(sample)
break
newline = line[0:9]
if num_callers_has_indel >= 2:
newline[6]="PASS"
else:
newline[6]="FAIL"
newline.append(consensus)
if num_callers_has_indel >= 2:
print "\t".join(newline)
| [
"ak@uni.fake"
] | ak@uni.fake |
8bc514d81e1bc5d9600495d88105cfc2b69996ab | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/scatter3d/error_y/_arrayminussrc.py | 3ac5aee82f37d414b78135b25b6c1c4ccf604ac0 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 435 | py | import _plotly_utils.basevalidators
class ArrayminussrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Generated validator for the `scatter3d.error_y.arrayminussrc` property
    (a data-source reference string); delegates all validation to the shared
    SrcValidator base class."""
    def __init__(
        self, plotly_name="arrayminussrc", parent_name="scatter3d.error_y", **kwargs
    ):
        super(ArrayminussrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
"noreply@github.com"
] | hugovk.noreply@github.com |
40439daa08c438b8507f12a556ecfd72a95a7444 | 284f2bfaabf91899211e56063026857c496965cf | /tuites/migrations/0001_initial.py | 8de3c24cc1b399b272364d1abd936290637e1a55 | [] | no_license | vanessa/building-tuirer | 7b56bb9791659fcd04942d2c84a393c3c226f8c4 | 61d85df7d120387700b2e449a6fde5fb9ca7cfaa | refs/heads/master | 2022-12-11T07:25:14.174448 | 2018-08-07T05:18:29 | 2018-08-07T05:18:29 | 142,210,249 | 18 | 0 | null | 2022-12-08T02:19:48 | 2018-07-24T20:35:34 | Python | UTF-8 | Python | false | false | 840 | py | # Generated by Django 2.0.7 on 2018-07-26 19:50
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Tuite model (280-char
    content, creation timestamp, FK to the configured user model).  Do not
    edit by hand."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Tuite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=280)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tuites', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"vanessa@vinta.com.br"
] | vanessa@vinta.com.br |
9f8f7a635445ea0cbc31c0a85db8d79724111967 | d1b44d58d4eaa845e1b460f338e61857ac00cd6f | /ch08/misung/ch8_2_misung.py | 656f723a7c00f993ec717b0705ca0a4f183727ba | [] | no_license | hyo-eun-kim/algorithm-study | 549ffe1d453ceede9075c1a8df55a67cf76bde00 | 1ca8298361b6a030d2569c06a34d955cc5e4b1bb | refs/heads/main | 2023-03-26T14:32:49.597667 | 2021-03-25T23:29:00 | 2021-03-25T23:29:00 | 301,707,534 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # 두 정렬 리스트의 병합
# Merge two sorted linked lists into one sorted list.
class Node:
    """Singly linked list node holding a value and a next pointer."""
    def __init__(self, val, next=None):
        self.val = val
        self.next = next
# Demo fixtures: 1 -> 2 -> 4 and 1 -> 3 -> 4
node1 = Node(1)
node2 = Node(2)
node3 = Node(4)
node1.next = node2
node2.next = node3
node4 = Node(1)
node5 = Node(3)
node6 = Node(4)
node4.next = node5
node5.next = node6
def mergeTwoLists(l1, l2):
    """Merge two sorted linked lists and return the merged head.

    Improvement over the original: iterative instead of recursive, so the
    recursion depth no longer grows with the combined list length (the
    recursive version hit Python's recursion limit on long lists).  Ties
    take the node from l1 first, exactly as before; node objects are
    relinked in place, not copied.
    """
    dummy = tail = Node(0)  # sentinel head; result starts at dummy.next
    while l1 is not None and l2 is not None:
        if l1.val <= l2.val:
            tail.next = l1
            l1 = l1.next
        else:
            tail.next = l2
            l2 = l2.next
        tail = tail.next
    # Append whatever remains of the non-exhausted list (may be None).
    tail.next = l1 if l1 is not None else l2
    return dummy.next
mergeTwoLists(node1, node4) | [
"noreply@github.com"
] | hyo-eun-kim.noreply@github.com |
c743c8ff68fe3bd53074f445c816621b0e9b75b1 | 700f9f9e319ebd26d2557d64ea3827808dfad2f5 | /tests/fixtures/test_contributors/content_04_expected.py | 0572936d0420acaa17e064c4a1f555556f4a9eb7 | [
"MIT"
] | permissive | elifesciences/elife-tools | 1b44e660e916a82ef8ff64dd5a6ee5506e517359 | bc16e7dd5d6245077e39f8561b99c9acd510ddf7 | refs/heads/develop | 2023-03-06T08:37:47.424282 | 2023-02-20T20:40:49 | 2023-02-20T20:40:49 | 30,274,058 | 13 | 11 | MIT | 2023-02-20T20:40:50 | 2015-02-04T01:14:41 | Python | UTF-8 | Python | false | false | 1,436 | py | # based on elife article 75374 which has a collab inside a collab
expected = [
{
"type": "author",
"group-author-key": "group-author-id1",
"collab": "the PRACTICAL consortium",
},
{
"type": "author non-byline",
"group-author-key": "group-author-id1",
"surname": "Eeles",
"given-names": "Rosalind A",
"affiliations": [
{
"institution": "The Institute of Cancer Research",
"country": "United Kingdom",
"city": "London",
"ror": "https://ror.org/043jzw605",
}
],
},
{
"type": "author non-byline",
"group-author-key": "group-author-id1",
"collab": "APCB BioResource (Australian Prostate Cancer BioResource)",
"affiliations": [
{
"institution": "Translational Research Institute",
"country": "Australia",
"city": "Brisbane",
}
],
},
{
"type": "author non-byline",
"group-author-key": "group-author-id1",
"surname": "Grönberg",
"given-names": "Henrik",
"affiliations": [
{
"institution": "Department of Medical Epidemiology and Biostatistics, Karolinska Institute",
"country": "Sweden",
"city": "Stockholm",
}
],
},
]
| [
"gnott@starglobal.ca"
] | gnott@starglobal.ca |
64cf23257d7a451e2b536d2a5acb97fafdb2eed9 | b2f256c584aa071a58d8905c3628d4a4df25a506 | /utils/python/python/oj/lonlat.py | c267b99d4fcd491d6c2252c584e21e195d26b8f2 | [
"MIT"
] | permissive | maximebenoitgagne/wintertime | 6865fa4aff5fb51d50ea883b2e4aa883366227d8 | 129758e29cb3b85635c878dadf95e8d0b55ffce7 | refs/heads/main | 2023-04-16T22:08:24.256745 | 2023-01-10T20:15:33 | 2023-01-10T20:15:33 | 585,290,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from pylab import *
from scipy.special import ellipeinc
def lonlatdist(lonv, latv):
    """Cumulative (spherical) path length along a piece-wise linear track
    in lon-lat space.

    For each consecutive vertex pair the segment length (in degrees of arc
    on the unit sphere) is computed: along a parallel it is the longitude
    span scaled by cos(latitude); otherwise via the incomplete elliptic
    integral of the second kind.  Returns the running totals, starting at 0,
    one entry per vertex.
    """
    total = 0.
    cumulative = [total]
    segments = zip(zip(lonv, lonv[1:]), zip(latv, latv[1:]))
    for (lon_a, lon_b), (lat_a, lat_b) in segments:
        if lat_b - lat_a == 0.:
            # constant latitude: arc length along the parallel (signed)
            step = cos(lat_a * pi / 180.) * (lon_b - lon_a)
        else:
            slope = (lon_b - lon_a) / (lat_b - lat_a)
            slope_sq = slope * slope
            modulus = slope_sq / (1. + slope_sq)
            step = 180. / pi * sqrt(1 + slope_sq) * abs(
                ellipeinc(lat_b * pi / 180., modulus)
                - ellipeinc(lat_a * pi / 180., modulus))
        total += step
        cumulative.append(total)
    return cumulative
| [
"maxime.benoit-gagne@takuvik.ulaval.ca"
] | maxime.benoit-gagne@takuvik.ulaval.ca |
643dcab14fae0011f2251e73bc8a67b69fe53737 | 1b2ff7633c5c412afcd830a1ad47ed91e41dc603 | /backend/settings/asgi.py | 258cd8bdcc592f753899d56ebc8618e3d7bced9e | [] | no_license | taivy/objects_test_assignment | 931a1ed42c43eaae09ce6b34dc9ad26ca3590e6e | 7cd9d040d54f9643447eb3a0c9b1d41315643c52 | refs/heads/main | 2023-01-07T19:28:15.556318 | 2020-10-23T14:21:31 | 2020-10-23T14:21:31 | 305,916,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for objects_test project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.settings')
application = get_asgi_application()
| [
"aie803ef4g@gmail.com"
] | aie803ef4g@gmail.com |
cead2db18a4505b8b8747c0f7dab990b7e7895db | af4ad182e46d032ddff504196be7d529b7c82078 | /overlap/vcfFindDifferentButSame.py | 34e3bb052a6ddc355640b30a0b942a42e92271c8 | [] | no_license | BD2KGenomics/brca-pipeline | a8423bf3d651ed395c16aa6b45add78436e870bb | 3df911a6a922338422ce17e8cedba9480d6977f2 | refs/heads/master | 2021-01-18T12:50:14.194467 | 2016-08-10T00:11:36 | 2016-08-10T00:11:36 | 34,352,507 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,474 | py | import pysam
import glob, gzip
from itertools import combinations
from os.path import basename
import logging, sys, optparse
from collections import defaultdict
from os.path import join, basename, dirname, isfile
# maximum distance between two variants to get compared
MAXDIST=50
# do we check if the refAllele sequences are really correct?
CHECKREF=False
#CHECKREF=True
# === COMMAND LINE INTERFACE, OPTIONS AND HELP ===
parser = optparse.OptionParser("usage: %prog [options] filenames - find variants in VCF that have a different position but lead to the same sequence. Can process many files at a time.")
parser.add_option("-d", "--debug", dest="debug", action="store_true", help="show debug messages")
#parser.add_option("-f", "--file", dest="file", action="store", help="run on file")
#parser.add_option("", "--test", dest="test", action="store_true", help="do something")
(options, args) = parser.parse_args()
if options.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
PATH = "/hive/groups/cgl/brca/phase1/data/cutoff_vcf/"
chr13 = open("brca2.txt", "r")
BRCA2 = chr13.read()
chr17 = open("brca1.txt", "r")
BRCA1 = chr17.read()
BRCA2_START = 32800000
BRCA1_START = 41100000
class FastaReader:
    """ a class to parse a fasta file
    Example:
    fr = maxbio.FastaReader(filename)
    for (id, seq) in fr.parse():
        print id,seq """
    def __init__(self, fname):
        # Accept an already-open file-like object, the literal name "stdin",
        # a gzip-compressed path, or a plain-text path.
        if hasattr(fname, 'read'):
            self.f = fname
        elif fname=="stdin":
            self.f=sys.stdin
        elif fname.endswith(".gz"):
            self.f=gzip.open(fname)
        else:
            self.f=open(fname)
        self.lastId=None  # id of the record currently being accumulated
    def parse(self):
        """ Generator: returns sequences as tuple (id, sequence) """
        lines = []  # sequence lines collected for the current record
        for line in self.f:
            if line.startswith("\n") or line.startswith("#"):
                continue
            elif not line.startswith(">"):
                # sequence data: drop spaces/newlines, append to current record
                lines.append(line.replace(" ","").strip())
                continue
            else:
                if len(lines)!=0: # on first >, seq is empty
                    faseq = (self.lastId, "".join(lines))
                    self.lastId=line.strip(">").strip()
                    lines = []
                    yield faseq
                else:
                    # header with no preceding sequence: warn unless this is
                    # the very first header of the file
                    if self.lastId!=None:
                        sys.stderr.write("warning: when reading fasta file: empty sequence, id: %s\n" % line)
                    self.lastId=line.strip(">").strip()
                    lines=[]
        # if it's the last sequence in a file, loop will end on the last line
        if len(lines)!=0:
            faseq = (self.lastId, "".join(lines))
            yield faseq
        else:
            # NOTE(review): when the final record is empty (or the input is
            # empty) a (None, None) sentinel is yielded — callers appear to
            # rely on this; confirm before changing.
            yield (None, None)
def main(args, options):
    """Load every VCF named in `args`, report per-file unique variant counts,
    then compare each pair of files for equivalent variants (Python 2)."""
    fnames = args
    dbs = []
    for fname in fnames:
        dbName, vars = readDb(fname)
        dbs.append( (dbName, vars) )
        print "Unique variants in %s:%d" %(dbName, len(vars))
    # pairwise comparison of all input databases
    for db1, db2 in combinations(dbs, 2):
        get_overlap(db1, db2)
def readDb(fname):
    """Parse a (optionally gzipped) VCF file.

    Returns ``(db_name, variants)`` where ``db_name`` is the file's base name
    up to the first dot, and ``variants`` maps ``(chrom, int(pos), ref, alt)``
    to the record tuple ``(chrom, pos, var_id, ref, alt)``.  Header lines and
    no-change records (ref == alt) are skipped; multi-allelic ALT fields are
    expanded into one entry per allele.
    """
    db_name = basename(fname).split(".")[0]
    opener = gzip.open if fname.endswith(".gz") else open
    var_file = opener(fname, "r")
    variants = defaultdict(list)
    for row in var_file:
        if row.startswith("#"):
            continue
        chrom, pos, var_id, ref, alt_field = row.strip().split("\t")[:5]
        if ref == alt_field:
            # variant does not change the sequence
            continue
        for allele in alt_field.split(","):
            variants[(chrom, int(pos), ref, allele)] = (chrom, pos, var_id, ref, allele)
    return db_name, variants
def get_overlap(db1, db2):
    " print variants that are different but lead to same sequence "
    # Each db is (name, {(chrom, pos, ref, alt): (chrom, pos, id, ref, alt)}).
    # O(n*m) pairwise scan, pruned by chromosome and MAXDIST (Python 2).
    db1Name, db1Vars = db1
    db2Name, db2Vars = db2
    for var1, desc1 in db1Vars.iteritems():
        for var2, desc2 in db2Vars.iteritems():
            # don't compare if diff chromosome or start position too far away
            if var1[0]!=var2[0] or abs(var1[1]-var2[1]) > MAXDIST :
                continue
            if var1!=var2:
                # seq1 is None when variant_seqs gave up (allele too long)
                seq1, seq2, fullSeq = variant_seqs(var1, var2)
                if seq1 is None:
                    continue
                if seq1==seq2:
                    # different representation, identical edited sequence
                    chr1, pos1, id1, from1, to1 = desc1
                    chr2, pos2, id2, from2, to2 = desc2
                    pretty1 = "%s:%s->%s (%s)" % (int(pos1), from1, to1, id1)
                    pretty2 = "%s:%s->%s (%s)" % (int(pos2), from2, to2, id2)
                    print "%s-%s:" % (db1Name, db2Name), pretty1, "/", pretty2, fullSeq
                    #print "overlap between the %s and %s: %d" %(name_db1, name_db2, num_overlap)
def variant_seqs(v1, v2):
    """Apply each variant to its reference gene sequence.

    v1/v2 are (chrom, pos, ref, alt) tuples with 1-based VCF positions on
    chromosome "13" (BRCA2) or "17" (BRCA1).  Returns
    (edited_v1, edited_v2, fullSeq): the gene sequence with v1 applied,
    the sequence with v2 applied, and the raw reference slice spanning
    both edit sites.  Returns (None, None, None) when either ref allele
    is longer than 100 bp.  Relies on module globals BRCA1/BRCA2
    (sequences), BRCA1_START/BRCA2_START (genomic offsets) and CHECKREF.
    """
    chr1, pos1, ref1, alt1 = v1
    chr2, pos2, ref2, alt2 = v2
    pos1 = int(pos1)
    pos2 = int(pos2)
    # make sure that v1 is upstream of v2: recurse with swapped arguments
    if pos1 > pos2:
        #(chr1, pos1, ref1, alt1 ), (chr2, pos2, ref2, alt2 ) = (chr2, pos2, ref2, alt2), (chr1, pos1, ref1, alt1)
        return variant_seqs(v2, v1)
    # lift genomic coordinates to gene-local, 0-based positions.
    # NOTE(review): only v1's chromosome is inspected -- assumes the
    # caller already guaranteed chr1 == chr2 (get_overlap does).
    if chr1 == "13":
        seq = BRCA2
        pos1 = pos1 -1 - BRCA2_START
        pos2 = pos2 -1 - BRCA2_START
    elif chr1 == "17":
        seq = BRCA1
        pos1 = pos1 - 1 - BRCA1_START
        pos2 = pos2 - 1 - BRCA1_START
    else:
        # only BRCA1/BRCA2 coordinates are supported
        assert(False)
    # sanity-check the lifted coordinates fall inside the gene slice.
    # NOTE(review): pos1 > 0 rejects a variant at the very first base --
    # presumably intentional padding, verify against BRCA*_START.
    assert(pos1>0)
    assert(pos2>0)
    assert(pos1 < 200000)
    assert(pos2 < 200000)
    assert(len(ref1)!=0)
    assert(len(ref2)!=0)
    # very long deletions are not worth comparing; bail out
    if len(ref2)>100 or len(ref1)>100:
        return None, None, None
    # optionally verify the VCF ref alleles against the genome sequence
    if CHECKREF:
        genomeRef1 = seq[pos1:pos1+len(ref1)].upper()
        genomeRef2 = seq[pos2:pos2+len(ref2)].upper()
        if (genomeRef1!=ref1):
            print "ref1 is not in genome", genomeRef1, ref1
        if (genomeRef2!=ref2):
            print "ref2 is not in genome", genomeRef2, ref2
        assert(genomeRef1==ref1)
        assert(genomeRef2==ref2)
    # replace the vcf ref string with the alt string, independently
    edited_v1 = seq[0:pos1]+alt1+seq[pos1+len(ref1):]
    edited_v2 = seq[0:pos2]+alt2+seq[pos2+len(ref2):]
    # reference slice covering both edit windows (ref and alt extents)
    fullSeq = seq[min(pos1,pos2):max(pos1+len(ref1),pos1+len(alt1),pos2+len(alt2),pos2+len(ref2))]
    return edited_v1, edited_v2, fullSeq
# Script entry point.  `args` and `options` are presumably produced by
# command-line parsing earlier in this file (not visible here) -- TODO confirm.
if __name__ == "__main__":
    main(args, options)
| [
"max@soe.ucsc.edu"
] | max@soe.ucsc.edu |
b4ebc885ac2131d59ae87a4e0685e7737963e6e7 | 530b180c3aade8e67cc61ad2baddff018f7d59a8 | /robocorp-code/src/robocorp_code/_language_server_login.py | 3ff66378a390392099399481b4633b3dec39aa58 | [
"Apache-2.0"
] | permissive | robocorp/robotframework-lsp | 67a1f35b9268d349045eb8fe930ea381c2d94cae | d72e5310ed4a8165d7ee516d79e0accccaf7748c | refs/heads/master | 2023-08-17T05:12:43.598270 | 2023-08-12T12:11:22 | 2023-08-12T12:13:21 | 235,202,865 | 167 | 72 | Apache-2.0 | 2023-09-13T22:39:09 | 2020-01-20T21:31:20 | Python | UTF-8 | Python | false | false | 3,071 | py | from robocorp_ls_core.protocols import IEndPoint, ActionResultDict
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_code import commands
from robocorp_ls_core.command_dispatcher import _SubCommandDispatcher
from robocorp_code.protocols import IRcc, CloudLoginParamsDict
log = get_logger(__name__)
login_command_dispatcher = _SubCommandDispatcher("_login")
class _Login(object):
    """Command handlers for Control Room (cloud) credential management.

    On construction it registers its own sub-dispatcher
    (`login_command_dispatcher`) on the language server's main command
    dispatcher, making the decorated methods below reachable as commands.
    All credential work is delegated to the RCC wrapper (`self._rcc`).
    """

    def __init__(
        self,
        dir_cache,
        endpoint: IEndPoint,
        base_command_dispatcher,
        rcc: IRcc,
        feedback,
        clear_caches_on_login_change,
    ):
        # Local imports keep module import time low and avoid cycles.
        from robocorp_ls_core.cache import DirCache
        from robocorp_code._language_server_feedback import _Feedback

        self._dir_cache: DirCache = dir_cache
        self._endpoint = endpoint
        self._rcc = rcc
        self._feedback: _Feedback = feedback
        # Callable invoked whenever credentials change so stale
        # account-derived caches are discarded.
        self._clear_caches_on_login_change = clear_caches_on_login_change
        base_command_dispatcher.register_sub_command_dispatcher(
            login_command_dispatcher
        )

    @login_command_dispatcher(commands.ROBOCORP_IS_LOGIN_NEEDED_INTERNAL)
    def _is_login_needed_internal(self) -> ActionResultDict:
        """Report whether a login is required (result=True when the
        currently stored credentials are missing or invalid)."""
        from robocorp_ls_core.progress_report import progress_context

        with progress_context(
            self._endpoint, "Validating Control Room credentials", self._dir_cache
        ):
            login_needed = not self._rcc.credentials_valid()
        return {"success": login_needed, "message": None, "result": login_needed}

    @login_command_dispatcher(commands.ROBOCORP_CLOUD_LOGIN_INTERNAL)
    def _cloud_login(self, params: CloudLoginParamsDict) -> ActionResultDict:
        """Store the credentials from `params` and validate them.

        Notifies clients via "$/linkedAccountChanged" and clears caches
        before the switch so nothing from the previous account leaks.
        """
        from robocorp_ls_core.progress_report import progress_context

        self._feedback.metric("vscode.cloud.login")

        # When new credentials are added we need to remove existing caches.
        self._clear_caches_on_login_change()

        credentials = params["credentials"]
        with progress_context(
            self._endpoint, "Adding Control Room credentials", self._dir_cache
        ):
            action_result = self._rcc.add_credentials(credentials)
        self._endpoint.notify("$/linkedAccountChanged")
        if not action_result.success:
            return action_result.as_dict()

        result = self._rcc.credentials_valid()
        return {"success": result, "message": None, "result": result}

    @login_command_dispatcher(commands.ROBOCORP_CLOUD_LOGOUT_INTERNAL)
    def _cloud_logout(self) -> ActionResultDict:
        """Remove the current credentials, clearing caches and notifying
        clients via "$/linkedAccountChanged"."""
        from robocorp_ls_core.progress_report import progress_context

        self._feedback.metric("vscode.cloud.logout")

        # When credentials are removed we need to remove existing caches.
        self._clear_caches_on_login_change()

        with progress_context(
            self._endpoint, "Removing Control Room credentials", self._dir_cache
        ):
            ret = self._rcc.remove_current_credentials().as_dict()
        self._endpoint.notify("$/linkedAccountChanged")
        return ret
| [
"fabiofz@gmail.com"
] | fabiofz@gmail.com |
9d9e8ec272587da4d0b57094865c1e442afa7573 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Data2vec_for_PyTorch/fairseq/models/speech_dlm/__init__.py | 6ea914d6a578651fecd18cc7f352382623de303a | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 249 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .speech_dlm import * # noqa
from .hub_interface import * # noqa
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
4d1d1206749d6326010dd874cf58439b9b23189c | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /vedat/dist/gtk/gtkspell3/actions.py | 6e8ee6ed12a4b2248e6106133f1bda3ac07b6278 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 727 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
# Build under a writable HOME inside the work dir so autotools/libtool
# caches never touch the real user home.
shelltools.export("HOME", get.workDIR())


def setup():
    """Regenerate the build system and configure a shared-only GTK3 build."""
    autotools.autoreconf('-fi')
    autotools.configure("--disable-static \
                         --enable-gtk3")
    # Inject -Wl,-O1,--as-needed into the libtool link command line.
    pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")


def build():
    """Compile the package."""
    autotools.make()


def install():
    """Install into the package image and ship the documentation."""
    autotools.install()

    # Empty files: NEWS,
    pisitools.dodoc("COPYING", "README", "AUTHORS", "ChangeLog")
| [
"vedat@pisi_linux1.0"
] | vedat@pisi_linux1.0 |
cac379b18eb97023b4c5fdcf142c87d983e6794e | 3a4fbde06794da1ec4c778055dcc5586eec4b7d2 | /@lib/12-13-2011-01/vyperlogix/misc/date.py | 725848771c1c8d2be9463ec460abd9baef7c236f | [] | no_license | raychorn/svn_python-django-projects | 27b3f367303d6254af55c645ea003276a5807798 | df0d90c72d482b8a1e1b87e484d7ad991248ecc8 | refs/heads/main | 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,166 | py | # to get the local (current / here) time
import time
import CooperativeClass
__copyright__ = """\
(c). Copyright 2008-2014, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
# allows quick, easy creation of enumeration objects
def enum(*names):
    """Build a read-only enumeration object.

    Each name in *names* becomes an attribute whose value is its
    zero-based position in the argument list.  Any later attempt to set
    or delete an attribute raises AttributeError.
    """
    class _Frozen(object):
        def __setattr__(self, attr, value):
            raise AttributeError
        def __delattr__(self, attr):
            raise AttributeError

    holder = _Frozen()
    position = 0
    for name in names:
        # Write straight into __dict__ to bypass the frozen __setattr__.
        holder.__dict__[name] = position
        position += 1
    return holder
# Supported date layouts: mdyy=0 (mm/dd/yyyy), yymd=1 (yyyy/mm/dd),
# mdy=2 (mm/dd/yy), ymd=3 (yy/mm/dd).
# NOTE(review): the name shadows the built-in format() within this module.
format = enum('mdyy', 'yymd', 'mdy', 'ymd')
class date(CooperativeClass.Cooperative):
    """A calendar date that parses/formats in the four `format` layouts.

    Stores year/month/day as plain ints; arithmetic goes through the
    `time` module, so the usable range is platform-dependent
    (typically the Unix-epoch era) -- TODO confirm.
    """

    def __init__(self, form, string=None):
        """form: one of the `format` enum values; string: optional date
        text to parse.  When string is omitted, today's local date is used."""
        self.__form = form
        if string:
            self.__set_via_string(string)
        else:
            temp = time.localtime()
            self.__year = temp.tm_year
            self.__month = temp.tm_mon
            self.__day = temp.tm_mday
def __set_via_string(self, string):
numbers = string.split('/')
assert len(numbers) == 3
for index in range(len(numbers)):
numbers[index] = int(numbers[index])
if self.__form == format.mdyy or self.__form == format.mdy:
self.__year = numbers[2]
self.__month = numbers[0]
self.__day = numbers[1]
elif self.__form == format.yymd or self.__form == format.ymd:
self.__year = numbers[0]
self.__month = numbers[1]
self.__day = number[2]
else:
raise 'bad format'
    def __set_via_string_DEPRECATED(self, string):
        """Older parser kept for reference only (not called anywhere in
        this class).  Unlike __set_via_string it also accepts the compact
        no-slash spelling of each format (e.g. 'mmddyyyy').

        NOTE(review): the `raise 'bad string'` statements are pre-2.6
        string exceptions and raise TypeError on modern interpreters.
        """
        length = len(string)
        if self.__form == format.mdyy:
            if length == 10:
                assert string[2] == '/' and string[5] == '/'
                self.__year = int(string[6:])
                self.__month = int(string[:2])
                self.__day = int(string[3:5])
            elif length == 8:
                # compact mmddyyyy
                self.__year = int(string[4:])
                self.__month = int(string[:2])
                self.__day = int(string[2:4])
            else:
                raise 'bad string'
        elif self.__form == format.yymd:
            if length == 10:
                assert string[4] == '/' and string[7] == '/'
                self.__year = int(string[:4])
                self.__month = int(string[5:7])
                self.__day = int(string[8:])
            elif length == 8:
                # compact yyyymmdd
                self.__year = int(string[:4])
                self.__month = int(string[4:6])
                self.__day = int(string[6:])
            else:
                raise 'bad string'
        elif self.__form == format.mdy:
            if length == 8:
                assert string[2] == '/' and string[5] == '/'
                self.__year = int(string[6:])
                self.__month = int(string[:2])
                self.__day = int(string[3:5])
            elif length == 6:
                # compact mmddyy
                self.__year = int(string[4:])
                self.__month = int(string[:2])
                self.__day = int(string[2:4])
            else:
                raise 'bad string'
        elif self.__form == format.ymd:
            if length == 8:
                assert string[2] == '/' and string[5] == '/'
                self.__year = int(string[:2])
                self.__month = int(string[3:5])
                self.__day = int(string[6:])
            elif length == 6:
                # compact yymmdd
                self.__year = int(string[:2])
                self.__month = int(string[2:4])
                self.__day = int(string[4:])
            else:
                raise 'bad string'
        else:
            raise 'bad format'
    def GetDate(self, form=None):
        """Return the date as 'a/b/c' text in layout `form` (defaults to
        the instance's own format); fields are zero-padded."""
        if form is None:
            form = self.__form
        if form == format.mdyy:
            return str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2) + '/' + str(self.__year)[-4:].zfill(4)
        elif form == format.yymd:
            return str(self.__year)[-4:].zfill(4) + '/' + str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2)
        elif form == format.mdy:
            return str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2) + '/' + str(self.__year)[-2:].zfill(2)
        elif form == format.ymd:
            return str(self.__year)[-2:].zfill(2) + '/' + str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2)
        else:
            raise 'bad format'

    def GetDateShort(self):
        """E.g. 'Mon Jan 01, 2001' (abbreviated weekday/month names)."""
        return time.strftime('%a %b %d, %Y', time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y'))

    def GetDateLong(self):
        """E.g. 'Monday January 01, 2001' (full weekday/month names)."""
        return time.strftime('%A %B %d, %Y', time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y'))

    def GetDay(self):
        # Day of month, 1-31.
        return self.__day

    def GetMonth(self):
        # Month number, 1-12.
        return self.__month

    def GetYear(self):
        return self.__year

    def GetDayOfWeek(self):
        """Day of week with Sunday == 0.  time.struct_time uses
        Monday == 0, hence the +1 and the wrap at 7."""
        wday = time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y').tm_wday
        wday += 1
        if wday == 7:
            return 0
        return wday

    def GetJulianDay(self):
        """Day of year, 1-366."""
        return time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y').tm_yday

    def IsValid(self):
        """True when the stored y/m/d triple is a real calendar date
        (validated by round-tripping through time.strptime)."""
        try:
            time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y')
            return True
        except:
            return False

    def AddDays(self, days):
        """Shift the date by `days` (may be negative); returns self.
        NOTE(review): implemented as +days*86400 seconds on local time,
        so a shift crossing a DST change may land a day off -- verify."""
        temp = time.localtime(time.mktime(time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y')) + int(days) * (60 * 60 *24))
        self.__year = temp.tm_year
        self.__month = temp.tm_mon
        self.__day = temp.tm_mday
        return self

    def AddYears(self, years):
        """Shift by whole years (month/day untouched); returns self."""
        self.__year += int(years)
        return self
def AddMonths(self, months):
candidate_month = self.__month + int(months)
if 0 < candidate_month < 13:
self.__month = candidate_month
elif candidate_month > 12:
self.__year += candidate_month / 12
self.__month = ((candidate_month - 1) % 12) + 1
elif candidate_month < 1:
candidate_month = abs(candidate_month) + 1
self.__year -= candidate_month / 12
self.__month = 13 - (((candidate_month - 1) % 12) + 1)
else:
raise 'there is a problem if this runs'
return self
    def SubtractDays(self, days):
        """Convenience wrapper for AddDays(-days); returns self."""
        return self.AddDays(-days)

    def SubtractYears(self, years):
        """Convenience wrapper for AddYears(-years); returns self."""
        return self.AddYears(-years)

    def SubtractMonths(self, months):
        """Convenience wrapper for AddMonths(-months); returns self."""
        return self.AddMonths(-months)

    def DateDiff(self, form, string):
        """Absolute difference in whole days between this date and the
        date parsed from `string` (interpreted with layout `form`)."""
        temp = date(form, string)
        now = self.__get_relative_day()
        then = temp.__get_relative_day()
        return int(abs(now - then))

    def __get_relative_day(self):
        # Days since the epoch (local time), as a float.
        return time.mktime(time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y')) / (60 * 60 * 24)

    def YearsOld(self):
        """Completed years from this date (treated as a birth date) to
        today's local date."""
        temp = date(format.mdyy)
        candidate_year = temp.GetYear() - self.GetYear()
        if temp.GetMonth() - self.GetMonth() > 0:
            # Birthday month already passed this year.
            return candidate_year
        elif temp.GetMonth() - self.GetMonth() < 0:
            # Birthday month still ahead this year.
            return candidate_year - 1
        else:
            # Same month: compare the day of month.
            if self.GetDay() - temp.GetDay() <= 0:
                return candidate_year
            else:
                return candidate_year - 1
"raychorn@gmail.com"
] | raychorn@gmail.com |
b964c7d3ac0997d189160beb6397ce66674c1b0e | 1afa6c852dfc922d1a26a384d965976f31a87692 | /Interaction/Style/Testing/Python/TestStyleTrackballCamera.py | ec1a5770ffd8efdb2a48a09334f9989151a0a7a6 | [
"BSD-3-Clause"
] | permissive | dgobbi/VTK | 631d037aacc7258861e70f77c586b01cd4ebff3f | 17f232ee440025c26bc78a897edef78e9fc78510 | refs/heads/master | 2021-01-04T22:27:46.611907 | 2013-03-01T19:44:02 | 2013-03-01T19:44:02 | 938,377 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
# Run this test like so:
# vtkpython TestStyleTrackballCamera.py -D $VTK_DATA_ROOT \
# -B $VTK_DATA_ROOT/Baseline/Rendering
import sys
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
'''
Prevent .pyc files from being created.
Stops the vtk source being polluted
by .pyc files.
'''
sys.dont_write_bytecode = True
# Load base (spike and test)
import TestStyleBaseSpike
import TestStyleBase
class TestStyleTrackballCamera(vtk.test.Testing.vtkTest):
    """Regression test for vtkInteractorStyleSwitch in trackball-camera
    mode: builds the shared spike scene, drives the common interaction
    sequence, then compares the rendering against the stored baseline."""

    def testStyleTrackballCamera(self):
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin);

        # Scene shared by all interactor-style tests.
        testStyleBaseSpike = TestStyleBaseSpike.StyleBaseSpike(ren, renWin, iRen)

        # Set interactor style
        inStyle = vtk.vtkInteractorStyleSwitch()
        iRen.SetInteractorStyle(inStyle)

        # Switch to Trackball ('t') + Camera ('c') mode by faking the
        # keypresses that vtkInteractorStyleSwitch listens for.
        iRen.SetKeyEventInformation(0, 0, 't', 0, '0')
        iRen.InvokeEvent("CharEvent")
        iRen.SetKeyEventInformation(0, 0, 'c', 0, '0')
        iRen.InvokeEvent("CharEvent")

        # Drive the common interaction sequence against the active style.
        testStyleBase = TestStyleBase.TestStyleBase(ren)
        testStyleBase.test_style(inStyle.GetCurrentStyle())

        # render and compare against the baseline image
        img_file = "TestStyleTrackballCamera.png"
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()

if __name__ == "__main__":
    vtk.test.Testing.main([(TestStyleTrackballCamera, 'test')])
| [
"nikhil.shetty@kitware.com"
] | nikhil.shetty@kitware.com |
9e75dc1d0187593e791de4ce6185ce127d5bea53 | 221e8d2c7f8f0044e2884d3c1a1327d49ca50356 | /hackbright_web.py | 2a52d90b7672a1ebd04e39d8bbce1684a8f89873 | [] | no_license | lakeeja/HBF_wk4_project_tracker_flask | d8411cfc12741c6662badc32cc8e2bcec399958a | a45669b5389051f8fd68d5fb8fd5865a5a059880 | refs/heads/master | 2021-03-19T07:39:32.490693 | 2017-06-14T20:09:32 | 2017-06-14T20:09:32 | 94,369,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | """A web application for tracking projects, students, and student grades."""
from flask import Flask, request, render_template
import hackbright
app = Flask(__name__)
@app.route("/student")
def get_student():
    """Show information about a student, looked up by ?github=<handle>."""

    github = request.args.get('github')

    first, last, github = hackbright.get_student_by_github(github)
    project = hackbright.get_grades_by_github(github)
    # NOTE(review): leftover debug print -- remove or switch to a logger.
    print project

    html = render_template("student_info.html",
                           first=first,
                           last=last,
                           github=github,
                           project=project)
    return html


@app.route("/student-search")
def get_student_form():
    """Show form for searching for a student."""

    return render_template('student_search.html')


@app.route("/student-add", methods=['POST'])
def student_add():
    """Add a student from the POSTed form fields."""

    github = request.form.get("github")
    first_name = request.form.get("first_name")
    last_name = request.form.get("last_name")

    hackbright.make_new_student(github, first_name, last_name)

    # NOTE(review): placeholder response -- should redirect to the new
    # student's page instead of returning raw text.
    return "made it to student add whew"


@app.route('/project')
def project_info():
    """List info about a project, looked up by ?title=<title>."""

    title = request.args.get('title')

    title, description, max_grade = hackbright.get_project_by_title(title)

    projects = render_template("project_info.html",
                               title=title,
                               description=description,
                               max_grade=max_grade)

    return projects  # incomplete: per-student grades not yet displayed

if __name__ == "__main__":
    hackbright.connect_to_db(app)
    app.run(debug=True)
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
b8aa11e2fbe1bcf8389ab5e53c2fbb4cea60c170 | c857d225b50c5040e132d8c3a24005a689ee9ce4 | /problem350.py | 9e621c086dc23d4c33e491b4fc312ed60a93f07d | [] | no_license | pythonsnake/project-euler | 0e60a6bd2abeb5bf863110c2a551d5590c03201e | 456e4ef5407d2cf021172bc9ecfc2206289ba8c9 | refs/heads/master | 2021-01-25T10:44:27.876962 | 2011-10-21T00:46:02 | 2011-10-21T00:46:02 | 2,335,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | """
A list of size n is a sequence of n natural numbers. examples are (2,4,6), (2,6,4), (10,6,15,6), and (11).
the greatest common divisor, or gcd, of a list is the largest natural number that divides all entries of the list. examples: gcd(2,6,4) = 2, gcd(10,6,15,6) = 1 and gcd(11) = 11.
the least common multiple, or lcm, of a list is the smallest natural number divisible by each entry of the list. examples: lcm(2,6,4) = 12, lcm(10,6,15,6) = 30 and lcm(11) = 11.
let f(g, l, n) be the number of lists of size n with gcd g and lcm l. for example:
f(10, 100, 1) = 91.
f(10, 100, 2) = 327.
f(10, 100, 3) = 1135.
f(10, 100, 1000) mod 1014 = 3286053.
find f(106, 1012, 1018) mod 1014.
""" | [
"pythonsnake98@gmail.com"
] | pythonsnake98@gmail.com |
21a873fd59c00a1d52720a6c3195a0abdef5efd0 | faf2852a357a2e077d0e7f0a28055c250f5edcd0 | /myshop/authapp/migrations/0002_authapp_user.py | 7f5e3935c03ee2acb2f53026a1448723437e08b6 | [] | no_license | Pegorino82/GUDjangoProject_2 | dc68ff9d21afb56ce29f3c5fe4672c5f79633196 | 579189b576af3bdcd98927d054030b7a2ebc9f46 | refs/heads/master | 2020-04-07T20:13:25.701650 | 2019-01-17T12:01:21 | 2019-01-17T12:01:21 | 158,679,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | # Generated by Django 2.1.4 on 2018-12-19 20:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('authapp', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='authapp',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"eshkryabin@yandex.ru"
] | eshkryabin@yandex.ru |
fbccffcd8524d8b244b026b2bed39e87748354d3 | 1dead366d7d1152a2dd1c7dd5f2ab91a4119310a | /GMOOC/GMOOC/settings.py | 602b234767d99af5cf8c0179a8ebc850bd52fd7b | [] | no_license | SmallSir/Python-and-Xadmin | 6b95bcb401c1f4ca232d3c77bb6d883467e5ea7f | fe29cb24d3a047f9a97319d4e33f07ad0e1562e3 | refs/heads/master | 2020-04-09T07:42:29.380396 | 2018-12-03T09:45:13 | 2018-12-03T09:45:13 | 160,167,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | """
Django settings for GMOOC project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from an environment variable before any real deployment.
SECRET_KEY = '9=k8^e+l@w&1w#s!_9be1zjf41_sty25p_sll#%tyxau$%sqi+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTHENTICATION_BACKENDS = (
'users.views.CustomBackend',
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'courses',
'organization',
'operation',
'xadmin',
'crispy_forms',
'captcha'
]
AUTH_USER_MODEL = 'users.UserFile'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'GMOOC.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GMOOC.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# MySQL connection for the site.
# NOTE(review): database credentials are hard-coded in source control --
# move PASSWORD (and ideally USER/NAME) into environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'PASSWORD': 'qiu961030.',
        'HOST': 'localhost',
        'USER': 'root',
        'NAME': 'gmooc',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
) | [
"280690956@qq.com"
] | 280690956@qq.com |
b9f99c6b7e9712d01382336f8494ecf25633cb41 | 53b529e8edf046971db0ef4a740520e0b3e60699 | /.history/recipebox/models_20200202160352.py | fafdd926b9e070ad125ea70f80232e6b1b526195 | [] | no_license | EnriqueGalindo/recipebox | 6b6662e517ac045a23cd43aaf296c83cf61608b2 | ace7b2699db8be20568ced7062dc85ae92aa2eee | refs/heads/master | 2020-12-28T00:46:09.079202 | 2020-02-04T04:44:35 | 2020-02-04T04:44:35 | 238,124,659 | 0 | 1 | null | 2020-04-04T21:40:24 | 2020-02-04T04:42:05 | Python | UTF-8 | Python | false | false | 300 | py | from django.db import models
class Author(models.Model):
    """A recipe author.

    BUG FIX: both class statements were missing their trailing colon
    (SyntaxError), and the TextField fields referenced the field *class*
    without instantiating it, which Django cannot use as a model field.
    """
    name = models.CharField(max_length=30)
    bio = models.TextField()


class Recipe(models.Model):
    """A recipe written by an Author."""
    title = models.CharField(max_length=30)
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    description = models.TextField()
"egalindo@protonmail.com"
] | egalindo@protonmail.com |
9c91b7b8e91ddc4fc9938f0c66b1f9450885d3b0 | 82090f948cce1bf26c0cc25da58e7739d7e9c624 | /core/models.py | 0490988914eb2b8e0dcbc3b8b2dbc82c7ccf8d9b | [] | no_license | Ehsan-63/django-qa | 4e98e9ee2bcc96238829e6a515cfdd1ed2733d61 | e2f467aeba21e082d0c2761e05e0cdc31bd7bf22 | refs/heads/master | 2022-11-27T21:42:03.531635 | 2020-07-29T15:57:32 | 2020-07-29T15:57:32 | 284,274,854 | 0 | 0 | null | 2020-08-01T14:23:12 | 2020-08-01T14:23:12 | null | UTF-8 | Python | false | false | 935 | py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Question(models.Model):
    """A question posted by a user; the slug feeds the detail URL."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='questions')
    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=200)
    body = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
    # True once the question no longer accepts answers.
    closed = models.BooleanField(default=False)

    def __str__(self):
        # "<user> - <first 20 chars of title>"
        return f'{self.user} - {self.title[:20]}'

    def get_absolute_url(self):
        """Canonical detail URL; 'core:detail' takes (id, slug)."""
        return reverse('core:detail', args=[self.id, self.slug])


class Answer(models.Model):
    """An answer attached to a Question."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='answers')
    question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='answers')
    body = models.TextField()
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # "<user> - <first 20 chars of the question title>"
        return f'{self.user} - {self.question.title[:20]}'
"amirbig44@gmail.com"
] | amirbig44@gmail.com |
76f66d757b47e9b8fd951a8d9ffb16d55d1a1c70 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /n4JA3je7FEFfZKaWp_22.py | 97d9d5e1b036a363bf62e5a79d65b41da93f716f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
def million_in_month(first_month, multiplier):
    """Return the (1-based) month in which cumulative earnings first
    reach one million.  Earnings are multiplied by `multiplier` at the
    start of each month, beginning from `first_month`."""
    earnings = first_month
    total = 0
    months = 1
    while True:
        if total >= 10 ** 6:
            return months
        earnings *= multiplier
        total += earnings
        months += 1
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4e14cfd9cbd068bcaa900c144725772e638d912d | 3be8da1d39bef1e09e4c8e7a6b736d7fc74a3c0f | /webserver/opentrain/common/static/common/ot_i18n.py | 220f6e9b537c255d90f967abf1f980bd02e07611 | [
"BSD-3-Clause"
] | permissive | amitzini/OpenTrain | bbe5b2fc1b1b118931f7aac94667083c1b5cf4da | 25ff81df668a9eba1c4369f9a789e34c60b44096 | refs/heads/master | 2020-04-01T22:36:01.131143 | 2014-10-27T22:07:40 | 2014-10-27T22:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | from django.utils.translation import ugettext_noop
ugettext_noop("Search In")
ugettext_noop("Device Reports")
ugettext_noop("Live Trains")
ugettext_noop("Report Details")
ugettext_noop("Distances")
ugettext_noop("Akko")
ugettext_noop("Modiin")
ugettext_noop("Modiin Center")
ugettext_noop("Kiryat Hayyim")
ugettext_noop("Kiryat Motzkin")
ugettext_noop("Leb Hmifratz")
ugettext_noop("Hutsot HaMifrats")
ugettext_noop("Akko")
ugettext_noop("Nahariyya")
ugettext_noop("Haifa Center HaShmona")
ugettext_noop("Haifa Bat Gallim")
ugettext_noop("Haifa Hof HaKarmel (Razi'el)")
ugettext_noop("Atlit")
ugettext_noop("Binyamina")
ugettext_noop("Kesariyya - Pardes Hanna")
ugettext_noop("Hadera West")
ugettext_noop("Natanya")
ugettext_noop("Bet Yehoshua")
ugettext_noop("Herzliyya")
ugettext_noop("Tel Aviv - University")
ugettext_noop("Tel Aviv Center - Savidor")
ugettext_noop("Bne Brak")
ugettext_noop("Petah Tikva Kiryat Arye")
ugettext_noop("Petah Tikva Sgulla")
ugettext_noop("Tel Aviv HaShalom")
ugettext_noop("Holon Junction")
ugettext_noop("Holon - Wolfson")
ugettext_noop("Bat Yam - Yoseftal")
ugettext_noop("Bat Yam - Komemiyyut")
ugettext_noop("Kfar Habbad")
ugettext_noop("Tel Aviv HaHagana")
ugettext_noop("Lod")
ugettext_noop("Ramla")
ugettext_noop("Ganey Aviv")
ugettext_noop("Rehovot E. Hadar")
ugettext_noop("Be'er Ya'akov")
ugettext_noop("Yavne")
ugettext_noop("Ashdod Ad Halom")
ugettext_noop("Ashkelon")
ugettext_noop("Bet Shemesh")
ugettext_noop("Jerusalem Biblical Zoo")
ugettext_noop("Jerusalem Malha")
ugettext_noop("Kiryat Gat")
ugettext_noop("Be'er Sheva North University")
ugettext_noop("Be'er Sheva Center")
ugettext_noop("Dimona")
ugettext_noop("Lehavim - Rahat")
ugettext_noop("Ben Gurion Airport")
ugettext_noop("Kfar Sava")
ugettext_noop("Rosh Ha'Ayin North")
ugettext_noop("Yavne - West")
ugettext_noop("Rishon LeTsiyyon HaRishonim")
ugettext_noop("Hod HaSharon")
ugettext_noop("Sderot")
ugettext_noop("Rishon LeTsiyyon - Moshe Dayan")
# routes
ugettext_noop("Tel Aviv Center - Rishon LeTsiyyon HaRishonim")
ugettext_noop("Nahariyya - Modiin Center")
ugettext_noop("Nahariyya - Be'er Sheva Center")
ugettext_noop("Binyamina - Ashkelon")
ugettext_noop("Nahariyya - Ben Gurion Airport -Be'er Sheva Center")
ugettext_noop("Kiryat Motzkin - Haifa Hof HaKarmel (Razi'el)")
ugettext_noop("Tel Aviv Center - Savidor - Jerusalem Malha")
ugettext_noop("Be'er Sheva North University - Dimona")
ugettext_noop("Hod HaSharon - Ashkelon")
ugettext_noop("Hertsliyya - Be'er Sheva Center")
# days
ugettext_noop("Sunday")
ugettext_noop("Monday")
ugettext_noop("Tuesday")
ugettext_noop("Wendesay")
ugettext_noop("Thursday")
ugettext_noop("Friday")
ugettext_noop("Saturday")
ugettext_noop("Stop")
ugettext_noop("Arrival")
ugettext_noop("Departure")
ugettext_noop("Live")
ugettext_noop('Live Trains');
ugettext_noop('Simulated');
ugettext_noop('WIP');
ugettext_noop('No Trips Now');
ugettext_noop('Current Trains List')
ugettext_noop("Total # of reports (with loc)")
ugettext_noop("to")
ugettext_noop("on")
ugettext_noop("Search Reports")
ugettext_noop("Go Live")
ugettext_noop("Stop Live")
ugettext_noop("auto zoom")
ugettext_noop("Stops Only")
ugettext_noop("All Reports")
ugettext_noop("Please wait. Loading Reports, will take some time...")
ugettext_noop("Map for device id")
ugettext_noop("Total # of reports (with loc)")
ugettext_noop('cur')
ugettext_noop('exp')
| [
"ekeydar@gmail.com"
] | ekeydar@gmail.com |
32d121133bb107b2caf9971092b1b2fbaff65cc6 | 38ee4430af92b52230a79a6965c03ae3f2375bf4 | /setup.py | 357fc8235e22e0ef1714b73e14cf517462aff641 | [
"MIT"
] | permissive | dannguyen/poppler_wrap | 78df3dc391411b8770c6d37e647582e49a7a2343 | abd23dbd273e10cb34ceeb4b6a078faa525c3bb4 | refs/heads/master | 2020-03-17T00:06:03.328034 | 2018-05-12T04:16:13 | 2018-05-12T04:16:13 | 133,103,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=6.0', ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Dan Nguyen",
author_email='dansonguyen@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
description="Python wrapper around my favorite poppler functions for working with PDFs",
entry_points={
'console_scripts': [
'poppler_wrap=poppler_wrap.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='poppler_wrap',
name='poppler_wrap',
packages=find_packages(include=['poppler_wrap']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/dannguyen/poppler_wrap',
version='0.1.0',
zip_safe=False,
)
| [
"dansonguyen@gmail.com"
] | dansonguyen@gmail.com |
35915cf212498c6065c0b5e3a79edb5030459d37 | b09a8df80c35e3ccca43cd74cec6e1a14db76ad7 | /user_import/views.py | 2282c2db1b9624c1c3db81b4feb4e259794dff40 | [
"MIT"
] | permissive | ofa/everyvoter | 79fd6cecb78759f5e9c35ba660c3a5be99336556 | 3af6bc9f3ff4e5dfdbb118209e877379428bc06c | refs/heads/master | 2021-06-24T19:38:25.256578 | 2019-07-02T10:40:57 | 2019-07-02T10:40:57 | 86,486,195 | 7 | 3 | MIT | 2018-12-03T19:52:20 | 2017-03-28T17:07:15 | Python | UTF-8 | Python | false | false | 3,269 | py | """Views for Import App"""
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpResponse
from django.views.generic import CreateView, DetailView
from django.urls import reverse_lazy
from django_filters.views import FilterView
import unicodecsv
from manage.mixins import ManageViewMixin
from branding.mixins import OrganizationViewMixin, OrganizationCreateViewMixin
from everyvoter_common.utils.slug import slugify_header
from everyvoter_common.utils.uuid_slug_mixin import UUIDSlugMixin
from user_import.models import UserImport
from user_import.tasks import ingest_import
from user_import.forms import UserImportForm
from user_import.filters import UserImportFilter
class ImportListView(OrganizationViewMixin, ManageViewMixin, FilterView):
"""List all imports"""
model = UserImport
template_name = "user_import/list_imports.html"
paginate_by = 15
context_object_name = 'imports'
filterset_class = UserImportFilter
class ImportCreateView(OrganizationViewMixin, ManageViewMixin,
SuccessMessageMixin, OrganizationCreateViewMixin,
CreateView):
"""Create a new import"""
model = UserImport
form_class = UserImportForm
template_name = "user_import/create_import.html"
success_url = reverse_lazy('manage:user_import:list_imports')
success_message = "Import %(name)s was started"
def form_valid(self, form):
"""Handle a valid form"""
form.instance.uploader = self.request.user
form.instance.status = 'pending'
response = super(ImportCreateView, self).form_valid(form)
ingest_import.delay(self.object.pk)
return response
class ImportErrorCSVView(OrganizationViewMixin, ManageViewMixin,
UUIDSlugMixin, DetailView):
"""Download errors from a specific import"""
model = UserImport
slug_field = 'uuid'
def render_to_response(self, context, **response_kwargs):
"""Render to response"""
response = HttpResponse(content_type='text/csv')
# pylint: disable=line-too-long
response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(
slugify_header(self.object.name))
import_record_statuses = self.object.importrecordstatus_set.filter(
status='failed').select_related('import_record')
field_names = [
'status',
'error_type',
'first_name',
'last_name',
'email',
'address',
'note'
]
writer = unicodecsv.DictWriter(response, fieldnames=field_names)
writer.writeheader()
for import_record_status in import_record_statuses:
import_record = import_record_status.import_record
row = {
'status': import_record_status.status,
'error_type': import_record_status.error_type,
'note': import_record_status.note,
'first_name': import_record.first_name,
'last_name': import_record.last_name,
'email': import_record.email,
'address': import_record.address
}
writer.writerow(row)
return response
| [
"nickcatal@gmail.com"
] | nickcatal@gmail.com |
31b0b4a0f36dc1c645ae3945bbe72e3193013840 | d7ad696cd1b550bb41d20f87b83c984ec7f19aa7 | /atcoder/python/_old/educational_dp/01/f_lcs.py | daed11e02ec4f5622ab55e37e0c7107af0ef7faf | [] | no_license | mida-hub/hobby | 2947d10da7964d945e63d57b549c1dcb90ef7305 | 6e6f381e59fc2b0429fab36474d867aa3855af77 | refs/heads/master | 2022-12-21T23:33:14.857931 | 2022-12-19T16:30:34 | 2022-12-19T16:30:34 | 147,890,434 | 0 | 0 | null | 2021-03-20T04:31:58 | 2018-09-08T01:31:59 | Jupyter Notebook | UTF-8 | Python | false | false | 861 | py | s = input()
t = input()
len_s = len(s)
len_t = len(t)
dp = [[0] * (len_t + 1) for x in range(len_s + 1)]
# print(dp)
dp[0][0] = 0
for i in range(0, len_s + 1):
for j in range(0, len_t + 1):
if i > 0 and j > 0:
if s[i-1] == t[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max([dp[i-1][j], dp[i][j-1]])
for j, d in enumerate(dp):
print(d[:][:])
# print(dp[len_s][len_t])
dp_len = dp[len_s][len_t]
ans = ''
dp_len -= 1
len_s -= 1
len_t -= 1
while dp_len >= 0:
# print(f'dp_len:{dp_len}')
# print(f'len_s:{len_s}')
# print(f'len_t:{len_t}')
if s[len_s] == t[len_t]:
ans = s[len_s] + ans
dp_len -= 1
len_s -= 1
len_t -= 1
elif dp[len_s][len_t] == dp[len_s-1][len_t]:
len_s -= 1
else:
len_t -= 1
print(ans)
| [
"rusuden0106@gmail.com"
] | rusuden0106@gmail.com |
a66f5a08fb150733ba6bca0867715ea027d57c3c | 4cb9b7ddc5df9e528ce6b36ab13f8c842d8c0cfa | /vistrails/packages/URL/https.py | ce53bcfbb2b4be9f8b97751bc4c68bcc2bd50dbc | [
"BSD-3-Clause"
] | permissive | anukat2015/VisTrails | bca4f812ffe9e69a1aa4174267a4225f1245638f | c24e310bf62cc0151e084aa4f9e50026e788afbd | refs/heads/master | 2021-01-18T15:42:35.910689 | 2015-11-04T16:15:19 | 2015-11-05T18:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,378 | py | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Python's handling of certificate verification is irresponsible and wrong.
# Having to include the code below to get what should be the only acceptable
# default behavior is a shame
# Code from https://gist.github.com/schlamar/2993700
from __future__ import division
import httplib
import urllib2
import ssl
import certifi
from backports.ssl_match_hostname import match_hostname
__all__ = ['VerifiedHTTPSHandler', 'https_handler', 'build_opener']
class CertValidatingHTTPSConnection(httplib.HTTPConnection):
default_port = httplib.HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
ca_certs=None, strict=None, **kwargs):
httplib.HTTPConnection.__init__(self, host, port, strict, **kwargs)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs
if self.ca_certs:
self.cert_reqs = ssl.CERT_REQUIRED
else:
self.cert_reqs = ssl.CERT_NONE
def connect(self):
httplib.HTTPConnection.connect(self)
self.sock = ssl.wrap_socket(self.sock, keyfile=self.key_file,
certfile=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs)
if self.cert_reqs & ssl.CERT_REQUIRED:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
match_hostname(cert, hostname)
class VerifiedHTTPSHandler(urllib2.HTTPSHandler):
def __init__(self, **kwargs):
urllib2.HTTPSHandler.__init__(self)
self._connection_args = kwargs
def https_open(self, req):
def http_class_wrapper(host, **kwargs):
full_kwargs = dict(self._connection_args)
full_kwargs.update(kwargs)
return CertValidatingHTTPSConnection(host, **full_kwargs)
return self.do_open(http_class_wrapper, req)
https_handler = VerifiedHTTPSHandler(ca_certs=certifi.where())
def build_opener(*handlers, **kwargs):
# Keyword-only argument 'insecure'
insecure = kwargs.pop('insecure', False)
if kwargs:
raise TypeError("build_opener() got unexpected keyword argument %r" %
next(iter(kwargs)))
if not insecure:
handlers = handlers + (https_handler,)
handlers = handlers + (urllib2.ProxyHandler(),)
return urllib2.build_opener(*handlers)
| [
"remirampin@gmail.com"
] | remirampin@gmail.com |
ae779f9bd683ed5f93e9a47dcc1000e53c0dc0b7 | ea522b496372174216fba2aad29bf231a28cc819 | /QuikLab/trunk/.metadata/.plugins/org.eclipse.core.resources/.history/69/a0b8d3f1e48f00181164c0c8bfbc6595 | 0f9acdf33db2a7fb37006af4de7657f023ec3da1 | [] | no_license | gatdotZF/svn192.168.1.5 | 15fdf074042b0a1e5e8c795d3c340080c04ba496 | 196bd4c83b12e3476c269057a4ef3b730a967d9e | refs/heads/master | 2020-07-26T17:45:16.259037 | 2019-09-16T06:36:35 | 2019-09-16T06:36:35 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 7,738 | #! /usr/bin/env python
#coding=GB18030
from pywinauto import application
import pywinauto.base_wrapper as ba
import SendKeys
import time
import os
import pywinauto.mouse as mouse
class Pywin(object):
def __init__(self):
self.app = application.Application(backend='uia')
def start(self,tl_dir,tl_name):
os.chdir(tl_dir)
self.app.start(tl_name)
def connect(self, window_name):
self.app.connect(title = window_name)
time.sleep(1)
def pr(self, window_name):
self.app[window_name].print_control_identifiers()
def close(self, window_name):
self.app[window_name].Close()
time.sleep(1)
def max_window(self, window_name):
self.app[window_name].Maximize()
time.sleep(1)
def menu_click(self, window_name, menulist):
self.app[window_name].MenuSelect(menulist)
time.sleep(1)
def input(self, window_name, controller, content):
self.app[window_name][controller].type_keys(content)
time.sleep(1)
def click(self, window_name, controller):
self.app[window_name][controller].click_input()
time.sleep(1)
def right_click(self, window_name, controller):
self.app[window_name][controller].right_click_input()
def double_click(self, window_name, controller, x ,y):
self.app[window_name][controller].double_click_input(button = "left",coords = (x, y))
time.sleep(1)
def focus(self,window_name,controller):
self.app[window_name][controller].set_focus()
def drag(self,window_name,controller,dx,dy,sx,sy):
self.app[window_name][controller].drag_mouse_input(dst=(dx,dy),src=(sx,sy),button='left',pressed='',absolute=True)
def Sendk(self,key_name,times):
SendKeys.SendKeys('{%s %d}'%(key_name,times))
if __name__ == "__main__":
app=Pywin()
# # tl_dir = r'D:\Program Files\QuiKLab3.0'
# # tl_name = r'D:\Program Files\QuiKLab3.0\MainApp.exe'
#
# # app.start(tl_dir,tl_name)
# # time.sleep(2)
# # window_name =u'登录--试验自动测试管理系统'
window_name = r'QuiKLab V3.0'
time.sleep(2)
# # window_name = u'图形监控'
dlg=app.connect(window_name)
# time.sleep(2)
# controller="Button17"
#app.focus(window_name, controller)
# app.Sendk('TAB')
# time.sleep(2)
# app.input(window_name,controller,123)
# #
# app.click(window_name,'CheckBox')
#
app.pr(window_name)
#
app=application.Application(backend='uia')
window_name = r'QuiKLab V3.0'
# app.connect(title = window_name)
# print "11111"
# app[window_name]['Button10'].click_input()
# app[window_name][u'确定'].click_input()
# app[window_name]['ComBox1'].select(1)
# app[window_name].Maximize()
app[window_name][u'编辑__信号__signal'].click_input()
# print app[window_name]['Static8'].texts()
# if 'name' in app[window_name]['statics2'].texts()[0]:
# print "get"
# app.click(window_name, 'Edit8')
# test=ctr.UIAWrapper()
# test = ba.BaseWrapper()
# test.get_properties()
# print "finish!!!!"
# time.sleep(100000)
# app.right_click(window_name, 'TreeView2')
# app.Sendk('DOWN', 2)
# app.Sendk('ENTER',1)
# app.click(window_name, 'Button4')
# app.right_click(window_name,'TreeItem16') #测试用例root
# time.sleep(1)
# app.Sendk('DOWN',4)
# app.Sendk('ENTER',1)
'''
#添加总线
app.click(window_name,'TreeItem10') #进入环境配置
mouse.right_click(coords=(1577, 492))
app.Sendk('DOWN', 1)
app.Sendk('ENTER',1)
app.click(window_name, 'COmboBox1')
app.input(window_name, 'ComboBox1', 'tcp')#添加 TCP/IP协议
app.Sendk('ENTER',1)
app.click(window_name, 'Button5') #确定
#添加设备
app.right_click(window_name,'Pane2')
app.Sendk('DOWN', 3)
app.Sendk('ENTER',1) #选择添加设备
#添加目标机
app.click(window_name, 'ComboBox1')
app.Sendk('UP', 1)
app.Sendk('ENTER', 1)
#添加IP
app.click(window_name, 'Edit2')
app.Sendk('RIGHT', 1)
app.input(window_name, 'Edit2', '192')
app.Sendk('.', 1)
app.input(window_name, 'Edit3', '168')
app.Sendk('.', 1)
app.input(window_name, 'Edit4', '1')
app.Sendk('.', 1)
app.input(window_name, 'Edit5', '5')
app.click(window_name, 'Button5')#确定
#添加客户端接口
app.click(window_name, 'ComboBox1')
app.Sendk('DOWN', 1)
app.Sendk('ENTER', 1)
#添加IP
app.click(window_name, 'Edit9')
app.Sendk('RIGHT', 1)
app.input(window_name, 'Edit9', '192')
app.Sendk('.', 1)
app.input(window_name, 'Edit10', '168')
app.Sendk('.', 1)
app.input(window_name, 'Edit11', '1')
app.Sendk('.', 1)
app.input(window_name, 'Edit12', '5')
#设置端口
app.click(window_name, 'Edit13')
app.Sendk('BACKSPACE', 1)
app.input(window_name, 'Edit13', '6060')
app.click(window_name, 'Button7')#确定
#添加服务端接口
pywinauto.mouse.right_click(coords=(923,510))
app.Sendk('DOWN', 2)
app.Sendk('ENTER', 1)
app.click(window_name, 'ComboBox1')
app.Sendk('DOWN', 2)
app.Sendk('ENTER', 1)
app.click(window_name, 'Button7')#确定
#添加信号
pywinauto.mouse.press(button='left', coords=(902, 456))
pywinauto.mouse.move(coords=(940, 456))
pywinauto.mouse.release(button='left', coords=(940, 456))
app.click(window_name, 'ComboBox5')
app.input(window_name, 'ComboBox5', 'i_block')#添加数据结构
app.click(window_name, 'Button11')#确定
#新建测试用例
app.click(window_name, 'TreeItem11')
app.right_click(window_name, 'TreeView2')
app.Sendk('DOWN', 2)
app.Sendk('ENTER',1)
app.click(window_name, 'Button4')
app.right_click(window_name,'TreeItem16') #测试用例root
time.sleep(1)
app.Sendk('DOWN',4)
app.Sendk('ENTER',1)
app.Sendk('TAB',2)
app.input(window_name, 'Edit1', 'content2')#输入用例名
app.click(window_name,'Button4') #确定
app.click(window_name,'TreeItem16')
app.Sendk('RIGHT',1)
app.Sendk('DOWN',1)
#添加信号
app.click(window_name, 'TabItem2')
app.click(window_name, 'Button16')
app.click(window_name, 'Table1')
pywinauto.mouse.click(coords=(602,450))#复选信号
app.click(window_name, 'Button14')#确定
#测试用例编辑
app.click(window_name, 'TabItem1')
app.right_click(window_name, 'TreeItem16')
app.Sendk('UP', 1)
app.Sendk('ENTER', 1)
pywinauto.mouse.press(button='left',coords=(600,229))
pywinauto.mouse.move(coords=(661, 646))
pywinauto.mouse.release(button='left', coords=(661, 646))
app.click(window_name, 'Button10')
#添加UUT
app.right_click(window_name,'Pane2')
app.click(window_name, 'ComboBox1')
app.Sendk('UP', 1)
app.Sendk('DOWN', 1)
app.Sendk('ENTER', 1)
app.click(window_name, 'Button5')
# app.click(window_name,'TreeItem12') #测试用例
# SendKeys.SendKeys('{LEFT}')
# app.right_click(window_name, 'TreeItem16')
# SendKeys.SendKeys('{DOWN}')
# app.Sendk('ENTER',1)
app.click(window_name, 'TabItem5')#图形监控
controller='Button4'
app.drag(window_name, controller, 810, 484, 1298, 406)#发送按钮控件
app.drag(window_name, controller, 849, 280, 1302, 307)#发送旋钮控件
window_name = 'QuiKLab V3.0'
app.connect(window_name)
app.click(window_name, "TabItem1") #测试用例编辑
# app.click(window_name, "TabItem2") #测试用例变量
# app.double_click(window_name, 'Pane0',10,10)
# app.drag(window_name,'Pane0',960,540,960,300)
app.drag(window_name,'Pane0',960,300,960,540)
'''
| [
"994430058@qq.com"
] | 994430058@qq.com | |
4a6fb5bb1f0a3073cf9fe3811f9b0bbeccd8fe8f | 6f1034b17b49f373a41ecf3a5a8923fb4948992b | /pychron/furnace/tasks/thermo/furnace_plugin.py | 10d2f425a6afc473edc838188bb86750b9aa9268 | [
"Apache-2.0"
] | permissive | NMGRL/pychron | a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f | 8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6 | refs/heads/main | 2023-08-30T07:00:34.121528 | 2023-06-12T17:43:25 | 2023-06-12T17:43:25 | 14,438,041 | 38 | 28 | Apache-2.0 | 2023-08-09T22:47:17 | 2013-11-15T23:46:10 | Python | UTF-8 | Python | false | false | 2,352 | py | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pychron.furnace.ifurnace_manager import IFurnaceManager
from pychron.furnace.tasks.furnace_plugin import BaseFurnacePlugin
from pychron.furnace.tasks.thermo.preferences import ThermoFurnacePreferencesPane
from pychron.furnace.tasks.thermo.task import ThermoFurnaceTask
class ThermoFurnacePlugin(BaseFurnacePlugin):
name = "ThermoFurnace"
id = "pychron.furnace.thermo.plugin"
klass = ("pychron.furnace.thermo.furnace_manager", "ThermoFurnaceManager")
task_klass = ThermoFurnaceTask
# def _help_tips_default(self):
# return ['']
def _deactivations_default(self):
application = self.application
def func():
manager = application.get_service(IFurnaceManager)
if manager:
for window in application.windows:
if "furnace" in window.active_task.id:
break
else:
manager.stop_update()
return [func]
def _activations_default(self):
man = self._get_manager()
return [man.start_update]
def _panes_default(self):
def f():
from pychron.furnace.tasks.thermo.panes import ExperimentFurnacePane
manager = self._get_manager()
fpane = ExperimentFurnacePane(model=manager)
return fpane
return [f]
def test_furnace_api(self):
man = self._get_manager()
return man.test_furnace_api()
def _preferences_panes_default(self):
return [ThermoFurnacePreferencesPane]
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
c4335347dc7e11719221f609202691493973425b | ef7eabdd5f9573050ef11d8c68055ab6cdb5da44 | /codeEval/hard/closest_pair.py | 1388ae98fc85075f1ac1d62410aeba96440d15da | [
"WTFPL"
] | permissive | gauravsingh58/algo | cdbf68e28019ba7c3e4832e373d32c71902c9c0d | 397859a53429e7a585e5f6964ad24146c6261326 | refs/heads/master | 2022-12-28T01:08:32.333111 | 2020-09-30T19:37:53 | 2020-09-30T19:37:53 | 300,037,652 | 1 | 1 | WTFPL | 2020-10-15T09:26:32 | 2020-09-30T19:29:29 | Java | UTF-8 | Python | false | false | 654 | py | import sys
from operator import itemgetter
def get_distance(p, q):
return ((p[0]-q[0])**2 + (p[1]-q[1])**2)**0.5
def find_closest_pair(ls):
ls = sorted(ls, key=itemgetter(0, 1))
dist, m = [], 40000
for i in range(len(ls)-1):
m = min(m, get_distance(ls[i], ls[i+1]))
if m > 10000:
return "INFINITY"
else:
return "%.4f" % m
points, n = [], 0
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
if n == 0:
if len(points) > 0:
print find_closest_pair(points)
points = []
n = int(test.strip())
if n == 0:
break
else:
points.append(map(int, test.split()))
n -= 1
test_cases.close() | [
"elmas.ferhat@gmail.com"
] | elmas.ferhat@gmail.com |
70156d945934f471e9dfe882949c342b2e29c2f7 | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /apps/cloud_api_generator/generatedServer/tasklets/lan/getNextMacRange/lan_getNextMacRange.py | 3928da8aa16225a7fb32607614557e6d5b8b9246 | [] | no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 182 | py | __author__ = 'aserver'
__tags__ = 'lan', 'getNextMacRange'
__priority__= 3
def main(q, i, params, tags):
params['result'] = ''
def match(q, i, params, tags):
return True
| [
"devnull@localhost"
] | devnull@localhost |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.