blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1c38b5c878766c01da5a18679414e6c7da92591 | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_yref.py | 7e6de12fbde3d181d7496e94eb85d4bc1c52ff16 | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 489 | py | import _plotly_utils.basevalidators
class YrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    # Validator for the ``yref`` property of ``scatter3d.marker.colorbar``.
    # Appears to be auto-generated plotly validator code; restricts the value
    # to the enumerated set ("container" or "paper") unless overridden.
    def __init__(
        self, plotly_name="yref", parent_name="scatter3d.marker.colorbar", **kwargs
    ):
        super(YrefValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override edit_type/values through kwargs; pop so
            # they are not passed twice to the base class.
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["container", "paper"]),
            **kwargs,
        )
| [
"liam@plot.ly"
] | liam@plot.ly |
0b6359ff6e3d4a1aad8a712944c8a1246ff1efa7 | 28ce113cc12aab7f2809c2cb26763055f1722293 | /groceryProject/groceryProject/wsgi.py | 082460953e1d280c971b7a18c90b76e6d43315ab | [] | no_license | sd8917/GroceryWebApp | a899263b5220436d52a0c7a9beb7a379d3f6e112 | 5aa4f3c3e2c9fe0351dafe9ca9471a9fb2fad49e | refs/heads/master | 2022-04-15T15:09:41.375925 | 2020-04-13T10:07:39 | 2020-04-13T10:07:39 | 255,280,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for groceryProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'groceryProject.settings')
# WSGI callable used by application servers (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| [
"sudhanshuraj8917@gmail.com"
] | sudhanshuraj8917@gmail.com |
8d1f53f681778dcace34d0bad67e4ce83baac7db | 58d832a76c6cf2d80c31310631efca20658f68d6 | /neural_network/word2vec/createDataset.py | 8280f31ba06e3928f5b80bb70f24df59431b160b | [] | no_license | OneClickDeepLearning/classificationOfResidentialRequests | e6fe0071054337af8cf14acaa3b598123575ce6c | 397dfa6ef0e08669b329f17bd5741de0484e3c9c | refs/heads/master | 2020-05-30T18:52:01.085590 | 2019-09-16T02:36:36 | 2019-09-16T02:36:36 | 189,906,790 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | import csv
import re
import jieba
import pandas as pd
# Input: raw resident-request CSV (gb18030 encoded).
# Output (when the commented-out writes are enabled): one whitespace-tokenised
# line of text per record, suitable for word2vec training.
file1 = '/Users/sunjincheng/Desktop/Prof-project/NLPproject/data/valid_data_all.csv'
# df = pd.read_csv(file1,encoding='gb18030')
file2 = '../data/split_data.txt'
read_file = open(file1, encoding='gb18030')
lines = csv.reader(read_file)
count=0
lengths = []
write_file = open(file2,'a',encoding='utf-8')
for line in lines:
    count +=1
    if(count == 1):
        # skip the CSV header row
        continue
    sent = line[8]
    # Strip call-centre boilerplate prefixes ("citizen calls to inquire" /
    # "citizen calls to report").
    sent = re.sub('市民来电咨询', '', sent)
    sent = re.sub('市民来电反映', '', sent)
    # Remove whitespace, ASCII alphanumerics, and ASCII/CJK punctuation.
    sent = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[a-zA-Z0-9+——!,。?、~@#¥%……&*()《》::]+", "",sent)
    splits = jieba.cut(sent)
    length = len(list(splits))
    # NOTE(review): as written, only token counts are collected; the writes
    # that would produce the corpus are commented out below.
    lengths.append(length)
    # result = ' '.join(splits)
    # print(sent)
    # print(result)
    # print(result.split())
    # write_file.write(result)
    # write_file.write('\n')
    if (count % 10000 == 0):
        # progress indicator every 10k rows
        print(count)
write_file.close()
read_file.close()
| [
"sjc951213@gmail.com"
] | sjc951213@gmail.com |
789e558277eb3ba1fd9cfb152bc79cdc9a2e5c2c | 53eb6921a7e6ad41f4cf1636290b7704ed0aa736 | /update_recipe.py | 1a87ab2ec667e87d845bffb44d576e825a547cbf | [] | no_license | jakirkham/conda_recipe_tools | 17e903695c1ab4505283efcc7d6e5e73f340e3a2 | 18545e04f3436ec7c67a38274e21e3fb76083797 | refs/heads/master | 2021-01-19T23:48:56.938068 | 2017-01-31T23:55:19 | 2017-01-31T23:55:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | #! /usr/bin/env python
import argparse
import json
import re
from urllib.request import urlopen
try:
from packaging.version import parse as parse_version
except ImportError:
from pip._vendor.packaging.version import parse as parse_version
import jinja2
import yaml
class GitHubSource(object):
    """Release metadata source backed by a local dump of the GitHub releases API.

    NOTE: the ``user`` and ``repo`` constructor arguments are currently
    unused; release data is read from ``./github_releases.json`` in the
    working directory (a pre-fetched copy of the RELEASE_URL response).
    """

    RELEASE_URL = 'https://api.github.com/repos/{user}/{repo}/releases'

    def __init__(self, user, repo):
        # Use a context manager so the file handle is closed deterministically
        # rather than leaking until garbage collection.
        with open('./github_releases.json') as releases_file:
            self.releases = json.load(releases_file)

    @property
    def latest_version(self):
        """Latest non-prerelease release name as a string, or None if none exist."""
        versions = [parse_version(r['name']) for r in self.releases]
        filtered = [v for v in versions if not v.is_prerelease]
        if len(filtered) == 0:
            return None
        return str(max(filtered))

    def get_hash(self, version, filename, hash_type):
        """Hash lookup is not implemented for the GitHub source; returns None."""
        pass
class PyPISource(object):
    """Package release metadata fetched from the PyPI JSON API."""

    def __init__(self, name):
        self.name = name
        # Fetch and parse the full JSON metadata for the package once.
        response = urlopen('https://pypi.io/pypi/' + self.name + '/json')
        self._info = json.loads(response.read().decode('utf8'))

    @property
    def latest_version(self):
        """Latest non-prerelease version as a string, or None when none exist."""
        parsed = [parse_version(release) for release in self._info['releases']]
        stable = [version for version in parsed if not version.is_prerelease]
        if not stable:
            return None
        return str(max(stable))

    def get_hash(self, version, filename, hash_type):
        """Return the digest PyPI recorded for *filename* of *version*.

        Raises IndexError when the filename is not part of the release.
        """
        matches = [entry for entry in self._info['releases'][version]
                   if entry['filename'] == filename]
        return matches[0]['digests'][hash_type]
class CondaRecipe(object):
    """
    Representation of a conda recipe meta.yaml file.

    The raw file lines are kept in ``self._lines`` and every edit (version,
    hash) is performed textually with regular expressions so the recipe's
    original formatting is preserved; ``self._info`` re-renders the jinja2
    template on demand so each setter can verify its edit took effect.

    Parameters
    ----------
    meta_filename : str
        meta.yaml path.
    """

    def __init__(self, meta_filename):
        """ Initialize the recipe from a meta.yaml file. """
        # read the meta.yaml file for the recipe
        with open(meta_filename) as f:
            self._lines = f.readlines()

    @property
    def _info(self):
        """ Dictionary of recipe after rendering using jinja2. """
        text = ''.join(self._lines)
        rendered_text = jinja2.Template(text).render()
        return yaml.load(rendered_text)

    @property
    def name(self):
        """ package name. """
        return self._info['package']['name']

    @property
    def version(self):
        """ package version string. """
        return self._info['package']['version']

    @version.setter
    def version(self, version):
        # Rewrite the `{% set version = "..." %}` jinja line in place.
        quoted_version = '"' + version + '"'
        pattern = '(?<=set version = ).*(?= %})'
        self._lines = [re.sub(pattern, quoted_version, l) for l in self._lines]
        # re-render to confirm the substitution actually changed the recipe
        if self._info['package']['version'] != version:
            raise AttributeError("version could not be set")

    @property
    def hash_type(self):
        """ 'md5', 'sha256', or None depending on the recipe's source section. """
        source_section = self._info['source']
        if 'md5' in source_section:
            hash_type = 'md5'
        elif 'sha256' in source_section:
            hash_type = 'sha256'
        else:
            hash_type = None
        return hash_type

    @property
    def hash_value(self):
        """ hash digest recorded in the recipe's source section. """
        return self._info['source'][self.hash_type]

    @hash_value.setter
    def hash_value(self, hash_value):
        hash_type = self.hash_type
        lines = self._lines
        # replace jinja templated hash tempates
        quoted_hash = '"' + hash_value + '"'
        pattern = '(?<=set hash_val = ).*(?= %})'
        lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        pattern = '(?<=set hash = ).*(?= %})'
        lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        if hash_type == 'sha256':
            pattern = '(?<=set sha256 = ).*(?= %})'
            lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        if hash_type == 'md5':
            pattern = '(?<=set md5 = ).*(?= %})'
            lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        # replace yaml hash values
        if hash_type == 'sha256':
            pattern = '(?<=sha256: )[0-9A-Fa-f]+'
            lines = [re.sub(pattern, hash_value, l) for l in lines]
        if hash_type == 'md5':
            pattern = '(?<=md5: )[0-9A-Fa-f]+'
            lines = [re.sub(pattern, hash_value, l) for l in lines]
        self._lines = lines
        # re-render to confirm the substitution actually changed the recipe
        if self._info['source'][self.hash_type] != hash_value:
            raise AttributeError("hash_value could not be set")

    @property
    def url_filename(self):
        """ filename component of the recipe's source url. """
        url = self._info['source']['url']
        filename = url.split('/')[-1]
        return filename

    @property
    def source(self):
        """ PyPISource or GitHubSource inferred from the source url; None for
        unrecognized hosting. """
        source_url = self._info['source']['url']
        if source_url.startswith('https://pypi.io'):
            return PyPISource(self.name)
        elif source_url.startswith('https://github.com'):
            # Extract the user and repository name from the GitHub url.
            pattern = '(?<=https://github.com/)(.*?)/(.*?)/'
            user, repo = re.search(pattern, source_url).groups()
            return GitHubSource(user, repo)
        else:
            return None

    def write(self, filename):
        """ Write the (possibly edited) recipe lines back to *filename*. """
        with open(filename, 'wb') as f:
            f.write(''.join(self._lines).encode('utf8'))
def parse_arguments():
    """Define and parse the command line interface of the updater.

    :return: argparse namespace with ``meta`` (path to the recipe's
        meta.yaml, default ``meta.yaml``) and ``version`` (target version,
        or None to use the latest upstream release).
    """
    cli = argparse.ArgumentParser(
        description="Update a conda recipe to a given version")
    cli.add_argument(
        '--meta', '-m', action='store', default='meta.yaml',
        help="path to the recipe's meta.yaml files.")
    cli.add_argument(
        '--version', '-v', action='store', default=None,
        help="version to update the recipe to, defaults is to latest.")
    return cli.parse_args()
def main():
    """Update the recipe named on the command line: refresh both the
    version string and the matching source hash, then rewrite the file."""
    args = parse_arguments()
    recipe = CondaRecipe(args.meta)
    # NOTE(review): recipe.source is None for unrecognized source urls,
    # which would raise AttributeError below — confirm only PyPI/GitHub
    # recipes are passed to this tool.
    source = recipe.source
    # update the version
    if args.version is None:
        recipe.version = source.latest_version
    else:
        recipe.version = args.version
    # update the hash
    hash_value = source.get_hash(
        recipe.version, recipe.url_filename, recipe.hash_type)
    recipe.hash_value = hash_value
    recipe.write(args.meta)
    print("Updated", args.meta, "to version", recipe.version)
print("Updated", args.meta, "to version", recipe.version)
if __name__ == "__main__":
main()
| [
"jjhelmus@gmail.com"
] | jjhelmus@gmail.com |
3e32bdba53b1986cb51d700d78ad9809ec72fd2a | 2eeeefe48c56d0dfae4fd568dbaee3c8d2cf3463 | /0Demo/networkanalysisDemo/dns.py | e043c685cc2249e44892b70aef683077cc79c071 | [] | no_license | lijianmingCN/pybase | f6377f7944c043f7241452fcffccc3f49ef0cef9 | 7286a022ff7f40a7289cf69d73e8418a1ecf7b88 | refs/heads/master | 2021-01-02T08:10:42.215672 | 2017-08-01T03:16:29 | 2017-08-01T03:16:29 | 98,953,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | # -*- coding: utf-8 -*-
import pcap,dpkt
# Python 2 script (statement-form ``print``): sniff live traffic and dump
# HTTP request attributes for packets on TCP port 80.
pc = pcap.pcap()
# NOTE(review): an earlier comment here described the DNS BPF expression
# "udp dst port 53", but the filter actually installed captures HTTP
# traffic on TCP port 80.
pc.setfilter('tcp port 80')
for ts, pkt in pc:
    # Peel the protocol layers: Ethernet -> IP -> TCP.
    eth = dpkt.ethernet.Ethernet(pkt)
    ip = eth.data
    tcp = ip.data
    print tcp.sport
    print tcp.dport
    # Only packets towards port 80 with a non-empty payload can carry an
    # HTTP request.
    if tcp.dport == 80 and len(tcp.data) > 0:
        http = dpkt.http.Request(tcp.data)
        #Once the HTTP payload has been parsed, we can examine its various attributes
        print http.method
        print http.uri
        print http.version
        print http.headers['user-agent']
| [
"lijianming@baidu.com"
] | lijianming@baidu.com |
d3e2f4e03843e37f530804f7007b2e645767f953 | 70b50139dcee04f94de958a9ce13236ef4c3bc34 | /modules/ImageChannels.py | 5276cc81486c8a68314652c3c956420809e48bf7 | [
"MIT"
] | permissive | CGL-Deeplearning/KalleR | 8bf8918b25c2afd5ab4dd6a269c2459413d1a96d | d788020a8e63657ed4f26e64be93b92648ee8d9f | refs/heads/master | 2021-09-07T17:41:27.563516 | 2018-02-27T01:28:03 | 2018-02-27T01:28:03 | 119,736,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,868 | py | DEFAULT_MIN_MAP_QUALITY = 5
IMAGE_HEIGHT = 300
IMAGE_WIDTH = 300
IMAGE_BUFFER = 0
CIGAR_MATCH = 0
CIGAR_IN = 1
CIGAR_DEL = 2
MAX_COLOR_VALUE = 254.0
BASE_QUALITY_CAP = 40.0
MAP_QUALITY_CAP = 60.0
MAP_QUALITY_FILTER = 10.0
class ImageChannels:
    """
    Handles how many channels to create for each base and their way of
    construction.  Each helper maps a base/read attribute to a colour
    intensity; module-level constants (MAX_COLOR_VALUE, BASE_QUALITY_CAP,
    MAP_QUALITY_CAP) define the scaling.
    """

    @staticmethod
    def get_base_color(base):
        """
        Get color based on a base.
        - Uses different bands of the same channel.
        :param base: base character ('A', 'C', 'G', 'T', '*', 'N', ...)
        :return: colour intensity for the base
        """
        base_to_color = {'A': 250.0, 'C': 100.0, 'G': 180.0, 'T': 30.0}
        # Any base other than A/C/G/T ('*', 'N', ...) maps to 5.0.  The
        # historical condition here was ``base == '*' or 'N'``, which is
        # always truthy, so every unmatched base received 5.0; that
        # behaviour is preserved, just written explicitly.
        return base_to_color.get(base, 5.0)

    @staticmethod
    def get_base_quality_color(base_quality):
        """
        Get a color spectrum given base quality.
        :param base_quality: value of base quality (capped at BASE_QUALITY_CAP)
        :return: colour intensity proportional to the capped quality
        """
        capped_quality = min(base_quality, BASE_QUALITY_CAP)
        return MAX_COLOR_VALUE * capped_quality / BASE_QUALITY_CAP

    @staticmethod
    def get_map_quality_color(map_quality):
        """
        Get a color spectrum given mapping quality.
        :param map_quality: value of mapping quality (capped at MAP_QUALITY_CAP)
        :return: colour intensity proportional to the capped quality
        """
        capped_quality = min(map_quality, MAP_QUALITY_CAP)
        return MAX_COLOR_VALUE * capped_quality / MAP_QUALITY_CAP

    @staticmethod
    def get_strand_color(is_rev):
        """
        Get color for forward and reverse reads.
        :param is_rev: True if read is reversed
        :return: 240 for reverse reads, 70 otherwise
        """
        return 240 if is_rev is True else 70

    @staticmethod
    def get_match_ref_color(is_match):
        """
        Get color for a base matching (or not) the reference.
        :param is_match: If true, base matches the reference
        :return: dim colour for matches, full intensity for mismatches
        """
        return MAX_COLOR_VALUE * 0.2 if is_match is True else MAX_COLOR_VALUE * 1.0

    @staticmethod
    def get_alt_support_color(is_in_support):
        """
        ***NOT USED YET***
        :param is_in_support: True when the read supports the alt allele
        :return: full intensity when supporting, 60% otherwise
        """
        return MAX_COLOR_VALUE * 1.0 if is_in_support is True else MAX_COLOR_VALUE * 0.6

    @staticmethod
    def get_empty_channels():
        """
        Get empty (all-zero) channel values.
        :return: list of six zeros, one per channel
        """
        return [0, 0, 0, 0, 0, 0]

    @staticmethod
    def get_channels(attribute_tuple):
        """
        Get a base's channel construction.
        :param attribute_tuple: (base, base_quality, map_quality, is_rev,
            is_match, is_supporting)
        :return: [color spectrum of channels based on base attributes]
        """
        base, base_q, map_q, is_rev, is_match, is_supporting = attribute_tuple
        return [
            ImageChannels.get_base_color(base),
            ImageChannels.get_base_quality_color(base_q),
            ImageChannels.get_map_quality_color(map_q),
            ImageChannels.get_strand_color(is_rev),
            ImageChannels.get_match_ref_color(is_match),
            ImageChannels.get_alt_support_color(is_supporting),
        ]

    @staticmethod
    def get_ref_channels(base):
        """
        Get a reference base's channel construction.
        :param base: Reference base
        :return: [color spectrum of channels based on default values]
        """
        return [
            ImageChannels.get_base_color(base),
            ImageChannels.get_base_quality_color(60),
            ImageChannels.get_map_quality_color(60),
            ImageChannels.get_strand_color(is_rev=False),
            ImageChannels.get_match_ref_color(is_match=True),
            ImageChannels.get_alt_support_color(is_in_support=True),
        ]

    # RGB image creator
    # ---ONLY USED FOR TESTING--- #
    @staticmethod
    def get_empty_rgb_channels():
        """Empty 4-channel RGB pixel: colour channels zeroed, last slot 255."""
        return [0, 0, 0, 255]

    @staticmethod
    def get_color_for_base_rgb(ref, base):
        """
        RGB colour for *base*: white when it equals the reference (and is not
        '*'), otherwise a per-base colour (A=red, C=yellow, T=blue, G=green,
        anything else magenta).
        """
        if ref == base and ref != '*':
            return 255, 255, 255
        if base == 'A':
            return 255, 0, 0
        if base == 'C':
            return 255, 255, 0
        if base == 'T':
            return 0, 0, 255
        if base == 'G':
            return 0, 255, 0
        return 255, 0, 255

    @staticmethod
    def get_channels_only_rgb(attribute_tuple, ref_base):
        """
        RGB(+support) construction for a read base (testing helper).
        Unused grayscale-channel computations were removed; the returned
        value is unchanged.
        """
        base, base_q, map_q, is_rev, is_match, is_supporting = attribute_tuple
        support_color = ImageChannels.get_alt_support_color(is_supporting)
        r, g, b = ImageChannels.get_color_for_base_rgb(ref_base, base)
        return [r, g, b, support_color]

    @staticmethod
    def get_ref_channels_rgb(base):
        """
        RGB(+support) construction for a reference base (testing helper).
        """
        r, g, b = ImageChannels.get_color_for_base_rgb('', base)
        support_color = ImageChannels.get_alt_support_color(is_in_support=True)
        return [r, g, b, support_color]
"kishwar.shafin@gmail.com"
] | kishwar.shafin@gmail.com |
994c43c41bf04d16ce58d05102eb476e9d4a0d08 | 8f2e6e38bb7ba2205cba57b0beae146d29f0ad3b | /chap7/chap7_1_ngram_2.py | de7b69eb1d767e0de948234dc5e5071f6c3c5927 | [] | no_license | KimDoKy/WebScrapingWithPython | fa08ba83ba560d4f24cddb5e55de938a380dfec2 | bc7dd8a36d3ee0f8e3a13ae9fe0d074733b45938 | refs/heads/master | 2020-12-02T19:20:26.285450 | 2017-08-25T14:27:36 | 2017-08-25T14:27:36 | 96,326,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
def ngrams(input, n):
    """Return the list of word-level n-grams found in *input*.

    Newlines and repeated spaces are collapsed to single spaces and any
    non-ASCII characters are dropped before the text is tokenised on single
    spaces.  Each n-gram is returned as a list of *n* tokens.  The cleaned
    text is printed as a side effect, matching the original behaviour.
    """
    text = re.sub('\n+', " ", input)
    text = re.sub(' +', " ", text)
    # Round-trip through UTF-8 bytes, dropping characters outside ASCII.
    text = bytes(text, "UTF-8").decode("ascii", "ignore")
    print(text)
    tokens = text.split(' ')
    return [tokens[start:start + n] for start in range(len(tokens) - n + 1)]
# Fetch the Wikipedia article and extract the body text of the page.
html = urlopen("http://en.wikipedia.org/wiki/Python_(programming_language)")
bsObj = BeautifulSoup(html, "html.parser")
content = bsObj.find("div", {"id":"mw-content-text"}).get_text()
# Build all word bigrams of the article and report how many there are.
ngrams = ngrams(content, 2)  # NOTE(review): rebinding shadows the ngrams function
print(ngrams)
print("2-grams count is: "+str(len(ngrams)))
| [
"makingfunk0@gmail.com"
] | makingfunk0@gmail.com |
df057008387983d394659ff6fc680fea42885a44 | 52e83d67c8b76f83278b61a4c0787abebfa2423c | /DeepLense/Shubham Jain/pipelines/beginner/features/sub_gridding.py | 1d1655e7801803a7e6f870c7e9a0151e7059e250 | [] | no_license | mlsft/gsc_tasks- | 3935142c93cebc978ff35e3f37486438c4dceeed | 84b62aa04f2333d26f8f95a7c0b24c3922bac647 | refs/heads/master | 2022-04-13T16:22:18.054908 | 2020-04-14T11:59:45 | 2020-04-14T11:59:45 | 249,394,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | import autofit as af
import autolens as al
### PIPELINE DESCRIPTION ###
# In this pipeline we use sub-grids with different resoultions, that oversample the calculation of light profile
# intensities and mass profile deflection angles. In general, a higher level of sub-gridding provides numerically
# more precise results, at the expense of longer calculations and higher memory usage.
# The 'sub_size' is an input parameter of the pipeline, meaning we can run the pipeline with different binning up
# factors using different runners.
# Phase names are tagged, ensuring phases using different sub-sizes have a unique output path.
# We'll perform a basic analysis which fits a lensed source galaxy using a parametric light profile where
# the lens's light is omitted. This pipeline uses two phases:
# Phase 1:
# Fit the lens mass model and source light profile using x1 source with a sub grid size of 2.
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: None
# Notes: Uses a sub grid size of 2
# Phase 1:
# Refine the lens and source model using a sub grid size of 4.
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: Lens mass (model -> phase 1), source light (model -> phase 1)
# Notes: Uses a sub grid size of 4.
def make_pipeline(phase_folders=None, sub_size=2):
    """Build the two-phase sub-gridding example pipeline.

    :param phase_folders: list of output folder names; the pipeline name and
        pipeline tag are appended to it (in place) to form the output path.
    :param sub_size: sub-grid size used in phase 2 (phase 1 always uses 2).
    :return: the assembled ``al.PipelineDataset`` containing both phases.
    """
    ### SETUP PIPELINE & PHASE NAMES, TAGS AND PATHS ###
    pipeline_name = "pipeline__feature"
    pipeline_tag = "sub_gridding"
    # This function uses the phase folders and pipeline name to set up the output directory structure,
    # e.g. 'autolens_workspace/output/pipeline_name/pipeline_tag/phase_name/phase_tag/'
    phase_folders.append(pipeline_name)
    phase_folders.append(pipeline_tag)
    # When a phase is passed a 'sub_size,' a setup tag is automatically generated and added to the phase path,
    # to make it clear what sub-grid was used. The setup tag, phase name and phase paths are shown for 3
    # example sub_sizes:
    # sub_size=2 -> phase_path=phase_name/setup_sub_2
    # sub_size=3 -> phase_path=phase_name/setup_sub_3
    # If the sub-grid size is 1, the tag is an empty string, thus not changing the setup tag:
    # sub_size=1 -> phase_path=phase_name/setup
    ### PHASE 1 ###
    # In phase 1, we fit the lens galaxy's mass and one source galaxy, where we:
    # 1) Use a sub-grid size of 2x2 in every image pixel.
    mass = af.PriorModel(al.mp.EllipticalIsothermal)
    # Tight Gaussian priors centre the mass profile near the image centre.
    mass.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.1)
    mass.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.1)
    phase1 = al.PhaseImaging(
        phase_name="phase_1__x1_source",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, mass=mass, shear=al.mp.ExternalShear),
            source=al.GalaxyModel(redshift=1.0, sersic=al.lp.EllipticalSersic),
        ),
        sub_size=2,
    )
    phase1.optimizer.const_efficiency_mode = True
    phase1.optimizer.n_live_points = 80
    phase1.optimizer.sampling_efficiency = 0.2
    ### PHASE 2 ###
    # In phase 2, we fit the lens galaxy's mass and two source galaxies, where we:
    # 1) Use a sub-grid size of 4x4 in every image pixel.
    # NOTE(review): phase 2 actually uses the ``sub_size`` argument (default
    # 2), not a fixed 4x4 — confirm the comment above matches intent.
    phase2 = al.PhaseImaging(
        phase_name="phase_2__x2_source",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5,
                # Pass the phase-1 lens mass model as the starting model.
                mass=phase1.result.model.galaxies.lens.mass,
                shear=phase1.result.model.galaxies.lens.shear,
            ),
            source=al.GalaxyModel(
                redshift=1.0, sersic=phase1.result.model.galaxies.source.sersic
            ),
        ),
        sub_size=sub_size,
    )
    phase2.optimizer.const_efficiency_mode = True
    phase2.optimizer.n_live_points = 50
    phase2.optimizer.sampling_efficiency = 0.3
    return al.PipelineDataset(pipeline_name, phase1, phase2)
| [
"alihariri@MacBook-Air-de-Ali.local"
] | alihariri@MacBook-Air-de-Ali.local |
72d053c6d05b60d62f967c533e27d55e1dabc622 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/combine82/20-tideGauge.py | 998dddd3759e2e9b2093bf2a17792b529133cbb8 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 16:11:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
dir_in = '/lustre/fs0/home/mtadesse/eraFiveConcat'
dir_out = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
def combine():
    """Merge the slp/wnd_u/wnd_v predictor CSVs of each tide gauge into a
    single per-gauge CSV, joined on the 'date' column, written to dir_out.

    NOTE(review): only one gauge (index 20) is processed — x/y appear to be
    a manual batching window for cluster runs; confirm before reuse.  The
    function also relies heavily on os.chdir, so it is not re-entrant.
    """
    os.chdir(dir_in)
    #get names
    tg_list_name = os.listdir()
    #cd to where the actual file is
    os.chdir(dir_in)
    x = 20
    y = 21
    for t in range(x, y):
        tg_name = tg_list_name[t]
        print(tg_name, '\n')
        #looping through each TG folder
        os.chdir(tg_name)
        #defining the path for each predictor
        where = os.getcwd()
        csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
            "wnd_u": os.path.join(where, 'wnd_u.csv'),\
            'wnd_v' : os.path.join(where, 'wnd_v.csv')}
        first = True
        for pr in csv_path.keys():
            print(tg_name, ' ', pr)
            #read predictor
            pred = pd.read_csv(csv_path[pr])
            #remove unwanted columns
            pred.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace=True)
            #give predictor columns a name (prefix with the predictor key so
            #columns stay unique after the merge)
            pred_col = list(pred.columns)
            for pp in range(len(pred_col)):
                if pred_col[pp] == 'date':
                    continue
                pred_col[pp] = pr + str(pred_col[pp])
            pred.columns = pred_col
            #merge all predictors on the shared 'date' column
            if first:
                pred_combined = pred
                first = False
            else:
                pred_combined = pd.merge(pred_combined, pred, on = 'date')
        #saving pred_combined
        os.chdir(dir_out)
        pred_combined.to_csv('.'.join([tg_name, 'csv']))
        os.chdir(dir_in)
        print('\n')

#run script
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
cbfc17ecc8356e425a4ddb2c99cb8b93386b6a16 | 8e2e28a191fa5ec5a6c070ec7e9ccad98c8b4a0b | /test/45-类的getattr属性的链式调用.py | b3eac894c94b012291f1fb95de6113032241a472 | [
"Apache-2.0"
] | permissive | kellanfan/python | 4cd61cbc062e2eee3a900fa7447ca5f0b8f1a999 | 912dc05a3bd0ded9544166a68da23ca0a97b84da | refs/heads/master | 2023-04-06T03:04:38.851928 | 2023-04-01T02:45:56 | 2023-04-01T02:45:56 | 65,542,280 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | #/usr/bin/env python
# pylint: disable=no-member
# -*- encoding: utf-8 -*-
'''
@File : ex_chain.py
@Time : 2019/06/04 14:54:20
@Author : Kellan Fan
@Version : 1.0
@Contact : kellanfan1989@gmail.com
@Desc    : Many web sites expose REST APIs such as http://api.server/user/timeline/list.
Writing one SDK method for every URL-addressed API endpoint would be exhausting,
and the SDK would have to change whenever the API changes. With a fully dynamic
__getattr__ we can instead build such URLs through chained attribute access.
'''
# here put the import lib
class Chain(object):
    """Accumulate attribute accesses into a '/'-separated URL path."""

    def __init__(self, path=''):
        self._path = path

    def __getattr__(self, path):
        # Every attribute lookup produces a new Chain with the segment
        # appended, so arbitrary REST-style paths can be spelled as code.
        return Chain('{}/{}'.format(self._path, path))

    def __str__(self):
        return self._path
if __name__ == "__main__":
    # Demo: each attribute access appends one path segment.
    print(Chain().status.user.timeline.list)
"icyfk1989@163.com"
] | icyfk1989@163.com |
26bf48c842613a75440758d9f1938cf84989e3ca | 868a66fc60f314dfdb8f434f88d2e8d7c2a6552c | /src/marketing/migrations/0001_initial.py | d626ccf4668a311c8695e8fee618007be70012f6 | [] | no_license | RommelTJ/ecommerce | cdd7a1323ff505b70363f6abc2ce9dff9f52b0a5 | 694dab843b0ca2d4c7cd3b671c3fdb69f063d14b | refs/heads/master | 2023-08-14T22:54:23.368159 | 2023-05-31T05:07:30 | 2023-05-31T05:07:30 | 118,721,454 | 0 | 0 | null | 2023-07-25T20:47:48 | 2018-01-24T06:17:34 | Python | UTF-8 | Python | false | false | 974 | py | # Generated by Django 2.0.1 on 2018-03-05 00:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.1 (see module header).  Creates the
    # MarketingPreference table with a one-to-one link to the user model;
    # avoid hand-editing beyond what the schema requires.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MarketingPreference',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subscribed', models.BooleanField(default=True)),
                ('mailchimp_msg', models.TextField(blank=True, null=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('update', models.DateTimeField(auto_now=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"rommeltj@gmail.com"
] | rommeltj@gmail.com |
34c6a52952b931e9219f69d24931833e3c778a63 | d26f0a7571f951c5c17688a4a395d1bf9039c453 | /torchbiggraph/filtered_eval.py | 0c883dfc9fbf735a3d3863b287df42e059237910 | [
"BSD-3-Clause"
] | permissive | 007vasy/PyTorch-BigGraph | c538af56ff03487b3e108e17747d8a7c4e4d0e7b | c649cf3b70d083d74d253cc95f509eb74fd64fce | refs/heads/master | 2021-01-05T13:38:52.949899 | 2020-02-16T11:40:26 | 2020-02-16T11:41:56 | 241,037,539 | 1 | 0 | NOASSERTION | 2020-02-17T06:42:25 | 2020-02-17T06:42:24 | null | UTF-8 | Python | false | false | 3,744 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
import logging
from collections import defaultdict
from typing import Dict, List, Tuple
from torchbiggraph.config import ConfigSchema
from torchbiggraph.edgelist import EdgeList
from torchbiggraph.eval import RankingEvaluator
from torchbiggraph.graph_storages import EDGE_STORAGES
from torchbiggraph.model import Scores
from torchbiggraph.stats import Stats
from torchbiggraph.types import Partition
logger = logging.getLogger("torchbiggraph")
class FilteredRankingEvaluator(RankingEvaluator):
    """
    This Evaluator is meant for datasets such as FB15K, FB15K-237, WN18, WN18RR
    used in knowledge base completion. We only support one non featurized,
    non-partitioned entity type and evaluation with all negatives to be
    comparable to standard benchmarks.
    """

    def __init__(self, config: ConfigSchema, filter_paths: List[str]):
        """Validate the config and pre-build, for every (entity, relation)
        pair in *filter_paths*, the list of entities it links to.  These
        known positives are masked out of the negatives during eval.

        :raises RuntimeError: if the config does not use exactly one dynamic
            relation and one unfeaturized, unpartitioned entity type with
            all-negatives evaluation.
        """
        super().__init__()
        if len(config.relations) != 1 or len(config.entities) != 1:
            raise RuntimeError("Filtered ranking evaluation should only be used "
                               "with dynamic relations and one entity type.")
        if not config.relations[0].all_negs:
            raise RuntimeError("Filtered Eval can only be done with all negatives.")
        entity, = config.entities.values()
        if entity.featurized:
            raise RuntimeError("Entity cannot be featurized for filtered eval.")
        if entity.num_partitions > 1:
            raise RuntimeError("Entity cannot be partitioned for filtered eval.")
        # (lhs entity, relation) -> all rhs entities seen with it, plus the
        # symmetric map for the other direction.
        self.lhs_map: Dict[Tuple[int, int], List[int]] = defaultdict(list)
        self.rhs_map: Dict[Tuple[int, int], List[int]] = defaultdict(list)
        for path in filter_paths:
            logger.info(f"Building links map from path {path}")
            e_storage = EDGE_STORAGES.make_instance(path)
            # Assume unpartitioned.
            edges = e_storage.load_edges(Partition(0), Partition(0))
            for idx in range(len(edges)):
                # Assume non-featurized.
                cur_lhs = int(edges.lhs.to_tensor()[idx])
                # Assume dynamic relations.
                cur_rel = int(edges.rel[idx])
                # Assume non-featurized.
                cur_rhs = int(edges.rhs.to_tensor()[idx])
                self.lhs_map[cur_lhs, cur_rel].append(cur_rhs)
                self.rhs_map[cur_rhs, cur_rel].append(cur_lhs)
            logger.info(f"Done building links map from path {path}")

    def eval(
        self,
        scores: Scores,
        batch_edges: EdgeList,
    ) -> Stats:
        """Suppress the scores of known positive edges, then delegate the
        ranking computation to the parent class."""
        for idx in range(len(batch_edges)):
            # Assume non-featurized.
            cur_lhs = int(batch_edges.lhs.to_tensor()[idx])
            # Assume dynamic relations.
            cur_rel = int(batch_edges.rel[idx])
            # Assume non-featurized.
            cur_rhs = int(batch_edges.rhs.to_tensor()[idx])
            rhs_edges_filtered = self.lhs_map[cur_lhs, cur_rel]
            lhs_edges_filtered = self.rhs_map[cur_rhs, cur_rel]
            assert cur_lhs in lhs_edges_filtered
            assert cur_rhs in rhs_edges_filtered
            # The rank is computed as the number of non-negative margins (as
            # that means a negative with at least as good a score as a positive)
            # so to avoid counting positives we give them a negative margin.
            scores.lhs_neg[idx][lhs_edges_filtered] = -1e9
            scores.rhs_neg[idx][rhs_edges_filtered] = -1e9
        return super().eval(scores, batch_edges)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f3772896550c91f8ddfe5217abbaf0e8bf640fc2 | 66a2276f011feb5386db5e480bc0f9c2da6f4f3f | /3ProjectEuler/i101_125/i112bouncy_numbers.py | cd28bd1e6ce44784988e2521946541bbf3762d51 | [] | no_license | greatabel/puzzle_I_cracked | 09edee29464974f9b5358910c5097aa1b4d9aec2 | 7dc252923f17fd89de201dc2ac7540d54d69420d | refs/heads/master | 2021-06-02T11:05:19.962065 | 2019-10-19T14:21:56 | 2019-10-19T14:21:56 | 31,331,091 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | '''
Working from left-to-right if no digit is exceeded by the digit to its left it is called
an increasing number; for example, 134468.
Similarly if no digit is exceeded by the digit to its right it is called a decreasing number;
for example, 66420.
We shall call a positive integer that is neither increasing nor decreasing a "bouncy" number;
for example, 155349.
Clearly there cannot be any bouncy numbers below one-hundred, but just over half of the numbers
below one-thousand (525) are bouncy. In fact, the least number for which the proportion of bouncy
numbers first reaches 50% is 538.
Surprisingly, bouncy numbers become more and more common and by the time we reach 21780 the proportion
of bouncy numbers is equal to 90%.
Find the least number for which the proportion of bouncy numbers is exactly 99%.
'''
import time
from termcolor import colored
def is_increase(n):
    """Return True when the decimal digits of *n*, read left to right,
    never decrease (single-digit numbers count as increasing)."""
    digits = str(n)
    return all(int(a) <= int(b) for a, b in zip(digits, digits[1:]))
def is_decrease(n):
    """Return True when the decimal digits of *n*, read left to right,
    never increase (single-digit numbers count as decreasing)."""
    digits = str(n)
    return all(int(a) >= int(b) for a, b in zip(digits, digits[1:]))
def is_bouncy(n):
    """A number is "bouncy" when its digits are neither monotonically
    non-decreasing nor monotonically non-increasing (e.g. 155349)."""
    return not (is_increase(n) or is_decrease(n))
def main_process():
    """Count upward and report the least i where 99% of 1..i are bouncy."""
    i = 0
    t = 0
    bouncy_count = 0
    # t is the running proportion bouncy_count / i.  This relies on Python 3
    # true division; under Python 2 the integer division would keep t at 0.
    while t < 0.99:
        i += 1
        if is_bouncy(i):
            bouncy_count += 1
        t = bouncy_count / i
        # progress indicator every 10,000 numbers
        if i % 10 ** 4 == 0:
            print(i)
    # for i in range(190, 220):
    #     print(i, 'is_increase=', is_increase(i), 'is_decrease=', is_decrease(i))
    print(colored('mycount=', 'red'), i)
if __name__ == "__main__":
    # Bugfix: time.clock() was deprecated in 3.3 and removed in Python 3.8.
    # perf_counter() is the recommended high-resolution timer for elapsed time.
    tic = time.perf_counter()
    main_process()
    toc = time.perf_counter()
    print("time=", toc - tic)
"myreceiver2for2github@gmail.com"
] | myreceiver2for2github@gmail.com |
1e01b6ba10197c7552618bd21d4987fae7536559 | 1a7b21525a2bc95511c2c382f301250dec3a2c39 | /test/test_validation.py | d2a5d61c8202f27b843d8dabd336f8325021aec0 | [
"BSD-2-Clause"
] | permissive | ajbansal/sphinxcontrib-confluencebuilder | 4ad742dc1ca73402076f51ff991e8196f8a62cab | 330e31afc4d80b78595113a538b057ecd3323867 | refs/heads/master | 2020-03-28T02:26:55.020788 | 2018-08-08T08:26:08 | 2018-08-08T08:26:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,850 | py | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2018 by the contributors (see AUTHORS file).
:license: BSD-2-Clause, see LICENSE for details.
"""
from sphinxcontrib.confluencebuilder.builder import ConfluenceBuilder
from sphinxcontrib_confluencebuilder_util import ConfluenceTestUtil as _
from subprocess import check_output
import io
import os
import sys
import unittest
# Default parameters for the validation Confluence instance; some of these
# can be overridden by a local validation_test_overrides module.
DEFAULT_TEST_BASE = 'sphinxcontrib-confluencebuilder Home'
DEFAULT_TEST_DESC = 'test state'
DEFAULT_TEST_KEY = 'test-holder'
DEFAULT_TEST_SPACE = 'confluencebuilder'
DEFAULT_TEST_URL = 'https://jdknight.atlassian.net/wiki/'
DEFAULT_TEST_USER = 'sphinxcontrib-confluencebuilder'
class TestConfluenceValidation(unittest.TestCase):
    """
    Validation suite: builds each dataset under ``validation-sets`` with the
    Confluence builder and publishes it to a live Confluence instance.
    Each ``test_*`` method exercises one dataset/configuration variant.
    """
    @classmethod
    def setUpClass(cls):
        _.enableVerbose()
        # build configuration
        cls.config = _.prepareConfiguration()
        cls.config['confluence_disable_notifications'] = True
        cls.config['confluence_disable_xmlrpc'] = True
        cls.config['confluence_page_hierarchy'] = True
        cls.config['confluence_parent_page'] = DEFAULT_TEST_BASE
        cls.config['confluence_publish'] = True
        cls.config['confluence_space_name'] = DEFAULT_TEST_SPACE
        cls.config['confluence_server_url'] = DEFAULT_TEST_URL
        cls.config['confluence_server_user'] = DEFAULT_TEST_USER
        cls.config['confluence_timeout'] = 1
        cls.test_desc = DEFAULT_TEST_DESC
        cls.test_key = DEFAULT_TEST_KEY
        # overrides from user (optional local validation_test_overrides module;
        # each import is attempted independently so any subset may be provided)
        try:
            from validation_test_overrides import config_overrides
            cls.config.update(config_overrides)
        except ImportError:
            pass
        try:
            from validation_test_overrides import config_test_desc
            cls.test_desc = config_test_desc
        except ImportError:
            pass
        try:
            from validation_test_overrides import config_test_key
            cls.test_key = config_test_key
        except ImportError:
            pass
        # finalize configuration
        cls.config['confluence_publish_prefix'] = ''
        cls.config['confluence_purge'] = False
        # expose the test key/description to every document via substitutions
        cls.config['rst_epilog'] = """
.. |test_key| replace:: {}
.. |test_desc| replace:: {}
""".format(cls.test_key, cls.test_desc)
        # find validate-sets base folder
        test_dir = os.path.dirname(os.path.realpath(__file__))
        cls.datasets = os.path.join(test_dir, 'validation-sets')
        # setup base structure
        dataset = os.path.join(cls.datasets, 'base')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-base')
        # build/publish test base page
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, cls.config)
        app.build(force_all=True)
        # finalize configuration for tests
        cls.config['confluence_master_homepage'] = False
        cls.config['confluence_purge'] = True
        cls.config['confluence_purge_from_master'] = True
        if cls.test_key != DEFAULT_TEST_KEY:
            cls.config['confluence_publish_prefix'] = '{}-'.format(cls.test_key)
            cls.config['confluence_parent_page'] = cls.test_key
    def test_autodocs(self):
        """Build the autodoc dataset with sphinx.ext.autodoc enabled."""
        config = dict(self.config)
        config['extensions'].append('sphinx.ext.autodoc')
        dataset = os.path.join(self.datasets, 'autodocs')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-autodocs')
        # the dataset's sources must be importable for autodoc to find them
        sys.path.insert(0, os.path.join(dataset, 'src'))
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
        sys.path.pop(0)
    def test_common(self):
        """Build and publish the common (default configuration) dataset."""
        config = dict(self.config)
        dataset = os.path.join(self.datasets, 'common')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-common')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
    def test_common_macro_restricted(self):
        """Re-build the common dataset with several Confluence macros disabled."""
        config = dict(self.config)
        dataset = os.path.join(self.datasets, 'common')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-common-nm')
        config['confluence_adv_restricted_macros'] = [
            'anchor',
            'children',
            'code',
            'info',
        ]
        config['confluence_header_file'] = os.path.join(dataset, 'no-macro.tpl')
        config['confluence_publish_prefix'] += 'nomacro-'
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
    def test_header_footer(self):
        """Build a dataset that applies custom page header/footer templates."""
        config = dict(self.config)
        dataset = os.path.join(self.datasets, 'header-footer')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-hf')
        config['confluence_header_file'] = os.path.join(dataset, 'header.tpl')
        config['confluence_footer_file'] = os.path.join(dataset, 'footer.tpl')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
    def test_hierarchy(self):
        """Build a dataset exercising page hierarchy with a depth limit."""
        config = dict(self.config)
        config['confluence_max_doc_depth'] = 2
        config['confluence_page_hierarchy'] = True
        dataset = os.path.join(self.datasets, 'hierarchy')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-hierarchy')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
    def test_xmlrpc(self):
        """Publish via the XML-RPC API instead of the REST API."""
        config = dict(self.config)
        config['confluence_disable_rest'] = True
        config['confluence_disable_xmlrpc'] = False
        dataset = os.path.join(self.datasets, 'xmlrpc')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-xmlrpc')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
if __name__ == '__main__':
    # Propagate the unittest result as the process exit code.
    sys.exit(unittest.main(verbosity=0))
| [
"james.d.knight@live.com"
] | james.d.knight@live.com |
32c7fb2166b64dd14f3224dddb5854d96e353e2d | d32c039a725c9c1e22c4432d777144584b10c040 | /testcase/Shop_test16_shop_follow.py | 46af76e5e59cb07fd757029bf397274daa6a9296 | [] | no_license | heyu1229/Vaffle_interface | 311a2e6f0be4120aaeaccdeb13203529c6f240ca | 0c78ec8709c6af7d31b28f8e2efc8efe4cc3797c | refs/heads/master | 2022-11-24T04:30:48.698641 | 2020-07-22T05:22:47 | 2020-07-22T05:22:47 | 281,582,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | # -*- coding:UTF-8 -*-
import unittest
import requests
import sys,time
import json,xlrd
# sys.path.append("/usr/lib/python3/heaven_interface_vaffle2.0_auto2/public")
import global_list
sys.path.append(global_list.path+"/public_1")
from get_url import Url
from get_version import Version
from get_token import Token
from read_data import Read_ExcelData
from write_data import Write_ExcelData
from func_requests import FuncRequests
#---------------关注、取消关注店铺----------------------
class Shop(unittest.TestCase):
    """Follow / unfollow shop interface tests (rows 19/20 of sheet 12)."""
    # Shared request parameters for both cases.
    SHEET_INDEX = 12
    MEMBER_ID = '744'
    def setUp(self):
        self.r = FuncRequests()
    def _run_case(self, row, banner):
        """Announce the case, fire the request, and assert code == 10000."""
        print(banner)
        result = self.r.interface_requests(self.MEMBER_ID, self.SHEET_INDEX, row)
        self.assertEqual(10000, result['code'])
        print("code返回值:10000")
    def testcase_001(self):
        """Follow a shop."""
        self._run_case(19, "testcase_001关注店铺:")
    def testcase_002(self):
        """Unfollow a shop."""
        self._run_case(20, "testcase_002取消关注店铺:")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
"1004856404@qq.com"
] | 1004856404@qq.com |
e9009e7baa2f49697d7ccc13779a6d676cf5539c | 1f5ecc9bf6dbc82719a94145916b36a76a4e4207 | /mysqlapp/models.py | 79ff5e99b02b29c0eb2c475e84f0bfbe39dfeaf6 | [] | no_license | GanapathiAmbore/Django-with-Multiple-databases | ad32c56af20f3cdaf546f78636aa65257498da33 | f13594e831f136b331554803fa25287642a24c35 | refs/heads/master | 2020-05-30T21:36:25.063995 | 2019-06-03T11:25:49 | 2019-06-03T11:25:49 | 189,973,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | from django.db import models
from django.core.validators import RegexValidator
class Teacher(models.Model):
    """A teacher record with contact details and a photo."""
    name=models.CharField(max_length=50)
    # NOTE(review): the regex accepts an optional '+' and 9-15 digits, but the
    # message claims "up to 10 digits" — confirm which limit is intended.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
                                 message="Phone number must be entered in the format: '+91XXXXXXX'. Up to 10 digits allowed.")
    phone_number = models.CharField(validators=[phone_regex], max_length=17, blank=True) # validators should be a list
    email=models.EmailField()
    image=models.ImageField()
    # Stamped once when the row is first created (auto_now_add).
    date=models.DateTimeField(auto_now_add=True)
    address=models.TextField()
    def __str__(self):
        return self.name
class Student(models.Model):
    """A student record."""
    name=models.CharField(max_length=25)
    age=models.IntegerField()
    # NOTE(review): 'addres' looks like a typo for 'address'; renaming would
    # require a schema migration, so it is only flagged here.
    addres=models.TextField()
    def __str__(self):
        return self.name
"ganapathiambore@gmail.com"
] | ganapathiambore@gmail.com |
49fc4bd9ec51d4875264fe2d0a104fd1b0a32a30 | 20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7 | /old/s3c/gen.py | c350d19d23465b2c9eeb5b5c3d8bc2e898069c2a | [] | no_license | sarahboufelja54/galatea | f5664f0b3117629b2c5bbe078a1bd52bb5e359e6 | 002a9f2905868be25b71770190fb2d5eda11c861 | refs/heads/master | 2020-12-04T13:45:07.697189 | 2018-12-12T16:27:09 | 2018-12-12T16:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | #script = open('config/6x6/extract/H_interm.sh','w')
# Generate one FeatureExtractor YAML config per 5000-example CIFAR-10 slice,
# labelled A..J.  (Python 2 script: note the use of xrange.)
for i in xrange(10):
    start = i * 5000
    end = (i+1) * 5000
    # Slice label: 'A' for the first slice, 'B' for the second, ...
    char = chr(ord('A')+i)
    fname = 'config/6x6/extract/H_interm_exp_h_3_train_cpu_'+char+'.yaml'
    #script.write('THEANO_FLAGS="device=gpu0" python extract_features.py '+fname+'\n')
    f = open(fname,'w')
    # Only the "restrict" range is substituted per file — presumably limiting
    # each extraction job to examples [start, end); confirm in the consumer.
    f.write("""!obj:galatea.s3c.extract_features.FeatureExtractor {
        "batch_size" : 1,
        "model_path" : "${GALATEA_PATH}/s3c/config/6x6/H_interm_cpu.pkl", "pooling_region_counts": [3],
        "save_paths" : [ "${FEATURE_EXTRACTOR_YAML_PATH}.npy" ],
        "feature_type" : "exp_h",
        "dataset_name" : "cifar10",
        "restrict" : [ %d, %d ],
        "which_set" : "train"
}""" % (start, end))
    f.close()
#script.close()
| [
"goodfellow.ian@gmail.com"
] | goodfellow.ian@gmail.com |
f3cba79e478e55147fa74e9fbd0f25f9ede5f85e | 8af71789222675dddd541bafba681143162f4206 | /apps/descuentos/functions.py | 53dd6efeaf6dbe9c64bcf9da39bf5b304bf74a74 | [] | no_license | RubenAlvarenga/nhakanina | b82d23d80e06aaf49693c8fb65a70ee73e130994 | 3e39a522029c9a6cbb455b2e736ce335ebc4bf1d | refs/heads/master | 2021-01-10T15:32:01.550423 | 2016-03-07T17:34:09 | 2016-03-07T17:34:09 | 43,449,047 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from apps.entidades.models import Persona
def es_socio_afemec(request):
    """Return True if the persona referenced in the POST data is an AFEMEC member.

    Looks up the Persona by the ``id_persona`` POST field, falling back to
    the ``persona`` field when the first lookup fails for any reason.
    """
    # NOTE: the bare except is deliberate — it covers both a missing
    # 'id_persona' key and Persona.DoesNotExist from the first query.
    try: persona = Persona.objects.get(pk=int(request.POST['id_persona']))
    except: persona = Persona.objects.get(pk=int(request.POST['persona']))
    # bool() replaces the verbose "if x: return True / else: return False".
    return bool(persona.socio_afemec)
def es_docente_ise(request):
    """Return True if the persona referenced in the POST data is an ISE teacher.

    Looks up the Persona by the ``id_persona`` POST field, falling back to
    the ``persona`` field when the first lookup fails for any reason.
    """
    # NOTE: the bare except is deliberate — it covers both a missing
    # 'id_persona' key and Persona.DoesNotExist from the first query.
    try: persona = Persona.objects.get(pk=int(request.POST['id_persona']))
    except: persona = Persona.objects.get(pk=int(request.POST['persona']))
    # bool() replaces the verbose "if x: return True / else: return False".
    return bool(persona.docente_ise)
"rubenalvarengan@gmail.com"
] | rubenalvarengan@gmail.com |
28e423a7b1759f7093939087b0f412e5562322e7 | f521c77da715d4a1deba79af95e8f95465a9679f | /plot_ZonalTAS_NCEP.py | 22386514675e786724ad9a4873dd003a4deafb99 | [
"MIT"
] | permissive | hairui-hao/ArcticSeaIce | 37e4891d77ad6070e7aa66bfeaf1530be7794384 | 32f630c931e145e1d492aa32a5f32ca3c998876f | refs/heads/master | 2022-03-28T19:25:12.083837 | 2017-10-24T05:25:36 | 2017-10-24T05:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | """
Plots Arctic mean surface temperature (1948-2016) for Jan-month
Website : http://www.esrl.noaa.gov/psd/cgi-bin/data/timeseries/timeseries1.pl
Author : Zachary M. Labe
Date : 15 May 2016
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as c
### Directory and time
directoryfigure = '...' # enter directory
directorydata = '...' # enter directory
### Insert month
month = 'June'
### Retrieve Data
year,temp = np.genfromtxt(directorydata + 'Arctic_Tsurf_Jan%s.txt' % month,
unpack=True)
currentyear = int(year[-1])
### Define parameters (dark)
plt.rc('savefig', facecolor='black')
plt.rc('axes', edgecolor='white')
plt.rc('xtick', color='white')
plt.rc('ytick', color='white')
plt.rc('axes', labelcolor='white')
plt.rc('axes', facecolor='black')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
### Plot for zonal mean temperature
fig = plt.figure()
ax = plt.subplot(111)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 10))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
### Set colormap
cmap = plt.get_cmap('YlGn_r')
cmap2 = plt.get_cmap('PuBuGn')
cmap3 = plt.get_cmap('YlOrBr')
cmaplist = [cmap(i) for i in xrange(cmap.N)]
cmaplist2 = [cmap2(i) for i in xrange(100,cmap2.N)]
cmaplist3 = [cmap3(i) for i in xrange(cmap3.N)]
cms = c.ListedColormap(cmaplist2 + cmaplist + cmaplist3)
cm = plt.get_cmap(cms)
no_points = len(year)
ax.set_color_cycle([cm(1.*i/(no_points-1))
for i in range(no_points-1)])
for i in range(no_points-1):
bar = ax.plot(year[i:i+2],temp[i:i+2],linewidth=3.5,zorder=1)
plt.scatter(year[-1],temp[-1],
s=40,color='r',zorder=2)
ax.tick_params('both',length=7.5,width=2,which='major')
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
plt.subplots_adjust(bottom=0.15)
### y-ticks
plt.yticks(np.arange(int(min(temp))-1,int(max(temp))+1,1),
map(str,np.arange(int(min(temp))-1,int(max(temp))+1,1)),
fontsize=11)
plt.ylabel(r'\textbf{Surface Temperature [$^{\circ}$C]}',fontsize=11)
plt.ylim([int(min(temp))-1,int(max(temp))])
### x-ticks
plt.xticks(np.arange(1950,2021,10),map(str,np.arange(1950,2021,10)),
fontsize=11)
plt.xlabel(r'\textbf{NCEP/NCAR Reanalysis : [Jan-%s] : Arctic, 66N+}' % month,
fontsize=11)
plt.xlim([1948,2020])
### Insert text
plt.text(currentyear-8,int(max(temp)),r'\textbf{You are here!}',
fontsize=11,rotation='horizontal',ha='left',color='r')
plt.text(1999.8,int(min(temp))-0.5,r'Zachary Labe (@ZLabe)',
fontsize=8,rotation='horizontal',ha='left',color='w',
bbox=dict(boxstyle='square,pad=0.3',fc='k',
edgecolor='w',linewidth=0.2))
### Save figure
plt.savefig(directoryfigure + 'ZonalTAS_NCEP_sample.png',dpi=900)
| [
"zml5@cornell.edu"
] | zml5@cornell.edu |
265d659a26b92f71678e40a3fcd28a041cc9a42a | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev1889-1952/base-trunk-1889/exe/engine/attachmentidevice.py | 38aeb4d13f3b951965ace76cba69d9ed7c493573 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | """
An Attachment Idevice allows a file to be attached to a package.
"""
from exe.engine.idevice import Idevice
from exe.engine.path import Path
from exe.engine.translate import lateTranslate
from exe.engine.resource import Resource
import logging
log = logging.getLogger(__name__)
class AttachmentIdevice(Idevice):
    """
    An Attachment Idevice allows a file to be attached to a package.
    """
    persistenceVersion = 3
    def __init__(self):
        """Set up the iDevice metadata and the user-visible instructions."""
        Idevice.__init__(self,
                         x_(u"Attachment"),
                         x_(u"University of Auckland"),
                         u"",
                         x_(u"The attachment iDevice is used to attach "
                            "existing files to your .elp content. For example, "
                            "you might have a PDF file or a PPT presentation "
                            "file that you wish the learners to have access "
                            "to, these can be attached and labeled to indicate "
                            "what the attachment is and how large the file is. "
                            "Learners can click on the attachment link and can "
                            "download the attachment."), u"", u"")
        self.emphasis = Idevice.NoEmphasis
        self._filenameInstruc = x_(u'Click <strong>Select a file</strong>, '
                                   'browse to the file you want '
                                   'to attach and select it.')
        self.label = u""
        self._labelInstruc = x_(u"<p>"
                                "Assign a label for the attachment. It "
                                "is useful to include the type of file. "
                                "Eg. pdf, ppt, etc."
                                "</p>"
                                "<p>"
                                "Including the size is also recommended so "
                                "that after your package is exported "
                                "to a web site, people will have an idea "
                                "how long it would take to download this "
                                "attachment."
                                "</p>"
                                "<p>"
                                "For example: "
                                "<code>Sales Forecast.doc (500kb)</code>"
                                "</p>")
        self.description = u""
        self._descriptionInstruc = x_(u"Provide a brief description of the "
                                      "file")
    filenameInstruc = lateTranslate('filenameInstruc')
    labelInstruc = lateTranslate('labelInstruc')
    descriptionInstruc = lateTranslate('descriptionInstruc')
    def setAttachment(self, attachmentPath):
        """
        Store the attachment in the package.
        Needs to be in a package to work.
        ``attachmentPath`` is the filesystem path of the file to attach.
        """
        log.debug(u"setAttachment "+unicode(attachmentPath))
        resourceFile = Path(attachmentPath)
        # BUGFIX: these were written as assert(condition, message) — i.e.
        # asserting a two-element tuple, which is always true, so the checks
        # never fired.  The bare assert form below actually checks.
        assert self.parentNode, \
               _('Attachment %s has no parentNode') % self.id
        assert self.parentNode.package, \
               _('iDevice %s has no package') % self.parentNode.id
        if resourceFile.isfile():
            # Replace any previously attached resources with the new one.
            if self.userResources:
                for resource in self.userResources:
                    resource.delete()
                self.userResources = []
            self.userResources = [ Resource(self.parentNode.package,
                                            resourceFile) ]
        else:
            log.error('File %s is not a file' % resourceFile)
    def upgradeToVersion1(self):
        """
        Upgrades v0.6 to v0.7.
        """
        self.lastIdevice = False
    def upgradeToVersion2(self):
        """
        Upgrades to v0.10
        """
        self._upgradeIdeviceToVersion1()
        self._filenameInstruc = self.__dict__.get('filenameInstruc', '')
        self._labelInstruc = self.__dict__.get('labelInstruc', '')
        self._descriptionInstruc = self.__dict__.get('descriptionInstruc', '')
    def upgradeToVersion3(self):
        """
        Upgrades to v0.12
        """
        self._upgradeIdeviceToVersion2()
        if self.filename and self.parentNode:
            self.userResources = [ Resource(self.parentNode.package,
                                            Path(self.filename)) ]
        # The pre-0.12 'filename' attribute is dropped in either case.
        del self.filename
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
57d10a493096e2eeeb88d8446c7a2a26f7492e49 | ec523d48de9c38e0c5d81ac865d43779117650aa | /apps/school/migrations/0007_auto_20210206_1152.py | f836f0186d39b5ae72c999e876c2070cf52dd30a | [] | no_license | drc-ima/moon-school | b4b82e2c71d14a6b57aa75e36100851015f3696a | 94c7675642583ed4b97a4eb716015510fe93ca84 | refs/heads/master | 2023-03-06T10:22:56.699977 | 2021-02-11T15:28:16 | 2021-02-11T15:28:16 | 334,768,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | # Generated by Django 3.1.6 on 2021-02-06 11:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('school', '0006_auto_20210127_0025'),
]
operations = [
migrations.AlterField(
model_name='holiday',
name='day',
field=models.IntegerField(blank=True, choices=[(11, '11th'), (27, '27th'), (28, '28th'), (12, '12th'), (3, '3rd'), (6, '6th'), (29, '29th'), (5, '5th'), (14, '14th'), (19, '19th'), (25, '25th'), (24, '24th'), (22, '22nd'), (13, '13th'), (10, '10th'), (18, '18th'), (21, '21st'), (9, '9th'), (15, '15th'), (23, '23rd'), (30, '30th'), (2, '2nd'), (20, '20th'), (16, '16th'), (26, '26th'), (31, '31st'), (8, '8th'), (7, '7th'), (17, '17th'), (1, '1st'), (4, '4th')], null=True),
),
migrations.AlterField(
model_name='holiday',
name='month',
field=models.CharField(blank=True, choices=[('7', 'July'), ('6', 'June'), ('10', 'October'), ('12', 'December'), ('8', 'August'), ('4', 'April'), ('3', 'March'), ('11', 'November'), ('9', 'September'), ('5', 'May'), ('1', 'January'), ('2', 'February')], max_length=255, null=True),
),
migrations.CreateModel(
name='PassGrade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score', models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('school_id', models.CharField(blank=True, max_length=200, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='pass_grades', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'pass_grade',
},
),
]
| [
"emmanuelofosu472@gmail.com"
] | emmanuelofosu472@gmail.com |
e18554ebd5d4e16b5fa0cf27ef48a8651a8f5eca | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03148/s022846837.py | b6ba33db833753e64e34435339d47116fcc3ac8b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | import sys
# Greedy: choose K sushi maximizing (sum of deliciousness) + (distinct
# topping kinds)**2.  Input: "N K", then N lines of "t d".
# NOTE(review): 'sum' shadows the builtin throughout; flagged only, since the
# builtin is never needed after shadowing.
n,k=map(int,input().split())
data=[]
for i in range(n):
    t,d=map(int,input().split())
    data.append([d,t])
# Sort all sushi by deliciousness, best first.
data.sort(reverse=True)
# flag[t] == 1 once topping t already has a chosen representative.
flag=[0]*(n+1)
# eat: chosen first-of-their-topping sushi, in descending deliciousness.
# lst: skipped duplicates — candidates to swap in later.
eat=[]
lst=[]
count=0
for u in data:
    d,t=u
    if flag[t]==0:
        eat.append(u)
        flag[t]=1
        count+=1
        if count==k:
            break
    else:
        lst.append(u)
else:
    # Fewer than K distinct toppings exist: take all of eat, then fill the
    # remaining slots and consider swapping duplicates in.  Dropping one
    # distinct topping loses count**2-(count-1)**2 = 2*count-1 bonus, so a
    # swap pays only when the duplicate beats eat's cheapest plus that margin.
    lst.sort(reverse=True)
    sum=0
    for i in range(n-count):
        if i<k-count:
            sum+=lst[i][0]
        else:
            if lst[i][0]>eat[count-1][0]+2*count-1:
                eat[count-1]=lst[i]
                count-=1
            else:
                break
    for u in eat:
        sum+=u[0]
    sum+=count**2
    print(sum)
    sys.exit()
# K distinct toppings were found: greedily trade the cheapest distinct-topping
# pick for a better duplicate while the gain exceeds the 2*count-1 bonus loss.
lst.sort(reverse=True)
for u in lst:
    d=u[0]
    if d>eat[count-1][0]+2*count-1:
        eat[count-1]=u
        count-=1
    else:
        break
sum=0
for u in eat:
    sum+=u[0]
sum+=count**2
print(sum)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
32671589ba59b738fb6c30283f9d8173ee8e4853 | 48894ae68f0234e263d325470178d67ab313c73e | /sa/apps/reportobjectsummary/tests/test.py | ed8238d78acdc906a1aa1807b5128c5cc4843525 | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 615 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## reportobjectsummary Test
##----------------------------------------------------------------------
## Copyright (C) 2007-2009 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
from noc.lib.test import ReportApplicationTestCase
class reportobjectsummaryTestCase(ReportApplicationTestCase):
    """Exercise the object-summary report for each supported report_type."""
    # Each entry is one POST payload — presumably submitted by the
    # ReportApplicationTestCase base class; confirm against noc.lib.test.
    posts=[
        {"report_type":"profile"},
        {"report_type":"domain"},
        {"report_type":"tag"},
        {"report_type":"domain-profile"},
    ]
| [
"dvolodin7@gmail.com"
] | dvolodin7@gmail.com |
b5321f30fb4be916f71c029472ed6c1fd1b65cc0 | 3844f6dce7967f56585474d96e609da2ee251ed4 | /backend/tony_test_0630_dev_22679/settings.py | 63bec43e250ae0fcdeec769bcb1b50874a5fcd6d | [] | no_license | crowdbotics-apps/tony-test-0630-dev-22679 | 66d13e68dcd8cf48e561984b4118e874af6a2824 | 885bd36fc55149688f46130c0d3606411811123e | refs/heads/master | 2023-06-06T18:17:34.140606 | 2021-06-30T19:37:29 | 2021-06-30T19:37:29 | 381,814,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,209 | py | """
Django settings for tony_test_0630_dev_22679 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the proxy's X-Forwarded-Proto header when deciding if a request is
# secure; SECURE_REDIRECT (env) forces plain-HTTP requests over to HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
# Apps that live in this repository.
LOCAL_APPS = [
    'home',
    'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    'storages',
    # start fcm_django push notifications
    'fcm_django',
    # end fcm_django push notifications
]
# Extra apps discovered at startup via modules.manifest.get_modules().
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tony_test_0630_dev_22679.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'web_build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'tony_test_0630_dev_22679.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# A DATABASE_URL environment variable (if set) overrides the SQLite default.
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# USE_S3 is truthy only when all four AWS values above are non-empty.
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)
if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
    "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
"team@crowdbotics.com"
] | team@crowdbotics.com |
66dd03de2184826c0ac554612f22e8026e2888a4 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/리스트_20200628151653.py | daf2fc9fc4c9d7445937cfe68f4f1d4f67f6b05a | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # 리스트 []
# Subway cars carrying 10, 20 and 30 passengers respectively
# subway1 = 10
# subway2 = 20
# subway3 = 30
subway = [10, 20, 30]
print(subway)
subway = ["유재석", "조세호", "박명수"]
print(subway)
# Which car is Jo Se-ho riding in?
print(subway.index("조세호"))
# Haha boards at the next stop.
subway.append("하하")
print(subway)
# Insert Jeong Hyeong-don between Yoo Jae-suk and Jo Se-ho.
subway.insert(1, "정형돈")
print(subway)
# Take people off the subway one at a time from the back.
print(subway.pop())
print(subway)
# print(subway.pop())
# print(subway)
# print(subway.pop())
# print(subway)
# Check how many people share the same name
subway.append("유재석")
print(subway)
print(subway.count("유재석"))
# Sorting is possible too.
num_list = [5, 2, 4, 3, 1]
num_list.sort()
print(num_list)
# Reversing the order is also possible
num_list.reverse()
print(num_list)
# Remove everything
num_list.clear()
print(num_list)
# Mixed data types can be used together
mix_list = ["조세호", 20, True]
print(mix_list)
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
f95a708b13556e954e95830289cd4ceab5e767c8 | 0db05f7b843e8450bafd5ae23f8f70f9a9a8c151 | /Src/StdLib/Lib/site-packages/win32com/demos/outlookAddin.py | 8e4ced151dd05e5396080c1dc369becfa471b8d6 | [
"BSD-3-Clause",
"Python-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | IronLanguages/ironpython2 | 9c7f85bd8e6bca300e16f8c92f6384cecb979a6a | d00111890ce41b9791cb5bc55aedd071240252c4 | refs/heads/master | 2023-01-21T21:17:59.439654 | 2023-01-13T01:52:15 | 2023-01-13T01:52:15 | 91,620,472 | 1,171 | 288 | Apache-2.0 | 2023-01-13T01:52:16 | 2017-05-17T21:11:51 | Python | UTF-8 | Python | false | false | 4,637 | py | # A demo plugin for Microsoft Outlook (NOT Outlook Express)
#
# This addin simply adds a new button to the main Outlook toolbar,
# and displays a message box when clicked. Thus, it demonstrates
# how to plug in to Outlook itself, and hook outlook events.
#
# Additionally, each time a new message arrives in the Inbox, a message
# is printed with the subject of the message.
#
# To register the addin, simply execute:
# outlookAddin.py
# This will install the COM server, and write the necessary
# AddIn key to Outlook
#
# To unregister completely:
# outlookAddin.py --unregister
#
# To debug, execute:
# outlookAddin.py --debug
#
# Then open Pythonwin, and select "Tools->Trace Collector Debugging Tool"
# Restart Outlook, and you should see some output generated.
#
# NOTE: If the AddIn fails with an error, Outlook will re-register
# the addin to not automatically load next time Outlook starts. To
# correct this, simply re-register the addin (see above)
from win32com import universal
from win32com.server.exception import COMException
from win32com.client import gencache, DispatchWithEvents
import winerror
import pythoncom
from win32com.client import constants
import sys
# Support for COM objects we use.
gencache.EnsureModule('{00062FFF-0000-0000-C000-000000000046}', 0, 9, 0, bForDemand=True) # Outlook 9
gencache.EnsureModule('{2DF8D04C-5BFA-101B-BDE5-00AA0044DE52}', 0, 2, 1, bForDemand=True) # Office 9
# The TLB defiining the interfaces we implement
universal.RegisterInterfaces('{AC0714F2-3D04-11D1-AE7D-00A0C90F26F4}', 0, 1, 0, ["_IDTExtensibility2"])
class ButtonEvent:
    """Event sink for the toolbar button created in OutlookAddin.OnConnection.

    NOTE: this file is Python 2 (print statements); code left unchanged.
    """
    def OnClick(self, button, cancel):
        """Show a message box on click; returns *cancel* unchanged."""
        import win32ui # Possible, but not necessary, to use a Pythonwin GUI
        win32ui.MessageBox("Hello from Python")
        return cancel
class FolderEvent:
    """Event sink for the Inbox Items collection (hooked in OnConnection)."""
    def OnItemAdd(self, item):
        """Print the subject of every item added to the folder.

        Some item types have no Subject attribute; those are printed via
        repr() instead of letting the event handler raise.
        """
        try:
            print "An item was added to the inbox with subject:", item.Subject
        except AttributeError:
            print "An item was added to the inbox, but it has no subject! - ", repr(item)
class OutlookAddin:
    """COM add-in exposing _IDTExtensibility2 so Outlook can load it.

    The _reg_* class attributes drive win32com server registration
    (CLSID, ProgID, in-proc server context, event-handler policy).
    """
    _com_interfaces_ = ['_IDTExtensibility2']
    _public_methods_ = []
    _reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER
    _reg_clsid_ = "{0F47D9F3-598B-4d24-B7E3-92AC15ED27E2}"
    _reg_progid_ = "Python.Test.OutlookAddin"
    _reg_policy_spec_ = "win32com.server.policy.EventHandlerPolicy"

    def OnConnection(self, application, connectMode, addin, custom):
        """Called by Outlook at load time: add a toolbar button and hook Inbox events."""
        print "OnConnection", application, connectMode, addin, custom
        # ActiveExplorer may be none when started without a UI (eg, WinCE synchronisation)
        activeExplorer = application.ActiveExplorer()
        if activeExplorer is not None:
            bars = activeExplorer.CommandBars
            toolbar = bars.Item("Standard")
            item = toolbar.Controls.Add(Type=constants.msoControlButton, Temporary=True)
            # Hook events for the item
            item = self.toolbarButton = DispatchWithEvents(item, ButtonEvent)
            item.Caption="Python"
            item.TooltipText = "Click for Python"
            item.Enabled = True

        # And now, for the sake of demonstration, setup a hook for all new messages
        inbox = application.Session.GetDefaultFolder(constants.olFolderInbox)
        self.inboxItems = DispatchWithEvents(inbox.Items, FolderEvent)

    def OnDisconnection(self, mode, custom):
        print "OnDisconnection"
    def OnAddInsUpdate(self, custom):
        print "OnAddInsUpdate", custom
    def OnStartupComplete(self, custom):
        print "OnStartupComplete", custom
    def OnBeginShutdown(self, custom):
        print "OnBeginShutdown", custom
def RegisterAddin(klass):
    """Write the registry keys that make Outlook load *klass* as an add-in.

    LoadBehavior=3 is the value Outlook uses for "load at startup".
    """
    import _winreg
    key = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Office\\Outlook\\Addins")
    subkey = _winreg.CreateKey(key, klass._reg_progid_)
    _winreg.SetValueEx(subkey, "CommandLineSafe", 0, _winreg.REG_DWORD, 0)
    _winreg.SetValueEx(subkey, "LoadBehavior", 0, _winreg.REG_DWORD, 3)
    _winreg.SetValueEx(subkey, "Description", 0, _winreg.REG_SZ, klass._reg_progid_)
    _winreg.SetValueEx(subkey, "FriendlyName", 0, _winreg.REG_SZ, klass._reg_progid_)
def UnregisterAddin(klass):
    """Delete the add-in registry key; silently ignore it if it is absent."""
    import _winreg
    try:
        _winreg.DeleteKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Office\\Outlook\\Addins\\" + klass._reg_progid_)
    except WindowsError:
        # Key was never created (add-in not registered) - nothing to do.
        pass
if __name__ == '__main__':
    # Register the COM server itself, then add/remove the Outlook add-in
    # key depending on the command line (see module header for usage).
    import win32com.server.register
    win32com.server.register.UseCommandLine(OutlookAddin)
    if "--unregister" in sys.argv:
        UnregisterAddin(OutlookAddin)
    else:
        RegisterAddin(OutlookAddin)
| [
"pawel.jasinski@gmail.com"
] | pawel.jasinski@gmail.com |
c8a7d31b96a73a8e2c8054f70ecfe0934ec53a43 | 9ab6875589442c7c27f1796de06c6711ffb34d1f | /torchelastic/distributed/launch.py | ecf5cb61b3d7063b2aa7890336b394a8f7ae1875 | [
"BSD-3-Clause"
] | permissive | pytorch/elastic | 4acd9569cc19dcc8a843cc37857008bd55e6ee44 | bc88e6982961d4117e53c4c8163ecf277f35c2c5 | refs/heads/master | 2023-08-29T22:53:09.679927 | 2022-06-15T12:59:47 | 2022-06-15T12:59:47 | 212,205,845 | 777 | 110 | BSD-3-Clause | 2022-05-03T20:08:46 | 2019-10-01T21:48:15 | Python | UTF-8 | Python | false | false | 503 | py | #!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["LOGLEVEL"] = "INFO"
# Since logger initialized during imoprt statement
# the log level should be set first
from torch.distributed.run import main as run_main
def main(args=None) -> None:
run_main(args)
if __name__ == "__main__":
main()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
cb0027a369d0113ba738205dedcaf8a48f3b7e4e | 82256676fd6b857b7a9fd54ab339d390c1364ab0 | /py/testdir_0xdata_only/test_parse_airline_multi_hdfs_many.py | 1e17b907d870ce16f8b97798c409591f80397d9d | [
"Apache-2.0"
] | permissive | ivanliu1989/h2o | 8d0def46c070e78718ba13761f20ef2187545543 | e00b367df0a33c400ae33bc869a236f254f625ed | refs/heads/master | 2023-04-27T20:40:16.666618 | 2014-10-23T02:12:36 | 2014-10-23T02:12:36 | 25,618,199 | 0 | 0 | Apache-2.0 | 2023-04-15T23:24:35 | 2014-10-23T03:49:40 | null | UTF-8 | Python | false | false | 3,566 | py | import unittest, sys, random, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_hosts, h2o_jobs as h2j
RANDOM_UDP_DROP = False
if RANDOM_UDP_DROP:
print "random_udp_drop!!"
# NAME_NODE = 'mr-0x6'
# VERSION = 'cdh4'
NAME_NODE = 'mr-0xd6'
VERSION = 'hdp2.1'
TRIAL_MAX = 10
print "Using", VERSION, "on", NAME_NODE
class Basic(unittest.TestCase):
    """h2o cluster test: import a folder of airline CSVs from HDFS and
    repeatedly parse individual files without polling for completion.

    NOTE: Python 2 code (print statements) depending on the h2o test
    harness; left byte-identical.
    """
    def tearDown(self):
        # Fail the test if h2o wrote anything suspicious to stdout/stderr.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        pass
        print "Will build clouds with incrementing heap sizes and import folder/parse"

    @classmethod
    def tearDownClass(cls):
        # the node state is gone when we tear down the cloud, so pass the ignore here also.
        h2o.tear_down_cloud(sandboxIgnoreErrors=True)

    def test_parse_airline_multi_hdfs_many(self):
        h2o.beta_features = True
        # default
        csvFilename = "hex_10"
        csvFilePattern = '*' # all files in the folder
        for tryHeap in [24]:
            print "\n", tryHeap,"GB heap, 1 jvm per host, import mr-0x6 hdfs, then parse"
            # Build either a local one-node cloud or a multi-host cloud,
            # both reading from the HDFS name node configured at module level.
            localhost = h2o.decide_if_localhost()
            if (localhost):
                h2o.build_cloud(java_heap_GB=tryHeap, random_udp_drop=RANDOM_UDP_DROP,
                    use_hdfs=True, hdfs_name_node=NAME_NODE, hdfs_version=VERSION)
            else:
                h2o_hosts.build_cloud_with_hosts(java_heap_GB=tryHeap, random_udp_drop=RANDOM_UDP_DROP, disable_assertions=False,
                    use_hdfs=True, hdfs_name_node=NAME_NODE, hdfs_version=VERSION)

            # don't raise exception if we find something bad in h2o stdout/stderr?
            # h2o.nodes[0].sandboxIgnoreErrors = True

            timeoutSecs = 500
            importFolderPath = "datasets/airlines_multi"
            csvPathname = importFolderPath + "/" + csvFilePattern
            parseResult = h2i.import_only(path=csvPathname, schema='hdfs',
                timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60)

            for trial in range(TRIAL_MAX):
                # each parse now just does one
                csvFilePattern = "*%s.csv" % trial
                # if we want multifile
                # csvFilePattern = "*"
                hex_key = csvFilename + "_" + str(trial) + ".hex"
                csvPathname = importFolderPath + "/" + csvFilePattern
                start = time.time()
                # print "Don't wait for completion. Just load things up!"
                print "Drat. the source file is locked if we noPoll. Would have to increment across the individual files?"
                print "Drat. We can't re-import the folder, if there's a parse using one of the source files?"

                parseResult = h2i.parse_only(pattern=csvFilePattern, hex_key=hex_key, noPoll=True, delete_on_done=0,
                    timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60)
                elapsed = time.time() - start
                print "parse result:", parseResult['destination_key']
                print "Parse #", trial, "completed in", "%6.2f" % elapsed, "seconds.", \
                    "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)

                h2o_cmd.runStoreView()
                # we don't delete the hex key. it will start spilling? slow

            h2j.pollWaitJobs(timeoutSecs=300, pollTimeoutSecs=30)
            h2o.tear_down_cloud()
            # sticky ports? wait a bit.
            time.sleep(5)
if __name__ == '__main__':
h2o.unit_main()
| [
"kevin@0xdata.com"
] | kevin@0xdata.com |
e4b92222521611368b819e3349ccd7734bf50406 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/sword2offer/000剑指0_Offer_04._二维数组中的查找.py | feea5d7d328dfdb05f6f63ef45fde22a96c9592e | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,662 | py | '''
from up-right to bottom-left
T: O(M + N)
S: O(1)
执行用时:48 ms, 在所有 Python3 提交中击败了17.63%的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了45.87%的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Search a row-and-column sorted matrix starting at its top-right corner.

        From that corner, stepping down increases the current value and
        stepping left decreases it, so each comparison eliminates a full
        row or column.  O(M + N) time, O(1) space.
        """
        if not matrix or not matrix[0]:
            return False
        row, col = 0, len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            current = matrix[row][col]
            if current == target:
                return True
            if current < target:
                row += 1      # everything left of current in this row is smaller
            else:
                col -= 1      # everything below current in this column is larger
        return False
'''
from bottom-left to up-right
T: O(M + N)
S: O(1)
执行用时:44 ms, 在所有 Python3 提交中击败了34.66% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了43.89% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Search a row-and-column sorted matrix from its bottom-left corner.

        Stepping up decreases the current value, stepping right increases
        it, so each comparison discards a whole row or column.
        O(M + N) time, O(1) space.
        """
        rows = len(matrix)
        cols = len(matrix[0]) if matrix else 0
        r, c = rows - 1, 0
        while r >= 0 and c < cols:
            value = matrix[r][c]
            if value == target:
                return True
            elif value > target:
                r -= 1       # whole row r is too large
            else:
                c += 1       # whole column c is too small
        return False
'''
brute force
T: O(MN)
S: O(1)
执行用时:44 ms, 在所有 Python3 提交中击败了34.66% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了45.27% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Exhaustive scan: report whether target occurs anywhere in matrix.

        Ignores the sorted structure entirely; O(M*N) time, O(1) space.
        """
        return any(target in row for row in matrix)
'''
binary search every row
T: O(NlogM)
S: O(1)
执行用时:40 ms, 在所有 Python3 提交中击败了58.27% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了26.42% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Binary-search every row for target.

        Each row is sorted ascending, so bisect_left locates the insertion
        point in O(log M); overall O(N log M) time, O(1) extra space.
        """
        for row in matrix:
            pos = bisect_left(row, target)
            if pos < len(row) and row[pos] == target:
                return True
        return False
'''
binary search every column
T: O(MlogN)
S: O(1)
执行用时:52 ms, 在所有 Python3 提交中击败了8.55% 的用户
内存消耗:19 MB, 在所有 Python3 提交中击败了63.71% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Binary-search every column for target.

        zip(*matrix) yields the columns as tuples; each column is sorted
        ascending, so bisect_left applies.  O(M log N) time.
        """
        for column in zip(*matrix):
            pos = bisect_left(column, target)
            if pos < len(column) and column[pos] == target:
                return True
        return False
'''
binary search by the longer one (row/column)
T: O( min(N,M)*log(max(N,M)) )
S: O(1)
执行用时:40 ms, 在所有 Python3 提交中击败了58.27% 的用户
内存消耗:19.2 MB, 在所有 Python3 提交中击败了12.85% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Binary-search along whichever dimension is longer.

        Iterating the shorter dimension and bisecting the longer one gives
        O(min(N,M) * log(max(N,M))) time.  When there are fewer rows than
        columns the rows themselves are the search lanes; otherwise the
        columns (obtained via zip transpose) are.
        """
        rows = len(matrix)
        cols = len(matrix[0]) if matrix else 0
        lanes = matrix if rows < cols else zip(*matrix)
        for lane in lanes:
            pos = bisect_left(lane, target)
            if pos < len(lane) and lane[pos] == target:
                return True
        return False
'''
binary search by the longer one (row/column)
T: O( min(N,M)*log(max(N,M)) )
S: O(1)
执行用时:40 ms, 在所有 Python3 提交中击败了58.27% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了35.85% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Transpose when there are more rows than columns, then bisect each lane.

        After the optional transpose every lane has max(N, M) sorted
        elements, so the binary search always runs over the longer
        dimension: O(min(N,M) * log(max(N,M))) time.
        """
        rows = len(matrix)
        cols = len(matrix[0]) if matrix else 0
        lanes = zip(*matrix) if rows > cols else matrix
        for lane in lanes:
            pos = bisect_left(lane, target)
            if pos < len(lane) and lane[pos] == target:
                return True
        return False
'''
brute force
执行用时:48 ms, 在所有 Python3 提交中击败了17.63% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了31.53% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Row-by-row membership scan; O(M*N) worst case, O(1) space."""
        for row in matrix:
            if target in row:
                return True
        return False
'''
divide and conquer, cut to four parts + binary search
执行用时:48 ms, 在所有 Python3 提交中击败了17.63% 的用户
内存消耗:20.2 MB, 在所有 Python3 提交中击败了5.10% 的用户
通过测试用例:129 / 129
'''
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Divide-and-conquer search of a row-and-column sorted matrix.

        The sub-rectangle [i1..i2] x [j1..j2] is split around its centre
        element; depending on how the centre compares to target, the
        quadrant that cannot contain it is discarded and the remaining
        three quadrants are searched recursively.  Degenerate 1-row or
        1-column rectangles fall back to a plain binary search.
        """
        n, m = len(matrix), len(matrix[0]) if matrix else 0
        def isExist(i1, j1, i2, j2):
            # Empty rectangle: nothing to search.
            if i1 > i2 or j1 > j2:
                return False
            if i1 == i2:
                # Single row: binary search matrix[i1][j1: j2 + 1]
                lo, hi = j1, j2
                while lo <= hi:
                    mid = (lo + hi) // 2
                    if matrix[i1][mid] == target:
                        return True
                    elif matrix[i1][mid] > target:
                        hi = mid - 1
                    else:
                        lo = mid + 1
                return False
            elif j1 == j2:
                # Single column: binary search matrix[i1: i2 + 1][j1]
                lo, hi = i1, i2
                while lo <= hi:
                    mid = (lo + hi) // 2
                    if matrix[mid][j1] == target:
                        return True
                    elif matrix[mid][j1] > target:
                        hi = mid - 1
                    else:
                        lo = mid + 1
                return False
            # cut to four parts around the centre cell (midi, midj)
            midi, midj = (i1 + i2) // 2, (j1 + j2) // 2
            if matrix[midi][midj] == target:
                return True
            elif matrix[midi][midj] > target:
                # Centre too large: the bottom-right quadrant (all >= centre)
                # cannot contain target; recurse into the other three.
                return isExist(i1, j1, midi, midj) or \
                    isExist(i1, midj + 1, midi, j2) or \
                    isExist(midi + 1, j1, i2, midj)
            else:
                # Centre too small: the top-left quadrant (all <= centre)
                # cannot contain target; recurse into the other three.
                return isExist(midi + 1, midj + 1, i2, j2) or \
                    isExist(i1, midj + 1, midi, j2) or \
                    isExist(midi + 1, j1, i2, midj)
        return isExist(0, 0, n - 1, m - 1)
| [
"laoxing201314@outlook.com"
] | laoxing201314@outlook.com |
caa343d8d4edab26a54015730302a0d32560c803 | a372a816373d63ad626a9947077e137eac2e6daf | /pyquiz/leetcode/WordLadder.py | b6f0657c1efc7f109343211ef78947d12c497129 | [] | no_license | DmitryPukhov/pyquiz | 07d33854a0e04cf750b925d2c399dac8a1b35363 | 8ae84f276cd07ffdb9b742569a5e32809ecc6b29 | refs/heads/master | 2021-06-13T14:28:51.255385 | 2021-06-13T08:19:36 | 2021-06-13T08:19:36 | 199,842,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | import collections
from collections import deque, defaultdict
from typing import List, Set
class WordLadder:
    """
    127. Word Ladder
    Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest
    transformation sequence from beginWord to endWord, such that:

    Only one letter can be changed at a time.
    Each transformed word must exist in the word list.

    Note:

    Return 0 if there is no such transformation sequence.
    All words have the same length.
    All words contain only lowercase alphabetic characters.
    You may assume no duplicates in the word list.
    You may assume beginWord and endWord are non-empty and are not the same.

    Example 1:

    Input:
    beginWord = "hit",
    endWord = "cog",
    wordList = ["hot","dot","dog","lot","log","cog"]

    Output: 5

    Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
    return its length 5.

    Example 2:

    Input:
    beginWord = "hit"
    endWord = "cog"
    wordList = ["hot","dot","dog","lot","log"]

    Output: 0

    Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
    """

    def ladderLength(self, beginWord, endWord, wordList):
        """BFS over one-letter transformations.

        Words are bucketed by wildcard pattern (e.g. "h*t") so neighbours
        are found in O(word length) per step.  A word is marked visited
        when it is *enqueued*, not when dequeued; the original enqueued
        every neighbour unconditionally, which can grow the queue
        exponentially on dense word lists while producing the same answer.
        Returns the number of words on the shortest chain (including both
        endpoints), or 0 when no chain exists.
        """
        if endWord not in wordList:
            return 0

        length = len(beginWord)
        # pattern -> all dictionary words matching it
        buckets = defaultdict(list)
        for word in wordList:
            for i in range(length):
                buckets[word[:i] + '*' + word[i + 1:]].append(word)

        queue = deque([(beginWord, 1)])
        visited = {beginWord}
        while queue:
            word, level = queue.popleft()
            if word == endWord:
                return level
            for i in range(length):
                for neighbor in buckets[word[:i] + '*' + word[i + 1:]]:
                    if neighbor not in visited:
                        visited.add(neighbor)
                        queue.append((neighbor, level + 1))
        return 0
| [
"dmitry.pukhov@gmail.com"
] | dmitry.pukhov@gmail.com |
925d5b3d47f5b7bcf1fc083d4694149fae7043f5 | 9399d687b2e41245968ba0e9d413a6789d773b1d | /CI/python-coroutine/download_server/scheduler.py | 095d52c381995a12aa0a59eb12fc4de1054779f9 | [] | no_license | jiangliu888/DemoForSpeed | be41bdb85a1d1f5ca9350a3a1f681ced5ec9b929 | 11319bc19c074327d863ac2813a04cef3487f8d6 | refs/heads/main | 2023-08-23T14:16:21.686155 | 2021-10-17T12:01:34 | 2021-10-17T12:01:34 | 388,452,435 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,573 | py | # -*- encoding=utf-8 -*-
import os
import prettytable
import utils
from const import CalcType
from modules.downloader import Downloader
from modules.hasher import Hasher
from modules.storager import Storager
class Scheduler:
    """Scheduling module: drives downloader -> hasher -> storager once per
    concurrency model and renders a comparison table of the timings.
    """

    def __init__(self):
        self.downloader = Downloader()
        self.hasher = Hasher()
        self.storager = Storager()

    def _wrap_path(self, md5):
        """Map a content md5 to its on-disk path ./images/<md5>.jpg."""
        filename = '{}.jpg'.format(md5)
        STORAGE_PATH = os.path.join('.', 'images')
        path = os.path.join(STORAGE_PATH, filename)
        return path

    def set_calc_type(self, type_):
        """Propagate the concurrency model (see const.CalcType) to all stages."""
        self.downloader.set_calc_type(type_)
        self.hasher.set_calc_type(type_)
        self.storager.set_calc_type(type_)

    def process(self):
        """Run one full download/hash/store pass; return per-stage timings.

        Returns a dict with keys 'network_time', 'cpu_time', 'disk_time',
        each a single-element list of elapsed seconds.
        """
        time_statictics = {}
        time_statictics['network_time'] = []
        time_statictics['cpu_time'] = []
        time_statictics['disk_time'] = []
        timer = utils.Timer()

        # 1. Load the list of image URLs
        url_list = utils.urllist()

        # 2. Dispatch the download stage (network-bound)
        timer.tick()
        content_list = self.downloader.process(url_list)
        time_cost = timer.tock()
        time_statictics['network_time'].append(time_cost)

        # 3. Dispatch the hashing stage (CPU-bound)
        timer.tick()
        md5_list = self.hasher.process(content_list)
        time_cost = timer.tock()
        time_statictics['cpu_time'].append(time_cost)

        # 4. Dispatch the storage stage (disk-bound)
        item_list = []
        for content, md5 in zip(content_list, md5_list):
            path = self._wrap_path(md5)
            item = (content, path)
            item_list.append(item)
        timer.tick()
        self.storager.process(item_list)
        time_cost = timer.tock()
        time_statictics['disk_time'].append(time_cost)
        return time_statictics

    # NOTE: "statictics" is a typo for "statistics", kept for API compatibility.
    def statictics(self, single_log, multi_log, multiprocess_log, pycoroutine_log):
        """Print a table comparing the four runs and their speed-up rates
        (each relative to the single-threaded baseline)."""
        table = prettytable.PrettyTable(['类型', '单线程总耗时', '多线程总耗时', '多线程提升率', '多进程总耗时', '多进程提升率', '协程总耗时', '协程提升率'])
        network_row = ['network']
        cpu_row = ['cpu']
        disk_row = ['disk']

        # Single-thread data (baseline)
        network_row.append(single_log['network_time'][0])
        cpu_row.append(single_log['cpu_time'][0])
        disk_row.append(single_log['disk_time'][0])

        # Multi-thread data
        network_row.append(multi_log['network_time'][0])
        cpu_row.append(multi_log['cpu_time'][0])
        disk_row.append(multi_log['disk_time'][0])

        # Multi-thread improvement rate vs. baseline
        time_ = single_log['network_time'][0] - multi_log['network_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['network_time'][0]) * 100)
        network_row.append(lift_rate)
        time_ = single_log['cpu_time'][0] - multi_log['cpu_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['cpu_time'][0]) * 100)
        cpu_row.append(lift_rate)
        time_ = single_log['disk_time'][0] - multi_log['disk_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['disk_time'][0]) * 100)
        disk_row.append(lift_rate)

        # Multi-process data
        network_row.append(multiprocess_log['network_time'][0])
        cpu_row.append(multiprocess_log['cpu_time'][0])
        disk_row.append(multiprocess_log['disk_time'][0])

        # Multi-process improvement rate vs. baseline
        time_ = single_log['network_time'][0] - multiprocess_log['network_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['network_time'][0]) * 100)
        network_row.append(lift_rate)
        time_ = single_log['cpu_time'][0] - multiprocess_log['cpu_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['cpu_time'][0]) * 100)
        cpu_row.append(lift_rate)
        time_ = single_log['disk_time'][0] - multiprocess_log['disk_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['disk_time'][0]) * 100)
        disk_row.append(lift_rate)

        # Coroutine run data
        network_row.append(pycoroutine_log['network_time'][0])
        cpu_row.append(pycoroutine_log['cpu_time'][0])
        disk_row.append(pycoroutine_log['disk_time'][0])

        # Coroutine improvement rate vs. baseline
        time_ = single_log['network_time'][0] - pycoroutine_log['network_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['network_time'][0]) * 100)
        network_row.append(lift_rate)
        time_ = single_log['cpu_time'][0] - pycoroutine_log['cpu_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['cpu_time'][0]) * 100)
        cpu_row.append(lift_rate)
        time_ = single_log['disk_time'][0] - pycoroutine_log['disk_time'][0]
        lift_rate = '%.4f%%' % ((time_ / single_log['disk_time'][0]) * 100)
        disk_row.append(lift_rate)

        table.add_row(network_row)
        table.add_row(cpu_row)
        table.add_row(disk_row)
        print(table)
if __name__ == '__main__':
    scheduler = Scheduler()

    # Single-threaded run (baseline)
    scheduler.set_calc_type(CalcType.SingleThread)
    singlethread_time = scheduler.process()

    # Multi-threaded run
    scheduler.set_calc_type(CalcType.MultiThread)
    multithread_time = scheduler.process()

    # Multi-process run
    scheduler.set_calc_type(CalcType.MultiProcess)
    multiprocess_time = scheduler.process()

    # Coroutine run
    scheduler.set_calc_type(CalcType.PyCoroutine)
    pyprocess_time = scheduler.process()

    # Merge and display the collected timings
    scheduler.statictics(singlethread_time, multithread_time, multiprocess_time, pyprocess_time)
| [
"admin@example.com"
] | admin@example.com |
e31fc49bffcfe984a800f9e7b2471b955301d6ae | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02901/s081848102.py | c85e2160af148705771e000edbc7bda04900a5b9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | def main():
N, M = map(int, input().split())
inf = 10**9
p = 1<<N
dp = [inf]*p
dp[0] = 0
for i in range(M):
a, b = map(int, input().split())
C = list(map(int, input().split()))
c_bit = sum([1<<(c-1) for c in C])
for j in range(p):
#print(min(dp[i][j|c_bit], dp[i][j] + a))
dp[j|c_bit] = min(dp[j|c_bit], dp[j] + a)
if dp[p-1] == inf:
print(-1)
else:
print(int(dp[p-1]))
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8009bc9f9d4141df5d051625b246946cb0fc9952 | 6c418592d5f6ab598dbba3451ab6be3be11ece0a | /env/bin/thresholder.py | 4a23a3afa73c7133de719519cd42fa85dc8f249c | [] | no_license | maxhasan882/Online | 0b9b77ed6fef8462b9d7544487e510833353462b | 53f33b8d39c6d546b7b3da13c5dc8d021f38e263 | refs/heads/master | 2022-11-27T22:05:13.740413 | 2019-12-28T05:09:20 | 2019-12-28T05:09:20 | 230,561,289 | 1 | 0 | null | 2022-11-22T01:40:38 | 2019-12-28T05:07:42 | Python | UTF-8 | Python | false | false | 1,914 | py | #!/home/hasan/PycharmProjects/wiz_sns_updated/env/bin/python
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
    """Tk frame showing a grayscale image with a live threshold overlay.

    A slider (0-255) selects the threshold; pixels at or above it are
    drawn as a green 1-bit bitmap overlay on top of the backdrop image.
    """
    def __init__(self, master, im, value=128):
        Frame.__init__(self, master)

        self.image = im
        self.value = value        # current threshold, updated by the slider

        self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
        self.backdrop = ImageTk.PhotoImage(im)
        self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
        self.canvas.pack()

        scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
                      resolution=1, command=self.update_scale, length=256)
        scale.set(value)
        # Redraw the overlay only when the mouse button is released.
        scale.bind("<ButtonRelease-1>", self.redraw)
        scale.pack()

        # uncomment the following line for instant feedback (might
        # be too slow on some platforms)
        # self.redraw()

    def update_scale(self, value):
        """Slider callback: store the new threshold and redraw."""
        self.value = float(value)

        self.redraw()

    def redraw(self, event=None):
        """Rebuild the thresholded overlay and replace it on the canvas."""

        # create overlay (note the explicit conversion to mode "1")
        im = self.image.point(lambda v, t=self.value: v >= t, "1")
        self.overlay = ImageTk.BitmapImage(im, foreground="green")

        # update canvas
        self.canvas.delete("overlay")
        self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
                                 tags="overlay")
# --------------------------------------------------------------------
# main
if len(sys.argv) != 2:
print("Usage: thresholder file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
| [
"rhmithu50@gmail.com"
] | rhmithu50@gmail.com |
37829dfedbbb50d23a8a2bfa1dacda84f5575b25 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /components/breadcrumbs/DEPS | dd3a8164073471de8250f98e1c77abfbbaf839ac | [
"BSD-3-Clause"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 187 | include_rules = [
"+base",
"+components/crash/core/common/crash_key.h",
"+components/infobars/core",
"+components/keyed_service/core",
"+net/base/net_errors.h",
"+ui/base",
]
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com | |
808f5e2470653a06b31ae8ad6859d95156e9b594 | dce02a47c3c7f5affc2f0628fb0c21c8582b9b12 | /src/Microsoft_LexicographicallySmallestString.py | 53abb0caea17e33a8d0445683a73cb0b7fb82d2c | [] | no_license | varshajayaraman/SheCodesInPython | c7e6a9b3441f14bf6ebe31f7cc0c1e39eb502020 | d71327e16fdf2702542ec585fd6eb48a9a0dc8d0 | refs/heads/master | 2021-06-19T19:01:41.054234 | 2021-06-11T04:19:32 | 2021-06-11T04:19:32 | 218,898,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | def sol(strs):
res=""
i=0
while i < len(strs)-1:
if strs[i]>strs[i+1]:
break
else:
res+=strs[i]
i+=1
if i == len(strs)-1:
return res
for j in range(i+1, len(strs)):
res+=strs[j]
return res | [
"vjayar6@uic.edu"
] | vjayar6@uic.edu |
30c605defe051db8df4ec0debf6d86f4a0faf4aa | a929ce53417526ea532607450fed0a75a1134ba3 | /albumy/extensions.py | df75477d29d3cddff3ff1fdfcc4d618f8f701161 | [
"MIT"
] | permissive | tbshow/albumy | 8da9b42f0e199313317cd3ab9ea8681c7a9db1fb | 18ccd51a8ecbeecdea0f693beb10dbba75be09e4 | refs/heads/master | 2022-11-16T12:34:14.553618 | 2020-07-04T04:11:38 | 2020-07-04T04:11:42 | 276,822,110 | 0 | 0 | MIT | 2020-07-03T06:12:32 | 2020-07-03T06:12:31 | null | UTF-8 | Python | false | false | 1,314 | py | # -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <withlihui@gmail.com>
:license: MIT, see LICENSE for more details.
"""
from flask_avatars import Avatars
from flask_bootstrap import Bootstrap
from flask_dropzone import Dropzone
from flask_login import LoginManager, AnonymousUserMixin
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import CSRFProtect
bootstrap = Bootstrap()
db = SQLAlchemy()
login_manager = LoginManager()
mail = Mail()
dropzone = Dropzone()
moment = Moment()
avatars = Avatars()
csrf = CSRFProtect()
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user loader: map the id stored in the session to a User.

    The import is local, presumably to avoid a circular import between
    this extensions module and albumy.models - confirm before moving it.
    """
    from albumy.models import User
    user = User.query.get(int(user_id))
    return user
login_manager.login_view = 'auth.login'
# login_manager.login_message = 'Your custom message'
login_manager.login_message_category = 'warning'
login_manager.refresh_view = 'auth.re_authenticate'
# login_manager.needs_refresh_message = 'Your custom message'
login_manager.needs_refresh_message_category = 'warning'
class Guest(AnonymousUserMixin):
    """Anonymous (not-logged-in) user: denies every permission check."""

    def can(self, permission_name):
        # Guests hold no permissions, regardless of the name asked for.
        return False

    @property
    def is_admin(self):
        return False
login_manager.anonymous_user = Guest
| [
"withlihui@gmail.com"
] | withlihui@gmail.com |
b7e37eec06094261814cdf3bc44dd856aa98368d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ET2voBkuSPLb3mFSD_10.py | bc52bfbef9c3e6d242755af581c701c41dc92e22 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py |
def sum_every_nth(numbers, n):
l = len(numbers)
index_list = []
for i in range(l):
k = i+1
if k % n == 0:
index_list.append(i)
count = 0
for location in index_list:
count += int(numbers[location])
return(count)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
40a80f492736a92b65f5641c675ab9c35eb15034 | 2f15fdd78643f449f2749b31a3f8c84f2d4703b5 | /braintree/android_pay_card.py | 33c56f0b7be0ee70891583fddee9e98c2bebc78b | [
"MIT"
] | permissive | bhargavrpatel/braintree_python | 131fbbf5c9f1aaa0f37dec2f435e469e5f05ae05 | 89219d691eafe763a09ad28f04400c4fcef4991e | refs/heads/master | 2020-03-28T23:50:48.283637 | 2019-05-21T15:07:41 | 2019-05-21T15:07:41 | 149,311,839 | 1 | 2 | MIT | 2019-05-06T01:15:34 | 2018-09-18T15:34:12 | Python | UTF-8 | Python | false | false | 789 | py | import braintree
from braintree.resource import Resource
class AndroidPayCard(Resource):
    """
    A class representing Braintree Android Pay card objects.
    """
    def __init__(self, gateway, attributes):
        # Resource.__init__ presumably copies *attributes* onto the
        # instance (see braintree.resource.Resource) - the hasattr/"in"
        # checks below rely on that.
        Resource.__init__(self, gateway, attributes)
        if hasattr(self, 'expired'):
            self.is_expired = self.expired

        if "subscriptions" in attributes:
            self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]

    @property
    def expiration_date(self):
        # "month/year" string built from the card's expiration attributes.
        return self.expiration_month + "/" + self.expiration_year

    @property
    def last_4(self):
        # Alias for the virtual (network-tokenized) card's last four digits.
        return self.virtual_card_last_4

    @property
    def card_type(self):
        # Alias for the virtual card's brand.
        return self.virtual_card_type
| [
"code@getbraintree.com"
] | code@getbraintree.com |
cd325c42334ca0c47afcfd380347919d3b16607b | 3f46af2da32d9f02d1ebbdef6784ece1d64aace3 | /Production/python/PrivateSamples/EMJ_2016_mMed-1500_mDark-10_ctau-0p1_unflavored-down_cff.py | c47c46d40e0cf697bb2cd8e8fbee4e1818232468 | [] | no_license | cms-svj/TreeMaker | 53bf4b1e35d2e2a4fa99c13c2c8b60a207676b6d | 0ded877bcac801a2a394ad90ed987a20caa72a4c | refs/heads/Run2_2017 | 2023-07-19T07:14:39.175712 | 2020-10-06T21:10:26 | 2020-10-06T21:10:26 | 305,753,513 | 0 | 0 | null | 2021-01-26T18:58:54 | 2020-10-20T15:32:19 | null | UTF-8 | Python | false | false | 1,892 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-1.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-2.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-3.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-4.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-5.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-6.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-7.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-8.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-9.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-10.root',
] )
| [
"enochnotsocool@gmail.com"
] | enochnotsocool@gmail.com |
23e21a35aa2873bd64b2132d86b739f15a362f81 | ffb78204e89b36509a0e76dc04c551aa56ebbee0 | /draw/ciyun_cn.py | bf607fa73b6fa7575cc2c047444c95f4ea181d02 | [] | no_license | b1668952669/python_study | c3624fcb6341341cc7baf267a7999c9246e0d309 | fa2c2b323c1a83f62d569c0576341e26f34ea456 | refs/heads/master | 2022-12-26T19:02:30.031100 | 2020-10-14T09:57:12 | 2020-10-14T09:57:12 | 302,525,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
from os import path
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import jieba
from wordcloud import WordCloud, STOPWORDS
###当前文件路径
d = path.dirname(__file__)
# Read the whole text.
file = open(path.join(d, 'alice.txt')).read()
##进行分词
#刚开始是分完词放进txt再打开却总是显示不出中文很奇怪
default_mode =jieba.cut(file)
text = " ".join(default_mode)
alice_mask = np.array(Image.open(path.join(d, "1.png")))
stopwords = set(STOPWORDS)
stopwords.add("said")
wc = WordCloud(
#设置字体,不指定就会出现乱码,这个字体文件需要下载
font_path=r'/usr/share/fonts/wqy-microhei/wqy-microhei.ttc',
background_color="white",
max_words=2000,
mask=alice_mask,
stopwords=stopwords)
# generate word cloud
wc.generate(text)
# store to file
wc.to_file(path.join(d, "qq_result.jpg"))
# show
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.figure()
plt.imshow(alice_mask, cmap=plt.cm.gray, interpolation='bilinear')
plt.axis("off")
plt.show() | [
"you@example.com"
] | you@example.com |
c8e34fd1a8706819e83a5c25f9985ce4e9eb8215 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-cognitiveservices-search-customimagesearch/setup.py | 455120b62d9306c1671ae6ef4302e6df0f223100 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,917 | py | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-cognitiveservices-search-customimagesearch"
PACKAGE_PPRINT_NAME = "Cognitive Services Custom Image Search"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.cognitiveservices',
'azure.cognitiveservices.search',
]),
install_requires=[
'msrest>=0.5.0',
'azure-common~=1.1',
],
extras_require={
":python_version<'3.0'": ['azure-cognitiveservices-search-nspkg'],
}
)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
cd28bd5ff31c255a37db6a9689eb48cc564517f2 | 49002dd74a1e00ba0d9358912881586f26ce45b9 | /tags/models.py | 80b12639fdfa3b2c35f1423b9510ffd272c3dd8b | [] | no_license | aditya2222/django-ecommerce | 9da44959405694f2f97afa753474b5f437d319ec | 57c33a384346c4be905efde804541c91a789d750 | refs/heads/master | 2020-03-16T01:48:32.653692 | 2018-09-19T16:06:11 | 2018-09-19T16:06:11 | 126,989,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from django.db import models
from django.db.models.signals import pre_save
from ecommerce.utils import unique_slug_generator
from django.urls import reverse
from products.models import Product
# Create your models here.
class Tag(models.Model):
title = models.CharField(max_length=120)
slug = models.SlugField()
timestamp = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
products = models.ManyToManyField(Product,blank=True)
def __str__(self):
return self.title
def tag_pre_save_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
pre_save.connect(tag_pre_save_receiver, sender=Tag) | [
"adityasingh222247@gmail.com"
] | adityasingh222247@gmail.com |
aad632055722322373c3263a9680cb624f5c8ac4 | a74ea2dcde5d47344177606da383e0f006f2f35c | /test/unit/test_browserstack.py | 3c2d53ab901db3129f4ace09545f79478abefb7d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | mikalindh/qweb | 050344a55c7e4e1c9070f1ecb8b0371bdbe3b435 | d497e60081cf440ebd0eb70f84f11f7ad405100a | refs/heads/master | 2023-03-25T09:30:45.082882 | 2021-03-24T09:49:50 | 2021-03-24T09:49:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # -*- coding: utf-8 -*-
# --------------------------
# Copyright © 2014 - 2020 Qentinel Group. All rights reserved.
#
# The software code and any other material contained in this file are property
# of Qentinel Group and protected by international copyright laws.
# Any copying, re-licensing, re-distribution, development of
# derivative works, or reverse engineering are prohibited.
# ---------------------------
from QWeb.internal.browser.bs_desktop import open_browser as ob_desktop
from QWeb.internal.browser.bs_mobile import open_browser as ob_mobile
from unittest.mock import patch
from QWeb.internal.exceptions import QWebException
import pytest
@patch('QWeb.internal.browser.bs_desktop.BuiltIn.get_variable_value')
def test_desktop_bs_open_browser(patch_robot_builtin):
patch_robot_builtin.return_value = 'foobar'
with pytest.raises(QWebException):
ob_desktop('asd', 'project_name', 'run_id_test')
@patch('QWeb.internal.browser.bs_mobile.BuiltIn.get_variable_value')
def test_mobile_bs_open_browser(patch_robot_builtin):
patch_robot_builtin.return_value = 'foobar'
with pytest.raises(QWebException):
ob_mobile('asd', 'project_name', 'run_id_test')
| [
"tuomas.koukkari@qentinel.com"
] | tuomas.koukkari@qentinel.com |
1b7080fb26e0ce9cf9dbd7d96cff8cbea8235f30 | 63b7671b5296b97aa5271d076540e0d81b85d599 | /strongr/schedulerdomain/model/vm.py | e6068057a52774cf1b55e44b231bfcae90ef6e46 | [
"Apache-2.0"
] | permissive | bigr-erasmusmc/StrongR | bd30d7f41c1fef87bd241c0c4ea059f88c5c426e | 48573e170771a251f629f2d13dba7173f010a38c | refs/heads/master | 2021-10-10T10:17:15.344510 | 2018-11-20T10:21:50 | 2018-11-20T10:21:50 | 112,336,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | import strongr.core.gateways as gateways
from sqlalchemy import Column, ForeignKey, Integer, String, Enum, DateTime, func
from sqlalchemy.orm import relationship, synonym
from strongr.schedulerdomain.model.vmstate import VmState
Base = gateways.Gateways.sqlalchemy_base()
class Vm(Base):
__tablename__ = 'vms'
vm_id = Column(String(64), primary_key=True)
cores = Column(Integer)
ram = Column(Integer)
deadline = Column(DateTime())
jobs = relationship('Job', back_populates='vm')
_state = Column('state', Enum(VmState))
# In classical SQL we would put a trigger to update this field with NOW() if the state-field is updated.
# SQLAlchemy has no way to write triggers without writing platform-dependent SQL at the time of writing.
# Instead we use a setter on the state-field, this setter updates the state_date as well.
# The disadvantage of this solution is that other apps need to implement logic like this as well making
# the solution less portable.
state_date = Column(DateTime())
@property
def state(self):
return self._state
# update state_date-field as well when we update state-field
@state.setter
def state(self, value):
self._state = value
self.state_date = func.now()
# create a synonym so that _state and state are considered the same field by the mapper
state = synonym('_state', descriptor=state)
| [
"thomas@tphil.nl"
] | thomas@tphil.nl |
e1e388a78a05c376e2cbdfeb5ec293a6dca9f083 | d048a865519b5f944e1430c6181d00399c979d9c | /Assingnment/gallery/img_gallery/migrations/0001_initial.py | 8fb53d3015c4545c39b089c4691af2117f8fda08 | [] | no_license | jithinvijayan007/PaceWisdom- | 5f84261c4ba7f51e25c8c21074b48214a24cb6d2 | 1ba00814a757edb327923afcaf20fe04652efa0e | refs/heads/master | 2023-03-06T04:00:21.729404 | 2021-02-21T18:56:54 | 2021-02-21T18:56:54 | 340,974,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | # Generated by Django 3.0.3 on 2021-02-21 15:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images')),
('date', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"jithinvijayan007@gmail.com"
] | jithinvijayan007@gmail.com |
7bc1b6b21028f0853439b41809f0d22eaed51200 | f11ecb59dab63af605c6e5f256ee59e00447ecc1 | /412-fizz-buzz.py | 9f47ee36c12dc713615cf3877642178d6e215b00 | [] | no_license | floydchenchen/leetcode | 626d55f72ec914764385ce82b0f3c57f5a7e9de8 | 9d9e0c08992ef7dbd9ac517821faa9de17f49b0e | refs/heads/master | 2022-10-07T20:33:55.728141 | 2020-06-08T16:09:17 | 2020-06-08T16:09:17 | 269,525,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # 412. Fizz Buzz
# Write a program that outputs the string representation of numbers from 1 to n.
#
# But for multiples of three it should output “Fizz” instead of the number and for the multiples of five output “Buzz”.
# For numbers which are multiples of both three and five output “FizzBuzz”.
#
# Example:
#
# n = 15,
#
# Return:
# [
# "1",
# "2",
# "Fizz",
# "4",
# "Buzz",
# "Fizz",
# "7",
# "8",
# "Fizz",
# "Buzz",
# "11",
# "Fizz",
# "13",
# "14",
# "FizzBuzz"
# ]
class Solution:
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
return ["Fizz" * (i % 3 == 0) + "Buzz" * (i % 5 == 0) or str(i) for i in range(1, n + 1)] | [
"chen2918@umn.edu"
] | chen2918@umn.edu |
88d6064b90f738ef9c08e4382d55d2de6915acf0 | 32174f2b74b286a52a2f3b0bfd120a0711bfc6dc | /src/mediafiles/urls.py | 57a892f53a61549764e3bdc61a328905c8a3fadb | [
"MIT"
] | permissive | hdknr/django-mediafiles | d13172162506cba2abdab0d85bc2815e2e24b6e6 | 7526e35eb7f532e36c95e7aa76290bb95a9ac41a | refs/heads/master | 2020-06-04T03:16:28.824865 | 2014-10-30T04:10:40 | 2014-10-30T04:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | from django.conf.urls import patterns, include, url
from views import *
urlpatterns = patterns('',
url('thumbnail/(?P<id>.+)/(?P<width>\d+)x(?P<height>\d+)',thumbnail,name='mediafiles_thumbnail'),
url('preview/(?P<id>.+)',preview,name='mediafiles_preview'),
url('download/(?P<id>.+)',download,name='mediafiles_download'),
url('gallery/admin/(?P<id>\d+)/media/create',GalleryAdminMediaCreateView, name='gallery_admin_media_create'),
url('gallery/admin/(?P<id>\d+)/media/(?P<mid>\d+)/delete',GalleryAdminMediaDeleteView, name='gallery_admin_media_delete'),
url('gallery/admin/(?P<id>\d+)/media/(?P<mid>\d+)/image',GalleryAdminMediaImageView, name='gallery_admin_media_image'),
url('gallery/admin/(?P<id>\d+)/media/(?P<mid>\d+)/thumb',GalleryAdminMediaThumbView, name='gallery_admin_media_thumb'),
url('gallery/admin/(?P<id>\d+)',GalleryAdminDetailView, name='gallery_admin_detail'),
url('gallery/admin/',GalleryAdminListView, name='gallery_admin_list'),
)
| [
"gmail@hdknr.com"
] | gmail@hdknr.com |
cd6044a0a3f8e54b2352e2173edf15d2d0c570ee | 55e7503b0b4370c9537334359019cdc8e0e70096 | /Liao/051Pillow.py | 2863be318ecb27f0aaba22bcdd341e3e21d78976 | [] | no_license | dumin199101/PythonLearning | 4f3a86e529c3c873174cf12baf694c4d6c3ef5eb | deece6b4d0b44d4817f24e1cf9da6d69301139d4 | refs/heads/master | 2021-01-22T07:52:25.668045 | 2018-06-13T02:03:29 | 2018-06-13T02:03:29 | 92,582,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | #!/usr/bin/env python3
# _*_ coding=utf-8 _*_
from PIL import Image
# 打开一个jpg图像文件,注意是当前路径:
im = Image.open('test.jpg')
# 获得图像尺寸:
w, h = im.size
print('Original image size: %sx%s' % (w, h))
# 缩放到50%:
im.thumbnail((w//2, h//2))
print('Resize image to: %sx%s' % (w//2, h//2))
# 把缩放后的图像用jpeg格式保存:
im.save('thumbnail.jpg', 'jpeg') | [
"1766266374@qq.com"
] | 1766266374@qq.com |
99d67089539c2c79d725824080ed8b7654b0f8f4 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2009/kernel/default/kernel/files/scripts/bump-config.py | 02e04e83c5aae09d6cc0f57ef41f91c4685e14fb | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import pisi
oldwd = os.getcwd()
kpspec = pisi.specfile.SpecFile('pspec.xml')
kworkdir = kpspec.source.archive.name.split('.tar.bz2')[0]
kname = kpspec.source.name
kver = kpspec.history[0].version
krel = kpspec.history[0].release
kpath = "/var/pisi/%s-%s-%s/work/%s" % (kname, kver, krel, kworkdir)
if os.path.exists(kpath):
os.chdir(kpath)
open(os.path.join(oldwd, "files/pardus/kernel-config.patch"), "w").write(os.popen("diff -u /dev/null .config").read())
os.chdir(oldwd)
else:
print "%s doesn't exist." % kpath
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
1977dce0a9d97752ddddd0f13ff2041d285c3f7e | 9468507c1beeb2cb69591889605ea155d2cb7a63 | /polls/urls.py | 8256069f1b75de4dabb4adbe29504e0dff123368 | [] | no_license | nimal54/drf-polls | 2375e2f5b78670de40c72b51eb616a69e7f49a65 | 9b29230998146eb225e0cffa0703d6bed1cc876a | refs/heads/master | 2020-04-25T00:21:14.952917 | 2018-03-16T11:54:53 | 2018-03-16T11:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.urls import path
from .views import QuestionViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('', QuestionViewSet, base_name='questions')
urlpatterns = router.urls | [
"will@wsvincent.com"
] | will@wsvincent.com |
23dd4389391429f8cce0aacb515e18bd761cbdf1 | 4c62ac4d7e5e28baf41eb44a39a41225d1b56d6f | /prospection/settings.py | 9e563e2848c2859065428dc47c083fb042aef606 | [] | no_license | RogerMendez/prospection | de10fc3d67f5c80d475b68c43fd3579eed8d5fc3 | 1634ff07ac25ae3280ec4f02bd99c408ab9fec86 | refs/heads/master | 2016-08-12T13:47:51.349905 | 2016-04-04T15:42:51 | 2016-04-04T15:42:51 | 53,613,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | """
Django settings for prospection project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r_8f_28ozie1g@60s_295_#srd+61y4w-=+ea&zbd6prxp_#y7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'publishers',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'prospection.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'Templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'prospection.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'prospection_development',
'USER':'postgres',
'PASSWORD': '76176338',
'HOST': '127.0.0.1',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es-BO'
TIME_ZONE = 'America/La_Paz'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
'''EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sieboliva@gmail.com'
EMAIL_HOST_PASSWORD = 'siebolivia2012'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sieboliva@gmail.com'
EMAIL_HOST_PASSWORD = 'fsmfarxuvbzhrmav'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
ADDREES = 'http://127.0.0.1:8000'
#ADDREES = 'http://192.168.1.107:8000' | [
"Roger.Mendez.R@gmail.com"
] | Roger.Mendez.R@gmail.com |
2c4e66fe5238df3f91668e2e39749c7c36ffcfd5 | 13d93c2922005af35056d015f1ae3ebebe05ee31 | /python/oreilly/cours_python/chap12/formes.py | 862c0a179aa1ad87fcd1a5f2d597996742b25e61 | [] | no_license | scls19fr/openphysic | 647cc2cdadbdafd050d178e02bc3873bd2b07445 | 67bdb548574f4feecb99b60995238f12f4ef26da | refs/heads/master | 2021-04-30T23:16:26.197961 | 2020-11-16T20:21:17 | 2020-11-16T20:21:17 | 32,207,155 | 1 | 1 | null | null | null | null | ISO-8859-1 | Python | false | false | 959 | py | #! /usr/bin/env python
# -*- coding: Latin-1 -*-
class Rectangle:
"Classe de rectangles"
def __init__(self, longueur =0, largeur =0):
self.L = longueur
self.l = largeur
self.nom ="rectangle"
def perimetre(self):
return "(%d + %d) * 2 = %d" % (self.L, self.l,
(self.L + self.l)*2)
def surface(self):
return "%d * %d = %d" % (self.L, self.l, self.L*self.l)
def mesures(self):
print "Un %s de %d sur %d" % (self.nom, self.L, self.l)
print "a une surface de %s" % (self.surface(),)
print "et un périmètre de %s\n" % (self.perimetre(),)
class Carre(Rectangle):
"Classe de carrés"
def __init__(self, cote):
Rectangle.__init__(self, cote, cote)
self.nom ="carré"
if __name__ == "__main__":
r1 = Rectangle(15, 30)
r1.mesures()
c1 = Carre(13)
c1.mesures()
| [
"s.celles@gmail.com@41f3eeec-7763-abce-c6e2-0c955b6d8259"
] | s.celles@gmail.com@41f3eeec-7763-abce-c6e2-0c955b6d8259 |
e0c94600e33f8b9d33d36a64155d9371bb088189 | 349a835358dd431feb78ca0d62e1c22b3eb1b7a5 | /instatools/instaapp/insta.py | 6b137bcf1f7be3cc2d42c21668bfd29c331e609b | [] | no_license | priyankush-siloria/Insta | 44b04334ca7b6b4d195288f00daa647c0ed6f5f1 | e3a1ecfe2c6a4e646ea0bc10e836f649a36eef91 | refs/heads/master | 2020-07-07T13:18:39.976166 | 2019-08-20T11:02:27 | 2019-08-20T11:02:27 | 203,359,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,091 | py |
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
from selenium.common.exceptions import NoSuchElementException,StaleElementReferenceException, TimeoutException
import random
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import sqlite3
from sqlite3 import Error
import os
import smtplib
import time
import imaplib
import email
import json
from .models import *
import traceback
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
def send_keys(element, keys):
if element.get_attribute("type") == "checkbox":
if element.get_attribute("checked"):
element.send_keys(Keys.SPACE)
else:
element.clear()
element.send_keys(keys)
def user_login(driver,username,password):
try:
print(username,password)
driver.get('https://www.instagram.com/accounts/login/')
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//h1[@class='NXVPg Szr5J coreSpriteLoggedOutWordmark']"))))
send_keys(driver.find_element_by_name("username"), username)
send_keys(driver.find_element_by_name("password"), password)
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//button[@type='submit']"))))
driver.find_element_by_xpath("//button[@type='submit']").click()
# close the notification popup
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//button[contains(text(),'Not Now')]"))));
driver.find_element_by_xpath("//button[contains(text(),'Not Now')]").click()
return True
except Exception as e:
# raise e
print(str(e))
return False
driver.quit()
def get_data(insta_user,insta_pass):
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
driver_path = os.path.join(base_dir, 'chromedriver')
driver = webdriver.Chrome(executable_path=driver_path,options=options)
try:
is_user_logged=user_login(driver,insta_user,insta_pass)
print("is_user_logged",is_user_logged)
if is_user_logged:
driver.get('https://www.instagram.com')
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//div[@class='nwXS6']"))));
driver.find_element_by_xpath("//div[@class='nwXS6']").click()
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//div[@class='nZSzR']/h1"))));
login_username=driver.find_element_by_xpath("//div[@class='nZSzR']/h1").text
print("login_username",login_username)
username=driver.find_element_by_xpath("//h1[@class='rhpdm']").text
print("username",username)
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//ul[@class='k9GMp ']"))));
posts=driver.find_elements_by_xpath("//ul[@class='k9GMp ']/li")[0].text
followers=driver.find_elements_by_xpath("//ul[@class='k9GMp ']/li")[1].text
following=driver.find_elements_by_xpath("//ul[@class='k9GMp ']/li")[2].text
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//a[@class='_0ZPOP kIKUG ']"))));
driver.find_element_by_xpath('//a[@class="_0ZPOP kIKUG "]').click()
follow_requests=''
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//div[@class='JRHhD']"))));
follow_requests=driver.find_element_by_xpath("//div[@class='JRHhD']").text
if not follow_requests:
follow_requests=0
print(posts,followers,following,follow_requests)
try:
obj=UserAccounts.objects.get(email_account=insta_user)
except UserAccounts.DoesNotExist:
pass
try:
UserInstaDetail.objects.create(
insta_user=obj,
posts=posts,
total_followers=followers,
total_follows=following,
pending_follow_request=follow_requests
)
except:
pass
return True
else:
print("Error....User is not able to login")
return False
except Exception as e:
raise e
return False
driver.quit()
finally:
driver.quit()
| [
"you@example.com"
] | you@example.com |
85df5a236ac20d443220e956a0d722c99f0526b5 | 1df7ba55c4b61772c1a31c503e6b8881f1456dc5 | /untitled9/apps/courses/migrations/0012_auto_20170211_1218.py | c583d480b497bd5e2a05121592b29b4d9fca5848 | [] | no_license | fzk466569/python-django-pro | 35918756060fcae375d3c99ea1a6934949b6d605 | 9add086b7a910f255df5b192268f1e117057e053 | refs/heads/master | 2021-01-19T13:18:14.141880 | 2017-02-19T12:16:29 | 2017-02-19T12:16:29 | 82,374,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-11 12:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0011_course_is_banner'),
]
operations = [
migrations.AlterField(
model_name='courseresource',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='\u8bfe\u7a0b\u540d\u79f0'),
),
]
| [
"fzk466569"
] | fzk466569 |
e9da4a005399d3609d065e6748fe5a15aa10506a | 6daaf3cecb19f95265188adc9afc97e640ede23c | /排序.py | cb6b07db9e984f205c218909dd2b24b41c29796d | [] | no_license | guoweifeng216/python | 723f1b29610d9f536a061243a64cf68e28a249be | 658de396ba13f80d7cb3ebd3785d32dabe4b611d | refs/heads/master | 2021-01-20T13:11:47.393514 | 2019-12-04T02:23:36 | 2019-12-04T02:23:36 | 90,457,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | #coding=utf-8
"""
@athor:weifeng.guo
@data:2018/9/28 14:41
@filename:排序
"""
from operator import itemgetter
alist = [('2', '3', '10'), ('1', '2', '3'), ('5', '6', '7'), ('2', '5', '10'), ('2', '4', '10')]
# 多级排序,先按照第3个元素排序,然后按照第2个元素排序:
print sorted(alist, cmp=None, key=itemgetter(2, 1), reverse=False)
print sorted(alist, cmp=None, key=lambda x: itemgetter(2, 1)(x), reverse=False)
print sorted(alist, cmp=None, key=lambda x: map(int, itemgetter(2, 1)(x)), reverse=False) | [
"weifeng.guo@cnexlabs.com"
] | weifeng.guo@cnexlabs.com |
e56eec74dc030dc0c2711eb24adffbec735c8b01 | dc418f7d888ffe73b8f8df2e1fe7b7795f12518a | /code/treePlotter.py | 7590055925b488adca592c1794421e26d0ac2fc8 | [] | no_license | ForrestPi/ML | f936458fa48033940c4d1629e7bc85a0f28a601d | 4e931f2f09beecadbaa79aed5061ae11f5b5d456 | refs/heads/master | 2021-01-17T12:55:10.832957 | 2016-07-12T15:13:32 | 2016-07-12T15:13:32 | 60,149,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,820 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
decisionNode = dict(boxstyle="sawtooth",fc="0.8")
leafNode = dict(boxstyle="round4",fc="0.8")
arrow_args = dict(arrowstyle="<-")
def plotNode(nodeTxt,centerPt,parentPt,nodeType):
createPlot.ax1.annotate(nodeTxt,fontproperties=font,xy=parentPt,xycoords='axes fraction',
xytext=centerPt,textcoords='axes fraction',
va="center",ha="center",bbox=nodeType,arrowprops=arrow_args)
def createPlot():
fig=plt.figure(1,facecolor='white')
fig.clf()
createPlot.ax1 = plt.subplot(111,frameon=False)
plotNode(U'决策节点',(0.5,0.1),(0.1,0.5),decisionNode)
plotNode(U'叶节点',(0.8,0.1),(0.3,0.8),leafNode)
plt.show()
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) #no ticks
#createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo puropses
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;
plotTree(inTree, (0.5,1.0), '')
plt.show()
def getNumLeafs(myTree):
numLeafs=0
firstStr=myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':
numLeafs += getNumLeafs(secondDict[key])
else:
numLeafs += 1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':
thisDepth = 1 + getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth: maxDepth = thisDepth
return maxDepth
def retrieveTree(i):
listOfTrees =[{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
{'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
]
return listOfTrees[i]
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):#if the first key tells you what feat was split on
numLeafs = getNumLeafs(myTree) #this determines the x width of this tree
depth = getTreeDepth(myTree)
firstStr = myTree.keys()[0] #the text label for this node should be this
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictonaires, if not they are leaf nodes
plotTree(secondDict[key],cntrPt,str(key)) #recursion
else: #it's a leaf node print the leaf node
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
#if you do get a dictonary you know it's a tree, and the first element will be another dict
| [
"forrest_zhu@foxmail.com"
] | forrest_zhu@foxmail.com |
3bfa859eb68da1097de82b39eef158ca52df3a1f | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /deriva-annotations/catalog50/catalog-configs/PDB/pdbx_ion_info.py | 819386249a80989fe62fcaf387987aa437a1ee7b | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 3,844 | py | import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
groups = {}
table_name = 'pdbx_ion_info'
schema_name = 'PDB'
column_annotations = {
'structure_id': {},
'id': {},
'name': {},
'numb_per_asym_unit': {},
'Owner': {}
}
column_comment = {
'structure_id': 'A reference to table entry.id.',
'id': 'type:text\nSerial number.\nexamples:1',
'name': 'type:text\nName of ion.\nexamples:MG',
'numb_per_asym_unit': 'type:int4\nNumber of ion molecules per asymmetric unit.\nexamples:1,2,3',
'Owner': 'Group that can update the record.'
}
column_acls = {}
column_acl_bindings = {}
column_defs = [
em.Column.define(
'structure_id',
em.builtin_types['text'],
nullok=False,
comment=column_comment['structure_id'],
),
em.Column.define('id', em.builtin_types['text'], nullok=False, comment=column_comment['id'],
),
em.Column.define(
'name', em.builtin_types['text'], nullok=False, comment=column_comment['name'],
),
em.Column.define(
'numb_per_asym_unit',
em.builtin_types['int4'],
nullok=False,
comment=column_comment['numb_per_asym_unit'],
),
em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
),
]
visible_columns = {
'*': [
'RID', {
'source': [{
'outbound': ['PDB', 'pdbx_ion_info_structure_id_fkey']
}, 'RID'],
'comment': 'A reference to table entry.id.',
'markdown_name': 'structure id'
}, 'id', 'name', 'numb_per_asym_unit', ['PDB', 'pdbx_ion_info_RCB_fkey'],
['PDB', 'pdbx_ion_info_RMB_fkey'], 'RCT', 'RMT', ['PDB', 'pdbx_ion_info_Owner_fkey']
],
'entry': [
{
'source': [{
'outbound': ['PDB', 'pdbx_ion_info_structure_id_fkey']
}, 'RID'],
'comment': 'A reference to table entry.id.',
'markdown_name': 'structure id'
}, 'id', 'name', 'numb_per_asym_unit'
]
}
table_annotations = {chaise_tags.visible_columns: visible_columns, }
table_comment = 'Details of ions'
table_acls = {}
table_acl_bindings = {}
key_defs = [
em.Key.define(['RID'], constraint_names=[['PDB', 'pdbx_ion_info_RIDkey1']],
),
em.Key.define(
['structure_id', 'id'], constraint_names=[['PDB', 'pdbx_ion_info_primary_key']],
),
]
fkey_defs = [
em.ForeignKey.define(
['RCB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['PDB', 'pdbx_ion_info_RCB_fkey']],
),
em.ForeignKey.define(
['RMB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['PDB', 'pdbx_ion_info_RMB_fkey']],
),
]
table_def = em.Table.define(
table_name,
column_defs=column_defs,
key_defs=key_defs,
fkey_defs=fkey_defs,
annotations=table_annotations,
acls=table_acls,
acl_bindings=table_acl_bindings,
comment=table_comment,
provide_system=True
)
def main(catalog, mode, replace=False, really=False):
updater = CatalogUpdater(catalog)
table_def['column_annotations'] = column_annotations
table_def['column_comment'] = column_comment
updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
host = 'pdb.isrd.isi.edu'
catalog_id = 50
mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
main(catalog, mode, replace)
| [
"brinda.vallat@rcsb.org"
] | brinda.vallat@rcsb.org |
7a42e8446c4f29d13b600eb38a85f23b73061de8 | f640fcb49bf99ebec5f34603748121fbbe9171dc | /lib_openshift/apis/apisextensions_api.py | d993d831c5339f04067ee7968dc716a0c3814209 | [] | no_license | tbielawa/lib_openshift | bea8a11c4904a7d6c815abdd2b206de5a4cc7a93 | 34ca0f6a0c5388624a040223f29552dc4c0f8c49 | refs/heads/master | 2023-06-16T22:41:15.894021 | 2016-07-11T21:26:59 | 2016-07-11T21:26:59 | 63,156,531 | 0 | 0 | null | 2016-07-12T12:35:29 | 2016-07-12T12:35:29 | null | UTF-8 | Python | false | false | 3,843 | py | # coding: utf-8
"""
ApisextensionsApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ApisextensionsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_api_group(self, **kwargs):
"""
get information of a group
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_group(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
params[key] = val
del params['kwargs']
resource_path = '/apis/extensions'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
| [
"jdetiber@redhat.com"
] | jdetiber@redhat.com |
4de59e27dae5a22267169f15cda395acfeff2630 | 3b9d763180410bf0abf5b9c37391a64319efe839 | /toontown/suit/DistributedMintSuit.py | ea4e5dffb2a6dd4f64898cc7fb7e2feeab0c0a9e | [] | no_license | qphoton/Reverse_Engineering_Project_ToonTown | 442f15d484324be749f6f0e5e4e74fc6436e4e30 | 11468ab449060169191366bc14ff8113ee3beffb | refs/heads/master | 2021-05-08T00:07:09.720166 | 2017-10-21T02:37:22 | 2017-10-21T02:37:22 | 107,617,661 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # File: D (Python 2.4)
from toontown.suit import DistributedFactorySuit
from direct.directnotify import DirectNotifyGlobal
class DistributedMintSuit(DistributedFactorySuit.DistributedFactorySuit):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMintSuit')
| [
"Infinitywilee@rocketmail.com"
] | Infinitywilee@rocketmail.com |
2029409d84ed8500355b47b38c34f3ef01a08359 | 473568bf080e3637ee118b374f77e9f561286c6c | /InterviewCorner/HourGlassSum.py | 398c30d4e7319db8e920ee42c4d229357d020db2 | [] | no_license | VineetPrasadVerma/GeeksForGeeks | c2f7fc94b0a07ba146025ca8a786581dbf7154c8 | fdb4e4a7e742c4d67015977e3fbd5d35b213534f | refs/heads/master | 2020-06-02T11:23:11.421399 | 2020-01-07T16:51:18 | 2020-01-07T16:51:18 | 191,138,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | arr = [
[1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 0, 2, 4, 4, 0],
[0, 0, 0, 2, 0, 0],
[0, 0, 1, 2, 4, 0]
]
lst_hourglass_sum = []
def find_hour_glass_sum(upper_limit, lower_limit):
temp = 0
while temp < 4:
per_hour_glass_sum = 0
flag = True
for i in range(upper_limit, lower_limit):
for j in range(temp, temp+3):
if flag:
per_hour_glass_sum += arr[i][j]
if not flag:
if j == temp + 3 - 2:
per_hour_glass_sum += arr[i][j]
flag = not flag
lst_hourglass_sum.append(per_hour_glass_sum)
temp += 1
for i in range(4):
find_hour_glass_sum(i, i+3)
print(lst_hourglass_sum) | [
"vineetpd1996@gmail.com"
] | vineetpd1996@gmail.com |
5ad94a29bd0907446a9d0f90e76f1aeacadf8912 | bb151500b0fc5bb9ef1b1a9e5bba98e485b4b34d | /problemSet/593D-Happy_Tree_Party.py | 1cffb9c69b44fe7fcb384b86014bc74d21c8e1d5 | [] | no_license | yamaton/codeforces | 47b98b23da0a3a8237d9021b0122eaa498d98628 | e0675fd010df852c94eadffdf8b801eeea7ad81b | refs/heads/master | 2021-01-10T01:22:02.338425 | 2018-11-28T02:45:04 | 2018-11-28T03:21:45 | 45,873,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | """
Codeforces Round #329 (Div. 2)
Problem 593 D. Happy Tree Party
@author yamaton
@date 2015-11-04
"""
import itertools as it
import functools
import operator
import collections
import math
import sys
def solve(triples):
pass
def print_stderr(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
n = int(input())
[n, m] = [int(i) for i in input().strip().split()]
triples = [tuple(int(i) for i in input().strip().split()) for _ in range(n-1)]
operations = [tuple(int(i) for i in input().strip().split()) for _ in range(m)]
result = solve(triples)
# print(result)
if __name__ == '__main__':
main()
| [
"yamaton@gmail.com"
] | yamaton@gmail.com |
b321a58832872eba95d18547d884255292ba442a | 6a3dfbf1c0398731520dc34663423e18149a8976 | /web/pgadmin/browser/server_groups/servers/databases/schemas/tests/test_schema_delete.py | 53f33c2d45d0f4e6c9546878070462a2e9151a49 | [
"PostgreSQL"
] | permissive | Kibetchirchir/pgadmin4 | 29272912aa5ee32d33d0f8267e86ed958e4f2dd7 | fe264aafa7e2fb8eced49a5c24e13484a3552fdf | refs/heads/master | 2022-05-24T17:52:55.485701 | 2020-04-28T05:34:43 | 2020-04-28T05:34:43 | 259,569,015 | 1 | 0 | NOASSERTION | 2020-04-28T07:52:51 | 2020-04-28T07:52:51 | null | UTF-8 | Python | false | false | 2,865 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as schema_utils
class SchemaDeleteTestCase(BaseTestGenerator):
""" This class will add new schema under database node. """
scenarios = [
# Fetching default URL for extension node.
('Check Schema Node URL', dict(url='/browser/schema/obj/'))
]
def setUp(self):
self.database_info = parent_node_dict["database"][-1]
self.db_name = self.database_info["db_name"]
# Change the db name, so that schema will create in newly created db
self.schema_name = "schema_get_%s" % str(uuid.uuid4())[1:8]
connection = utils.get_db_connection(self.db_name,
self.server['username'],
self.server['db_password'],
self.server['host'],
self.server['port'])
self.schema_details = schema_utils.create_schema(connection,
self.schema_name)
def runTest(self):
""" This function will delete schema under database node. """
server_id = self.database_info["server_id"]
db_id = self.database_info["db_id"]
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
server_id, db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to delete the"
" schema.")
schema_id = self.schema_details[0]
schema_name = self.schema_details[1]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
schema_name)
if not schema_response:
raise Exception("Could not find the schema to delete.")
response = self.tester.delete(self.url + str(utils.SERVER_GROUP) +
'/' + str(server_id) + '/' +
str(db_id) + '/' + str(schema_id),
follow_redirects=True)
self.assertEquals(response.status_code, 200)
def tearDown(self):
pass
| [
"dpage@pgadmin.org"
] | dpage@pgadmin.org |
edfad5971dddb8103a563afe3f46f6723ef8c121 | 1273032fadcc1abc6e660a0e076b3ce3b3f893ab | /goccina.com/news/migrations/0007_news_slug.py | 983e617db8e7506c2233352d66476d661df39d7d | [] | no_license | munisisazade/heroku_app | ed998ba147aea6fdef9884f26f2563c6f0a2741c | 65ee5b5b434a1c70bc84d5b7bc63c35c2a6aedb8 | refs/heads/master | 2021-01-10T23:13:06.104198 | 2016-10-11T15:33:40 | 2016-10-11T15:33:40 | 70,607,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-23 23:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0006_auto_20160924_0233'),
]
operations = [
migrations.AddField(
model_name='news',
name='slug',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Slug'),
),
]
| [
"munisisazade@gmail.com"
] | munisisazade@gmail.com |
aa99931fa026da073c3ccbdea753003a3ddf62ec | 59239e9aecc4b0611cd1f82dad74a13b21c0a97e | /rest/Api/imanwser.py | d4b75c496cfc85b66207de2b6c8ad645710390dc | [] | no_license | charlesXu86/FAQ | 53a8160fef2920c1e5005ea4f55a642071986494 | d5e253f44377f63b3af9accd9eab39fc599ed7a7 | refs/heads/master | 2023-01-10T10:45:21.403238 | 2020-11-11T13:40:23 | 2020-11-11T13:40:23 | 278,859,443 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : imanwser.py
@Time : 2020/7/11 6:17 下午
@Desc : im
"""
from model.FAQ import FAQ
def get_imanwser(msg):
result = {
'domain': '',
'content': '',
'type': 0,
'classId': 0,
'confidence': 0.978787,
'errorTime': 0,
'questionId': 0,
'transferFlag': 0,
'relatedQuestion': [],
'questionList': [],
'link': 'zrg'
}
robotUserData = {
"showEvaluate": 0,
"answer_type": "faq",
"is_many_answer": False,
"highLight": 0, # 是否需要前端进行高亮显示,一般在触发敏感词严重时会触发。
"sensitive_hit": "警告", # 敏感词,触发了哪个敏感词就列出来
"docid": 260801,
"search_id": "2156_1001_594726674", # 机器人标准问的Id,用于对机器人进行评价
"raw_query": ""
}
robot = FAQ(usedVec=False)
anwser = robot.answer(msg, 'simple_pos')
result['user_query'] = msg
robotUserData['raw_query'] = msg
result['content'] = anwser
result['robotUserData'] = robotUserData
return result
def get_imanwser2(msg):
result = {
'content': '',
'confidence': 0.978787
}
robot = FAQ(usedVec=False)
anwser = robot.answer(msg, 'simple_pos')
result['user_query'] = msg
result['content'] = anwser
return result | [
"charlesxu86@163.com"
] | charlesxu86@163.com |
b2aff70c810164f73e9a845e63406d1878b09603 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v6_0_2f/brocade_firmware_rpc/activate_status/input/__init__.py | 5e225c3a63434f93fb5ae9625fe499cce76048a9 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,831 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-firmware - based on the path /brocade_firmware_rpc/activate-status/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__rbridge_id',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__rbridge_id = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_firmware_rpc', u'activate-status', u'input']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'activate-status', u'input']
def _get_rbridge_id(self):
"""
Getter method for rbridge_id, mapped from YANG variable /brocade_firmware_rpc/activate_status/input/rbridge_id (rbridge-ids-all-type)
"""
return self.__rbridge_id
def _set_rbridge_id(self, v, load=False):
"""
Setter method for rbridge_id, mapped from YANG variable /brocade_firmware_rpc/activate_status/input/rbridge_id (rbridge-ids-all-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_rbridge_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rbridge_id() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rbridge_id must be of a type compatible with rbridge-ids-all-type""",
'defined-type': "brocade-firmware:rbridge-ids-all-type",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)""",
})
self.__rbridge_id = t
if hasattr(self, '_set'):
self._set()
def _unset_rbridge_id(self):
self.__rbridge_id = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)
rbridge_id = __builtin__.property(_get_rbridge_id, _set_rbridge_id)
_pyangbind_elements = {'rbridge_id': rbridge_id, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
090cb207ec1894ba12c2bc9f032823a404b11034 | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /AtCoder/AtCoder Beginner Contest 261/C.py | 80c46b4e0f48dc7c68108583ac091a34d9dde988 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from collections import defaultdict
n = int(raw_input())
d = defaultdict(int)
for i in xrange(n):
name = raw_input()
if name not in d:
d[name] += 1
print name
else:
print "%s(%d)" % (name, d[name])
d[name] += 1
'''
^^^^^TEST^^^^
5
newfile
newfile
newfolder
newfile
newfolder
---------
newfile
newfile(1)
newfolder
newfile(2)
newfolder(1)
$$$TEST$$$$
^^^^TEST^^^^
11
a
a
a
a
a
a
a
a
a
a
a
------------
a
a(1)
a(2)
a(3)
a(4)
a(5)
a(6)
a(7)
a(8)
a(9)
a(10)
$$$$TEST$$$$
'''
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
3246316975880ba09b2b8cf825732fd6fea112a2 | f4c0172e70ca5ffbe01695245e82a28291f88d04 | /v0.5.3-all/input_data.py | d80d7a4534edc1daf6653dc858591f3c3196b1fa | [] | no_license | huangxinkid/DeepLearning_Wavelet-LSTM | a84e667d5f2db477ac5a9993d8ae329ec9fd115f | b726f99a8631fc48e6943655ace222b0f6b0290b | refs/heads/master | 2020-03-24T07:11:52.832149 | 2018-05-30T18:43:38 | 2018-05-30T18:43:38 | 142,556,218 | 0 | 1 | null | 2018-07-27T09:21:18 | 2018-07-27T09:21:18 | null | UTF-8 | Python | false | false | 5,692 | py | # coding:utf-8
import os.path
import sys
import re
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from MyController import Algorithm_CWT
from Model.Seg import SegFile
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#python pkl 文件读写
import pickle as pickle
import json
def myJsonLoad(filePath):
'''把文件打开从字符串转换成数据类型'''
with open(filePath,'rb') as load_file:
load_dict = json.load(load_file)
return load_dict
class MyData():
def __init__(self):
self.data_filePath = []
self.data_fileName = []
self.data_tpye = []
self.data = []
self.labels = []
# 遍历指定目录,显示目录下的所有文件名
def eachFile(filepath):
pathDir = os.listdir(filepath)
data = MyData()
for allDir in pathDir:
child = os.path.join('%s/%s' % (filepath, allDir))
if os.path.isfile(child):
data.data_filePath.append(child)
data.data_fileName.append(allDir)
theTpye = re.split('\.',allDir)[0]
# print(theTpye)
data.data_tpye.append( theTpye )
# # 显示
# for i in array:
# print(i)
return data
def myReadFile(py_data):
# 新建一个Session
with tf.Session() as sess:
# path = py_data.data_filePath[0]
for path in py_data.data_filePath:
# 读取文件
image_raw_data = tf.gfile.FastGFile(path, 'rb').read()
# 解码
img_data = tf.image.decode_jpeg(image_raw_data)
# print(img_data)
# 转灰度图
# img_data = sess.run(tf.image.rgb_to_grayscale(img_data))
# 改变图片尺寸
resized = tf.image.resize_images(img_data, [28, 28], method=0)
# 设定 shape
# resized = tf.reshape(resized, [28, 28, 1]) #最后一维代表通道数目,如果是rgb则为3
resized = tf.reshape(resized, [28, 28, 3]) #最后一维代表通道数目,如果是rgb则为3
# 标准化
standardization_image = tf.image.per_image_standardization(resized)#标准化
# print(standardization_image)
# print(standardization_image.eval())
resized = tf.reshape(standardization_image, [-1]) #最后一维代表通道数目,如果是rgb则为3
# resized = tf.reshape(resized, [-1]) #最后一维代表通道数目,如果是rgb则为3
## 链接
## resized = tf.expand_dims(resized, 0) # 增加一个维度
## print(resized)
## print(py_data.data)
## test_data = tf.concat(0, [test_data, resized])
py_data.data.append(resized.eval())
'''
# #验证数据转换正确
resized = tf.reshape(py_data.data[0], [28, 28, 3])
resized = np.asarray(resized.eval(), dtype='uint8')
plt.imshow(resized)
plt.show()
'''
def saveData(py_data, filePath_data, filePath_labels):
pass
'''
with tf.Session() as sess:
train_data =tf.convert_to_tensor(np.array( trainData.data ) )
'''
data = np.array( py_data.data )
labels = py_data.labels
# import os
if os.path.exists(filePath_data): #删除文件,可使用以下两种方法。
os.remove(filePath_data) #os.unlink(my_file)
if os.path.exists(filePath_labels): #删除文件,可使用以下两种方法。
os.remove(filePath_labels) #os.unlink(my_file)
with open(filePath_data,'wb') as f:
pickle.dump(data, f)
with open(filePath_labels,'wb') as f:
pickle.dump(labels, f)
print('\ndone!')
def run(filePath_loadData, filePath_data, filePath_labels):
loadData = eachFile(filePath_loadData) #注意:末尾不加/
# myReadFile(loadData)
# saveData(loadData, filePath_data, filePath_labels)
print(loadData.data_fileName)
'''
trainData = eachFile("../Data/logos/train") #注意:末尾不加/
# for i in range(0,len(data.data_fileName)):
# print(data.data_tpye[i])
# print(data.data_oneHot_labels[i])
myReadFile(trainData)
saveData(trainData, 'Model/train_data.plk', 'Model/train_labels.plk')
# print(trainData.data[0].shape)
# print(trainData.data[0])
'''
def opeanFile(fileName):
fileName = fileName
segFile = SegFile()
reply = segFile.loadFile(fileName)
if(reply != 0):
print('error!')
else:
# cwtmatr,freqs = Algorithm.MyPywtCWT( self.segFile.dataList[ self.segFile.TapeNumCurrent ].data )
# cwtmatr = Algorithm_CWT.MyScipyCwt(self.segFile.dataList[ self.segFile.TapeNumCurrent ].data, 128)
print('ok')
print(len(segFile.dataList[1].data))
print('ok')
cwtmatr = Algorithm_CWT.MyWavelets( segFile.dataList[1].data, 128)
print(type(cwtmatr))
print(len(cwtmatr))
print(len(cwtmatr[0]))
if __name__ == "__main__":
print('目前系统的编码为:',sys.getdefaultencoding())
########################################################################
''' 训练数据生成 '''
path = 'C:/锚索测量数据库/LSTMs训练数据/'
path_TagJson = 'C:/锚索测量数据库/LSTMs训练数据/tag.json'
TagDict = myJsonLoad(path_TagJson)
for i in TagDict:
for j in range(1,5):
print(i['items'][str(j)])
opeanFile(path + '1.seg')
# run( path, 'Model/train_seg_data.plk', 'Model/train_seg_labels.plk')
# #注意:path末尾不加/
| [
"hello.sea@qq.com"
] | hello.sea@qq.com |
d1495d37ceae7e3add58730e45b460716c5aef2f | 8e52db8ea2d4911ab852420e17b40ac9c0d6099b | /app/apps/coreExtend/context_processors.py | 825742d8964ccb45a7983f03bbb8f40dbc6e31a3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | hilam/alifewellplayed.com | 11519cfea2507e0bc98393ae31ded1b1a7eb68f5 | 008645d1ccf1774f9fd3a4440cd82a9fc17e944e | refs/heads/master | 2021-05-06T11:16:49.863554 | 2017-12-03T03:50:18 | 2017-12-03T03:50:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | from django.conf import settings
import datetime
def template_settings(request):
return {
'network_name': settings.SITE_NAME,
'network_desc': settings.SITE_DESC,
'network_author': settings.SITE_AUTHOR,
'network_url': settings.SITE_URL,
'network_register': settings.ALLOW_NEW_REGISTRATIONS,
'BASE_URL': 'http://' + request.get_host(),
'BASE_HOST': request.get_host().split(':')[0],
}
def template_times(request):
return {
'today': datetime.datetime.now(),
'yesterday': datetime.datetime.now() - datetime.timedelta(1),
}
| [
"underlost@gmail.com"
] | underlost@gmail.com |
efe7f75a141578bfe254481977e20d289245b4f6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2527/60829/277886.py | 63da376f91a582d2e5ce16635bb2d6e52a6c1726 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | x=list(input())
a=int(input())
b=int(input())
c=int(input())
d=[]
res=[]
for i in range(0,len(x)):
y=x[i]
if a==1:
if y[2]==1 and y[3]<b and y[4]<c:
res.append(y)
else:
if y[3]<b and y[4]<c:
res.append(y)
d={}
for i in range(0,len(res)):
d[res[i][0]]=d[i][1]
d.sort()
print(d) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
85d39ae7b28e171faad85e8dc46454c04082b5b2 | 9e765b38a03c2996e221a42c2a0dbc0fe02824cb | /cracking_the_coding_interview_qs/17.26/sparse_similarity.py | 8f2387f5edc2669ca227f3afc0c75688a110fa18 | [
"Apache-2.0"
] | permissive | angelusualle/algorithms | f709b4ae0c3275cece204d5fb56fd6ec34b4683b | 86286a49db2a755bc57330cb455bcbd8241ea6be | refs/heads/main | 2023-07-02T19:25:11.720114 | 2021-08-12T16:33:00 | 2021-08-12T16:33:00 | 269,791,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from collections import defaultdict
# O(DW + PW)
def sparse_similarity(doc_words):
word_doc = defaultdict(set)
for k in doc_words:
for w in doc_words[k]:
word_doc[w].add(k)
doc_sim = []
for k in doc_words:
docs = set()
for w in doc_words[k]:
for d in word_doc[w]:
if d != k:
docs.add(d)
for d in docs:
sim = len(doc_words[k].intersection(doc_words[d])) / len(doc_words[k].union(doc_words[d]))
doc_sim.append('%s to %s : %f' % (k, d, sim))
return doc_sim
| [
"angelusualle@gmail.com"
] | angelusualle@gmail.com |
d01244a9f5f1e964c98b499b578a85e9ac829603 | 5a545262f7c053c1cfd1f7984664e3220c745161 | /exit_queue_tests/exit_queue_tester.py | 2be58b9aa2c02c4672079958833c81544f2b89c8 | [
"MIT"
] | permissive | ethereum/research | 2c523e5796cfdb6055e0107dc1768fbf164ecad0 | bb873f8ad0e673803ec6a55be26678e1f99b9ece | refs/heads/master | 2023-09-04T19:11:51.507361 | 2023-08-30T01:52:05 | 2023-08-30T01:52:05 | 42,808,596 | 1,774 | 603 | MIT | 2023-04-21T07:20:21 | 2015-09-20T10:13:12 | Python | UTF-8 | Python | false | false | 4,081 | py | from numpy.random import poisson
import math
# Target active staker size
TARGET_AMOUNT_STAKING = 312500
# Average time staking before withdrawal
AVG_STAKING_TIME = 500
# How many withdrawals are permitted in
# one day given a certain validator count?
def withdrawals_per_day(validators, total_eth_exiting):
# return (validators + total_eth_exiting) / 1.07 // 100
return validators // 100
# return validators * max(1, int(math.log2(total_eth_exiting))) / 13.5 // 100
# return int(1 + (total_eth_exiting * validators)**0.5) * 4.9 // 100
# Get the size of the largest staker. This assumes a
# Zipf's law distribution (ie. power law with power=1)
# where the nth largest staker is n times smaller than the
# largest staker. Calculates a value for the largest staker
# such that the total size of nonzero stakers equals the
# target amount staking.
def get_max_staker_size():
    """Largest staker size under the assumed Zipf distribution.

    Finds the biggest ``size`` such that the total stake of all nonzero
    stakers (the n-th largest staker holding roughly size/n, bucketed into
    powers of two) stays below TARGET_AMOUNT_STAKING.  Works by a
    binary-search-style descent on the step ``delta``.
    """
    def zipf_total(size):
        # Total staked amount implied by a largest staker of `size`,
        # summed over power-of-two buckets.
        total = 0
        step = 1
        while size // step:
            total += (size // step) * step
            step *= 2
        return total

    best = 0
    delta = TARGET_AMOUNT_STAKING
    while delta:
        if zipf_total(best + delta) < TARGET_AMOUNT_STAKING:
            best += delta
        else:
            delta //= 2
    return best
# As a simplification, we make all stakers have validator sizes
# be close to the max size divided by a power of two
STAKER_SIZES = [get_max_staker_size()]
while STAKER_SIZES[-1] > 1:
    STAKER_SIZES.append(STAKER_SIZES[-1] // 2)
# Active and not yet exiting stakers: size -> count
stakers = {}
# Exiting stakers: size -> count
exiting = {}
# The exit queue: list of (size, count, day_of_exit_request)
exit_queue = []
# Total eth exiting
total_eth_exiting = 0
# How much of the first exiter's deposit we have processed
processing_current = 0
# Fill the staker set initially
for i, sz in enumerate(STAKER_SIZES):
    stakers[sz] = poisson(2**i)
    sz //= 2  # NOTE(review): no-op — `sz` is rebound by the loop each iteration
# Count withdrawn stakers of each size, and total delays
# incurred by them, so we can eventually compute the average
withdrawn = {}
tot_delays = {}
print("Total staking ETH:", sum(k * v for k,v in stakers.items()))
for day in range(10000):
    # Deposit new stakers at the rate needed to maintain the equilibrium size
    for i, sz in enumerate(STAKER_SIZES):
        stakers[sz] = stakers.get(sz, 0) + poisson(2**i / AVG_STAKING_TIME)
        sz //= 2  # NOTE(review): no-op here as well
    # Each staker has a 1/AVG_STAKING_TIME probability of deciding to leave each day
    for k in stakers.keys():
        exit_count = min(poisson(stakers[k] / AVG_STAKING_TIME), stakers[k])
        if exit_count > 0:
            exit_queue.append((k, exit_count, day))
            stakers[k] -= exit_count
            exiting[k] = exiting.get(k, 0) + exit_count
            total_eth_exiting += exit_count * k
    total_validators = sum(k * v for k,v in stakers.items()) + sum(k * v for k,v in exiting.items())
    # Process the queue
    queue_to_empty_today = withdrawals_per_day(total_validators, total_eth_exiting)
    while queue_to_empty_today > 0 and len(exit_queue) > 0:
        key, exit_count, exit_day = exit_queue[0]
        # Partially process the first exiter (exit next loop)
        if key * exit_count > queue_to_empty_today + processing_current:
            processing_current += queue_to_empty_today
            queue_to_empty_today = 0
        # Finish processing the first exiter (continue next loop)
        else:
            processing_current = 0
            # NOTE(review): `processing_current` is zeroed on the line above,
            # so the subtraction below never credits previously processed
            # work (it always subtracts the full key*exit_count). Confirm
            # whether these two statements should be swapped.
            queue_to_empty_today -= key * exit_count - processing_current
            exit_queue.pop(0)
            exiting[key] -= exit_count
            total_eth_exiting -= exit_count * key
            withdrawn[key] = withdrawn.get(key, 0) + exit_count
            tot_delays[key] = tot_delays.get(key, 0) + (day - exit_day) * exit_count
    if day % 1000 == 999:
        print("Report for day %d: %d total validators %d ETH in exit queue" % ((day+1), total_validators, total_eth_exiting))
        print("Total delays in days")
        for key in STAKER_SIZES:
            print("%d: % .3f (min %.3f)" % (key, (tot_delays.get(key, 0) / withdrawn.get(key, 0.0001)), key / withdrawals_per_day(TARGET_AMOUNT_STAKING, key)))
| [
"v@buterin.com"
] | v@buterin.com |
c0f361d63969b7199316fa90ed6ae5fbb8b313bc | d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d | /test/test_fraud_registration_response.py | a1fe7795c438367145d0919119d7d783346461d5 | [] | no_license | begum-akbay/Python | 2075650e0ddbf1c51823ebd749742646bf221603 | fe8b47e29aae609b7510af2d21e53b8a575857d8 | refs/heads/master | 2023-03-28T00:11:00.997194 | 2021-03-25T16:38:17 | 2021-03-25T16:38:17 | 351,499,957 | 0 | 0 | null | 2021-03-25T16:38:17 | 2021-03-25T16:15:16 | Python | UTF-8 | Python | false | false | 1,178 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.fraud_registration_response import FraudRegistrationResponse # noqa: E501
from openapi_client.rest import ApiException
class TestFraudRegistrationResponse(unittest.TestCase):
    """Unit test stubs for the FraudRegistrationResponse model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testFraudRegistrationResponse(self):
        """Test FraudRegistrationResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.fraud_registration_response.FraudRegistrationResponse()  # noqa: E501
        pass
if __name__ == '__main__':
    # Run the stubs with unittest's CLI runner when executed directly.
    unittest.main()
| [
"emargules@bluepay.com"
] | emargules@bluepay.com |
0c5e91d431b0fb904a58137a798ae36099255259 | 578690b6babcbd691d8b6a96c65c9a17dcbbf27f | /build/catkin_generated/generate_cached_setup.py | 599e598cfa02738d95dc98f0cf8e325d40026202 | [
"MIT"
] | permissive | Sinchiguano/Multi-agent-system | d48c97b5c564cced68de096e0de2f05776d78e20 | 2b54ca6d08ea6fb42050aaf12788a34a53da60a1 | refs/heads/master | 2020-03-31T13:00:35.497228 | 2018-12-12T12:33:32 | 2018-12-12T12:33:32 | 152,238,167 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/casch/turtle/devel;/opt/ros/kinetic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Generate the cached setup shell script and mark it user-executable.
code = generate_environment_script('/home/casch/turtle/devel/env.sh')
output_filename = '/home/casch/turtle/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"cesarsinchiguano@hotmail.es"
] | cesarsinchiguano@hotmail.es |
2a0e897eb6010e63a6c52b6b416a1314f113e5f1 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part001908.py | 3700d31361a5cebcba18b3adf131fc03892afed3 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,500 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher31208(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher (sympy rubi integration).

    Matches the commutative pattern ``e*x**r`` (pattern 0) against subjects
    under Add associativity.  Generated state-machine code — the ``# State
    NNNNN`` comments mark generator states; do not edit by hand.
    """
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, None), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 0
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily-created singleton instance.
        if CommutativeMatcher31208._instance is None:
            CommutativeMatcher31208._instance = CommutativeMatcher31208()
        return CommutativeMatcher31208._instance

    @staticmethod
    def get_match_iter(subject):
        # Yields (pattern_index, substitution) for every way `subject`
        # matches pattern 0. Branches: bare symbol (defaults e=1, r=1),
        # Pow (default e=1), and Mul (delegates to CommutativeMatcher31217).
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 31207
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.1.2.2.2.1.1.0', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 31209
            subst2 = Substitution(subst1)
            try:
                subst2.try_add_variable('i2.2.1.2.2.2.1.1.2', S(1))
            except ValueError:
                pass
            else:
                pass
                # State 31210
                if len(subjects) >= 1:
                    tmp3 = subjects.popleft()
                    subst3 = Substitution(subst2)
                    try:
                        subst3.try_add_variable('i2.2.1.2.2.2.1', tmp3)
                    except ValueError:
                        pass
                    else:
                        pass
                        # State 31211
                        if len(subjects) == 0:
                            pass
                            # 0: e*x**r
                            yield 0, subst3
                    subjects.appendleft(tmp3)
            if len(subjects) >= 1 and isinstance(subjects[0], Pow):
                tmp5 = subjects.popleft()
                subjects6 = deque(tmp5._args)
                # State 31212
                if len(subjects6) >= 1:
                    tmp7 = subjects6.popleft()
                    subst2 = Substitution(subst1)
                    try:
                        subst2.try_add_variable('i2.2.1.2.2.2.1', tmp7)
                    except ValueError:
                        pass
                    else:
                        pass
                        # State 31213
                        subst3 = Substitution(subst2)
                        try:
                            subst3.try_add_variable('i2.2.1.2.2.2.1.1.2', 1)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 31214
                            if len(subjects6) == 0:
                                pass
                                # State 31215
                                if len(subjects) == 0:
                                    pass
                                    # 0: e*x**r
                                    yield 0, subst3
                        if len(subjects6) >= 1:
                            tmp10 = subjects6.popleft()
                            subst3 = Substitution(subst2)
                            try:
                                subst3.try_add_variable('i2.2.1.2.2.2.1.1.2', tmp10)
                            except ValueError:
                                pass
                            else:
                                pass
                                # State 31214
                                if len(subjects6) == 0:
                                    pass
                                    # State 31215
                                    if len(subjects) == 0:
                                        pass
                                        # 0: e*x**r
                                        yield 0, subst3
                            subjects6.appendleft(tmp10)
                    subjects6.appendleft(tmp7)
                subjects.appendleft(tmp5)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp12 = subjects.popleft()
            associative1 = tmp12
            associative_type1 = type(tmp12)
            subjects13 = deque(tmp12._args)
            matcher = CommutativeMatcher31217.get()
            tmp14 = subjects13
            subjects13 = []
            for s in tmp14:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp14, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 31224
                    if len(subjects) == 0:
                        pass
                        # 0: e*x**r
                        yield 0, subst1
            subjects.appendleft(tmp12)
        return
        # Unreachable yield keeps this function a generator even when
        # no branch above yields.
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part001909 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
8a017d59c6a3aae183377f5536bffeb884407ec1 | 382e9ce9b99ce514ca16e08c029c1d0e9e14daef | /Python3/206. 反转链表 copy.py | fe988b891116477c04219ac5948443906ce3e3b2 | [] | no_license | a289237642/myLeetCode | e3e757f66af84f4e5b635c75f56424f92dd12b39 | 70ae207af6272ce5b8d246e3d7f092108a5272d7 | refs/heads/master | 2021-01-06T14:10:26.155778 | 2020-07-14T04:56:08 | 2020-07-14T04:56:08 | 241,354,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | # Definition for singly-linked list.
class ListNode:
    """Singly-linked-list node: `val` payload plus `next` pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse a singly linked list iteratively; return the new head.

        Runs in O(n) time and O(1) extra space.
        """
        reversed_head = None
        node = head
        while node:
            # Simultaneous assignment: detach `node`, hook it onto the
            # reversed prefix, and advance — all in one step.
            node.next, reversed_head, node = reversed_head, node, node.next
        return reversed_head
def main():
    """Build 1->2->3->4->5, print it, reverse it, and print the result."""
    head = cur = ListNode(1)
    cur.next = ListNode(2)
    cur = cur.next
    cur.next = ListNode(3)
    cur = cur.next
    cur.next = ListNode(4)
    cur = cur.next
    cur.next = ListNode(5)
    def print_all(head: ListNode) -> None:
        # Dump the list as "v ->v -> ... NULL" for eyeballing.
        while head:
            print(head.val, '->', end='')
            head = head.next
        print('NULL')
    print_all(head)
    s = Solution()
    res = s.reverseList(head)
    print_all(res)
if __name__ == "__main__":
    main()
| [
"a289237642@163.com"
] | a289237642@163.com |
383231ffdea33c2c4585c837f01e0ce08ac5d8dd | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /kernel/components/boosting/horzsecureboost/horz_secureboosting_arbiter.py | e83061cb0e3f057a976ff5cde2226a0fc6884aae | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,544 | py | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from numpy import random
from common.python.utils import log_utils
from kernel.components.binning.horzfeaturebinning.horz_split_points import HorzFeatureBinningServer
from kernel.components.boosting import BoostingTree
from kernel.components.boosting.horzsecureboost.horz_decision_tree_arbiter import HorzDecisionTreeArbiter
from kernel.components.boosting.horzsecureboost.horz_secureboosting_aggregator import SecureBoostArbiterAggregator
from kernel.optimizer.convergence import converge_func_factory
from kernel.transfer.variables.transfer_class.horz_secure_boost_transfer_variable \
import HorzSecureBoostingTransferVariable
from kernel.utils import consts
# Module-level logger for this component.
LOGGER = log_utils.get_logger()
class HorzSecureBoostingArbiter(BoostingTree):
    """Arbiter-side driver for horizontally federated SecureBoost training.

    The arbiter coordinates the parties: it aggregates binning statistics
    and losses, aligns labels, samples/broadcasts the feature mask per
    tree, and drives per-tree fitting via HorzDecisionTreeArbiter.
    """
    def __init__(self):
        super(HorzSecureBoostingArbiter, self).__init__()
        self.mode = consts.HORZ
        self.feature_num = 0
        self.role = consts.ARBITER
        self.transfer_inst = HorzSecureBoostingTransferVariable()
        self.check_convergence_func = None
        self.tree_dim = None
        self.aggregator = SecureBoostArbiterAggregator()
        self.global_loss_history = []
        # federated_binning obj
        self.binning_obj = HorzFeatureBinningServer()

    def sample_valid_feature(self):
        """Randomly pick a subsample of features; return a boolean mask."""
        chosen_feature = random.choice(range(0, self.feature_num),
                                       max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
        valid_features = [False for i in range(self.feature_num)]
        for fid in chosen_feature:
            valid_features[fid] = True
        return valid_features

    def sync_feature_num(self):
        """Fetch feature counts from all parties; assert they agree."""
        feature_num_list = self.transfer_inst.feature_number.get(idx=-1, suffix=('feat_num',))
        for num in feature_num_list[1:]:
            assert feature_num_list[0] == num
        return feature_num_list[0]

    def sync_stop_flag(self, stop_flag, suffix):
        """Broadcast the early-stop flag to every party."""
        self.transfer_inst.stop_flag.remote(stop_flag, idx=-1, suffix=suffix)

    def sync_current_loss(self, suffix):
        """Collect per-party losses and return the sample-weighted average."""
        loss_status_list = self.transfer_inst.loss_status.get(idx=-1, suffix=suffix)
        total_loss, total_num = 0, 0
        for l_ in loss_status_list:
            total_loss += l_['cur_loss'] * l_['sample_num']
            total_num += l_['sample_num']
        LOGGER.debug('loss status received, total_loss {}, total_num {}'.format(total_loss, total_num))
        return total_loss / total_num

    def sync_tree_dim(self):
        """Fetch tree dimension from all parties; assert they agree."""
        tree_dims = self.transfer_inst.tree_dim.get(idx=-1, suffix=('tree_dim',))
        dim0 = tree_dims[0]
        for dim in tree_dims[1:]:
            assert dim0 == dim
        return dim0

    def check_convergence(self, cur_loss):
        """Return True if the loss has converged per the configured check."""
        LOGGER.debug('checking convergence')
        return self.check_convergence_func.is_converge(cur_loss)

    def generate_flowid(self, round_num, tree_num):
        """Compose a flow id as '<flowid>.<round>.<tree>'."""
        LOGGER.info("generate flowid, flowid {}".format(self.flowid))
        return ".".join(map(str, [self.flowid, round_num, tree_num]))

    def label_alignment(self) -> List:
        """Union the parties' label sets into one sorted global mapping.

        Broadcasts label -> index back to all parties and returns it.
        """
        labels = self.transfer_inst.local_labels.get(idx=-1, suffix=('label_align',))
        label_set = set()
        for local_label in labels:
            label_set.update(local_label)
        global_label = list(label_set)
        global_label = sorted(global_label)
        label_mapping = {v: k for k, v in enumerate(global_label)}
        self.transfer_inst.label_mapping.remote(label_mapping, idx=-1, suffix=('label_mapping',))
        return label_mapping

    def federated_binning(self):
        """Run the server side of federated feature binning."""
        self.binning_obj.average_run()

    def send_valid_features(self, valid_features, epoch_idx, t_idx):
        """Broadcast this tree's feature mask to every party."""
        self.transfer_inst.valid_features.remote(valid_features, idx=-1, suffix=('valid_features', epoch_idx, t_idx))

    def fit(self, data_inst, valid_inst=None):
        """Coordinate the full boosting loop across all parties."""
        self.federated_binning()
        # initializing
        self.feature_num = self.sync_feature_num()
        self.tree_dim = 1
        if self.task_type == consts.CLASSIFICATION:
            label_mapping = self.label_alignment()
            LOGGER.debug('label mapping is {}'.format(label_mapping))
            # Multi-class uses one tree per class; binary uses a single tree.
            self.tree_dim = len(label_mapping) if len(label_mapping) > 2 else 1
        if self.n_iter_no_change:
            self.check_convergence_func = converge_func_factory("diff", self.tol)
        LOGGER.debug('begin to fit a boosting tree')
        for epoch_idx in range(self.num_trees):
            for t_idx in range(self.tree_dim):
                valid_feature = self.sample_valid_feature()
                self.send_valid_features(valid_feature, epoch_idx, t_idx)
                flow_id = self.generate_flowid(epoch_idx, t_idx)
                new_tree = HorzDecisionTreeArbiter(self.tree_param, valid_feature=valid_feature, epoch_idx=epoch_idx,
                                                  tree_idx=t_idx, flow_id=flow_id)
                new_tree.fit()
            global_loss = self.aggregator.aggregate_loss(suffix=(epoch_idx,))
            self.global_loss_history.append(global_loss)
            LOGGER.debug('cur epoch global loss is {},epoch_idx is {}'.format(global_loss, epoch_idx))
            metric_meta = {'abscissa_name': 'iters', 'ordinate_name': 'loss', 'metric_type': 'LOSS'}
            self.callback_metric(metric_name='loss',
                                 metric_namespace='train',
                                 metric_meta=metric_meta,
                                 metric_data=(epoch_idx, global_loss))
            if self.n_iter_no_change:
                # All parties must agree on stopping; arbiter decides and broadcasts.
                should_stop = self.aggregator.broadcast_converge_status(self.check_convergence, (global_loss,),
                                                                        suffix=(epoch_idx,))
                LOGGER.debug('stop flag sent')
                if should_stop:
                    break
            self.tracker.add_task_progress(1)
        self.callback_metric("loss",
                             "train",
                             {'abscissa_name': 'iters', 'ordinate_name': 'loss', 'metric_type': 'LOSS'},
                             metric_data=("Best", min(self.global_loss_history)))
        LOGGER.debug('fitting horz decision tree done')

    def predict(self, data_inst):
        # NOTE(review): body appears truncated in this copy of the file —
        # only the debug line is present; confirm against upstream source.
        LOGGER.debug('start predicting')
| [
"winter.zou@welab-inc.com"
] | winter.zou@welab-inc.com |
35ba3b1715093fb966f96ff2d6575a4a330714a5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02613/s533268389.py | 55553dc522e04c9c67b08c00fbde302fb1cc5c78 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | N = int(input())
# Read N verdict strings from stdin and tally each AtCoder-style verdict.
S = [input() for _ in range(N)]
dic = {"AC":0, "WA":0, "TLE":0, "RE":0}
for i in range(N):
    dic[S[i]] += 1
print("AC x {}".format(dic["AC"]))
print("WA x {}".format(dic["WA"]))
print("TLE x {}".format(dic["TLE"]))
print("RE x {}".format(dic["RE"]))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4b49df01e8cff7cd872cc4fdd703ee17d9f854de | d7e098fe9086a5a1a437e96e8ad977ad95f5457d | /lunch/lunch/lunch/settings/base.py | 9769cfaf61e1ab197d14a8dea7d2389d6630640c | [] | no_license | victorzhangw/django-tutorial-for-programmers | 6c03cca7ecdbe870d4769a0ff31d903f5d1e2553 | 98b734b83189e69dfdf39a0bda3376769d2ecc68 | refs/heads/master | 2021-01-15T14:11:41.015793 | 2014-10-20T02:57:13 | 2014-10-20T02:57:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | """
Django settings for lunch project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Three dirname() calls because this file lives in lunch/lunch/settings/.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
from django.core.urlresolvers import reverse_lazy
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'pages',
    'events',
    'stores',
    'base',
    'braces',
    'crispy_forms',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'lunch.urls'
WSGI_APPLICATION = 'lunch.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# After login, send users to the 'home' URL (lazy: URLconf loads later).
LOGIN_REDIRECT_URL = reverse_lazy('home')
CRISPY_TEMPLATE_PACK = 'bootstrap3'
| [
"uranusjr@gmail.com"
] | uranusjr@gmail.com |
c7740205e18c19615aa6a9809aeadbeba39e5275 | c78a135789f13c257da664bfbbe9a50e0aa34fc2 | /code/testing.py | f55e8bf3cc14b1181b60946345825c43c908d6d7 | [
"MIT"
] | permissive | mkness/TheCannon | f6049a7dab16ab38b32b228cdc68dfadc3d1df72 | da1aa0baa3d42e79b6cd1e511da477a021547936 | refs/heads/master | 2021-01-17T04:02:11.378483 | 2017-11-08T23:54:05 | 2017-11-08T23:54:05 | 22,962,109 | 6 | 1 | null | 2014-12-15T13:55:49 | 2014-08-14T17:14:40 | Python | UTF-8 | Python | false | false | 1,035 | py | def main():
nobservations = 4
a, b, c = 3.0, 2.0, 1.0
f, x, y, z = generate_data(nobservations, a, b, c)
# print 'Linear results (should be {}, {}, {}):'.format(a, b, c)
# print linear_invert(f, x, y, z)
#
# print 'Non-linear results (should be {}, {}, {}):'.format(a, b, c)
# print nonlinear_invert(f, x, y, z)
def generate_data(nobservations, a, b, c, noise_level=0.01):
    """Draw uniform random observation points and evaluate func() plus noise.

    Returns (f, x, y, z) with x, y, z ~ U[0, 1).
    NOTE(review): noise_level is applied twice — as amplitude AND as the
    normal's scale — giving noise of order noise_level**2; confirm intended.
    """
    x, y, z = np.random.random((3, nobservations))
    noise = noise_level * np.random.normal(0, noise_level, nobservations)
    f = func(x, y, z, a, b, c) + noise
    return f, x, y, z
def func(x, y, z, a, b, c):
    """Model surface: f = a^2 + b^2*x + a*b*cos(c)*y + a*b*sin(c)*z.

    Works elementwise on array inputs via numpy broadcasting.
    """
    # Terms kept in the same multiplication/addition order as before so
    # floating-point results are bit-identical.
    const_term = a**2
    x_term = x * b**2
    y_term = y * a * b * np.cos(c)
    z_term = z * a * b * np.sin(c)
    return const_term + x_term + y_term + z_term
def nonlinear_invert(f, x, y, z):
    """Recover (a, b, c) from observations f at points (x, y, z).

    Uses scipy's curve_fit on func(); returns the fitted parameter array.
    """
    def model_at(observation_points, a, b, c):
        # curve_fit hands back the stacked (3, N) points we pass below.
        px, py, pz = observation_points
        return func(px, py, pz, a, b, c)

    stacked_points = np.vstack([x, y, z])
    params, cov = opt.curve_fit(model_at, stacked_points, f)
    return params
# Runs the demo unconditionally on import/execution (no __main__ guard).
main()
#nonlinear_invert(0.98,1,2,3)
| [
"whatever"
] | whatever |
c7696f623e0fb4101aab65316da05917049a962a | f2807f439602ba047048791318e925bd43037089 | /src/GraphEmb_2/metapath2vec_g_executor.py | 9bc6984b526f3b4dc146a745a3223739f3cc0ae3 | [] | no_license | ddatta-DAC/AD_v2 | 49a88bb75d0e1b0753ca7df2da201dde57302cf9 | bfa9216da7d05d8a3a77c2bbef87f5563534e33e | refs/heads/test1 | 2022-12-01T23:24:10.365604 | 2020-04-29T19:58:44 | 2020-04-29T19:58:44 | 229,307,044 | 0 | 0 | null | 2022-11-21T21:55:15 | 2019-12-20T17:19:14 | Jupyter Notebook | UTF-8 | Python | false | false | 4,408 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# ---------------
# Author : Debanjan Datta
# Email : ddatta@vt.edu
# ---------------
import yaml
import glob
import pandas as pd
import numpy as np
import os
import argparse
import pickle
import sys
sys.path.append('./../..')
sys.path.append('./..')
try:
from .metapath2vec_g import metapath2vec_g_model
except:
from metapath2vec_g import metapath2vec_g_model
try:
from src.data_fetcher import data_fetcher_v2 as data_fetcher
except:
from .src.data_fetcher import data_fetcher_v2 as data_fetcher
# Module-level path/config state; populated by set_up_config().
model_name = 'metapath2vec_gensim'
DIR = None
config_file = 'config.yaml'
model_use_data_DIR = None
randomWalk_DIR = None
SOURCE_DATA_DIR_1 = None
SOURCE_DATA_DIR_2 = None
mp2v_g_data_dir = None
text_data_file = None
model_save_loc = None
model_save_file = None
weights_np_save_path = None
def set_up_config(_DIR = None):
    """Load config.yaml and populate all module-level paths for `_DIR`.

    Creates output directories as needed and calls setup_data() to build
    the gensim corpus file. Mutates module globals as a side effect.
    """
    global CONFIG
    global config_file
    global DIR
    global model_use_data_DIR
    global serializedRandomWalk_DIR
    global randomWalk_DIR
    global model_name
    global model_weights_data
    global SOURCE_DATA_DIR_1
    global SOURCE_DATA_DIR_2
    global mp2v_g_data_dir
    global text_data_file
    global RW_dir
    global model_save_loc
    global model_save_file
    global weights_np_save_path
    if _DIR is not None:
        DIR = _DIR
    with open(config_file) as f:
        CONFIG = yaml.safe_load(f)
    if _DIR is None:
        DIR = CONFIG['DIR']
    else:
        DIR = _DIR
    SOURCE_DATA_DIR_1 = os.path.join(
        CONFIG['SOURCE_DATA_DIR_1'], DIR
    )
    SOURCE_DATA_DIR_2 = os.path.join(
        CONFIG['SOURCE_DATA_DIR_2'], DIR
    )
    model_use_data_DIR = CONFIG['model_use_data_DIR']
    model_use_data_DIR = os.path.join(model_use_data_DIR, DIR)
    model_weights_data = CONFIG['model_weights_data']
    if not os.path.exists(model_weights_data):
        os.mkdir(model_weights_data)
    model_weights_data = os.path.join(
        model_weights_data ,DIR , model_name
    )
    RW_dir = CONFIG['RW_Samples_DIR']
    RW_dir = os.path.join(model_use_data_DIR, RW_dir)
    mp2v_g_data_dir = CONFIG['mp2v_g_data_dir']
    mp2v_g_data_dir = os.path.join(model_use_data_DIR, mp2v_g_data_dir)
    if not os.path.exists(mp2v_g_data_dir):
        os.mkdir(mp2v_g_data_dir)
    text_data_file = os.path.join(mp2v_g_data_dir, 'gensim_corpus.txt')
    setup_data()
    model_save_path = CONFIG['model_weights_data']
    if not os.path.exists(model_save_path):
        os.mkdir(model_save_path)
    model_save_path = os.path.join(model_save_path, model_name)
    if not os.path.exists(model_save_path):
        os.mkdir(model_save_path)
    model_save_loc = os.path.join(model_save_path, DIR)
    # NOTE(review): this re-checks model_save_path (already created above)
    # rather than model_save_loc — model_save_loc may never be created here;
    # confirm whether `model_save_loc` was intended.
    if not os.path.exists(model_save_path):
        os.mkdir(model_save_path)
    model_save_file = 'mp2v_gensim.data'
    weights_np_save_path = os.path.join(model_save_loc,'mp2v_gensim_weights.npy')
    return
def get_domain_dims():
    """Return the domain-dimension mapping for the configured DIR."""
    global CONFIG
    global DIR
    return data_fetcher.get_domain_dims(CONFIG['SOURCE_DATA_DIR_1'], DIR)
# --------------------------------------------------------- #
def setup_data():
    """Concatenate all '*_walks.npy' random-walk files into one text corpus.

    Writes space-separated integer walks (one walk per line) to
    text_data_file for gensim consumption.
    """
    # Check if folder exists
    global model_data_dir
    global text_data_file
    global model_use_data_DIR
    global RW_dir
    if os.path.exists(text_data_file):
        print('Data file present')
        # return  # NOTE(review): early return is commented out, so the
        #           corpus is rebuilt even when the file already exists.
    print(model_use_data_DIR)
    target_files = glob.glob(
        os.path.join(RW_dir,'**_walks.npy')
    )
    res = []
    for _file in target_files:
        np_arr = np.load(_file)
        res.extend(np_arr)
    res = np.array(res)
    np.savetxt(
        text_data_file,
        res,
        fmt ='%d',
        delimiter=' ',
        newline = '\n'
    )
    return
# ========================================================= #
# Script entry: parse the dataset name, set up paths, train the
# gensim metapath2vec model, and dump its entity embedding weights.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--DIR', choices=['us_import1', 'us_import2', 'us_import3'],
    default='us_import1'
)
args = parser.parse_args()
DIR = args.DIR
set_up_config(DIR)
model_obj = metapath2vec_g_model.get_model_obj(
    corpus_txt_file_path = text_data_file,
    emb_size=128,
    model_save_path = os.path.join(model_save_loc,model_save_file),
    load_saved = False
)
domain_dims = get_domain_dims()
# Total entity count across all domains sizes the weight matrix.
entity_count = sum(list(domain_dims.values()))
metapath2vec_g_model.save_weights(
    model_obj,
    entity_count,
    weights_np_save_path
)
| [
"ddatta@vt.edu"
] | ddatta@vt.edu |
521c287bd10bbbd9bbfde41e796c499eaee32c7a | 40d5ae8a553a39145d4396fe58b562d911bbd5da | /battles/migrations/0016_invite.py | 3aeb071dc539b3f05c1667ed638a626600b01840 | [
"MIT"
] | permissive | vanessa/pokebattle | 56a5f5592ddba999cc6a62aa080795a781ce9316 | 3017ad226bc05cc06cb5eb34455c13d959be10ab | refs/heads/develop | 2021-04-06T09:12:10.829904 | 2018-07-13T07:35:01 | 2018-07-13T07:35:01 | 125,147,775 | 3 | 2 | MIT | 2021-01-05T01:18:02 | 2018-03-14T03:08:22 | Python | UTF-8 | Python | false | false | 863 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-14 18:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Invite model: invitee email plus inviter FK to the user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('battles', '0015_auto_20180514_1722'),
    ]

    operations = [
        migrations.CreateModel(
            name='Invite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('invitee', models.EmailField(max_length=254)),
                ('inviter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invites', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"vanessa@vinta.com.br"
] | vanessa@vinta.com.br |
78a5c21745ea8870d2d34bb1b868db982f4cf262 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/managednetwork/get_scope_assignment.py | f9ad938e743b2ff22373807430e29e91f6ab691a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,226 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this generated module.
__all__ = [
    'GetScopeAssignmentResult',
    'AwaitableGetScopeAssignmentResult',
    'get_scope_assignment',
]
@pulumi.output_type
class GetScopeAssignmentResult:
    """
    The Managed Network scope assignment resource (lookup result).
    Auto-generated by the Pulumi SDK Generator — do not edit by hand.
    """
    def __init__(__self__, assigned_managed_network=None, etag=None, id=None, location=None, name=None, provisioning_state=None, type=None):
        # Each field is type-checked (str when truthy) then stored via
        # pulumi.set so @pulumi.getter properties can retrieve it.
        if assigned_managed_network and not isinstance(assigned_managed_network, str):
            raise TypeError("Expected argument 'assigned_managed_network' to be a str")
        pulumi.set(__self__, "assigned_managed_network", assigned_managed_network)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="assignedManagedNetwork")
    def assigned_managed_network(self) -> Optional[str]:
        """
        The managed network ID with scope will be assigned to.
        """
        return pulumi.get(self, "assigned_managed_network")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the ManagedNetwork resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
        """
        return pulumi.get(self, "type")
class AwaitableGetScopeAssignmentResult(GetScopeAssignmentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetScopeAssignmentResult(
assigned_managed_network=self.assigned_managed_network,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type)
def get_scope_assignment(scope: Optional[str] = None,
scope_assignment_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScopeAssignmentResult:
"""
The Managed Network resource
API Version: 2019-06-01-preview.
:param str scope: The base resource of the scope assignment.
:param str scope_assignment_name: The name of the scope assignment to get.
"""
__args__ = dict()
__args__['scope'] = scope
__args__['scopeAssignmentName'] = scope_assignment_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:managednetwork:getScopeAssignment', __args__, opts=opts, typ=GetScopeAssignmentResult).value
return AwaitableGetScopeAssignmentResult(
assigned_managed_network=__ret__.assigned_managed_network,
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
caf39698290e129ad3792b9107ca5687a357449d | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/dygraph_to_static/test_layer_hook.py | 55f4681a99005507aabe9e6bededeba3816e3732 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 2,894 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
import paddle
def forward_post_hook1(layer, input, output):
return output + output
def forward_pre_hook1(layer, input):
input_return = (input[0] * 2,)
return input_return
class SimpleNet(paddle.nn.Layer):
def __init__(
self,
):
super().__init__()
self.fc1 = paddle.nn.Linear(10, 10)
# sublayer1 register post hook
self.fc1.register_forward_post_hook(forward_post_hook1)
self.fc2 = paddle.nn.Linear(10, 10)
# sublayer2 register pre hook
self.fc2.register_forward_pre_hook(forward_pre_hook1)
# register pre/post hook
self.register_forward_pre_hook(forward_pre_hook1)
self.register_forward_post_hook(forward_post_hook1)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
out = paddle.mean(x)
return out
class TestNestLayerHook(unittest.TestCase):
def setUp(self):
paddle.seed(2022)
self.x = paddle.randn([4, 10])
self.temp_dir = tempfile.TemporaryDirectory()
self.path = os.path.join(self.temp_dir.name, 'net_hook')
def tearDown(self):
self.temp_dir.cleanup()
def train_net(self, to_static=False):
paddle.seed(2022)
net = SimpleNet()
if to_static:
net = paddle.jit.to_static(net)
out = net(self.x)
if to_static:
paddle.jit.save(net, self.path)
return float(out)
def load_train(self):
net = paddle.jit.load(self.path)
out = net(self.x)
return float(out)
def test_hook(self):
dy_out = self.train_net(to_static=False)
st_out = self.train_net(to_static=True)
load_out = self.load_train()
print(st_out, dy_out, load_out)
np.testing.assert_allclose(
st_out,
dy_out,
rtol=1e-05,
err_msg='dygraph_res is {}\nstatic_res is {}'.format(
dy_out, st_out
),
)
np.testing.assert_allclose(
st_out,
load_out,
rtol=1e-05,
err_msg=f'load_out is {load_out}\nstatic_res is {st_out}',
)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
7c4f05135628bb093508fbb078b0550daa3593df | d7defc3b3753cecb97c6f2874c3e5210bf8557ce | /dashboard/admin.py | 16726e9cd1a13fb231939d07c2de53a659ce7219 | [] | no_license | vineetkashyap/tranageapp | 6f769febecc3d92827da1ea98707ae17a13f5b89 | cfc1d9b36f38fbebc290886661c25b05829412ec | refs/heads/master | 2023-06-02T00:05:30.059420 | 2021-06-22T11:26:12 | 2021-06-22T11:26:12 | 365,577,655 | 0 | 1 | null | 2021-06-21T17:18:55 | 2021-05-08T17:50:20 | CSS | UTF-8 | Python | false | false | 755 | py | from django.contrib import admin
from .models import Add_update,Add_project,Employee_model
# Register your models here.
@admin.register(Add_update)
class Add_update_Admin(admin.ModelAdmin):
list_display = ['id','heading','description','location','expiry_date']
@admin.register(Add_project)
class Add_project_Admin(admin.ModelAdmin):
list_display = ['id','project_name','project_loading_location','project_unloading_location','material_type','per_trip_cost','per_unit_cost','project_start_date','project_end_date','loading_unit','project_loading_employee','get_products']
@admin.register(Employee_model)
class Employee_model_Admin(admin.ModelAdmin):
list_display = ['id','employee_name','employee_email','employee_mobile','assigned_project'] | [
"vkvineet66@gmail.com"
] | vkvineet66@gmail.com |
7a2771ae7026f81dd8da84ca58cb0fc21461e410 | 71a7c4bacbc361fa6966464506468cea247d70bc | /euler45.py | 46e6fe54334aa76e96ac17ef6343ca9944abb526 | [] | no_license | sputnick1124/ProjectEuler | d347297798c5ec8802c9f3148e0ce71e345f71b5 | 8b6b770d54a7932dd8a14ec5ad596405f9a75916 | refs/heads/master | 2020-03-27T03:42:01.547600 | 2015-05-17T19:54:04 | 2015-05-17T19:54:04 | 31,292,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from time import time
def ispent(num):
n=((24*num+1)**.5+1)/6
return int(n)==n
def ishex(num):
n=((8*num+1)**.5+1)/4
return int(n)==n
def isall(num):
return ispent(num) and ishex(num)
t1=time()
tph=40755
t=285
while tph==40755:
t+=1
tri=(t*t+t)/2
if isall(tri):
tph=tri
print(tri)
print(time()-t1)
| [
"sputnick1124@comcast.net"
] | sputnick1124@comcast.net |
2f5249c862bf38aff8a5601b4bafa5931ff822c8 | 75c8b9ff2fa8fcb5ea4029a4e640c1d3cd92fc79 | /examples/mailbox/distributed/pong.py | a3e758fb5efc7548e232cf3c8c7d081b8ce142b3 | [
"ISC"
] | permissive | victorpoluceno/xwing | b4244b86e1196c6b08ad1d9ea8bd92e49dea756d | a6d2b9ddfa1bdf383cbf52d4dc81035222ebeefa | refs/heads/development | 2021-01-18T12:14:13.771561 | 2016-10-22T16:44:21 | 2016-10-22T16:44:21 | 49,905,226 | 12 | 2 | null | 2016-10-22T16:44:21 | 2016-01-18T21:06:00 | Python | UTF-8 | Python | false | false | 436 | py | from xwing.mailbox import init_node, start_node, spawn
async def pong(mailbox):
while True:
data = await mailbox.recv()
if len(data) == 1 and data[0] == 'finished':
print('Pong finished')
break
print('Pong received ping')
message, pid = data
await mailbox.send(pid, 'pong')
if __name__ == '__main__':
init_node()
spawn(pong, name='pong')
start_node()
| [
"victorpoluceno@gmail.com"
] | victorpoluceno@gmail.com |
2ac2f027c2ebfbff13ac3ef457a9b83b6a35ce07 | 42c899d6ef5df0b93cef6257af4f444f8db2d610 | /mtdg/inputters/inputter.py | 9cb9d5ef34ef02afa50941468d932786ae1977e4 | [] | no_license | hongweizeng/Multi-Turn-Dialogue-Generation | 97031255b49f18634fb2acb5c742f3d4a8bcb1a3 | 9dbf7e9e9eb36550bbbb9a98627d28bc5f8906c3 | refs/heads/master | 2022-11-29T18:38:41.190486 | 2018-09-05T01:45:27 | 2018-09-05T01:45:27 | 141,300,694 | 3 | 2 | null | 2022-11-28T18:46:29 | 2018-07-17T14:24:16 | Python | UTF-8 | Python | false | false | 1,461 | py | def read_and_tokenize(dialog_path, min_turn=3):
"""
Read conversation
Args:
dialog_path (str): path of dialog (tsv format)
Return:
dialogs: (list of list of str) [dialog_length, sentence_length]
users: (list of str); [2]
"""
with open(dialog_path, 'r', encoding='utf-8') as f:
# Go through the dialog
first_turn = True
dialog = []
users = []
same_user_utterances = [] # list of sentences of current user
dialog.append(same_user_utterances)
for line in f:
_time, speaker, _listener, sentence = line.split('\t')
users.append(speaker)
if first_turn:
last_speaker = speaker
first_turn = False
# Speaker has changed
if last_speaker != speaker:
same_user_utterances = []
dialog.append(same_user_utterances)
same_user_utterances.append(sentence)
last_speaker = speaker
# All users in conversation (len: 2)
users = list(OrderedDict.fromkeys(users))
# 1. Concatenate consecutive sentences of single user
# 2. Tokenize
dialog = [tokenizer(" ".join(sentence)) for sentence in dialog]
if len(dialog) < min_turn:
print(f"Dialog {dialog_path} length ({len(dialog)}) < minimum required length {min_turn}")
return []
return dialog #, users | [
"zhw1025@gmail.com"
] | zhw1025@gmail.com |
e76ee9bd2e39b0f9c9ba6271d6e3596c11174f81 | e616ea35ead674ebb4e67cae54768aaaeb7d89c9 | /project/alma/sections/enum.py | 22fa36ba0bcd4ed43e21f687c26270bc03f661d2 | [] | no_license | VWApplications/VWAlmaAPI | 12bb1888533cf987739b0e069737afa6337141e1 | 3a8009b17518384c269dfee3c8fe44cbe2567cc0 | refs/heads/master | 2022-04-02T10:26:49.832202 | 2020-02-12T04:46:31 | 2020-02-12T04:46:31 | 161,098,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | from enum import Enum
class MethodologyTypeSet(Enum):
"""
Tipos de metodologia aplicado a seção.
"""
TRADITIONAL = "TRADITIONAL"
TBL = "TBL"
class ConfigTitleSet(Enum):
"""
Títulos de avaliações.
"""
TRADITIONAL = "TRADITIONAL"
EXERCISE = "EXERCISE"
IRAT = "IRAT"
GRAT = "GRAT"
PRACTICAL = "PRACTICAL"
PEER_REVIEW = "PEER_REVIEW" | [
"victorhad@gmail.com"
] | victorhad@gmail.com |
d88a28daced63f58af574720f1837e5b7d61e973 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/205.py | a6d9f9958d2fbf7364dcebe57e5ba6b53efb58ec | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py |
t = int(input())
for i in range(1, t + 1):
value = input();
goal, other_horses = [int(s) for s in value.split(" ")];
#print("gola:{}".format(goal))
#data containers
start_poses = [0 for h in range(other_horses)]
speeds = [0 for h in range(other_horses)]
#read input
for h in range(0, other_horses):
pos, speed = [int(s) for s in input().split(" ")];
start_poses[h] = pos;
speeds[h] = speed;
#SOLVE
timeToGoal = -1
for h in range(0, other_horses):
matka = goal - start_poses[h];
aika = matka / speeds[h];
#print("aika:{} matka:{}".format(aika,matka))
if(timeToGoal == -1 or timeToGoal < aika):
timeToGoal = aika;
#print("time to goal {}".format(timeToGoal))
#print(speeds)
#PRINT RESULT
vastausaika = goal / timeToGoal
print("Case #{0:.0f}: {1:.6f}".format(i, vastausaika));
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
2d04b36b8338a3cac4b74f030f5291425fd10e78 | b5f9f93a415a5cc0117a580c5da12804e68c141d | /scripts/motions/cvfilter1.py | 8504c5b1c6492a3996f710f7d07ab0847a7e8899 | [] | no_license | akihikoy/lfd_trick | 71f89d80abc27ffc6fbd5bc609322918a4f8264e | b7bf0189db7bcef07772db17de29302d6e8ba2bf | refs/heads/master | 2021-01-10T14:22:53.341666 | 2016-03-29T18:16:15 | 2016-03-29T18:16:15 | 50,623,958 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,309 | py | #!/usr/bin/python
from core_tool import *
def Help():
return '''Filter for cv (color_detector nodes).
Usage:
cvfilter1
Test this filter
Use CVApplyFilter from other scripts.
'''
#idx: index of color detection node (1 or 2)
def CVApplyFilter(cvfiltered, msg, idx):
if 'amount' not in cvfiltered:
#NOTE: following variables are from two sensors
cvfiltered.amount= [0.0, 0.0]
cvfiltered.spill= [0.0, 0.0]
#Horizontal positions of flow center on camera: [u1,u2]
cvfiltered.term_flow_center= [0.0, 0.0]
cvfiltered.term_flow_var= [0.0, 0.0]
cvfiltered.term_flow_max_dist= [0.0, 0.0]
cvfiltered.flow_amount= [0.0, 0.0]
cvfiltered.flow_amount_sum= [0.0, 0.0]
#Positions of receiver on camera: [u1,v1,u2,v2]
cvfiltered.uv_rcv= [0.0, 0.0, 0.0, 0.0]
#Bounds of receiver on camera: [u1,v1,w1,h1,u2,v2,w2,h2]
cvfiltered.uvbound_rcv= [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#Area (number of pixels) of receiver base
cvfiltered.col_area= [0.0, 0.0]
alpha= 0.2
#NOTE: color-0: marker to detect position, color-1: material (colored)
if msg.col_area[0]>200: #Enough pixels
if cvfiltered.col_area[idx-1]==0.0:
cvfiltered.col_area[idx-1]= msg.col_area[0]
else:
cvfiltered.col_area[idx-1]= (1.0-alpha)*cvfiltered.col_area[idx-1] + alpha*msg.col_area[0]
for i in (0,1): #u,v
if cvfiltered.uv_rcv[2*(idx-1)+i]==0.0:
cvfiltered.uv_rcv[2*(idx-1)+i]= msg.col_center_xy[i]
else:
cvfiltered.uv_rcv[2*(idx-1)+i]= (1.0-alpha)*cvfiltered.uv_rcv[2*(idx-1)+i] + alpha*msg.col_center_xy[i]
for i in (0,1,2,3): #u,v,w,h
if cvfiltered.uvbound_rcv[4*(idx-1)+i]==0.0:
cvfiltered.uvbound_rcv[4*(idx-1)+i]= msg.col_bound[i]
else:
cvfiltered.uvbound_rcv[4*(idx-1)+i]= (1.0-alpha)*cvfiltered.uvbound_rcv[4*(idx-1)+i] + alpha*msg.col_bound[i]
#Sum the material colored blocks
area_rcv= 0.0
area_spill= 0.0
xmin= cvfiltered.uvbound_rcv[4*(idx-1)+0]
xmax= xmin + cvfiltered.uvbound_rcv[4*(idx-1)+2]
ymin= cvfiltered.uvbound_rcv[4*(idx-1)+1] # + cvfiltered.uvbound_rcv[4*(idx-1)+3]
for i in range(msg.nums_blocks[0], msg.nums_blocks[0]+msg.nums_blocks[1]):
ap= (msg.blocks_center_xy[2*i+0], msg.blocks_center_xy[2*i+1])
if ap[0]>=xmin and ap[0]<=xmax and ap[1]<=ymin:
area_rcv+= msg.blocks_area[i]
else:
area_spill+= msg.blocks_area[i]
div= max(200.0, cvfiltered.col_area[idx-1])
#am_adjust= 1.0 / 6.0 #1.5 / 6.0 #For b64
am_adjust= 1.5 #For b65
am_rcv= area_rcv / div * am_adjust
am_spill= area_spill / div / 6.0 * am_adjust
cvfiltered.amount[idx-1]= (1.0-alpha)*cvfiltered.amount[idx-1] + alpha*am_rcv
cvfiltered.spill[idx-1]= (1.0-alpha)*cvfiltered.spill[idx-1] + alpha*am_spill
flow_amount= 0.0
flow_center= 0.0
flow_var= 0.0
for i in range(msg.num_flows):
spddir= (msg.flows_spddir[2*i+0], abs(msg.flows_spddir[2*i+1]))
#if spddir[1]>0.5*math.pi-0.2 and spddir[1]<0.5*math.pi+0.2 and spddir[0]>20.0:
if spddir[0]>2.0: #TEST:FIXME
flow_amount+= msg.flows_amount[i]
flow_center+= msg.flows_xy[2*i+0] * msg.flows_amount[i]
flow_var= (msg.flows_xy[2*i+0]-cvfiltered.term_flow_center[idx-1])**2 * msg.flows_amount[i]
if flow_amount>0.0:
flow_center/= flow_amount
flow_var/= flow_amount
cvfiltered.flow_amount[idx-1]= flow_amount/div
cvfiltered.flow_amount_sum[idx-1]+= cvfiltered.flow_amount[idx-1]
alpha_f= 0.2 / (1.0 + 0.2*cvfiltered.flow_amount_sum[idx-1])
#if idx==1: print '###',cvfiltered.flow_amount_sum[idx-1], alpha_f
if cvfiltered.term_flow_center[idx-1]==0.0:
cvfiltered.term_flow_var[idx-1]= 0.0
cvfiltered.term_flow_max_dist[idx-1]= 0.0
cvfiltered.term_flow_center[idx-1]= flow_center
else:
#flow_var= (flow_center-cvfiltered.term_flow_center[idx-1])**2
cvfiltered.term_flow_var[idx-1]= (1.0-alpha_f)*cvfiltered.term_flow_var[idx-1] + alpha_f*flow_var
cvfiltered.term_flow_max_dist[idx-1]= max(cvfiltered.term_flow_max_dist[idx-1], math.sqrt(flow_var))
cvfiltered.term_flow_center[idx-1]= (1.0-alpha_f)*cvfiltered.term_flow_center[idx-1] + alpha_f*flow_center
else:
cvfiltered.flow_amount[idx-1]= 0.0
def VizCVFiltered(t, cvfiltered, idx, print_data=False):
if print_data:
print cvfiltered.amount[idx-1], cvfiltered.spill[idx-1],
print '\t', cvfiltered.term_flow_center[idx-1], cvfiltered.flow_amount[idx-1]
viz_msg= lfd_vision.msg.ColDetViz()
prim= lfd_vision.msg.ColDetVizPrimitive()
prim.type= prim.LINE
prim.color.r= 255.0; prim.color.g= 128.0; prim.color.b= 0.0
prim.line_width= 2.0
amt= 200.0
prim.param= [cvfiltered.term_flow_center[idx-1], cvfiltered.uvbound_rcv[4*(idx-1)+1],
cvfiltered.term_flow_center[idx-1], cvfiltered.uvbound_rcv[4*(idx-1)+1]-amt ]
viz_msg.objects.append(prim)
prim= lfd_vision.msg.ColDetVizPrimitive()
prim.type= prim.LINE
prim.color.r= 255.0; prim.color.g= 128.0; prim.color.b= 0.0
prim.line_width= 2.0
dev= math.sqrt(cvfiltered.term_flow_var[idx-1])
prim.param= [cvfiltered.term_flow_center[idx-1]-dev, cvfiltered.uvbound_rcv[4*(idx-1)+1],
cvfiltered.term_flow_center[idx-1]+dev, cvfiltered.uvbound_rcv[4*(idx-1)+1] ]
viz_msg.objects.append(prim)
if idx==1: t.pub.coldet_viz.publish(viz_msg)
elif idx==2: t.pub.coldet_viz2.publish(viz_msg)
def PassToOldCallbacks(msg, cvfiltered, idx, amount_observer=None, flow_speed_observer=None):
if amount_observer!=None:
msg2= std_msgs.msg.Float64()
msg2.data= cvfiltered.amount[idx-1]
amount_observer(msg2)
if flow_speed_observer!=None:
msg2= std_msgs.msg.Float64MultiArray()
msg2.data= msg.flow_avr_spddir
flow_speed_observer(msg2)
def Run(t,*args):
#t.ExecuteMotion('cv', 'setup')
#t.ExecuteMotion('cv', 'resume')
t.ExecuteMotion('cv', 'setup', 0)
t.ExecuteMotion('cv', 'resume', 0)
cvfiltered= TContainer(debug=True)
t.callback.cv= lambda msg: ( CVApplyFilter(cvfiltered, msg, 1), VizCVFiltered(t, cvfiltered, 1, True) )
t.callback.cv2= lambda msg: ( CVApplyFilter(cvfiltered, msg, 2), VizCVFiltered(t, cvfiltered, 2, True) )
time.sleep(20.0)
t.callback.cv= None
t.callback.cv2= None
#t.ExecuteMotion('cv', 'pause')
t.ExecuteMotion('cv', 'pause', 0)
| [
"info@akihikoy.net"
] | info@akihikoy.net |
f540455aeea0a47f0e5910571ffd339bbb8d69e8 | 33982dafec0e388d911853ebd81f7870a364f39e | /src/micro_client/registry/__init__.py | 53b6fa80b624f65047510adcc05bac5834516859 | [
"MIT"
] | permissive | ashcrow/micro-client | cc0cea27ceee21b36addda0c2075b31f7ebd8eb6 | cfe6ac45606eea7310a2336c5e733e2b540d5e01 | refs/heads/master | 2021-09-15T20:31:40.644763 | 2018-06-10T16:41:47 | 2018-06-10T16:43:13 | 115,768,076 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | # Copyright 2017 Steve 'Ashcrow' Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Registry implementations.
"""
| [
"stevem@gnulinux.net"
] | stevem@gnulinux.net |
88286460c7a9c654aa130ade5dde04444853e9bb | ad9c2138ab8e0e8b50277f294ccdb29ef9d67747 | /tests/endpoints/test_password_reset.py | 98cce8b19c91cd3e9ed20d9460ce405ae3561eee | [
"MIT"
] | permissive | accent-starlette/starlette-auth | 3c6e3804a7d49b673cf108e4874aff426b55da74 | d07f4bf7f856c54d05eb08b0ddff5d1bcbdda3d9 | refs/heads/master | 2022-01-02T02:17:25.041209 | 2021-12-10T17:19:37 | 2021-12-10T17:19:37 | 183,644,203 | 11 | 5 | MIT | 2021-12-10T17:00:58 | 2019-04-26T14:35:29 | Python | UTF-8 | Python | false | false | 2,956 | py | import pytest
from starlette_auth import config
def test_get(client):
response = client.get("/auth/password/reset")
assert response.status_code == 200
assert "form" in response.context
assert "request" in response.context
def test_post_redirects(client, monkeypatch):
# its important here that the post will redirect regardless
# of whether the user exists or not so we specifally dont use a valid email
def fake_send(msg):
raise Exception("An email should not have been sent")
monkeypatch.setattr("starlette_auth.forms.send_message", fake_send)
response = client.post("/auth/password/reset", data={"email": "user@example.com"})
assert response.status_code == 302
assert response.next.url == "http://testserver/auth/password/reset/done"
def test_email_not_sent_if_user_is_not_active(client, user, monkeypatch):
user.is_active = False
user.save()
def fake_send(msg):
raise Exception("An email should not have been sent")
monkeypatch.setattr("starlette_auth.forms.send_message", fake_send)
response = client.post("/auth/password/reset", data={"email": "user@example.com"})
assert response.status_code == 302
assert response.next.url == "http://testserver/auth/password/reset/done"
def test_txt_email_sent_if_user_exists(client, user, monkeypatch):
def fake_send(msg):
assert msg.get_content_maintype() == "text"
assert msg["To"] == user.email
assert msg["Subject"] == "Change Password at example.com"
assert "http://testserver/auth/password/reset" in msg.as_string()
monkeypatch.setattr("starlette_auth.forms.send_message", fake_send)
response = client.post("/auth/password/reset", data={"email": user.email})
assert response.status_code == 302
assert response.next.url == "http://testserver/auth/password/reset/done"
def test_html_email_sent_if_template_is_defined(client, user, monkeypatch):
def fake_send(msg):
assert msg.get_content_maintype() == "multipart"
assert msg["To"] == user.email
assert msg["Subject"] == "Change Password at example.com"
assert "http://testserver/auth/password/reset" in msg.as_string()
monkeypatch.setattr("starlette_auth.forms.send_message", fake_send)
config.reset_pw_html_email_template = "password_reset_body.html"
response = client.post("/auth/password/reset", data={"email": user.email})
assert response.status_code == 302
assert response.next.url == "http://testserver/auth/password/reset/done"
@pytest.mark.parametrize(
"test_data",
[
{},
{"email": ""},
{"email": " "},
{"email": "invalid"},
{"email": "user@invalid"},
],
)
def test_invalid_data(test_data, client, user):
response = client.post("/auth/password/reset", data=test_data)
assert response.status_code == 200
assert response.url == "http://testserver/auth/password/reset"
| [
"stuart@accentdesign.co.uk"
] | stuart@accentdesign.co.uk |
ed74886fda93206bc1650e8609437a1e31ba4732 | c42b8493a2d6fa0729772a4be32770cae185adfe | /nullandnotnull.py | 59860175376670cbdfa4c8eb6c04a907be67f3d2 | [] | no_license | ashishjsharda/Pandas | f834d2e88f62c6b989910fc4ea54475abc270cc5 | a4ffcbc5cb27f4007c56658eb0df08cabc7854d5 | refs/heads/master | 2023-01-30T01:09:56.160918 | 2020-12-15T04:43:45 | 2020-12-15T04:43:45 | 282,204,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | import pandas as pd
import numpy as np
ser=pd.Series([1,5,0,-1,np.nan,12])
print(ser)
print(ser.isnull())
print(ser.isna())
print(ser.notnull())
| [
"noreply@github.com"
] | ashishjsharda.noreply@github.com |
d1c440b76e4ab9e6077d3c54b44f9f8d30eb178c | 1f4204f903657884d9cccfd44b19ecb531b59ded | /test_settings/196_4_8_1000.py | c1b10e5ab910361946660ce5b7f94f0cb0b45ce4 | [] | no_license | fmcc/StylometricAnalyser | 795a8e4abe264ee18ab3bcb34bd128bcd06ac5ca | e86305a63c95d8b533cab4a3be0010c2fee0ff14 | refs/heads/master | 2021-01-23T08:38:44.961082 | 2013-08-31T20:23:36 | 2013-08-31T20:23:36 | 11,097,508 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py |
import os
DB_PATH = os.getcwd() + '/database/greek_texts.db'
LOGGING = True
DB_LOGGING = False
NGRAM_WORDS = False
NGRAM_LENGTHS = {
'MIN': 4,
'MAX': 8
}
NO_SPACES = True
RESTRICT_VECTOR_SPACE = 1000
# If selected, texts will be divided according to their original top-level divisions (Books etc. 'div1' in Perseus's TEI XML
USE_ORIGINAL_DIVISIONS = False
#If USE_ORIGINAL_DIVISIONS is False, the text will be divided into chunks the length defined here. If O the text will not be divided.
DIVISION_LENGTH = 5000
| [
"finlaymccourt@gmail.com"
] | finlaymccourt@gmail.com |
432413c5286986b43d8a1b35ae09e90cc1a0ab84 | 6cefb77248f43559a7aab1ef0a18dc513d408483 | /prokmer.py | e0bd448eb2bc912b48d69ad90a99e67042a4cc98 | [] | no_license | zheins/rosalind_algorithms | 8df10d0028d8c60e06a969c6b0d835fe2c498926 | b9856796e562cf5993f13da75adebed58aa3ca8f | refs/heads/master | 2021-01-10T01:48:20.209208 | 2015-10-31T17:35:39 | 2015-10-31T17:35:39 | 45,252,883 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | import sys
def hamming(pattern, check):
match = False
distance = 0
for i in range(len(pattern)):
if check[i] != pattern[i]:
distance += 1
return distance
def neighbors(s, d):
l = []
if d==0 or len(s)==0:
return[s]
for n in 'ACGT':
d1 = d-1
if n==s[0]:
d1 = d
for m in neighbors(s[1:], d1):
l.append(n + m)
return l
def profileNuc(pro,nuc,seq):
for i in range(len(seq)):
if seq[i] == nuc:
pro[i] += 1
return pro
def enumMers(dna,mers,d,k):
for seq in dna:
for i in range(len(dna)):
if len(dna[i:i+k]) == k:
mers.update(neighbors(dna[i:i+k],d))
def calcProb(apro,cpro,gpro,tpro,mer):
prob = 1
for i in range(len(mer)):
if mer[i] == 'A':
prob = prob * apro[i]
if mer[i] == 'C':
prob = prob * cpro[i]
if mer[i] == 'G':
prob = prob * gpro[i]
if mer[i] == 'T':
prob = prob * tpro[i]
return prob
f = open(sys.argv[1],'r')
ins = f.read().split()
f.close()
sequence = ins[0]
k = int(ins[1])
apro = map(float,ins[2:2+k])
cpro = map(float,ins[2+k:2+2*k])
gpro = map(float,ins[2+2*k:2+3*k])
tpro = map(float,ins[2+3*k:2+4*k])
mers = set()
enumMers(sequence,mers,0,k)
merProbs = dict.fromkeys(mers,0)
for mer in merProbs:
merProbs[mer] = calcProb(apro,cpro,gpro,tpro,mer)
print max(merProbs,key=lambda x: merProbs[x])
| [
"="
] | = |
86e9016bc78025af85dd85536ca4111085b867e2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2403/60839/290002.py | c20b0c3053800cbafb785bcf5e7ba93343c0316d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | x=int(input())
y=int(input())
left=x
ans=[]
for i in range(0,y):
ans.append(0)
i=0
while left!=0:
if left-i-1>=0:
ans[i%y]+=i+1
left=left-i-1
else: #left!=0:
ans[i%y]+=left
left=left-left
i+=1
print(ans) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
bf00bd4f26d68eb020febf813f8adec10ef4cb6c | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/nbconvert/writers/base.py | 914e9c4ffc8e4f2e55879544145da9b06d0d1302 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,770 | py | """
Contains writer base class.
"""
#-----------------------------------------------------------------------------
#Copyright (c) 2013, the IPython Development Team.
#
#Distributed under the terms of the Modified BSD License.
#
#The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.utils.traitlets import List
from ..utils.base import NbConvertBase
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class WriterBase(NbConvertBase):
"""Consumes output from nbconvert export...() methods and writes to a
useful location. """
files = List([], config=True, help="""
List of the files that the notebook references. Files will be
included with written output.""")
def __init__(self, config=None, **kw):
"""
Constructor
"""
super(WriterBase, self).__init__(config=config, **kw)
def write(self, output, resources, **kw):
"""
Consume and write Jinja output.
Parameters
----------
output : string
Conversion results. This string contains the file contents of the
converted file.
resources : dict
Resources created and filled by the nbconvert conversion process.
Includes output from transformers, such as the extract figure
transformer.
"""
raise NotImplementedError()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
f9d4657e2b79d5391ae2e83c6bd47eaa7573ac62 | cd741a0d675ff99c297ed226439561f99ca2932c | /OOP_day01/gun.py | 8917eccc0d7226cbc55e1846378a3f5fc2772b2f | [] | no_license | SoliDeoGloria31/OOP | f5e52461f9e6f60f31af8f6e00641d88443626f2 | b09f6db93879b9f985770488dae513d77aa91942 | refs/heads/master | 2020-04-18T11:03:32.737852 | 2019-01-25T04:59:03 | 2019-01-25T04:59:03 | 167,405,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # gun.py
# 枪械类
class Gun:
def __init__(self,
name, max_bullets, bullets_left,
dest_ratio, firing_bullets):
self.name = name # 名称
self.max_bullets = max_bullets # 弹夹容量
self.bullets_left = bullets_left
self.dest_ratio = dest_ratio # 杀伤系数
self.firing_bullets = firing_bullets # 每次开火击发子弹数量
def reload(self):
self.bullets_left = self.max_bullets
print('填弹完成,剩余子弹%2d发' % self.bullets_left)
def fire(self):
if self.bullets_left <= 0:
print('请按回车键填弹! ')
return
elif self.bullets_left > self.firing_bullets:
self.bullets_left -= self.firing_bullets
else:
self.bullets_left = 0
damage = int(self.dest_ratio * 100)
print('%s开火,杀伤力%d,剩余子弹%d' % (self.name, damage, self.bullets_left))
| [
"mortaltiger@163.com"
] | mortaltiger@163.com |
25c1c8dfa5ffc6fc27bb4c8b331aacd80bc386e7 | 43e900f11e2b230cdc0b2e48007d40294fefd87a | /ReferenceSolution/82.remove-duplicates-from-sorted-list-ii.139653354.ac.py | 72447ddba7e7ef7601e80c11712580e36a28d21c | [] | no_license | DarkAlexWang/leetcode | 02f2ed993688c34d3ce8f95d81b3e36a53ca002f | 89142297559af20cf990a8e40975811b4be36955 | refs/heads/master | 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | #
# [82] Remove Duplicates from Sorted List II
#
# https://leetcode.com/problems/remove-duplicates-from-sorted-list-ii/description/
#
# algorithms
# Medium (29.87%)
# Total Accepted: 127.7K
# Total Submissions: 427.4K
# Testcase Example: '[]'
#
#
# Given a sorted linked list, delete all nodes that have duplicate numbers,
# leaving only distinct numbers from the original list.
#
#
# For example,
# Given 1->2->3->3->4->4->5, return 1->2->5.
# Given 1->1->1->2->3, return 2->3.
#
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
dummy = ListNode(0)
prev = dummy
curr = head
prev.next = curr
while curr:
while curr.next and curr.val == curr.next.val: ## continue move to next node
curr = curr.next
if prev.next != curr:
prev.next = curr.next
else:
prev = prev.next
curr = curr.next
return dummy.next
| [
"wangzhihuan0815@gmail.com"
] | wangzhihuan0815@gmail.com |
a9faca58ec4af00b6958d150bad0356673a11d1d | 057d662a83ed85897e9906d72ea90fe5903dccc5 | /.PyCharmCE2019.2/system/python_stubs/-1247971762/nacl/_sodium.py | f5d9843c516b16c196d8f850286b3017439858e2 | [] | no_license | Karishma00/AnsiblePractice | 19a4980b1f6cca7b251f2cbea3acf9803db6e016 | 932558d48869560a42ba5ba3fb72688696e1868a | refs/heads/master | 2020-08-05T00:05:31.679220 | 2019-10-04T13:07:29 | 2019-10-04T13:07:29 | 212,324,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | # encoding: utf-8
# module nacl._sodium
# from /usr/lib/python3/dist-packages/nacl/_sodium.abi3.so
# by generator 1.147
# no doc
# no imports
# no functions
# no classes
# variables with complex values
ffi = None # (!) real value is '<CompiledFFI object at 0x7fc5088234d0>'
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7fc508503f28>'
__spec__ = None # (!) real value is "ModuleSpec(name='nacl._sodium', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7fc508503f28>, origin='/usr/lib/python3/dist-packages/nacl/_sodium.abi3.so')"
| [
"karishma11198@gmail.com"
] | karishma11198@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.