blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7423d6bc889b8c2fd43e667212ff43a19ae44035 | f316d588441bdb6779bdac4ea611c18053f148c2 | /src/vegindex/generate_ndvi_timeseries.py | 9ab68751f846be29ed435d2ddffa46ac77de7a11 | [
"MIT"
] | permissive | tmilliman/python-vegindex | 4b366296131f2f5455a96c5f224423dc5c5ee9aa | 7fa3052f3590a5a4756b011e21dc335ae9fcc732 | refs/heads/master | 2022-09-19T03:10:32.415988 | 2022-08-16T15:20:18 | 2022-08-16T15:20:18 | 93,645,501 | 14 | 11 | MIT | 2020-04-09T20:06:50 | 2017-06-07T14:36:58 | Python | UTF-8 | Python | false | false | 9,582 | py | #!/usr/bin/env python
"""
Simple script to read in the RGB and IR ROI timeseries and
generate a camera NDVI timeseries csv file. Output
format will be:
"""
import argparse
import os
import sys
from datetime import datetime
from datetime import timedelta
import pandas as pd
# use this because numpy/openblas is automatically multi-threaded.
# NOTE(review): `import pandas` above already imports numpy, so by the
# time these variables are set the BLAS thread pools may already be
# initialized -- the limit may have no effect.  Consider moving these
# two lines above `import pandas as pd`; confirm with the deployment.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
import numpy as np
from vegindex import vegindex as vi
from vegindex.roitimeseries import ROITimeSeries
# set vars
# you can set the archive directory to somewhere else for testing by
# using the env variable, PHENOCAM_ARCHIVE_DIR.
archive_dir = vi.config.archive_dir
# set missing/no-data values (move these to an include?)
# (values come from the vegindex package configuration)
ND_FLOAT = vi.config.ND_FLOAT
ND_INT = vi.config.ND_INT
ND_STRING = vi.config.ND_STRING
def main():
    """Command-line driver.

    Reads the RGB and IR ROI statistics CSV files for a PhenoCam
    site/ROI from the archive, merges them by timestamp, computes the
    camera-NDVI columns (following Petach et al.) and writes a
    ``<site>_<roi>_NDVI_roistats.csv`` file back into the archive.
    """
    # set up command line argument processing
    parser = argparse.ArgumentParser(
        description="Merge RGB and IR stats and calculate camera NDVI"
    )
    # options
    parser.add_argument(
        "-v",
        "--verbose",
        help="increase output verbosity",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-n",
        "--dry-run",
        help="Process data but don't save results",
        action="store_true",
        default=False,
    )
    # positional arguments
    parser.add_argument("site", help="PhenoCam site name")
    parser.add_argument("roiname", help="ROI name, e.g. canopy_0001")
    # get args
    args = parser.parse_args()
    sitename = args.site
    roiname = args.roiname
    verbose = args.verbose
    dryrun = args.dry_run
    if verbose:
        print("site: {0}".format(sitename))
        print("roiname: {0}".format(roiname))
        print("verbose: {0}".format(verbose))
        print("dryrun: {0}".format(dryrun))
    # construct name for roistats files
    indir = os.path.join(archive_dir, sitename, "ROI")
    rgb_file = "{}_{}_roistats.csv".format(sitename, roiname)
    ir_file = "{}_{}_IR_roistats.csv".format(sitename, roiname)
    # set up output filename
    outdir = os.path.join(archive_dir, sitename, "ROI")
    outfile = "{}_{}_NDVI_roistats.csv".format(sitename, roiname)
    outpath = os.path.join(outdir, outfile)
    if verbose:
        print("RGB roistats: ", rgb_file)
        print("IR roistats: ", ir_file)
        print("output file: ", outfile)
    rgb_path = os.path.join(indir, rgb_file)
    ir_path = os.path.join(indir, ir_file)
    # read in the RGB ROI stats file using the vegindex class
    # to get the header information
    # Since this is an update throw exception if the file doesn't
    # already exist
    try:
        roits = ROITimeSeries(site=sitename, ROIListID=roiname)
        roits.readCSV(rgb_path)
    except IOError:
        errmsg = "Unable to read CSV file: {0}\n".format(rgb_path)
        sys.stderr.write(errmsg)
        sys.exit(1)
    # use pandas to read in the RGB and IR CSV files into a data frame
    # Throw an exception if the file doesn't exist
    # NOTE: parse_dates=[[0, 1]] merges the first two columns (date +
    # local time) into a single datetime column named
    # "date_local_std_time".  This list-of-lists form is deprecated in
    # recent pandas releases -- verify before upgrading pandas.
    try:
        df_rgb = pd.read_csv(
            rgb_path, comment="#", parse_dates=[[0, 1]], na_values="NA"
        )
    except IOError:
        errmsg = "Unable to read RGB CSV file: {}\n".format(rgb_path)
        sys.stderr.write(errmsg)
        sys.exit(1)
    try:
        df_ir = pd.read_csv(ir_path, comment="#", parse_dates=[[0, 1]], na_values="NA")
    except IOError:
        errmsg = "Unable to read IR CSV file: {}\n".format(ir_path)
        sys.stderr.write(errmsg)
        sys.exit(1)
    # check the number of rows in each dataframe
    nrows_ir = len(df_ir)
    nrows_rgb = len(df_rgb)
    if verbose:
        print("IR rows: {}".format(nrows_ir))
        print("RGB rows: {}".format(nrows_rgb))
    # Merge the the two dataframes based on datetime. For sites
    # which have been configured with the current PIT scripts the
    # times will match identically. For older sites they will be
    # close but not exact.
    # NOTE(review): merge_asof requires both frames to be sorted on the
    # merge key -- assumed true for roistats files; confirm for
    # hand-edited CSVs.
    dt_tolerance = timedelta(minutes=10)
    df_combined = pd.merge_asof(
        df_rgb,
        df_ir,
        on="date_local_std_time",
        suffixes=("_rgb", "_ir"),
        direction="nearest",
        tolerance=dt_tolerance,
    )
    # eliminate rows where there is no matching IR filename
    df_combined = df_combined[df_combined.filename_ir.notnull()]
    len_combined = len(df_combined)
    if verbose:
        print("Matched rows: {}".format(len_combined))
    # eliminate rows where there is no RGB or IR exposure
    df_combined = df_combined[df_combined.exposure_ir.notnull()]
    df_combined = df_combined[df_combined.exposure_rgb.notnull()]
    # eliminate rows where either exposure is 0. This is
    # an indication that the OCR of the exposure failed so maybe
    # letting this generate an error would be better. The
    # resulting CSV would have "inf" or "-inf" in the output.
    df_combined = df_combined[df_combined.exposure_ir != 0]
    df_combined = df_combined[df_combined.exposure_rgb != 0]
    # eliminate rows where there is no DN values
    df_combined = df_combined[df_combined.r_mean.notnull()]
    df_combined = df_combined[df_combined.g_mean.notnull()]
    df_combined = df_combined[df_combined.b_mean.notnull()]
    df_combined = df_combined[df_combined.ir_mean.notnull()]
    # add some columns following Petach, et al.
    # Y: luma-style weighted sum of the RGB digital numbers
    df_combined["Y"] = (
        0.3 * df_combined["r_mean"]
        + 0.59 * df_combined["g_mean"]
        + 0.11 * df_combined["b_mean"]
    )
    # primed quantities are exposure-adjusted (divide by sqrt(exposure))
    df_combined["Z_prime"] = df_combined["ir_mean"] / np.sqrt(
        df_combined["exposure_ir"]
    )
    df_combined["R_prime"] = df_combined["r_mean"] / np.sqrt(
        df_combined["exposure_rgb"]
    )
    df_combined["Y_prime"] = df_combined["Y"] / np.sqrt(df_combined["exposure_rgb"])
    # X_prime = IR-adjusted minus luma-adjusted; NDVI_c is the camera NDVI
    df_combined["X_prime"] = df_combined["Z_prime"] - df_combined["Y_prime"]
    df_combined["NDVI_c"] = (df_combined["X_prime"] - df_combined["R_prime"]) / (
        df_combined["X_prime"] + df_combined["R_prime"]
    )
    # add separate columns for date and local_std_time
    df_combined["date"] = df_combined["date_local_std_time"].dt.date
    df_combined["local_std_time"] = df_combined["date_local_std_time"].dt.time
    # convert some columns to integers
    df_combined = df_combined.astype(
        {
            "doy_rgb": "int32",
            "exposure_rgb": "int32",
            "exposure_ir": "int32",
            "mask_index_rgb": "int32",
            "r_mean": "int32",
            "g_mean": "int32",
            "b_mean": "int32",
            "ir_mean": "int32",
        }
    )
    # remove some columns (keep only the published output columns, in
    # the order they should appear in the CSV)
    df_ndvi = df_combined[
        [
            "date",
            "local_std_time",
            "doy_rgb",
            "filename_rgb",
            "filename_ir",
            "solar_elev_rgb",
            "exposure_rgb",
            "exposure_ir",
            "mask_index_rgb",
            "r_mean",
            "g_mean",
            "b_mean",
            "ir_mean",
            "ir_std",
            "ir_5_qtl",
            "ir_10_qtl",
            "ir_25_qtl",
            "ir_50_qtl",
            "ir_75_qtl",
            "ir_90_qtl",
            "ir_95_qtl",
            "gcc",
            "Y",
            "Z_prime",
            "R_prime",
            "Y_prime",
            "X_prime",
            "NDVI_c",
        ]
    ]
    # rename some columns (drop the merge suffixes for published names)
    df_ndvi = df_ndvi.rename(
        columns={
            "doy_rgb": "doy",
            "solar_elev_rgb": "solar_elev",
            "mask_index_rgb": "mask_index",
        }
    )
    if not dryrun:
        writeCSV(roits, df_ndvi, outpath)
def writeCSV(roits, df_ndvi, fpath):
    """
    Write NDVI csv using the rgb ROI timeseries header information and
    the combined dataframe.

    Parameters
    ----------
    roits : ROITimeSeries
        RGB ROI timeseries object; only its metadata attributes
        (site, roitype, sequence_number, lat, lon, elev, tzoffset,
        resizeFlg) are read to build the comment header.
    df_ndvi : pandas.DataFrame
        Merged NDVI dataframe written as the CSV body.
    fpath : str
        Output file path; an existing file is overwritten.
    """
    # set create/update date and time -- the generate script writes the
    # same timestamp for both Creation and Update fields
    created_at = datetime.now()
    datestr = created_at.strftime("%Y-%m-%d")
    timestr = created_at.strftime("%H:%M:%S")

    # assemble the comment header lines
    hdstrings = [
        "#\n",
        "# NDVI statistics timeseries for {0}\n".format(roits.site),
        "#\n",
        "# Site: {0}\n".format(roits.site),
        "# Veg Type: {0}\n".format(roits.roitype),
        "# ROI ID Number: {0:04d}\n".format(roits.sequence_number),
        "# Lat: {0}\n".format(roits.lat),
        "# Lon: {0}\n".format(roits.lon),
        "# Elev: {0}\n".format(roits.elev),
        "# UTC Offset: {0}\n".format(roits.tzoffset),
        "# Resize Flag: {0}\n".format(roits.resizeFlg),
        "# Version: 1\n",
        "# Creation Date: {0}\n".format(datestr),
        "# Creation Time: {0}\n".format(timestr),
        "# Update Date: {0}\n".format(datestr),
        "# Update Time: {0}\n".format(timestr),
        "#\n",
    ]

    # write the header first, then append the CSV data.  This avoids
    # the original read-whole-file / seek / rewrite cycle used to
    # prepend the header after pandas had already written the file.
    with open(fpath, "w") as fh:
        fh.writelines(hdstrings)
    df_ndvi.to_csv(
        fpath, mode="a", sep=",", na_rep="NA", float_format="%.4f", index=False
    )
# script entry point: run main() only when executed directly, not when
# this module is imported
if __name__ == "__main__":
    main()
| [
"thomas.milliman@unh.edu"
] | thomas.milliman@unh.edu |
c0472642787e04491164dc3c9b22141d3f549a5d | 1bc02e52de571718d884deefa88bee73eb9ac1a5 | /fuzz_all.py | 4bddc769eff0a59a114cd770103dd67f84a0a417 | [] | no_license | benquike/cgc-challenge-corpus | f4d8460384a6f50046e651c0dd83eaa7136dca25 | 7695d43518dbb6b0cf347d736c1f1a5206d5a77b | refs/heads/master | 2020-09-14T06:01:01.021039 | 2018-04-21T03:16:35 | 2018-04-21T03:16:35 | 94,469,063 | 0 | 0 | null | 2017-06-15T18:46:54 | 2017-06-15T18:46:54 | null | UTF-8 | Python | false | false | 1,632 | py | #!/usr/bin/env python
import sys
import os
import re
import driller
import fuzzer
import time
def get_cbs_with_vul(root_dir):
    """Collect challenge binaries under ``<root_dir>/bin`` that still
    carry a vulnerability (i.e. everything except fully patched builds).

    Returns a list of paths (bin dir joined with each entry name).
    """
    bin_dir = os.path.join(root_dir, "bin")

    def _keep(name):
        # NOTE(review): the parenthesization below mirrors the original
        # expression's operator precedence -- the '.'/'..' guards bind
        # only to the left-hand clause, and any name containing
        # 'partial' is always kept.  Confirm this (rather than simply
        # excluding fully patched names) is the intended selection.
        return (name != '.' and name != '..' and 'patched' not in name) or (
            'partial' in name
        )

    selected = []
    for entry in os.listdir(bin_dir):
        if _keep(entry):
            selected.append(os.path.join(bin_dir, entry))
    return selected
def main():
    """Fuzz every vulnerable binary of every challenge under root_dir.

    Challenge directories are expected to match NAME_NNNN at the top of
    root_dir; each binary is fuzzed with one AFL instance plus a driller
    callback until AFL reports no pending favored paths.
    (Python 2 script: note the print statements.)
    """
    if len(sys.argv) != 2:
        print "Usage: fuzz_all.py <root_dir>"
        sys.exit(-1)
    root_dir = sys.argv[1]
    # NOTE(review): both quantifiers in '[A-Z]*_[0-9]*' allow empty
    # matches, so any directory containing a leading '_' qualifies;
    # '[A-Z]+_[0-9]+' was probably intended.  Also '!= None' should
    # idiomatically be 'is not None'.  Confirm before changing.
    challenges = [x for x in os.listdir(root_dir) if
                  os.path.isdir(os.path.join(root_dir, x)) and
                  re.match('[A-Z]*_[0-9]*', x) != None]
    # shared AFL work directory on tmpfs; ignore "already exists"
    workdir = "/dev/shm/work/"
    try:
        os.mkdir(workdir)
    except OSError:
        pass
    for ch in challenges:
        print "starting to fuzz:" + ch
        ch_dir = os.path.join(root_dir, ch)
        binaries = get_cbs_with_vul(ch_dir)
        print "binaries:" + str(binaries)
        # note: 'bin' shadows the Python builtin of the same name
        for bin in binaries:
            print "fuzzing " + bin
            # driller kicks in when AFL gets stuck (stuck_callback)
            driller_extension = driller.LocalCallback(num_workers=1)
            phuzzer = fuzzer.Fuzzer(bin, workdir, afl_count=1, create_dictionary=True,
                                    stuck_callback=driller_extension)
            # poll once a minute; stop when stats exist and the master
            # fuzzer has no pending favored paths left to explore
            while True:
                time.sleep(60)
                if bool(phuzzer.stats) and int(phuzzer.stats['fuzzer-master']['pending_favs']) == 0:
                    break
            phuzzer.kill()
            driller_extension.kill()
# script entry point: run main() only when executed directly, not when
# this module is imported
if __name__ == '__main__':
    main()
| [
"peng124@purdue.edu"
] | peng124@purdue.edu |
7ab34c90f6402e871718fc7299fa5701b912a3e5 | 82236c1cf2fe6ca26f52ce4eeae1745cf3cbc5ca | /docs/source/conf.py | 970611753ff44195353547e41808aed5480865fe | [
"Apache-2.0"
] | permissive | CKrawczyk/python-reducers-for-caesar | 8b607fddd7ce36cd81e1b4e2e7079e1a66526d22 | 9c5d9e072906d3fde2497fa61a66e4c8c0113ec2 | refs/heads/master | 2021-06-04T07:35:25.738616 | 2017-08-10T15:56:42 | 2017-08-10T15:56:42 | 91,355,049 | 1 | 2 | Apache-2.0 | 2019-04-03T20:28:31 | 2017-05-15T15:40:00 | Python | UTF-8 | Python | false | false | 5,261 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# panoptes_aggregation documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 7 13:22:24 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../panoptes_aggregation'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinxcontrib.autohttp.flask'
]
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'panoptes_aggregation'
copyright = '2017, Coleman Krawczyk'
author = 'Coleman Krawczyk'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'panoptes_aggregationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'panoptes_aggregation.tex', 'panoptes\\_aggregation Documentation',
'Coleman Krawczyk', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'panoptes_aggregation', 'panoptes_aggregation Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'panoptes_aggregation', 'panoptes_aggregation Documentation',
author, 'panoptes_aggregation', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"coleman.krawczyk@gmail.com"
] | coleman.krawczyk@gmail.com |
8082ee03c63189982d35ade7cefb62e74d0c64e3 | dc0f6bff378bf3943c005a262202ddda7d3ef95f | /webapp/donor/migrations/0001_initial.py | 0f8f5cd92768a19de7a20e1b422409df925b7f0f | [] | no_license | johnsonc/share-food | 78cdf3882a5442de7de308c1c036f776d3811fae | 0fc484dcd117a1d9a69204c805e0c485e0f5d556 | refs/heads/master | 2020-01-23T21:14:10.040708 | 2015-05-20T04:42:14 | 2015-05-20T04:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,721 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the donor app: Donor, Offer, OfferRepetition.

    NOTE(review): this reads as unmodified ``makemigrations`` output.
    The ``default=datetime.datetime(2015, 5, 19, ...)`` values on
    ``time_from``/``time_to`` are timestamps frozen at generation time
    (the usual artifact of evaluating a ``now()``-style default when
    the migration was created), so new rows default to fixed 2015
    dates -- confirm against the model definitions.  Do not hand-edit
    this file if the migration has already been applied; add a new
    migration instead.
    """

    # depends on the shared dictionaries app, the configured user model
    # and the beneficiary app's initial migration
    dependencies = [
        ('dictionaries', '__first__'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('beneficiary', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Donor',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('default_beneficiary_group', models.ManyToManyField(to='beneficiary.BeneficiaryGroup', verbose_name='Beneficiary group')),
                ('user', models.OneToOneField(related_name='donor_profile', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Offer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Delivery name')),
                ('estimated_mass', models.PositiveIntegerField()),
                ('contact_person', models.CharField(max_length=255)),
                ('address', models.CharField(max_length=255, verbose_name='Pick up address')),
                ('driver_info', models.TextField(null=True, verbose_name='Driver information', blank=True)),
                ('time_from', models.DateTimeField(default=datetime.datetime(2015, 5, 19, 9, 43, 13, 125268, tzinfo=utc))),
                ('time_to', models.DateTimeField(default=datetime.datetime(2015, 5, 20, 9, 43, 13, 125300, tzinfo=utc))),
                ('repeating', models.BooleanField(default=False)),
                ('open', models.BooleanField(default=False)),
                ('beneficiary_group', models.ManyToManyField(to='beneficiary.BeneficiaryGroup', verbose_name='Beneficiary group')),
                ('donor', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
                ('food_category', models.ForeignKey(verbose_name='Food category', to='dictionaries.FoodCategory')),
                ('meat_issue', models.ForeignKey(to='dictionaries.MeatIssues')),
                ('not_contain', models.ManyToManyField(to='dictionaries.FoodIngredients', null=True, blank=True)),
                ('packaging', models.ForeignKey(to='dictionaries.PackagingCategory')),
                ('rel_issue', models.ForeignKey(to='dictionaries.ReligiousIssues')),
                ('temperature', models.ForeignKey(to='dictionaries.TemperatureCategory')),
            ],
            options={
                'verbose_name': 'Offer',
                'verbose_name_plural': 'Offers',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OfferRepetition',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('day_freq', models.PositiveSmallIntegerField(verbose_name='Day frequency')),
                ('date_start', models.DateField()),
                ('date_stop', models.DateField()),
                ('days_of_week', models.ManyToManyField(to='dictionaries.DaysOfTheWeek', null=True, blank=True)),
                ('offer', models.ForeignKey(to='donor.Offer')),
            ],
            options={
                'verbose_name': 'Repetition',
                'verbose_name_plural': 'Repetitions',
            },
            bases=(models.Model,),
        ),
    ]
| [
"darek@recoded.co"
] | darek@recoded.co |
07f64f78d296821856c1ef3a04cfa9596a3859d1 | 1c76418fee90f80368f2f843007ebd6a37bfc01f | /GLOBALS.py | 6efbf9f79ee858f37aa06dd7eaf8915877d118f8 | [] | no_license | SyntaxVoid/HighRedshiftGalaxyFinder | e5dfb244bbba53c310de9b7fe414990b04bcb3a0 | 83fad048e37d65a1a7c98727c0d4164c8e84922a | refs/heads/master | 2021-01-20T21:59:06.211431 | 2015-11-19T04:24:58 | 2015-11-19T04:24:58 | 42,703,816 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,705 | py | ## The filters [str] which we are looking at in this project
# Filter/band names analyzed in this project (ACS + WFC3 bands, per the
# catalog headers below) and the dropout selections considered
FILTERS = ['f125w','f160w','f435w','f606w','f775w','f850l']
SELECTIONS = ["b435","i775","v606","z4","z5","z6","z7","z8"]
# Column number corresponding to each column name of my header
MASTER_COL_DICT = {"Number": 0, "RA": 1, "ALPHA_J2000": 1, "DEC": 2,"DELTA_J2000": 2,
    "F125W_FLUX": 3 , "F125W_FLUXERR": 4 , "F125W_MAG": 5 , "F125W_MAGERR": 6 ,
    "F160W_FLUX": 7 , "F160W_FLUXERR": 8 , "F160W_MAG": 9 , "F160W_MAGERR": 10,
    "F435W_FLUX": 11, "F435W_FLUXERR": 12, "F435W_MAG": 13, "F435W_MAGERR": 14,
    "F606W_FLUX": 15, "F606W_FLUXERR": 16, "F606W_MAG": 17, "F606W_MAGERR": 18,
    "F775W_FLUX": 19, "F775W_FLUXERR": 20, "F775W_MAG": 21, "F775W_MAGERR": 22,
    "F850L_FLUX": 23, "F850L_FLUXERR": 24, "F850L_MAG": 25, "F850L_MAGERR": 26}
# Column numbers for the (larger) CANDELS catalog format; see the
# candels_header string further below for the full column listing
MASTER_CANDELS_DICT = {"Number": 0, "IAU_Name": 1, "RA": 2, "ALPHA_J2000": 2,"DEC":3, "DELTA_J2000": 3,
    "F160W_LIMIT_MAG": 4, "FLAGS": 5, "CLASS_STAR": 6, "CITO_U_FLUX": 7, "CITO_U_FLUXERR": 8,
    "CITO_U_WEIGHT": 9, "VIMOS_U_FLUX": 10, "VIMOS_U_FLUXERR": 11, "VIMOS_U_WEIGHT": 12,
    "F435W_FLUX": 13, "F435W_FLUXERR": 14, "F435W_WEIGHT": 15,
    "F606W_FLUX": 16, "F606W_FLUXERR": 17, "F606W_WEIGHT": 18,
    "F775W_FLUX": 19, "F775W_FLUXERR": 20, "F775W_WEIGHT": 21,
    "F814W_FLUX": 22, "F814W_FLUXERR": 23, "F814W_WEIGHT": 24,
    "F850L_FLUX": 25, "F850L_FLUXERR": 26, "F850L_WEIGHT": 27,
    "F098M_FLUX": 28, "F098M_FLUXERR": 29, "F098M_WEIGHT": 30,
    "F105W_FLUX": 31, "F105W_FLUXERR": 32, "F105W_WEIGHT": 33,
    "F125W_FLUX": 34, "F125W_FLUXERR": 35, "F125W_WEIGHT": 36,
    "F160W_FLUX": 37, "F160W_FLUXERR": 38, "F160W_WEIGHT": 39}
# For each dropout selection, the column-index pairs used to form the
# two colors of a color-color diagram: [[x-color cols], [y-color cols]]
# (my-catalog version uses MAG columns; CANDELS version uses FLUX cols)
MY_COLOR_COLOR_OPS = {"b435": [[MASTER_COL_DICT["F435W_MAG"],MASTER_COL_DICT["F606W_MAG"]],
    [MASTER_COL_DICT["F606W_MAG"],MASTER_COL_DICT["F850L_MAG"]]],
    "v606": [[MASTER_COL_DICT["F606W_MAG"],MASTER_COL_DICT["F775W_MAG"]],
    [MASTER_COL_DICT["F775W_MAG"],MASTER_COL_DICT["F850L_MAG"]]],
    "i775": [[MASTER_COL_DICT["F606W_MAG"],MASTER_COL_DICT["F850L_MAG"]],
    [MASTER_COL_DICT["F775W_MAG"],MASTER_COL_DICT["F850L_MAG"]]]}
CANDELS_COLOR_COLOR_OPS = {"b435": [[MASTER_CANDELS_DICT["F435W_FLUX"],MASTER_CANDELS_DICT["F606W_FLUX"]],
    [MASTER_CANDELS_DICT["F606W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]]],
    "v606": [[MASTER_CANDELS_DICT["F606W_FLUX"],MASTER_CANDELS_DICT["F775W_FLUX"]],
    [MASTER_CANDELS_DICT["F775W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]]],
    "i775": [[MASTER_CANDELS_DICT["F606W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]],
    [MASTER_CANDELS_DICT["F775W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]]]}
# Per-selection color-color cut parameters.  NOTE(review): element
# meanings are not documented here -- they look like [x-axis color
# label, y-axis color label, two cut values, x limits, y limits, two
# slope/intercept-style numbers, flag]; confirm against the code that
# consumes COLOR_RULES before relying on this interpretation.
COLOR_RULES = {"b435": ["V-Z","B-V",1.6,1.1,[-1,4],[-1,6],1.10,1.00,'yes'],
    "v606": ["I-Z","V-I",1.3,1.2,[-2,5],[-1,6],1.47,0.89,'yes'],
    "i775": ["V-Z","I-Z",1.2,1.3,[-1,4],[-1,6],1.20,1.30,'yes']}
## The column [int] corresponding to the filter [str] in the Candels column
PUB_COL_DICT = {"f125w":42,"f160w":45,"f435w":21,"f606w":24,"f775w":27,"f850l":33}
## Zerp points [float] of each filter we are analyzing corresponding
## to the units that our map is in. All maps except f435 are in units
## of uJy, which has a corresponding zero point of 23.9.
ZP_f125w = 23.9
ZP_f160w = 23.9
ZP_f435w = 25.665
ZP_f606w = 23.9
ZP_f775w = 23.9
ZP_f850l = 23.9
# Map file paths.  NOTE(review): these lists cover five bands (f125w,
# f160w, f606w, f775w, f850l) while FILTERS also lists f435w -- confirm
# whether the f435w map is handled elsewhere.
MY_MAPS = ["FullMaps/gs_f125w_cropcal.fits","FullMaps/gs_f160w_cropcal.fits","FullMaps/gs_f606w_cropcal.fits",
    "FullMaps/gs_f775w_cropcal.fits","FullMaps/gs_f850l_cropcal.fits"]
CANDELS_MAPS = ["CandelsRescaled/gs_all_candels_ers_udf_f125w_060mas_v0.5_drz.fits",
    "CandelsRescaled/gs_all_candels_ers_udf_f160w_060mas_v0.5_drz.fits",
    "CandelsRescaled/gs_presm4_all_acs_f606w_60mas_v3.0_drz.fits",
    "CandelsRescaled/gs_presm4_all_acs_f775w_60mas_v3.0_drz.fits",
    "CandelsRescaled/gs_presm4_all_acs_f850l_60mas_v3.0_drz.fits"]
SUB_DEST = ["SubtractedMaps/f125w_sub.fits","SubtractedMaps/f160w_sub.fits","SubtractedMaps/f606w_sub.fits",
    "SubtractedMaps/f775w_sub.fits","SubtractedMaps/f850l_sub.fits"]
# Header to add to the catalogs that I generate
header = '''# 1 NUMBER Running object number [count]
# 2 ALPHA_J2000 Right ascension of barycenter (J2000) [deg]
# 3 DELTA_J2000 Declination of barycenter (J2000) [deg]
# 4 F125W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 5 F125W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 6 F125W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 7 F125W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 8 F160W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 9 F160W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 10 F160W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 11 F160W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 12 F435W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 13 F435W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 14 F435W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 15 F435W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 16 F606W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 17 F606W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 18 F606W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 19 F606W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 20 F775W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 21 F775W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 22 F775W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 23 F775_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 24 F850LP_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 25 F850LP_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 26 F850LP_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 27 F850LP_MAGERR_AUTO RMS error for AUTO magnitude [mag]
'''
candels_header = '''# 1 ID (F160W SExtractor ID)
# 2 IAU_Name
# 3 RA (F160W coordinate, J2000, degree)
# 4 DEC (F160W coordinate, J2000, degree)
# 5 F160W_LIMITING_MAGNITUDE (AB)
# 6 FLAGS
# 7 CLASS_STAR (F160W SExtractor S/G classifier output)
# 8 CTIO_U_FLUX (uJy)
# 9 CTIO_U_FLUXERR (uJy)
# 10 CTIO_U_WEIGHT
# 11 VIMOS_U_FLUX (uJy)
# 12 VIMOS_U_FLUXERR (uJy)
# 13 VIMOS_U_WEIGHT
# 14 ACS_F435W_FLUX (uJy)
# 15 ACS_F435W_FLUXERR (uJy)
# 16 ACS_F435W_WEIGHT
# 17 ACS_F606W_FLUX (uJy)
# 18 ACS_F606W_FLUXERR (uJy)
# 19 ACS_F606W_WEIGHT
# 20 ACS_F775W_FLUX (uJy)
# 21 ACS_F775W_FLUXERR (uJy)
# 22 ACS_F775W_WEIGHT
# 23 ACS_F814W_FLUX (uJy)
# 24 ACS_F814W_FLUXERR (uJy)
# 25 ACS_F814W_WEIGHT
# 26 ACS_F850LP_FLUX (uJy)
# 27 ACS_F850LP_FLUXERR (uJy)
# 28 ACS_F850LP_WEIGHT
# 29 WFC3_F098M_FLUX (uJy)
# 30 WFC3_F098M_FLUXERR (uJy)
# 31 WFC3_F098M_WEIGHT
# 32 WFC3_F105W_FLUX (uJy)
# 33 WFC3_F105W_FLUXERR (uJy)
# 34 WFC3_F105W_WEIGHT
# 35 WFC3_F125W_FLUX (uJy)
# 36 WFC3_F125W_FLUXERR (uJy)
# 37 WFC3_F125W_WEIGHT
# 38 WFC3_F160W_FLUX (uJy)
# 39 WFC3_F160W_FLUXERR (uJy)
# 40 WFC3_F160W_WEIGHT
# 41 ISAAC_KS_FLUX (uJy)
# 42 ISAAC_KS_FLUXERR (uJy)
# 43 ISAAC_KS_WEIGHT
# 44 HAWKI_KS_FLUX (uJy)
# 45 HAWKI_KS_FLUXERR (uJy)
# 46 HAWKI_KS_WEIGHT
# 47 IRAC_CH1_FLUX (uJy)
# 48 IRAC_CH1_FLUXERR (uJy)
# 49 IRAC_CH1_WEIGHT
# 50 IRAC_CH2_FLUX (uJy)
# 51 IRAC_CH2_FLUXERR (uJy)
# 52 IRAC_CH2_WEIGHT
# 53 IRAC_CH3_FLUX (uJy)
# 54 IRAC_CH3_FLUXERR (uJy)
# 55 IRAC_CH3_WEIGHT
# 56 IRAC_CH4_FLUX (uJy)
# 57 IRAC_CH4_FLUXERR (uJy)
# 58 IRAC_CH4_WEIGHT
# 59 FLUX_ISO (SExtractor F160W FLUX_ISO, uJy)
# 60 FLUXERR_ISO (SExtractor F160W FLUXERR_ISO, uJy)
# 61 FLUX_AUTO (SExtractor F160W FLUX_AUTO, uJy)
# 62 FLUXERR_AUTO (SExtractor F160W FLUXERR_AUTO, uJy)
# 63 FWHM_IMAGE (FWHM of F160W, pixel, 1 pixel=0.06 arcsec)
# 64 A_IMAGE (F160W SExtractor Profile RMS along major axis, pixel)
# 65 B_IMAGE (F160W SExtractor Profile RMS along minor axis, pixel)
# 66 KRON_RADIUS (F160W SExtractor Kron aperture in units of A or B)
# 67 FLUX_RADIUS_1 (F160W SExtractor 20% of light radius, pixel)
# 68 FLUX_RADIUS_2 (F160W SExtractor 50% of light radius, pixel)
# 69 FLUX_RADIUS_3 (F160W SExtractor 80% of light radius, pixel)
# 70 THETA_IMAGE (F160W SExtractor Position angle (CCW/x), degree)
# 71 APCORR (F160W FLUX_AUTO/FLUX_ISO, applied to ACS and WFC3 bands)
# 72 HOT_FLAG (Source enters the catalog as a hot detection (=1) or a cold detection (=0))
# 73 ISOAREAF_IMAGE (SExtractor F160W Isophotal Area)''' | [
"j.gresl12@gmail.com"
] | j.gresl12@gmail.com |
0d4d5ee0c8dde1162b0de223774319cbefc8f0cf | a139e84cdfeba8da693d5e5996f38e4737ea2a26 | /python/CRedis.py | 34b5d176283725976a33161d548ee8778e89db1c | [] | no_license | TimHuangcheng/Python-Redis-Php-Email | 48f144482bdac96a69680a4dc34ee98d44926f34 | c0f1502779f2646b26ada2b9e7ce742bc958b7b2 | refs/heads/master | 2021-01-10T15:18:51.121404 | 2015-11-18T09:23:25 | 2015-11-18T09:23:25 | 46,407,871 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,947 | py | #!/usr/bin/python
#coding=utf-8
import redis
class CRedis:
    """Thin convenience wrapper around a redis-py connection.

    ``AuthInfo`` is a dict with keys ``'host'``, ``'port'`` and ``'db'``;
    port and db may be strings and are coerced to int before connecting.
    """

    def __init__(self, AuthInfo):
        self.host = AuthInfo.get('host')
        self.port = AuthInfo.get('port')
        self.db = AuthInfo.get('db')
        self.r = redis.Redis(host=self.host, port=int(self.port), db=int(self.db))

    # 1. string commands
    def set(self, key, value):
        """Set key to the given string value."""
        return self.r.set(key, value)

    def setnx(self, key, value):
        """Set key only if it does not already exist ("nx" = not exist)."""
        return self.r.setnx(key, value)

    def setex(self, key, time, value):
        """Set key to value with an expiry of `time` seconds."""
        return self.r.setex(key, time, value)

    def setrange(self, key, num, value):
        """Overwrite part of the string at key, starting at offset num.

        e.g. setrange('name', 8, 'gmail.com') replaces the characters
        from index 8 (inclusive) onward.
        """
        return self.r.setrange(key, num, value)

    def getrange(self, key, start, end):
        """Return the substring of the value at key between start and end."""
        return self.r.getrange(key, start, end)

    def get(self, key):
        """Get one value, or a list of values when key is a list (MGET)."""
        if isinstance(key, list):
            return self.r.mget(key)
        return self.r.get(key)

    def remove(self, key):
        """Delete key."""
        return self.r.delete(key)

    def incr(self, key, default=1):
        """Atomically increment key by `default` (1 if omitted)."""
        # redis-py's incr(key) equals incr(key, 1), so the original
        # special-casing of default == 1 was unnecessary.
        return self.r.incr(key, default)

    def decr(self, key, default=1):
        """Atomically decrement key by `default` (1 if omitted)."""
        return self.r.decr(key, default)

    # 2. hash commands -- user sessions stored in the 'session' hash,
    # keyed by email address
    def hget(self, email):
        """Return the session entry for the given email."""
        return self.r.hget('session', email)

    def hset(self, email, content):
        """Store/replace the session entry for the given email."""
        return self.r.hset('session', email, content)

    def hgetall(self):
        """Return the entire 'session' hash as a dict."""
        return self.r.hgetall('session')

    def hdel(self, name, key=None):
        """Delete field `key` from hash `name` (whole-hash call if key
        is falsy, preserving the original behavior)."""
        if key:
            return self.r.hdel(name, key)
        return self.r.hdel(name)

    def clear(self):
        """Flush the current db (destructive: removes every key)."""
        return self.r.flushdb()

    # 3. list commands -- suitable as a mail queue
    def lpush(self, key, value):
        """Push a string element onto the head of the list at key."""
        return self.r.lpush(key, value)

    def lpop(self, key):
        """Pop and return the head element of the list at key.

        BUG FIX: the original called ``self.r.plush(key)``, which does
        not exist on redis-py clients and always raised AttributeError.
        NOTE(review): the original (Chinese) comment described removal
        from the *tail* (RPOP); if FIFO queue semantics with lpush are
        intended, switch this to ``self.r.rpop(key)``.
        """
        return self.r.lpop(key)

    def pubsub(self):
        """Return a PubSub object for publish/subscribe use."""
        return self.r.pubsub()
#if __name__ == '__main__':
# r = CRedis() | [
"2902060134@qq.com"
] | 2902060134@qq.com |
95a32a1115e5447005cbe060b401c85f07573f8e | c237e91ba68cfe890026a2466e9aa277cbf29948 | /cookbook/datastruct/max_min_n.py | 3583db4636a1a507b65251c4d9c4ae1bf08036c5 | [] | no_license | Chiva-Zhao/pproject | 47350815ae334137dc775df776dc148ecc25237b | 4e39425dea0bec5b990e80954fafb97c3da34bf5 | refs/heads/master | 2021-01-13T01:28:28.247174 | 2019-12-17T12:29:36 | 2019-12-17T12:29:36 | 22,599,348 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | import heapq
# Sample data for the nlargest/nsmallest demos below.
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
print(heapq.nlargest(3, nums)) # Prints [42, 37, 23]
print(heapq.nsmallest(3, nums))
portfolio = [
    {'name': 'IBM', 'shares': 100, 'price': 91.1},
    {'name': 'AAPL', 'shares': 50, 'price': 543.22},
    {'name': 'FB', 'shares': 200, 'price': 21.09},
    {'name': 'HPQ', 'shares': 35, 'price': 31.75},
    {'name': 'YHOO', 'shares': 45, 'price': 16.35},
    {'name': 'ACME', 'shares': 75, 'price': 115.65}
]
# Both functions also accept a key callable -- here ranking by price.
cheap = heapq.nsmallest(3, portfolio, lambda s: s['price'])
expensive = heapq.nlargest(3, portfolio, lambda s: s['price'])
# print(cheap, expensive)
# heapify() reorders nums in place into heap order; repeated heappop()
# then yields the elements smallest-first.
heapq.heapify(nums)
print(nums)
print(heapq.heappop(nums))
print(heapq.heappop(nums))
print(heapq.heappop(nums))
| [
"zhaozhenhua88@gmail.com"
] | zhaozhenhua88@gmail.com |
1e2546393b7153ae301f10162f4eb5e5cfaec0e2 | 1e49f23da9b9b110755162577a4062afa730d3de | /prepinstaadvcdng1.py | 40fda6d1feeab6f74030c65255ed34a569957521 | [] | no_license | lakshman533/Python_programs | d2c7c49c762998dc718efc477af84acb8f42c48b | 758589efd00e8dbd4aeddc8e866d062459cb4545 | refs/heads/main | 2023-08-10T23:31:47.453952 | 2021-09-13T07:07:14 | 2021-09-13T07:07:14 | 405,868,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | import re
# Split on any lowercase vowel; the vowels themselves are discarded,
# so this prints ['', 'bcd', 'fgh', 'j'].
print(re.split(r'[aeiou]', 'abcdefghij'))
"noreply@github.com"
] | noreply@github.com |
bef0fc73db9a1f6dcb706985062db8f7bd09894c | 316333d0aaae398d51f09cee43640f0f7f5f71f6 | /influence.py | 654069b7d22710110981cb1bea0f1f3118aa0cc6 | [] | no_license | nobkins/gurgle | fc127f2c019a41b50c02420cbccbaf38cf1ec2fa | 03deceb5dc00def469584d2efdb691f2fc1a41bb | refs/heads/master | 2021-06-22T09:27:20.916638 | 2017-08-09T18:41:54 | 2017-08-09T18:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,409 | py | from math import pow, sqrt
from urllib2 import urlopen
from urllib import urlencode
from datetime import datetime as dt
from config import Config
import json
import re
import time
# Logger instance used by the functions in this module
_LOGGER = Config.getLogger("influence")
# Configuration for the Google Sheet interaction
__SHEET_URL = Config.getString('sheet', 'url')
__SHEET_API_KEY = Config.getCrypt('sheet', 'apikey')
# Retry policy for posting to the sheet (attempt count / wait seconds /
# socket timeout / max bytes of response read) -- consumed by SendUpdate.
__SHEET_RETRIES = Config.getInteger('sheet', 'retries', 3)
__SHEET_RETRY_WAIT = Config.getInteger('sheet', 'retry_wait', 3)
__SHEET_TIMEOUT = Config.getInteger('sheet', 'timeout', 10)
__SHEET_RESPONSE_BUFFER = Config.getInteger('sheet', 'buffer', 512)
# When True, events whose date is not today's UTC date are discarded.
_TODAY_ONLY = Config.getBoolean('events', 'today_only', True)
# Interested in activity around a specified location
_LOCATION_X = Config.getFloat('location', 'x')
_LOCATION_Y = Config.getFloat('location', 'y')
_LOCATION_Z = Config.getFloat('location', 'z')
# Squared radius of interest -- compared against squared distances to
# avoid a sqrt per event.
_RANGE_SQUARED = Config.getFloat('location', 'distance')**2
_LOGGER.info("Configured for %.1f LY around %s", Config.getFloat('location', 'distance'), Config.getString('location', 'name'))
# Provide regular expressions to remove extraneous text specifiers
_MATCH_GOV = re.compile(r'\$government_(.*);', re.IGNORECASE)
_MATCH_SEC = re.compile(r'\$system_security_(.*);', re.IGNORECASE)
_MATCH_ECO = re.compile(r'\$economy_(.*);', re.IGNORECASE)
# Cache mechanism that attempts to prevent duplicate updates
# While ideally we wanted the Google Sheet to prevent duplicates, this is
# actually very difficult to achieve due to a lack of optimised search functionality.
# Instead we'll attempt to prevent duplicates within a single instance and
# the sheet will handle selecting appropriate entries due to complexities such
# as the BGS Tick Date and whether we trust data around the tick time which
# can change 'on a whim'.
# This 'cache' is date-keyed, but we enforce only having an entry for today (which
# ensures automatic clean-up if we continue to execute over several days).
# The key (date) maps to a single map which maps System Name to a List of Faction
# Influence and State values, which we use to determine if there has been a change
# that we need to communicate to the Google Sheet.
_CACHE_BY_DATE = {}
def ConsumeFSDJump(event):
    """Consumes the FSDJump event (or equivalent subset of Location event)
    provided by Journal, extracting the factions and influence levels.
    """
    # Extract the StarPos to confirm we're interested
    (starPosX, starPosY, starPosZ) = event["StarPos"]
    # Squared distance from the configured location -- compared against the
    # pre-squared range so no sqrt is needed for the common reject path.
    starDist2 = pow(_LOCATION_X-starPosX,2)+pow(_LOCATION_Y-starPosY,2)+pow(_LOCATION_Z-starPosZ,2)
    if starDist2 > _RANGE_SQUARED:
        return
    # Extract the star name which is always provided
    starName = event["StarSystem"]
    # Determine if the timestamp is considered relevant
    # NOTE(review): the slices below assume an ISO-8601 journal timestamp
    # of the form "YYYY-MM-DDTHH:MM:SSZ" -- confirm against the Journal spec.
    timestamp = event["timestamp"]
    eventDate = timestamp[0:10]
    eventTime = timestamp[11:19]
    todayDate = dt.utcnow().strftime("%Y-%m-%d")
    if _TODAY_ONLY and eventDate != todayDate:
        _LOGGER.debug("Event for %s discarded as not today: %s", starName, eventDate)
        return
    # Interested, so we gather information we want to publish
    # Nothing else below here guaranteed to be available
    systemFaction = event.get("SystemFaction", "")
    systemAllegiance = event.get("SystemAllegiance", "")
    systemSecurity = event.get("SystemSecurity", "")
    systemGovernment = event.get("SystemGovernment", "")
    systemEconomy = event.get("SystemEconomy", "")
    distance = sqrt(starDist2)
    # Grab the list of factions, if available
    factionList = event.get("Factions", [])
    # Sort by descending influence (just in case)
    factionList = sorted(factionList, key=lambda faction: float(faction["Influence"]), reverse=True)
    #print "%s %s (%.1fly) %s" % (timestamp, starName, distance, systemFaction)
    #for faction in factionList:
    #    print "  %s at %.1f%% in state %s" % (faction["Name"],faction["Influence"]*100,faction["FactionState"])
    # Only want to update if we have factions to report on...
    if len(factionList) == 0:
        _LOGGER.debug("Event for %s discarded since no factions present.", starName)
    else: # len(factionList) > 0
        _LOGGER.debug("Processing update for %s (%.1fly) from %s", starName, distance, timestamp)
        # Create the update
        update = CreateUpdate(timestamp, starName, systemFaction, factionList)
        update["EventDate"] = eventDate
        update["EventTime"] = eventTime
        # Add the other useful information
        update["Distance"] = distance
        if len(systemAllegiance) > 0:
            update["SystemAllegiance"] = systemAllegiance
        # The _MATCH_* regexes strip the "$...;" specifier wrappers, keeping
        # only the human-readable portion captured by group(1).
        if len(systemSecurity) > 0 and _MATCH_SEC.match(systemSecurity) is not None:
            update["SystemSecurity"] = _MATCH_SEC.match(systemSecurity).group(1)
        if len(systemGovernment) > 0 and _MATCH_GOV.match(systemGovernment) is not None:
            update["SystemGovernment"] = _MATCH_GOV.match(systemGovernment).group(1)
        if len(systemEconomy) > 0 and _MATCH_ECO.match(systemEconomy) is not None:
            update["SystemEconomy"] = _MATCH_ECO.match(systemEconomy).group(1)
        # Send the update, if Cache says we need to
        if CheckCache(eventDate, starName, factionList):
            if SendUpdate(update):
                _LOGGER.info("Processed (sent) update for %s (%.1fly)", starName, distance)
                # Update the Cache Entry (after send so we have definitely send)
                CacheUpdate(eventDate, starName, factionList)
            else:
                _LOGGER.warning("Failed to send update for %s (%.1fly)", starName, distance)
        else:
            _LOGGER.debug("Processed (not sent) update for %s (%.1fly)", starName, distance)
def CheckCache(eventDate, starName, factionList):
    """Decide whether this star's faction data needs to be sent.

    Returns True when today's cache bucket does not exist yet, when the
    star has no cached entry, or when the cached faction list differs
    from the one supplied. Rolling over to a new date empties the whole
    cache, so at most one day's worth of entries is ever retained.
    """
    if eventDate not in _CACHE_BY_DATE:
        # New day (or first event): drop everything, start a fresh bucket.
        _CACHE_BY_DATE.clear()
        _CACHE_BY_DATE[eventDate] = {}
        return True
    cached = _CACHE_BY_DATE[eventDate]
    # Send when unseen, or when the faction data changed since last send.
    return starName not in cached or cached[starName] != factionList
def CacheUpdate(eventDate, starName, factionList):
    """Record factionList as the last-sent data for starName on eventDate.
    The date bucket must already exist (CheckCache creates it)."""
    _CACHE_BY_DATE[eventDate][starName] = factionList
def CreateUpdate(timestamp, starName, systemFaction, factionList):
    """Flatten the event data into the field dictionary the Google Sheet
    upload expects (Faction1*, Faction2*, ... in list order)."""
    data = {
        "Timestamp": timestamp,
        "StarSystem": starName,
        "SystemFaction": systemFaction,
        # Blank placeholders -- the caller overwrites these when the
        # event actually supplies the corresponding values.
        "SystemAllegiance": "",
        "SystemSecurity": "",
        "SystemGovernment": "",
        "SystemEconomy": "",
    }
    for index, faction in enumerate(factionList, 1):
        prefix = "Faction{:d}".format(index)
        data[prefix + "Name"] = faction["Name"]
        data[prefix + "Influence"] = faction["Influence"]
        data[prefix + "State"] = faction["FactionState"]
        data[prefix + "Allegiance"] = faction["Allegiance"]
        data[prefix + "Government"] = faction["Government"]
        # The sheet expects all columns for every faction, so emit an
        # empty string when no pending/recovering states are present.
        pending = [entry["State"] for entry in faction.get("PendingStates", [])]
        data[prefix + "PendingState"] = ",".join(pending)
        recovering = [entry["State"] for entry in faction.get("RecoveringStates", [])]
        data[prefix + "RecoveringState"] = ",".join(recovering)
    return data
def SendUpdate(dictionary):
    """Posts the specified dictionary to the Google Sheet.
    To be successful we need to provide an appropriate API_KEY value, and we also
    want to retry any infrastructure errors (i.e. unable to complete the POST) but
    abandon the entire process if the "application" does not report success (i.e.
    on an invalid token, badly formed request, etc.).

    Returns True only when an HTTP 200 was obtained within the retry budget.
    NOTE: mutates the caller's dictionary by injecting the API key.
    """
    dictionary['API_KEY'] = __SHEET_API_KEY
    data = urlencode(dictionary)
    retries = __SHEET_RETRIES
    success = 0
    response = None
    # Retry loop: only transport-level failures are retried; a 200 with a
    # bad body falls through to the validation below and raises there.
    while success != 200 and retries > 0:
        try:
            request = urlopen(__SHEET_URL, data, __SHEET_TIMEOUT)
            success = request.getcode()
            response = request.read(__SHEET_RESPONSE_BUFFER)
            request.close()
        except Exception, e:
            _LOGGER.info("(Retry %d) Exception while attempting to POST data: %s", retries, str(e))
            retries -= 1
            if retries > 0:
                time.sleep(__SHEET_RETRY_WAIT)
    # Check the response for validity, where "result"="success"
    if success == 200 and response is not None:
        result = json.loads(response) # Throws Exception if JSON not returned
        if (result["result"] != "success"):
            raise Exception("Bad response from Sheet: %s" % result)
        _LOGGER.debug("Success Response: %s" % result)
    return (success == 200)
| [
"marcosparks@users.noreply.github.com"
] | marcosparks@users.noreply.github.com |
de8a9c196a80dde711075fc0f91d2dc1ce5625e9 | 10b22cef27b7cb7f06221954eef6ea678c5289c1 | /database/database_schemas_ms.py | 0ad92f442cd946089275a60618ee4b0020b399d7 | [
"MIT"
] | permissive | mshobair/invitro_cheminformatics | 0c1d7c4c2cfd5e20ee24fffac6a0332d503957df | 17201496c73453accd440646a1ee81726119a59c | refs/heads/main | 2023-04-04T19:06:27.098377 | 2021-03-26T17:07:25 | 2021-03-26T17:07:25 | 348,917,957 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | class Schemas:
"""
Class that contains DATABASE schema names.
"""
chemprop_schema = "sbox_rlougee_chemprop"
dsstox_schema = "ro_20191118_dsstox"
qsar_schema = "sbox_mshobair_qsar_snap"
invitrodb_schema = "prod_internal_invitrodb_v3_3"
information_schema = "information_schema"
| [
"mshobair@v2626umcth038.rtord.epa.gov"
] | mshobair@v2626umcth038.rtord.epa.gov |
8c233f047715954abc685b0149bdc1c86d63168e | 36c00fe2afff4818c937e312ce0c6a79f35e2a77 | /7-kyu/naughty-or-nice-/python/solution.py | 7d97dd81f35376746e01998e5608dffd391051cd | [] | no_license | p-lots/codewars | 0a67b6ee4c91180ff78c648421b9d2d64463ddc3 | 535faeee475c6b398124d6f5002b0e111406e8bb | refs/heads/master | 2023-08-23T22:14:33.635011 | 2023-08-23T13:30:37 | 2023-08-23T13:30:37 | 195,320,309 | 0 | 0 | null | 2023-05-09T19:25:50 | 2019-07-05T01:40:15 | Python | UTF-8 | Python | false | false | 191 | py | def get_nice_names(people):
return [dct['name'] for dct in people if dct['was_nice']]
def get_naughty_names(people):
return [dct['name'] for dct in people if not dct['was_nice']] | [
"paul.calotta@gmail.com"
] | paul.calotta@gmail.com |
81a213de91ddbd14c690ad56d6167b8d2729bf9e | 2c3340c0c9c3effc22ce181506a7c76718485510 | /src/comms/www/index.py | 5f091deb0c740e6670e1ce3017c1a43bb90475de | [] | no_license | samtaufa/nomoa.bsd | 3db5b336c34c8e24f94601129ab4f9682adbbac3 | 592e158be1d8a078625c56bce973449c61fd6451 | refs/heads/master | 2021-01-25T07:34:35.637146 | 2011-10-16T10:28:03 | 2011-10-16T10:28:03 | 688,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from countershape.doc import *
# Prefix applied to every page title in this documentation section.
this.titlePrefix = ns.titlePrefix + "[Communications | WWW] "
# Countershape page tree for the Communications/WWW section: each Page
# maps a markdown source to its navigation title and rendered page title.
pages = [
    Page("portal.md",
        title="Portal",
        pageTitle="Portal"),
    Page("ssl.md",
        title="SSL Certificates",
        pageTitle="SSL Certificates"),
    Page("test.md",
        title="Validation",
        pageTitle="Validating Connectivity"),
    ]
| [
"samtaufa@gmail.com"
] | samtaufa@gmail.com |
fd7329f081a88aa7cd2c61e483d6efb4a6c5fcde | 08e10b36303e5b898eadf358cf53b87109552415 | /drive.py | 1f8c5c81fe3d8bfeafa020dd1ba43d7ae48fa583 | [] | no_license | AlphaLFC/CarND-Behavioral-Cloning-P3 | 480de5b1b4b07e8059bcc786fe5ebebcd013f012 | b52dd1917f48b93b51c4330217436ed67eb3e181 | refs/heads/master | 2021-01-11T19:55:07.369776 | 2017-01-21T02:16:56 | 2017-01-21T02:16:56 | 79,428,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,681 | py | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
# Fix error with Keras and TensorFlow
import tensorflow as tf
tf.python.control_flow_ops = tf
# Socket.IO server that receives telemetry events from the simulator.
sio = socketio.Server()
app = Flask(__name__)
# Populated in __main__ from the model JSON/H5 files given on the CLI.
model = None
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
    """Handle one telemetry frame: predict a steering angle from the
    center-camera image and send the control command back."""
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # NOTE: the received steering/throttle values above are immediately
    # overwritten below, and `speed` is unused; they are kept for reference.
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)
    # Add a leading batch dimension so the model receives (1, H, W, C).
    transformed_image_array = image_array[None, :, :, :]
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = float(model.predict(transformed_image_array, batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 1
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
    """Reset the controls to neutral when a simulator client connects."""
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' event carrying the control values (stringified, as
    the simulator protocol expects) to every connected client."""
    payload = {
        'steering_angle': str(steering_angle),
        'throttle': str(throttle),
    }
    sio.emit("steer", data=payload, skip_sid=True)
if __name__ == '__main__':
    # Load the Keras model named on the command line, then serve the
    # Socket.IO app so the simulator can stream telemetry to it.
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument('model', type=str,
                        help='Path to model definition json. Model weights should be on the same path.')
    args = parser.parse_args()
    with open(args.model, 'r') as jfile:
        # NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
        # then you will have to call:
        #
        # model = model_from_json(json.loads(jfile.read()))\
        #
        # instead.
        model = model_from_json(jfile.read())
    model.compile("adam", "mse")
    # NOTE(review): str.replace swaps every 'json' substring anywhere in
    # the path, not just the extension -- a directory named 'json' would
    # break this.
    weights_file = args.model.replace('json', 'h5')
    model.load_weights(weights_file)
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| [
"noreply@github.com"
] | noreply@github.com |
9bdfa679de6d431d2bd4da3245f7bae4dcc5d385 | 1e91d38e7bfa427884206f02512a436d74a05d93 | /kudos_oss/__main__.py | 66438beaecc51e465e765112a6a76edd872aebb0 | [] | no_license | lel99999/dev_pyCRUD | c7f40d7a89c158045007a8f8f27be9368832cb4c | 069998b67c27a8cb074c79ed3cd0c3aa619a510a | refs/heads/master | 2020-05-04T19:50:40.359268 | 2019-04-18T03:24:45 | 2019-04-18T03:24:45 | 179,410,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from __future__ import absolute_import, print_function
# Route overview for the kudos service (documentation only).
# BUG FIX: these were bare, syntactically invalid lines that made the
# module impossible to import; they are preserved here as comments.
#   GET    /kudos
#   POST   /kudos
#   DELETE /kudos/:id
| [
"luan.le@cfpb.gov"
] | luan.le@cfpb.gov |
70c5eed717e2fbeab808207591ca6080896331f0 | e6f36db5e63315f0c214f921ddc904ad0e0e3831 | /pysendy/pysendy.py | 856c19c690f61adfd2461f600d574081521bff94 | [
"MIT"
] | permissive | pomarec/pysendy | 0a6daa12dd3d75168a4ff246103e2daf9fc2cc1d | e378ce6f267cce31f0c81fd90057486ceaf263a1 | refs/heads/master | 2021-01-15T11:14:20.549366 | 2013-12-07T18:02:41 | 2013-12-07T18:02:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # -*- coding: utf-8 -*-
from .exceptions import *
import requests
class Sendy(object):
    """Minimal client for the Sendy (self-hosted newsletter) HTTP API."""
    def __init__(self, base_url, api_key=''):
        # NOTE(review): api_key is stored but never sent by _post below --
        # verify whether the target Sendy endpoints require it.
        self.api_key = api_key
        self.base_url = base_url
    def subscribe(self, name='', email='', list_id=''):
        """Add `email` (optionally with a display name) to the given list."""
        params = {'name': name, 'email': email, 'list': list_id}
        self._post('/subscribe', params, SUBSCRIPTION_ERRORS)
    def unsubscribe(self, email='', list_id=''):
        """Remove `email` from the given list."""
        params = {'email': email, 'list': list_id}
        self._post('/unsubscribe', params, UNSUBSCRIPTION_ERRORS)
    def _post(self, path, params, errors):
        """POST params to base_url+path; Sendy answers '1' on success.

        Any other body is mapped through `errors` (response text ->
        exception class) and raised, falling back to UserException for
        unknown texts; transport failures raise HttpRequestException.
        """
        url = self.base_url + path
        _params = {'boolean': 'true'}
        _params.update(params)
        try:
            response = requests.post(url, data=_params)
            success = response.text == '1'
            if not success:
                try:
                    err = errors[response.text](response.text)
                except KeyError:
                    err = UserException('Failed [' + path + ']: ' + response.text)
                raise err
        except requests.exceptions.RequestException as e:
            # BUG FIX: `e.message` was removed in Python 3; str(e) is
            # equivalent on Python 2 and correct on Python 3.
            raise HttpRequestException(str(e))
        if response.status_code != 200:
            raise HttpRequestException(response.status_code)
| [
"thiagofa@gmail.com"
] | thiagofa@gmail.com |
6e39762a6673f11ca94947c8499aa363af2b4dd2 | c168fe819b446640957e5e310ef89fcfe28662b3 | /userbenchmark/__init__.py | c9ff1fac46844cf4cb62479ffa15096e9436dbf2 | [
"BSD-3-Clause"
] | permissive | pytorch/benchmark | 7b55e8d714de2ea873e03df43811aab3848485dd | df4da9bdff11a2f948d5bd4ac83da7922e6f44f4 | refs/heads/main | 2023-08-29T13:06:09.671728 | 2023-08-28T16:51:55 | 2023-08-28T16:51:55 | 92,541,759 | 685 | 220 | BSD-3-Clause | 2023-09-14T18:10:18 | 2017-05-26T19:21:12 | Python | UTF-8 | Python | false | false | 851 | py | from pathlib import Path
from typing import List
# Directory containing this package; each userbenchmark lives in a
# subdirectory of it.
CURRENT_DIR = Path(__file__).parent
def list_userbenchmarks() -> List[str]:
    """Return the names of CURRENT_DIR subdirectories that are Python
    packages (i.e. contain an __init__.py)."""
    return [
        entry.name
        for entry in CURRENT_DIR.iterdir()
        if entry.is_dir() and entry.joinpath('__init__.py').exists()
    ]
def get_ci_from_ub(ub_name):
    """Load the ci.yaml of the named userbenchmark.

    Returns {"name": ub_name, "ci_cfg": <parsed yaml>} or None when the
    benchmark has no ci.yaml file.
    """
    import yaml
    ci_path = CURRENT_DIR / ub_name / "ci.yaml"
    if not ci_path.exists():
        return None
    with open(ci_path, "r") as fh:
        ci_config = yaml.safe_load(fh)
    return {"name": ub_name, "ci_cfg": ci_config}
def get_userbenchmarks_by_platform(platform):
    """Return the names of userbenchmarks whose ci.yaml declares the
    given CI platform (benchmarks without a ci.yaml are skipped)."""
    matches = []
    for ub_name in list_userbenchmarks():
        ci = get_ci_from_ub(ub_name)
        if ci and ci["ci_cfg"]["platform"] == platform:
            matches.append(ci["name"])
    return matches
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
59ec5fc5fd80068eb9447b6b36f95bffd2ccc6b5 | 162f56089fbfed17fe02590a69bc16c89698c2e9 | /module1.py | 746904cb8108fc535abbae7ec4edb5875d279eaa | [] | no_license | emmanuelmacharia/andelaworkshop | fbd3e759ecb140f9abf58c10773b771f301ca860 | 43470fe7b441c90bfb82ede7bdb64dbe6c73ba10 | refs/heads/master | 2020-03-30T16:22:00.671936 | 2018-10-17T13:56:24 | 2018-10-17T13:56:24 | 151,405,461 | 0 | 0 | null | 2018-10-17T13:56:25 | 2018-10-03T11:58:28 | HTML | UTF-8 | Python | false | false | 2,344 | py | from flask import Flask, request
from flask import jsonify
from flask_restful import Api, Resource
app = Flask(__name__)
api = Api(app)
# In-memory product store: id (int, 1-based) -> product payload dict.
products = {}
class AdminProducts(Resource):
    '''Endpoints for creating and viewing products in the application'''
    def get(self):
        '''Views all the products in the application'''
        return jsonify({'products': products})
    def post(self):
        '''Creates a new product in the store from the JSON request body
        (expects name, description, category and price fields).'''
        id = len(products) + 1
        data = request.get_json()
        name = data['name']
        description = data['description']
        # BUG FIX: category/price were assigned the literal lists
        # ['category'] / ['price'] instead of the request payload values.
        category = data['category']
        price = data['price']
        payload = {'name': name, 'description': description, 'category': category, 'price': price}
        products[id] = payload
        # BUG FIX: the original returned the undefined name 'orders'.
        return products, 201
class AttendantProducts(AdminProducts):
    '''Endpoints for viewing all the products in the inventory by the attendant'''
    # BUG FIX: the original bases (Resource, AdminProducts) form an
    # inconsistent MRO (AdminProducts already derives from Resource) and
    # raised TypeError at import time; inheriting AdminProducts alone is
    # sufficient.
    def get(self):
        '''Views all products -- the same listing the admin sees.'''
        # BUG FIX: the original called super().post(self), i.e. a create
        # on a GET with a doubled self argument; delegate to the parent GET.
        return super().get()
# In-memory sales ledger: id (int, 1-based) -> sale payload dict.
sales ={}
class AttendantSales(Resource):
    '''Endpoints for creating and viewing sales made by the attendant.'''
    def get(self):
        '''Lists every recorded sale.'''
        return jsonify({'sales': sales})
    def post(self):
        '''Records a new sale from the JSON request body (expects
        productname, description, quantity and price fields).'''
        body = request.get_json()
        sale = {
            'product': body['productname'],
            'description': body['description'],
            'quantity': body['quantity'],
            'price': body['price'],
        }
        sale_id = len(sales) + 1
        sales[sale_id] = sale
        return sales, 201
class Product(Resource):
    '''Endpoint that allows a user to view a single product'''
    def get(self, id):
        '''Returns the product with the given id, or 404 when absent.'''
        if id in products:
            # BUG FIX: the original indexed with AdminProducts.id (a
            # nonexistent class attribute); use the requested id.
            return products[id], 200
        return 404
class Sale(Resource):
    '''Endpoint for viewing a single sale'''
    def get(self, id):
        '''Returns the sale with the given id, or 404 when absent.'''
        # BUG FIX: 'id' was missing from the signature, so the body read
        # the builtin id() function instead of the URL parameter, and the
        # '/admin/sales/<int:id>' route would fail on dispatch.
        if id not in sales:
            return 404
        else:
            return sales[id] ,200
# BUG FIX: 'add_resourse' is not a flask-restful method (AttributeError at
# import); also register AttendantProducts -- not AdminProducts a second
# time -- at the attendant URL, matching the class defined for it above.
api.add_resource(AdminProducts, '/admin/products')
api.add_resource(AttendantProducts, '/attendant/products')
api.add_resource(AttendantSales, '/attendant/sales')
api.add_resource(Sale, '/admin/sales/<int:id>')
api.add_resource(Product, '/products/<int:id>')
| [
"samuelmarsha@outlook.com"
] | samuelmarsha@outlook.com |
08bee4ae1052cf518c23e03568ba16ba18ff39d4 | 5ef82ade9309b70204e1d392f10835a9e7d553a7 | /tweet.py | 5a5bbb264cca8536a98b06827c9ec09813165739 | [
"MIT"
] | permissive | etalab/data-covid19-dashboard-widgets | 258a52b85edfe91d92cc7d176bec3f75a5c4413f | fb429e3552f67e267801251ee6ee7e2d78c46829 | refs/heads/master | 2022-05-30T01:10:17.562117 | 2022-05-17T23:26:22 | 2022-05-17T23:26:22 | 361,129,834 | 2 | 4 | MIT | 2022-03-22T10:37:13 | 2021-04-24T10:13:31 | Python | UTF-8 | Python | false | false | 728 | py | import tweepy
import secrets
import toml
import os
# Twitter credentials are taken from the environment, never from source.
TWITTER_API_KEY = os.getenv('TWITTER_API_KEY')
TWITTER_API_SECRET_KEY = os.getenv('TWITTER_API_SECRET_KEY')
TWITTER_TOKEN = os.getenv('TWITTER_TOKEN')
TWITTER_SECRET_TOKEN = os.getenv('TWITTER_SECRET_TOKEN')
# Deprecated. Function to tweet if we want.
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET_KEY)
auth.set_access_token(TWITTER_TOKEN, TWITTER_SECRET_TOKEN)
api = tweepy.API(auth)
config = toml.load('./config.toml')
# One tweet per configured item group: the plot image plus the KPI text.
for itemGroup, detail in config.items():
    # load image
    imagePath = "plots/"+itemGroup+".png"
    # BUG FIX: the KPI file handle was never closed; a context manager
    # guarantees release even if reading fails.
    with open("kpis/"+itemGroup+".txt", "r") as f:
        status = f.read()
    # Send the tweet.
    api.update_with_media(imagePath, status)
| [
"geoffrey.aldebert@data.gouv.fr"
] | geoffrey.aldebert@data.gouv.fr |
79e5d650d9505b34a5feeb25bec942c15949abdf | c86a071d99d06395ab8ef22a3eab8675bffbc72c | /PySpark-ETL-Eder-Stupka/PySpark_ETL_Eder_Stupka.py | bfa52d3e6842bad89dd5448e1a358608767c26c2 | [] | no_license | EderStupka/PySpark-ETL-Google-Cloud-Postgres | ca9243fb953e89a8a2b38d5998d3aa9e26359b98 | c132816419d8aefbf0692cc531da3288c681b78b | refs/heads/main | 2023-04-20T06:33:54.459043 | 2021-05-10T16:28:02 | 2021-05-10T16:28:02 | 365,175,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,769 | py |
import psycopg2
import sys
import os
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.types import StructField, StructType, IntegerType, StringType, LongType, TimestampType, ShortType, DateType
from pyspark.sql.functions import date_add, col, date_format, to_timestamp, to_date, year, month, sum
def main():
    """End-to-end ETL: load the three yearly sales CSVs with PySpark,
    persist the raw rows into the VENDAS table, then build and persist
    the consolidated views (currently TABELA1; the rest is in progress)."""
    # Step 1 - import the data from the 3 files into a database table.
    # Establish a connection to the Google Cloud PostgreSQL database.
    # SECURITY NOTE: credentials are hard-coded in source; move them to
    # environment variables or a secrets manager.
    conn = psycopg2.connect(
        host = "34.95.193.234",
        database = "postgres",
        user = "postgres",
        password = "siul1991")
    print("Connection to PostgreSQL created", "\n")
    cur = conn.cursor()
    spark = initialize_Spark()
    df = loadDFWithSchema(spark, "Base_2017_1.csv,Base_2018_2.csv,Base_2019_3.csv,")
    create_vendas_table(cur)
    insert_query, venda_seq = write_vendas_postgresql(df)
    cur.execute(insert_query, venda_seq)
    print("Data inserted into PostgreSQL", "\n")
    # Step 2 - model the consolidated views and load them.
    # a. Tabela1: sales consolidated by year and month.
    cur = conn.cursor()
    table1 = table1_data(df)
    create_table1(cur)
    insert_query, venda_seq = write_table1_postgresql(table1)
    # BUG FIX: the TABELA1 insert statement was built but never executed,
    # so the table stayed empty.
    cur.execute(insert_query, venda_seq)
    # b. Tabela2: sales consolidated by brand and line (work in progress).
    table2 = table2_data(df)
    # c. Tabela3: sales consolidated by brand, year and month (TODO).
    # d. Tabela4: sales consolidated by line, year and month (TODO).
    #create_table2(cur)
    #insert_query, venda_seq = write_table1_postgresql(table1)
    print("Commiting changes to database", "\n")
    conn.commit()
    print("Closing connection", "\n")
    # close the connection
    cur.close()
    conn.close()
    print("Done!", "\n")
def initialize_Spark(): # Spark initialization helper
    """Create (or reuse) a local SparkSession for the ETL job."""
    # Point PySpark at the local Hadoop installation (Windows paths).
    os.environ['HADOOP_HOME'] = "D:/hadoop/hadoop-3.2.1"
    sys.path.append("D:/hadoop/hadoop-3.2.1/bin")
    spark = SparkSession.builder \
        .master("local[*]") \
        .appName("Simple etl job") \
        .getOrCreate()
    print("Spark Initialized", "\n")
    return spark
def loadDFWithSchema(spark, file):
    """Read the comma-separated CSV paths in `file` into one DataFrame
    using the fixed sales schema (each file's header row is skipped).

    Generalized: accepts any number of paths (the original hard-coded
    exactly three); empty entries produced by a trailing comma -- the
    caller passes "a.csv,b.csv,c.csv," -- are ignored.
    """
    schema = StructType([
        StructField("ID_MARCA", IntegerType(), True),
        StructField("MARCA", StringType(), True),
        StructField("ID_LINHA", IntegerType(), False),
        StructField("LINHA", StringType(), True),
        StructField("DATA_VENDA", DateType(), True),
        StructField("QTD_VENDA", IntegerType(), True),
    ])
    # Drop empty fragments from the split (also removes the dead `x = 0`
    # counter the original carried).
    paths = [p for p in file.split(",") if p]
    reader = spark.read.format("csv").schema(schema).option("header", "true")
    result = reader.load(paths[0])
    for path in paths[1:]:
        result = result.union(reader.load(path))
    print("Data loaded into PySpark", "\n")
    print("Printing data ...")
    result.show()
    result.printSchema()
    return result
def table1_data(df):
    """Consolidate sales per month/year: drop the dimension columns,
    format the sale date as MM/yyyy, sum quantities per period and sort.
    Note: `sum` here is pyspark.sql.functions.sum (the file-level import
    shadows the builtin)."""
    df_dropped = df.drop("ID_MARCA","MARCA","ID_LINHA","LINHA")
    df_mes_ano = df_dropped.select(date_format("DATA_VENDA", "MM/yyyy").alias("MES/ANO"), "QTD_VENDA")
    gr = df_mes_ano.groupby("MES/ANO")
    df_grouped = gr.agg(sum(col('QTD_VENDA')).alias('SUM_QTD_VENDA'))
    # NOTE(review): sorting the "MM/yyyy" string is lexicographic, not
    # chronological -- confirm this ordering is acceptable downstream.
    df_sort = df_grouped.sort("MES/ANO")
    print("Data transformed", "\n")
    df_sort.show()
    df_sort.printSchema()
    return df_sort
def table2_data(df):
    """WORK IN PROGRESS -- despite the name, this currently returns only
    the distinct line dimension (df_linha); the brand/line consolidation
    is still sketched in the commented-out code below."""
    # Distinct brand dimension (id + name).
    df_dropped_marca = df.drop("ID_LINHA","LINHA", "DATA_VENDA", "QTD_VENDA")
    df_marca = df_dropped_marca.select('ID_MARCA','MARCA').distinct()
    # Distinct line dimension (id + name).
    df_dropped_linha = df.drop("ID_MARCA","MARCA","DATA_VENDA", "QTD_VENDA")
    df_linha = df_dropped_linha.select('ID_LINHA','LINHA').distinct()
    #select id_linha,sum(qtd_venda) as marca_1 from vendas where id_marca = 1 group by id_linha;
    # Per-line totals for brand 1 only (hard-coded exploratory query).
    df_select = df.select('ID_LINHA','QTD_VENDA').filter('ID_MARCA = 1')
    gr = df_select.groupby("ID_LINHA")
    df_grouped = gr.agg(sum(col('QTD_VENDA')).alias('MARCA 1 '))
    #gr = df_select
    df_grouped.show()
    #marca_seq = [tuple(x) for x in df_marca.collect()]
    #print (marca_seq)
    #records_list_template = ','.join(['%s'] * len(marca_seq))
    #print ("INSERT INTO VENDAS (id_marca, marca, id_linha, linha, data_venda, qtd_venda \
    #    ) VALUES {}".format(records_list_template))
    #insert_query = "INSERT INTO VENDAS (id_marca, marca, id_linha, linha, data_venda, qtd_venda \
    #    ) VALUES {}".format(records_list_template)
    #print("Inserting data into PostgreSQL...", "\n")
    #return insert_query, venda_seq
    print("Data transformed", "\n")
    df_marca.show()
    df_marca.printSchema()
    df_linha.show()
    df_linha.printSchema()
    return df_linha
def create_vendas_table(cursor):
    """Create the raw VENDAS table if it does not already exist."""
    try:
        cursor.execute("CREATE TABLE IF NOT EXISTS VENDAS ( \
            ID_MARCA int, \
            MARCA varchar(100), \
            ID_LINHA int, \
            LINHA varchar(100), \
            DATA_VENDA date, \
            QTD_VENDA int \
            );")
        print("Created table in PostgreSQL", "\n")
    except Exception as error:
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and hid the failure reason; report the error.
        print("Something went wrong when creating the table", "\n", error)
def create_table1(cursor):
    """Create TABELA1 (month/year consolidation) if it does not exist.
    Column SUM_QTD_VENDA matches the alias produced by table1_data."""
    try:
        cursor.execute("CREATE TABLE IF NOT EXISTS TABELA1 (MES_ANO varchar(30), SUM_QTD_VENDA int)")
        print("Created TABELA1 in PostgreSQL", "\n")
    except Exception as error:
        # BUG FIX: narrowed the bare `except:` and surfaced the error.
        print("Something went wrong when creating the table", "\n", error)
def create_table2(cursor):
    """Placeholder for the TABELA2 (brand/line consolidation) DDL.
    TODO(review): this still creates the VENDAS table -- a copy/paste
    placeholder; define the real TABELA2 schema before use."""
    try:
        cursor.execute("CREATE TABLE IF NOT EXISTS VENDAS ( \
            ID_MARCA int, \
            MARCA varchar(100), \
            ID_LINHA int, \
            LINHA varchar(100), \
            DATA_VENDA date, \
            QTD_VENDA int \
            );")
        print("Created table in PostgreSQL", "\n")
    except Exception as error:
        # BUG FIX: narrowed the bare `except:` and surfaced the error.
        print("Something went wrong when creating the table", "\n", error)
def create_table3(cursor):
    """Placeholder for the TABELA3 (brand/year/month consolidation) DDL.
    TODO(review): this still creates the VENDAS table -- a copy/paste
    placeholder; define the real TABELA3 schema before use."""
    try:
        cursor.execute("CREATE TABLE IF NOT EXISTS VENDAS ( \
            ID_MARCA int, \
            MARCA varchar(100), \
            ID_LINHA int, \
            LINHA varchar(100), \
            DATA_VENDA date, \
            QTD_VENDA int \
            );")
        print("Created table in PostgreSQL", "\n")
    except Exception as error:
        # BUG FIX: narrowed the bare `except:` and surfaced the error.
        print("Something went wrong when creating the table", "\n", error)
def create_table4(cursor):
    """Placeholder for the TABELA4 (line/year/month consolidation) DDL.
    TODO(review): this still creates the VENDAS table -- a copy/paste
    placeholder; define the real TABELA4 schema before use."""
    try:
        cursor.execute("CREATE TABLE IF NOT EXISTS VENDAS ( \
            ID_MARCA int, \
            MARCA varchar(100), \
            ID_LINHA int, \
            LINHA varchar(100), \
            DATA_VENDA date, \
            QTD_VENDA int \
            );")
        print("Created table in PostgreSQL", "\n")
    except Exception as error:
        # BUG FIX: narrowed the bare `except:` and surfaced the error.
        print("Something went wrong when creating the table", "\n", error)
def write_vendas_postgresql(df):
    """Collect the DataFrame rows and build one multi-row INSERT for the
    VENDAS table; returns (query, row_tuples) for cursor.execute."""
    rows = [tuple(row) for row in df.collect()]
    placeholders = ','.join(['%s'] * len(rows))
    insert_query = "INSERT INTO VENDAS (id_marca, marca, id_linha, linha, data_venda, qtd_venda \
        ) VALUES {}".format(placeholders)
    print("Inserting data into PostgreSQL...", "\n")
    return insert_query, rows
def write_table1_postgresql(df):
    """Collect the consolidated month/year rows and build one multi-row
    INSERT for TABELA1; returns (query, row_tuples) for cursor.execute."""
    venda_seq = [tuple(x) for x in df.collect()]
    records_list_template = ','.join(['%s'] * len(venda_seq))
    # BUG FIX: the column was misspelled SUM_QTD_VENDAS; TABELA1 (see
    # create_table1) and the table1_data alias both use SUM_QTD_VENDA.
    insert_query = "INSERT INTO TABELA1 (MES_ANO, SUM_QTD_VENDA \
        ) VALUES {}".format(records_list_template)
    print("Inserting data into PostgreSQL...", "\n")
    return insert_query, venda_seq
if __name__ == '__main__':
main() | [
"45463511+EderStupka@users.noreply.github.com"
] | 45463511+EderStupka@users.noreply.github.com |
3420d4b60ef89979085bd48d4c97c9a8b77209d1 | cd83f6f33311b76a7fd669e1d4a5131c099beabb | /mail/migrations/0001_initial.py | 1aba1bcd71f620af3d1940eb3279097f148bf092 | [] | no_license | nathanieltse/Mail-app | 39d68cf7bc2e99b0d21a2342e4aff3c93a9536d9 | b07b7f8dad2580a615649364ad0cd39030b671f2 | refs/heads/main | 2023-06-10T00:11:05.615916 | 2021-06-26T01:52:52 | 2021-06-26T01:52:52 | 380,390,862 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,883 | py | # Generated by Django 3.1.7 on 2021-04-06 20:15
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('body', models.TextField(blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('read', models.BooleanField(default=False)),
('archived', models.BooleanField(default=False)),
('recipients', models.ManyToManyField(related_name='emails_received', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='emails_sent', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"nathanieltse@hotmail.com"
] | nathanieltse@hotmail.com |
39385e0a7b92b66933385b77e3533b3a516318ea | 13213e3e7d6a0866cdf28483adc46d458f8977ac | /qsort/qs.py | f464a28fe040fbe56cf5762e4a0066e408678f00 | [] | no_license | j0k/algopractice | 42654b1158497050911822c46de6791cf8bf251f | 1be3df5553156a523bfce5328df205e6c67c19f3 | refs/heads/master | 2022-06-27T00:10:57.028619 | 2022-06-15T12:34:11 | 2022-06-15T12:34:11 | 100,791,845 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | # 18.06.2017
import random
A = [1,2,4,7,8,9,0,5,3,5,6,8,4,3]
def qsort(a):
l = len(a)
if l <= 1:
return a
pi = int(random.random() * l)
left = []
right = []
p = a[pi]
for (i,item) in enumerate(a):
if i == pi:
continue;
if item <= p:
left.append(item)
else:
right.append(item)
return qsort(left) + [p] + qsort(right)
print qsort(A)
| [
"darling.kicks@gmail.com"
] | darling.kicks@gmail.com |
cdf73285697080951a456e2a5c01d533c393b240 | 1cb4b326b8148779221f38da5ba1b4fa9a017f12 | /Game22/modules/online/server.py | e486128b2439a353fb0c15ea4f0e7de70fef91c8 | [
"MIT"
] | permissive | Chirag2007/Games | c7b27d5bb735912a7dec1fade76f92abfbc078e8 | bc05c6826e63e5e3e279073443f4587f70fae741 | refs/heads/master | 2023-08-21T09:02:06.799276 | 2021-10-01T04:07:03 | 2021-10-01T04:07:03 | 412,320,297 | 0 | 0 | MIT | 2021-10-01T04:07:04 | 2021-10-01T04:05:26 | null | UTF-8 | Python | false | false | 14,729 | py | '''
Function:
联机对战服务器端
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import socket
import pygame
import random
import threading
from ..misc import *
from PyQt5 import QtCore
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from itertools import product
'''服务器端'''
class gobangSever(QWidget):
back_signal = pyqtSignal()
exit_signal = pyqtSignal()
receive_signal = pyqtSignal(dict, name='data')
send_back_signal = False
def __init__(self, cfg, nickname, parent=None, **kwargs):
super(gobangSever, self).__init__(parent)
# 预定义一些必要的变量
self.cfg = cfg
self.nickname = nickname
self.opponent_nickname = None
self.client_ipport = None
self.is_gaming = False
self.chessboard = [[None for i in range(19)] for _ in range(19)]
self.history_record = []
self.winner = None
self.winner_info_label = None
self.player_color = 'white'
self.opponent_player_color = 'black'
self.whoseround = None
# 当前窗口的基本设置
self.setFixedSize(760, 650)
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘')
self.setWindowIcon(QIcon(cfg.ICON_FILEPATH))
# 背景图片
palette = QPalette()
palette.setBrush(self.backgroundRole(), QBrush(QPixmap(cfg.BACKGROUND_IMAGEPATHS.get('bg_game'))))
self.setPalette(palette)
# 显示你的昵称
self.nickname_label = QLabel('您是%s' % self.nickname, self)
self.nickname_label.resize(200, 40)
self.nickname_label.move(640, 180)
# 落子标志
self.chessman_sign = QLabel(self)
sign = QPixmap(cfg.CHESSMAN_IMAGEPATHS.get('sign'))
self.chessman_sign.setPixmap(sign)
self.chessman_sign.setFixedSize(sign.size())
self.chessman_sign.show()
self.chessman_sign.hide()
# 按钮
self.home_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('home'), self)
self.home_button.click_signal.connect(self.goHome)
self.home_button.move(680, 10)
self.startgame_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('startgame'), self)
self.startgame_button.click_signal.connect(self.startgame)
self.startgame_button.move(640, 240)
self.regret_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('regret'), self)
self.regret_button.click_signal.connect(self.regret)
self.regret_button.move(640, 310)
self.givein_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('givein'), self)
self.givein_button.click_signal.connect(self.givein)
self.givein_button.move(640, 380)
self.urge_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('urge'), self)
self.urge_button.click_signal.connect(self.urge)
self.urge_button.move(640, 450)
# 落子和催促声音加载
pygame.mixer.init()
self.drop_sound = pygame.mixer.Sound(cfg.SOUNDS_PATHS.get('drop'))
self.urge_sound = pygame.mixer.Sound(cfg.SOUNDS_PATHS.get('urge'))
# 接收数据信号绑定到responseForReceiveData函数
self.receive_signal.connect(self.responseForReceiveData)
# TCP/IP服务器
self.tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server.bind(('0.0.0.0', cfg.PORT))
self.tcp_server.listen(1)
# TCP/IP的socket
self.tcp_socket = None
# 开一个线程进行监听
threading.Thread(target=self.startListen).start()
'''返回游戏主界面'''
def goHome(self):
self.send_back_signal = True
self.close()
self.back_signal.emit()
'''开始游戏'''
def startgame(self):
if self.tcp_socket is None:
QMessageBox.information(self, '提示', '对方未连接, 请耐心等待')
else:
self.randomAssignColor()
data = {'type': 'action', 'detail': 'startgame', 'data': [self.player_color, self.opponent_player_color]}
self.tcp_socket.sendall(packSocketData(data))
QMessageBox.information(self, '提示', '游戏开始请求已发送, 等待对方确定中')
'''认输'''
def givein(self):
if self.tcp_socket and self.is_gaming and (self.winner is None) and (self.whoseround == self.player_color):
self.winner = self.opponent_player_color
self.showGameEndInfo()
data = {'type': 'action', 'detail': 'givein'}
self.tcp_socket.sendall(packSocketData(data))
'''悔棋-只有在对方回合才能悔棋'''
def regret(self):
if self.tcp_socket and self.is_gaming and (self.winner is None) and (self.whoseround == self.opponent_player_color):
data = {'type': 'action', 'detail': 'regret'}
self.tcp_socket.sendall(packSocketData(data))
'''催促'''
def urge(self):
if self.tcp_socket and self.is_gaming and (self.winner is None) and (self.whoseround == self.opponent_player_color):
data = {'type': 'action', 'detail': 'urge'}
self.tcp_socket.sendall(packSocketData(data))
self.urge_sound.play()
'''鼠标左键点击事件-玩家回合'''
def mousePressEvent(self, event):
if (self.tcp_socket is None) or (event.buttons() != QtCore.Qt.LeftButton) or (self.winner is not None) or (self.whoseround != self.player_color) or (not self.is_gaming):
return
# 保证只在棋盘范围内响应
if event.x() >= 50 and event.x() <= 50 + 30 * 18 + 14 and event.y() >= 50 and event.y() <= 50 + 30 * 18 + 14:
pos = Pixel2Chesspos(event)
# 保证落子的地方本来没有人落子
if self.chessboard[pos[0]][pos[1]]:
return
# 实例化一个棋子并显示
c = Chessman(self.cfg.CHESSMAN_IMAGEPATHS.get(self.whoseround), self)
c.move(event.pos())
c.show()
self.chessboard[pos[0]][pos[1]] = c
# 落子声音响起
self.drop_sound.play()
# 最后落子位置标志对落子位置进行跟随
self.chessman_sign.show()
self.chessman_sign.move(c.pos())
self.chessman_sign.raise_()
# 记录这次落子
self.history_record.append([*pos, self.whoseround])
# 发送给对方自己的落子位置
data = {'type': 'action', 'detail': 'drop', 'data': pos}
self.tcp_socket.sendall(packSocketData(data))
# 是否胜利了
self.winner = checkWin(self.chessboard)
if self.winner:
self.showGameEndInfo()
return
# 切换回合方(其实就是改颜色)
self.nextRound()
'''显示游戏结束结果'''
def showGameEndInfo(self):
self.is_gaming = False
info_img = QPixmap(self.cfg.WIN_IMAGEPATHS.get(self.winner))
self.winner_info_label = QLabel(self)
self.winner_info_label.setPixmap(info_img)
self.winner_info_label.resize(info_img.size())
self.winner_info_label.move(50, 50)
self.winner_info_label.show()
'''响应接收到的数据'''
def responseForReceiveData(self, data):
if data['type'] == 'action' and data['detail'] == 'exit':
QMessageBox.information(self, '提示', '您的对手已退出游戏, 游戏将自动返回主界面')
self.goHome()
elif data['type'] == 'action' and data['detail'] == 'startgame':
self.opponent_player_color, self.player_color = data['data']
self.whoseround = 'white'
self.whoseround2nickname_dict = {self.player_color: self.nickname, self.opponent_player_color: self.opponent_nickname}
res = QMessageBox.information(self, '提示', '对方请求(重新)开始游戏, 您为%s, 您是否同意?' % {'white': '白子', 'black': '黑子'}.get(self.player_color), QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
data = {'type': 'reply', 'detail': 'startgame', 'data': True}
self.tcp_socket.sendall(packSocketData(data))
self.is_gaming = True
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
for i, j in product(range(19), range(19)):
if self.chessboard[i][j]:
self.chessboard[i][j].close()
self.chessboard[i][j] = None
self.history_record.clear()
self.winner = None
if self.winner_info_label:
self.winner_info_label.close()
self.winner_info_label = None
self.chessman_sign.hide()
else:
data = {'type': 'reply', 'detail': 'startgame', 'data': False}
self.tcp_socket.sendall(packSocketData(data))
elif data['type'] == 'action' and data['detail'] == 'drop':
pos = data['data']
# 实例化一个棋子并显示
c = Chessman(self.cfg.CHESSMAN_IMAGEPATHS.get(self.whoseround), self)
c.move(QPoint(*Chesspos2Pixel(pos)))
c.show()
self.chessboard[pos[0]][pos[1]] = c
# 落子声音响起
self.drop_sound.play()
# 最后落子位置标志对落子位置进行跟随
self.chessman_sign.show()
self.chessman_sign.move(c.pos())
self.chessman_sign.raise_()
# 记录这次落子
self.history_record.append([*pos, self.whoseround])
# 是否胜利了
self.winner = checkWin(self.chessboard)
if self.winner:
self.showGameEndInfo()
return
# 切换回合方(其实就是改颜色)
self.nextRound()
elif data['type'] == 'action' and data['detail'] == 'givein':
self.winner = self.player_color
self.showGameEndInfo()
elif data['type'] == 'action' and data['detail'] == 'urge':
self.urge_sound.play()
elif data['type'] == 'action' and data['detail'] == 'regret':
res = QMessageBox.information(self, '提示', '对方请求悔棋, 您是否同意?', QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
pre_round = self.history_record.pop(-1)
self.chessboard[pre_round[0]][pre_round[1]].close()
self.chessboard[pre_round[0]][pre_round[1]] = None
self.chessman_sign.hide()
self.nextRound()
data = {'type': 'reply', 'detail': 'regret', 'data': True}
self.tcp_socket.sendall(packSocketData(data))
else:
data = {'type': 'reply', 'detail': 'regret', 'data': False}
self.tcp_socket.sendall(packSocketData(data))
elif data['type'] == 'reply' and data['detail'] == 'startgame':
if data['data']:
self.is_gaming = True
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
for i, j in product(range(19), range(19)):
if self.chessboard[i][j]:
self.chessboard[i][j].close()
self.chessboard[i][j] = None
self.history_record.clear()
self.winner = None
if self.winner_info_label:
self.winner_info_label.close()
self.winner_info_label = None
self.chessman_sign.hide()
QMessageBox.information(self, '提示', '对方同意开始游戏请求, 您为%s, 执白者先行.' % {'white': '白子', 'black': '黑子'}.get(self.player_color))
else:
QMessageBox.information(self, '提示', '对方拒绝了您开始游戏的请求.')
elif data['type'] == 'reply' and data['detail'] == 'regret':
if data['data']:
pre_round = self.history_record.pop(-1)
self.chessboard[pre_round[0]][pre_round[1]].close()
self.chessboard[pre_round[0]][pre_round[1]] = None
self.nextRound()
QMessageBox.information(self, '提示', '对方同意了您的悔棋请求.')
else:
QMessageBox.information(self, '提示', '对方拒绝了您的悔棋请求.')
elif data['type'] == 'nickname':
self.opponent_nickname = data['data']
'''随机生成双方颜色-白子先走'''
def randomAssignColor(self):
self.player_color = random.choice(['white', 'black'])
self.opponent_player_color = 'white' if self.player_color == 'black' else 'black'
self.whoseround = 'white'
self.whoseround2nickname_dict = {self.player_color: self.nickname, self.opponent_player_color: self.opponent_nickname}
'''改变落子方'''
def nextRound(self):
self.whoseround = self.player_color if self.whoseround == self.opponent_player_color else self.opponent_player_color
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
'''开始监听客户端的连接'''
def startListen(self):
while True:
try:
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> 服务器端启动成功, 等待客户端连接中')
self.tcp_socket, self.client_ipport = self.tcp_server.accept()
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> 客户端已连接, 点击开始按钮进行游戏')
data = {'type': 'nickname', 'data': self.nickname}
self.tcp_socket.sendall(packSocketData(data))
self.receiveClientData()
except:
break
'''接收客户端数据'''
def receiveClientData(self):
while True:
data = receiveAndReadSocketData(self.tcp_socket)
self.receive_signal.emit(data)
'''关闭窗口事件'''
def closeEvent(self, event):
if self.tcp_socket:
self.tcp_socket.sendall(packSocketData({'type': 'action', 'detail': 'exit'}))
self.tcp_socket.shutdown(socket.SHUT_RDWR)
self.tcp_socket.close()
self.tcp_server.close()
return super().closeEvent(event) | [
"1159254961@qq.com"
] | 1159254961@qq.com |
aa57bec9411af12d680e8aaf548c29c04b4a21f5 | dd15b273e1bedad5d80b253855bbed0a207640e0 | /validation.py | 40a141092859e7674bc574025851cc429df48f0e | [] | no_license | rebeccamcohen/heart_rate_sentinel_server | 8f45e3f69bed40ebdb7fd950ee03dcbee994ae97 | c046fc18d4913fd8ff86a7e0ffee4602fe27f3ea | refs/heads/master | 2020-04-06T08:25:42.634181 | 2018-11-17T01:16:56 | 2018-11-17T01:16:56 | 157,304,271 | 0 | 0 | null | 2018-11-16T22:39:57 | 2018-11-13T01:53:46 | Python | UTF-8 | Python | false | false | 4,423 | py | from database import User
from pymodm import connect
class ValidationError(Exception):
def __init__(self, message):
self.message = message
class InputError(Exception):
def __init__(self, message):
self.message = message
class EmptyHrListError(Exception):
def __init__(self, message):
self.message = message
class UserDoesNotExistError(Exception):
def __init__(self, message):
self.message = message
class NoHrSinceError(Exception):
def __init__(self, message):
self.message = message
REQUIRED_REQUEST_KEYS_NEW_PATIENT = [
"patient_id",
"attending_email",
"user_age",
]
REQUIRED_REQUEST_KEYS_POST_HEART_RATE = [
"patient_id",
"heart_rate",
]
REQUIRED_REQUEST_KEYS_POST_INTERNAL_AVERAGE = [
"patient_id",
"heart_rate_average_since",
]
def validate_new_patient(req):
"""Validates that all required keys are present in request
Args:
req (json): New patient info
Raises:
ValidationError: If a required key is not present in request
"""
for key in REQUIRED_REQUEST_KEYS_NEW_PATIENT:
if key not in req.keys():
raise ValidationError(
"Key '{0}' not present in request".format(key))
def check_new_id(patient_id):
"""Ensures that new users do not use a patient id that is already taken
Args:
patient_id: Specified patient id
Raises:
InputError: If the patient_id specified in request already exists
"""
connect("mongodb://rebeccacohen:bme590@ds037768.mlab.com:37768/bme_590")
my_id = patient_id
all_ids = []
all_patients_in_db = User.objects.raw({})
for user in all_patients_in_db:
all_ids.append(user.patient_id)
for item in all_ids:
if item == my_id:
raise InputError("User '{}' already exists".format(patient_id))
def validate_post_heart_rate(req):
"""Validates that all required keys are present in request
Args:
req (json): Heart rate measurements
Raises:
ValidationError: If a required key is not present in request
"""
for key in REQUIRED_REQUEST_KEYS_POST_HEART_RATE:
if key not in req.keys():
raise ValidationError(
"Key '{0}' not present in request".format(key))
def check_list_empty(patient_id):
"""Checks if the list of heart rates
associated with a patient is empty
Args:
patient_id: specified patient id
RaisesEmpyHrListError: If no heart rate measurements
exist for specified user
"""
connect("mongodb://rebeccacohen:bme590@ds037768.mlab.com:37768/bme_590")
p = User.objects.raw({"_id": patient_id}).first()
hr_list = p.heart_rate
if len(hr_list) == 0:
raise EmptyHrListError("No heart rate "
"measurements exist for "
"patient {0}".format(patient_id))
def check_id_exists(patient_id):
"""Checks if a specified patient id exists
Args:
patient_id: Specified patient id
Raises:
InputError: If specified user does not exist
"""
connect("mongodb://rebeccacohen:bme590@ds037768.mlab.com:37768/bme_590")
my_id = patient_id
all_ids = []
all_patients_in_db = User.objects.raw({})
for user in all_patients_in_db:
all_ids.append(user.patient_id)
if my_id not in all_ids:
raise InputError("User '{0}' does not exist".format(patient_id))
def validate_post_int_avg(req):
"""Validates that all required keys are present in request
Args:
req (json): Patient id and time stamp from request
Raises:
ValidationError: If a required key is not present in request
"""
for key in REQUIRED_REQUEST_KEYS_POST_INTERNAL_AVERAGE:
if key not in req.keys():
raise ValidationError("Key '{0}' not "
"present in request".format(key))
def check_hr_since_list_empty(heart_rates_since):
"""Checks if heart rates exist since specified time stamp
Args:
heart_rates_since (list): Heart rate measurements
since specified time
Raises:
NoHrSinceError: If no heart rate measurements exist for specified user
"""
if len(heart_rates_since) == 0:
raise NoHrSinceError("No heart rate measurements "
"exist since specified time stamp")
| [
"rebecca.cohen@duke.edu"
] | rebecca.cohen@duke.edu |
87b6cd872faff0465ea42ba50c6be9d681f0137a | b24e45267a8d01b7d3584d062ac9441b01fd7b35 | /Usuario/migrations/0001_initial.py | f1a088a9eef7d4b51c898384d51b3a312255a586 | [] | no_license | slalbertojesus/merixo-rest | 1707b198f31293ced38930a31ab524c0f9a6696c | 5c12790fd5bc7ec457baad07260ca26a8641785d | refs/heads/master | 2022-12-10T18:56:36.346159 | 2020-05-02T00:42:39 | 2020-05-02T00:42:39 | 212,175,889 | 0 | 0 | null | 2022-12-08T07:00:07 | 2019-10-01T18:56:45 | Python | UTF-8 | Python | false | false | 1,629 | py | # Generated by Django 2.2.6 on 2019-11-29 05:50
import Usuario.models
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=60, unique=True)),
('username', models.CharField(max_length=30, unique=True)),
('estado', models.CharField(max_length=30)),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
('listaUsuarios', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), default=list, null=True, size=None)),
('pic', models.ImageField(upload_to=Usuario.models.upload_location)),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
| [
"slalbertojesus@gmail.com"
] | slalbertojesus@gmail.com |
919ac69337e9c727ed9e2d07b25750de1a8160b2 | 72edeba5f5a912dfdca7df04b42f01c163392dd7 | /RFC_Project/urls.py | 886be2ad6a931a5c16722adbf70911c96ba0326e | [] | no_license | nate-birth/robovac-fight-club | bc56137786cd3b06ff0926f6f4802249e7bf99f6 | c8c7929d190e05147a32c5d5ae4376e3a2a66a36 | refs/heads/main | 2023-03-01T21:26:13.722907 | 2021-02-08T20:23:51 | 2021-02-08T20:23:51 | 337,190,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('RFC_App.urls')),
]
| [
"wvonberg@gmail.com"
] | wvonberg@gmail.com |
266f7c43ec194665af03f4823f13ff1664004761 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /0_字符串/87. 扰乱字符串.py | 29e56a9f818fe0967e242d7e3d9221f6a53b65b7 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | from collections import Counter
from functools import lru_cache
# 1 <= s1.length <= 30
# 87. 扰乱字符串
# !bit packing 可以将复杂度降低为 O(n^4/w)
class Solution:
@lru_cache(None)
def isScramble(self, s1: str, s2: str) -> bool:
if s1 == s2:
return True
if sorted(s1) != sorted(s2): # counter
return False
for i in range(1, len(s1)):
if self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]):
return True
if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:-i]):
return True
return False
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
4c97b82819556eb6e0ca2cac2baa5c7e8074ebbb | 258bfef4f6907041de5710bbb002a3372deb1fb5 | /build/lib/assess_composition/__init__.py | b4e712299c7d8221125f6b0d3c532c0378572f9d | [] | no_license | RosieCampbell/assess_composition | c1a68160386c7cc100c990ee98299d4323a94fcf | a463209de3c41a4fbf1df46d432a9475ee9ca55e | refs/heads/master | 2021-03-30T17:27:37.981495 | 2016-11-08T16:34:58 | 2016-11-08T16:34:58 | 72,562,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | import cv2
import numpy as np
import os
import rule_of_thirds
| [
"rosiekcampbell@gmail.com"
] | rosiekcampbell@gmail.com |
f19f9ff467cd554996e2019a980c30658be6c4aa | 33a961276497abc1096aca8d0fb7519ddfcb3bdb | /esl/economics/markets/walras/__init__.py | 1944405c411d1d4cdebb3b859305415de84c46a6 | [
"Apache-2.0"
] | permissive | ShrutiAppiah/ESL | 3a7d8bb7756d28990730a377c6e5e0c3423a7e99 | 069a9afa0150355a1903ddc76fce04f376a6940d | refs/heads/master | 2023-02-25T07:56:54.536007 | 2021-01-27T04:55:15 | 2021-01-27T04:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | from esl.economics.markets.walras._walras import *
| [
"scholl.maarten@gmail.com"
] | scholl.maarten@gmail.com |
bbaf640d918ac50921a99b40825d91656630c755 | 249b8f7312b57388ffb8fb367c7d1a8d81bbac0f | /a3/smoothing.py | 4bf703ce206f7d5cf623c35528c45e1056dad3c4 | [
"Apache-2.0"
] | permissive | arizona-phonological-imaging-lab/autotres | cf1d8353505d094b8389fbe5b1b5ebc1b6deb5c1 | 4a2d58e1df5a627eb2e9c2d3ccb64750ce13937f | refs/heads/master | 2021-01-10T13:55:04.666165 | 2018-12-06T05:04:47 | 2018-12-06T05:04:47 | 45,319,121 | 0 | 1 | null | 2015-11-15T00:30:03 | 2015-10-31T21:34:53 | Python | UTF-8 | Python | false | false | 4,091 | py | from scipy.signal import savgol_filter
#from scipy.signal import argrelextrema
from .translate import *
import numpy as np
class TraceCorrection(object):
"""
Methods for smoothing traces
"""
@classmethod
def trim_by_min(cls, trace):
"""
returns a new Trace with values preceding either min set to zero
"""
early_min, late_min = cls.get_minima(trace.coordinates)
late_min += 1
nc = trace.coordinates[:]
empty_point = (0,0)
nc[:early_min] = early_min * [empty_point]
nc[late_min:] = [empty_point for i in range(late_min, len(nc))]
return Trace(image=trace.image, tracer="trimmed_min", coordinates=nc, metadata=trace.metadata)
@classmethod
def get_minima(cls, coords):
"""
Divide a Trace in 2 and find the position of the min y value for each section
"""
x,y = zip(*coords)
# switch sign
y = -np.array(y)
midpoint = int(len(x)/2)
return (np.argmin(y[:midpoint]), midpoint + np.argmin(y[midpoint:]))
@classmethod
def sg_filtering(cls, t, window=None, order=9):
"""
Applies Savitsky-Golay filtering to a Trace's y values
"""
if window:
if window % 2 == 0:
raise WindowException("Window size must be odd")
if order >= window:
raise WindowException("Order must be less than window size")
x,y = zip(*t.coordinates)
# window must be odd
window_size = window if window else max([i for i in range(0, len(y)) if i % 2 != 0])
y_hat = savgol_filter(y, window_size, order)
if len(y_hat) != 32 and len(y) == 32:
print(t.image)
# zip together the smoothed y values with the old x values
new_coords = list(zip(x,y_hat))
tracer = "svagol_filter-w={}-order={}".format(window_size, order)
return Trace(image=t.image, tracer=tracer, coordinates=new_coords, metadata=t.metadata)
@classmethod
def threshold_coordinates(cls, coords, threshold):
"""
"""
# unzip the coordinates
x,y = zip(*coords)
# convert to lists
x = list(x)
y = list(y)
# traverse these values
indices = range(0, len(y) - 1)
#reversed_y = list(reversed(y))
for i in indices:
# moving from the right edge...
current_y = y[i]
# get the point immediately to the left
#print("current_y: {}".format(current_y))
next_y = y[i + 1]
#print("next_y: {}".format(next_y))
# is the next value positive or negative?
sign = 1 if next_y > current_y else -1
# what is the actual diff?
diff = abs(next_y - current_y)
# set the diff to the actual diff or the threshold (whichever is less)
diff = diff if diff < threshold else threshold
diff *= sign
# thresholded y value
adjusted_y = current_y + diff
y[i+1] = adjusted_y
return list(zip(x,y))
@classmethod
def threshold_trace(cls, t, anterior_mean, body_mean, front_mean):
"""
"""
coords = t.coordinates
edge_length = int(len(coords) / 4 * 1.5)
rear = coords[:edge_length]
body = coords[edge_length:-1 * edge_length]
front = coords[-1 * edge_length:]
# remove rear mininimum
reversed_rear = list(reversed(rear))
thresholded_rear = reversed(cls.threshold_coordinates(reversed_rear, anterior_mean))
thresholded_body = cls.threshold_coordinates(body, body_mean)
thresholded_front = cls.threshold_coordinates(front, front_mean)
thresholded_coords = cls.thresholded_front + thresholded_body + thresholded_rear
tracer = "thresholded-rm={0}-bm={1}-fm={2}".format(anterior_mean, body_mean, front_mean)
return Trace(image=t.image, tracer=tracer, coordinates=thresholded_coords, metadata=t.metadata)
class WindowException(Exception):
pass
| [
"gushahnpowell@gmail.com"
] | gushahnpowell@gmail.com |
b8a79bc4b6d34e10295fe6b987039e7bd513dccf | fa8685c0d18c06fa2d796570b82c5021150b2371 | /boostpython/class/test/class.py | aa2fe955343adcebfb43a32eddb4057972029959 | [] | no_license | qwv/test-boost | 52407774783c913d7a3dad0498da78319be18e5e | 4300bb830ed6703108c7585d76fc5027a269770e | refs/heads/master | 2021-01-19T14:24:41.195565 | 2017-08-30T11:26:57 | 2017-08-30T11:26:57 | 100,900,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | #!/usr/bin/env python
import test
t = test.World()
t.set("bom dia!")
print (t.greet())
| [
"chowyuhuan@gmail.com"
] | chowyuhuan@gmail.com |
43ebb4bd5c0c0fa866bb592f633956324fff45dc | 4507c041158e427f1fb09ad62e329364a0a03cbf | /matrixcal.py | b72bf7644fa893f82dcd6747c40668038665f53b | [] | no_license | Holmsoe/PandasHints | 0e079ece32ee1cf16995a373f9bed6a995bece58 | 80e1f77832ea5c51f8767066c13dda48bddf9f4d | refs/heads/master | 2021-01-09T04:04:38.681763 | 2020-02-21T22:15:47 | 2020-02-21T22:15:47 | 242,239,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,320 | py | import time
import numpy as np
class lavenhedsmatrix():
def __init__(self,m):
self.tolerance=1e-10
self.mgem=m
self.n=len(self.mgem)
#Styring af beregningsmatrix
#===========================
def beregn(self,rs):
self.rs=[[i for i in line] for line in rs]
self.rsgem=rs
#nulstil beregningsmatrix
self.m=[[i for i in line] for line in self.mgem]
self.NulUnderDiagonal()
self.EnhedsMatrix()
#===========================
#Standard rækkeoperation på matrix:matr,faktor der anvendes,pivotlinie,korrigeret line
#============================
def rowoperate(self,matr,fak,piv,lin):
for nr,item in enumerate(matr[lin]):
matr[lin][nr]=item-fak*matr[piv][nr]
return matr
#=============================
#Ved rækkeoperationer fås nuller under diagonalen
#===========================
def NulUnderDiagonal(self):
self.rowtaken=[]
for col in range(self.n):
self.NullifyCol(col)
self.sortermatrix()
def NullifyCol(self,col):
pivotline=self.MaxAbsCol(col)
for line in range(self.n):
if line not in self.rowtaken:
fak=self.m[line][col]/self.m[pivotline][col]
self.m=self.rowoperate(self.m,fak,pivotline,line)
self.rs=self.rowoperate(self.rs,fak,pivotline,line)
def MaxAbsCol(self,col):
abskolonne=[abs(line[col]) if nr not in self.rowtaken else 0 for nr,line in enumerate(self.m)]
linienr=[i for i,item in enumerate(abskolonne) if max(abskolonne)==item][0]
self.rowtaken.append(linienr)
return linienr
#Slut på nuller under diagonalen
#==============================
#På basis af matrix med nuller under diagonalen beregnes enhedsmatrix
#==================================
def EnhedsMatrix(self):
w=self.n
for i in reversed(range(self.n)):
for j in range(i):
fak=self.m[j][i-w]/self.m[i][i-w]
self.m=self.rowoperate(self.m,fak,i,j)
self.rs=self.rowoperate(self.rs,fak,i,j)
for i in range(self.n):
#rs kan have forskellig bredde. Kan være en søjle eller en matrix så vi må rette hvert element
for nr,item in enumerate(self.rs[i]):
self.rs[i][nr]=self.rs[i][nr]/self.m[i][i]
self.m[i][i]=self.m[i][i]/self.m[i][i]
def sortermatrix(self):
sortmatrix=[]
rssort=[]
for i in range(self.n):
sortmatrix.append(self.m[self.findlinie(i)])
rssort.append(self.rs[self.findlinie(i)])
self.m=sortmatrix
self.rs=rssort
def findlinie(self,n):
for row in range(self.n):
if abs(sum(self.m[row])-sum(self.m[row][n:self.n]))<self.tolerance and abs(self.m[row][n])>self.tolerance :
linieregn=row
return linieregn
#Slut på enhedsmatrix
#========================
class matrixinvert(lavenhedsmatrix):
def __init__(self,m):
lavenhedsmatrix.__init__(self,m)
def finninvert(self):
enhedsm=[[1 if i==j else 0 for i in range(4)] for j in range(4)]
self.beregn(enhedsm)
return self.rs
class matrixsolve(lavenhedsmatrix):
def __init__(self,m):
lavenhedsmatrix.__init__(self,m)
def finnsolve(self,rs):
if len(rs[0])!=1:
print("Input skal være søjlematrix")
else:
self.beregn(rs)
return self.rs
#Start på klasse tilberegning af determinant
#==========================================
class matrixdeterm():
    """Determinant via the Leibniz formula (sum over all permutations)."""
    def __init__(self,m):
        self.m=m
    # Determinant computation
    def mdet(self):
        """Return det(self.m).

        Sums sign(p) * prod(m[i][p[i]]) over every permutation p of the column
        indices; the sign is +1/-1 for an even/odd number of inversions.
        (The original iterated `range(len(m))` over a *global* variable m --
        fixed to use the stored matrix, which also makes the class usable
        without a module-level m.)
        """
        matr=self.m
        permutliste=self.permutationer(len(matr)-1)
        determ=0
        for pline in permutliste:
            prod=1
            for im in range(len(matr)):
                prod*=matr[im][pline[im]]
            if self.inversioner(pline)%2==0:
                determ+=prod
            else:
                determ-=prod
        return determ
    # Build every permutation of 0..n
    def permutationer(self,n):
        """Return all permutations of [0..n], built incrementally by inserting
        each new number at every possible position."""
        listeny=[[0]]
        for i in range(1,n+1):
            listeny=self.beregnnyliste(listeny,i)
        return listeny
    def beregnnyliste(self,mlist,tal):
        # Insert `tal` at every position of every existing permutation.
        nylist=[]
        for line in mlist:
            for j in range(len(line)+1):
                temp=[item for item in line]
                temp.insert(j,tal)
                nylist.append(temp)
        return nylist
    # Count inversions of one permutation (pairs that are out of order)
    def inversioner(self,liste):
        """Return the number of inversions in the permutation `liste`."""
        count=0
        p=liste
        for i in range(len(p)):
            for j in range(i+1,len(p)):
                if p[j]<p[i]: count+=1
        return count
#Slut på beregning af inversioner
#Start på klasse til multiplikation af matricer
#==========================================
class matrixmult():
    """Matrix product m1 @ m2 using plain nested lists."""
    def __init__(self,m1,m2):
        self.m1=m1
        self.m2=m2
    def matmult(self):
        """Return the product m1*m2.

        Builds the product column by column and transposes at the end.  The
        number of columns is taken from len(m2[0]); the original used len(m2)
        (the row count), which only worked for square m2 -- this generalizes
        to rectangular operands while keeping identical results for square
        inputs.
        """
        m1=self.m1
        m2=self.m2
        m3=[]  # one entry per column of the product
        for col in range(len(m2[0])):
            collist=[line[col] for line in m2]   # column `col` of m2
            mcol=[self.dotprodukt(line,collist) for line in m1]
            m3.append(mcol)
        return self.transpose(m3)
    def dotprodukt(self,a,b):
        """Dot product of two equally long vectors; prints a message (and
        returns None) when the lengths differ."""
        if len(a)!=len(b):
            print("Vektorer ikke lige lange")
        else:
            dp=sum([a[i]*b[i] for i in range(len(a))])
            return dp
    def transpose(self,m):
        """Return the transpose of the list-of-lists m."""
        mud=[[line[col] for line in m] for col in range(len(m[0]))]
        return mud
#Slut på klasse til multiplikation af matricer
#==========================================
class matmultk():
    """Scalar times square matrix."""
    def __init__(self,k,m):
        self.k=k
        self.m=m
        self.d=len(self.m)
    def mmultk(self):
        """Return k*m with the original orientation preserved.

        The previous version swapped the loop indices and therefore returned
        k * transpose(m); this went unnoticed because the only caller
        (karaklign) passes the identity matrix, which is symmetric.
        """
        mud=[[self.m[i][j]*self.k for j in range(self.d)] for i in range(self.d)]
        return mud
#Opløfte matrix til n'te potens
class matpow():
    """Raise a square matrix to the n'th power by repeated multiplication."""
    def __init__(self,m,n):
        self.m=m
        self.mny=m
        self.n=n
    def mpow(self):
        """Return m**n for n >= 1 (n <= 1 returns m itself).

        Performs n-1 multiplications starting from m; the original performed
        n multiplications and therefore returned m**(n+1).
        """
        for i in range(self.n-1):
            self.mny=matrixmult(self.m,self.mny).matmult()
        return self.mny
#Beregne trace af matrix (sum af diagonal
class mattrace():
    """Trace of a square matrix (sum of the main diagonal)."""
    def __init__(self,m):
        self.m=m
        self.d=len(m)
    def tr(self):
        """Return sum(m[i][i]) over the diagonal."""
        return sum(self.m[i][i] for i in range(self.d))
class karaklign():
    '''
    Coefficients of the characteristic polynomial of self.m, returned lowest
    degree first: c[0] + c[1]*x + ... + c[n]*x**n (c[n] == 1).
    Computed with the Faddeev-LeVerrier recursion:
    https://en.wikipedia.org/wiki/Faddeev%E2%80%93LeVerrier_algorithm
    '''
    def __init__(self,m):
        self.m=m
    def lign(self):
        """Run the Faddeev-LeVerrier recursion and return the coefficient list."""
        n=len(self.m)
        c=[0 for tal in range(n+1)]
        E=[[1 if i==j else 0 for i in range(n)] for j in range(n)]  # identity
        c[n]=1
        M=[[0 for i in range(n)] for j in range(n)]  # auxiliary matrix M_0 = 0
        A=self.m
        for k in range(1,n+1):
            # M_k = A*M_{k-1} + c[n-k+1] * I
            AM=matrixmult(A,M).matmult()
            cI=matmultk(c[n-k+1],E).mmultk()
            M=[[(AM[i][j]+cI[i][j]) for j in range(n)] for i in range(n)]
            # c[n-k] = -trace(A * M_k) / k
            AM=matrixmult(A,M).matmult()
            c[n-k]=-mattrace(AM).tr()/k
        #c.reverse()
        return c
class quadeq():
    """Find and print all real roots of a polynomial.

    Coefficients are given lowest degree first: f[0] + f[1]*x + ... .
    Each root is located with Newton's method; the starting guess for the
    next root is derived from the stationary points of the polynomial.
    NOTE(review): assumes every root is real and reachable from these
    heuristic guesses -- complex or tightly clustered roots can make the
    Newton loop fail to terminate.
    """
    def __init__(self,koeff):
        self.koef=koeff
    def findroots(self):
        """Locate len(f)-1 roots one at a time and print each one."""
        f=self.koef
        maxroots=len(f)-1
        nroot=0
        guess=-100
        while nroot<maxroots:
            nroot+=1
            r=self.findrod(f,guess)
            print("rod",nroot,'{:6.2f}'.format(r))
            if nroot!=maxroots: guess=self.findnewguess(f,r,nroot)
    def findrod(self,f,x0):
        # Newton iteration: x <- x - f(x)/f'(x) until |f(x)| < 1e-4.
        while abs(self.y(f,x0))>0.0001:
            a=self.dy(f,x0)
            x0=self.xny(x0,self.y(f,x0),a)
        return x0
    def findnewguess(self,f,x0,nroot):
        #print(nroot,len(f)-1)
        # For all but the last root, start midway between the next two
        # stationary points; for the last root, start at the next one.
        if nroot<len(f)-2:
            v1=self.findvende(f,x0)
            v2=self.findvende(f,v1)
            nytguess=(v1+v2)/2
        else:
            nytguess=self.findvende(f,x0)
        return nytguess
    def findvende(self,f,xstart):
        # Walk right in steps of dx until the sign of f' flips (a stationary
        # point was crossed), giving up after 100 steps.
        dx=0.01
        a0=self.dy(f,xstart)
        xud=xstart+dx
        a1=self.dy(f,xud)
        c=0
        while self.sign(a0)==self.sign(a1) and c<100:
            c+=1
            xud+=dx
            a1=self.dy(f,xud)
        if c>=100: print("CMAX")
        return xud
    def y(self,f,x):
        # Polynomial evaluation: sum of f[i]*x**i.
        p=len(f)
        yc=sum([f[i]*x**i for i in range(p)])
        return yc
    def dy(self,f,x):
        # Numerical derivative via a forward difference with dx=0.001.
        dx=0.001
        x1=x+dx
        dydx=(self.y(f,x)-self.y(f,x1))/(x-x1)
        return dydx
    def xny(self,x0,y0,dy):
        # One Newton step.
        x1=x0-y0/dy
        return x1
    def sign(self,tal):
        # True for tal >= 0, False otherwise.
        if tal>=0:
            s=True
        else:
            s=False
        return s
#Her er hovedprogram
#===================
def printm(mprint):
    """Print a matrix one row per line, each entry formatted as %8.2f."""
    for row in mprint:
        print("".join('{:8.2f}'.format(value) for value in row))
# Demo / self-test: solve a linear system, invert a matrix, compute a
# determinant, multiply matrices and find the characteristic polynomial and
# its roots, cross-checking every result against numpy.linalg.
#m1=[[5,8,6,9],[-7,9,3,5],[-7,-10,-8,2],[-1,8,9,-6]]
#rs=[[-100],[-150],[29],[-49]]
m=[[7,2,4,-3],[0,6,5,9],[0,6,2,5],[-8,-1,-7,-8]]
#E4=[[1 if i==j else 0 for i in range(4)] for j in range(4)]
#E3=[[1 if i==j else 0 for i in range(3)] for j in range(3)]
#print(E4)
rs=[[-73],[67],[59],[45]]
rs1=[[1 if i==j else 0 for i in range(4)] for j in range(4)]
mtest=[[3,1,5],[3,3,1],[4,6,4]]
# Solve m*x = rs and compare with numpy.
print("")
print("Løs ligninger")
printm(matrixsolve(m).finnsolve(rs))
A=np.array(m)
b=np.array(rs)
print("")
print("Tjek løs ligninger")
printm(np.linalg.solve(A,b))
# Matrix inverse and numpy check.
print("")
print("Beregn invers")
printm(matrixinvert(m).finninvert())
print("")
print("Tjek invers matrix")
printm(np.linalg.inv(A))
# Determinant and numpy check.
print("")
print("Determinant ",matrixdeterm(m).mdet())
print("Tjek determinant",'{:8.4f}'.format(np.linalg.det(m)))
# Matrix product m*m and numpy check.
print("")
printm(matrixmult(m,m).matmult())
print("Tjek multiplikation")
M1=np.array(m)
M2=np.array(m)
print(np.matmul(M1,M2))
# Characteristic polynomial (lowest degree first) vs numpy's np.poly
# (highest degree first, hence the reversed print).
print("")
print(karaklign(mtest).lign())
print("Tjek karakterlign")
Mtest=np.array(mtest)
print(np.poly(Mtest))
print(np.poly(Mtest)[::-1]) #reverse numpy array
# Eigenvalues = roots of the characteristic polynomial; compare with np.roots
# (which expects highest-degree-first coefficients, hence c.reverse()).
c=karaklign(mtest).lign()
print(c)
quadeq(c).findroots()
c.reverse()
ctest=np.array(c)
print(np.roots(ctest))
| [
"finn.holmsoe@gmail.com"
] | finn.holmsoe@gmail.com |
da2666b33694c9a517fd8f35af321fce19c14414 | e6d95a590f6e661991b75b0ffacaf3a312479bfb | /chapter_6/list_comprehentions.py | 24384a43e3d713925154f66f8afa7a05c4f7a20e | [] | no_license | wanga0104/learn | 05ebe57025d4a6f4bd66a8157916c3a1d54313d4 | eb0dc5a48451c2af4dd630620e194a8d57d3cbfa | refs/heads/master | 2022-10-11T08:50:00.653837 | 2020-06-14T02:28:02 | 2020-06-14T02:28:02 | 271,970,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # list = [i ** 2 for i in range(21) if not i % 2 == 0 ]
# print(list)
# new_list = [i if i > 100 else i ** 2 for i in list]
# print(new_list)
list_val = []
for i in '高富帅':
for j in '白富美':
list_val.append(i+j)
print(list_val)
print([i+j for i in '高富帅' for j in '白富美']) | [
"wanga0104@gmail.com"
] | wanga0104@gmail.com |
3a762c9e0cf07f1d9832c6a76c2334c0528244f5 | e3017c4c18b0226ea2131161159a7e51ff02cc0e | /test_ddl.py | 29848bcab2602888f9243625f8af0fb8bc4ad607 | [] | no_license | samhaug/tomography_tools | 834c0f9781928411d32f9b190f2689194972c339 | ce57bc2517fd5acbf645d6af633321d12122e518 | refs/heads/master | 2018-10-19T15:05:51.458378 | 2018-07-20T14:15:56 | 2018-07-20T14:15:56 | 104,912,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | #from data_downloader import data_downloader
| [
"samhaug@umich.edu"
] | samhaug@umich.edu |
915bfdf93bc5399c071bb32a978cc52625edaf9c | eeed0744d5165f901071ece01a757077d196addb | /compare.py | 374db00b8b36d7bd7ffb68e57df38c7eb8134380 | [] | no_license | joshua217/josh-fyp | 9aee6d425b52ced32dae3cf173a6add0475cdfe2 | 816002c0939e41cc3fdee546e868b7471567f61a | refs/heads/master | 2021-01-01T21:00:57.498109 | 2020-03-02T11:35:06 | 2020-03-02T11:35:06 | 239,337,876 | 2 | 0 | null | 2020-06-05T19:57:47 | 2020-02-09T16:43:59 | Python | UTF-8 | Python | false | false | 418 | py | from scipy import stats
from matplotlib import pyplot as plt
def compare(x, y, x_name, y_name):
plt.figure()
tau, p = stats.kendalltau(x, y, nan_policy='omit')
plt.scatter(x, y)
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.title("x:{}, y:{}\ntau: {}, p: {}".format(x_name, y_name, tau, p))
plt.savefig(r"C:\dev\data\josh-fyp\outputs\{}_against_{}.png".format(x_name, y_name))
# return
| [
"joshlim217@gmail.com"
] | joshlim217@gmail.com |
79835cbf228f09bc7b84d0f8c97de3d9a29f1086 | 29025afc2ef7a01231aba4c66a35df6bc5997212 | /utils/plots.py | c3bef03efaebc77e1bd9cb21d22e444ca5ed6939 | [] | no_license | junjie2008v/Torch_Classify | bb5e790db4925caf1cb0d48e62ea40d57bea36e2 | 9d94da7d19df4c4cf8f88b44c53b6e61a509c30a | refs/heads/master | 2023-07-03T09:06:55.555372 | 2021-08-08T05:04:11 | 2021-08-08T05:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,230 | py | import os
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
def plot_base(ax, epochs, y, color, title=None):
    """Plot y against epochs on the given axis, enable both grids and an
    optional title; return the same axis for chaining."""
    ax.plot(epochs, y, color=color)
    for axis_name in ('y', 'x'):
        ax.grid(axis=axis_name)
    if title:
        ax.set_title(title)
    return ax
def double_bar(num_train_class, num_val_class, classes, log_dir):
    """Save a stacked bar chart of per-class sample counts (val stacked on top
    of train) as data_distribution.jpg in log_dir."""
    width = 0.75  # the width of the bars: can also be len(x) sequence
    fig, ax = plt.subplots()
    ax.bar(classes, num_train_class, width, label='train')
    # Stack the validation counts on top of the training counts.
    ax.bar(classes, num_val_class, width, bottom=num_train_class, label='val')
    ax.set_ylabel('Number')
    ax.set_title('Number by train/val and class')
    ax.legend(bbox_to_anchor=(-0.15, 0.7), fontsize=5)
    plt.savefig(os.path.join(log_dir, 'data_distribution.jpg'), dpi=600, bbox_inches='tight')
    # plt.show()
def plot_datasets(img_path, log_dir):
    """Count the files per class under <img_path>/{train,val}/<class>/ and save
    a stacked-bar chart of the distribution into log_dir (via double_bar).

    The class list is taken from the train split's directory names.
    """
    roots = {split: os.path.join(img_path, split) for split in ('train', 'val')}
    classes = os.listdir(roots['train'])

    def _count(root, cls):
        # Number of files in one class directory.
        return len(os.listdir(os.path.join(root, cls)))

    num_train = [_count(roots['train'], cls) for cls in classes]
    num_val = [_count(roots['val'], cls) for cls in classes]
    double_bar(num_train, num_val, classes, log_dir)
def plot_txt(log_dir, num_classes, labels_name):
    """Parse <log_dir>/results.txt and save two metric plots.

    Produces Acc-P-R-F1.jpg (overall accuracy/precision/recall/F1 over
    epochs) and P-R-F1-per-class.jpg (the three per-class metrics).

    Expected file layout (whitespace-separated, first row is a header):
    epoch, accuracy, precision, recall, F1, then num_classes precision
    columns, num_classes recall columns and num_classes F1 columns --
    NOTE(review): inferred from the slicing below; confirm against the code
    that writes results.txt.
    """
    txt_results = np.loadtxt(os.path.join(log_dir, "results.txt"), dtype=str, delimiter=',')
    # Re-split each row on spaces and drop the empty strings produced by
    # multiple consecutive spaces.
    txt_results = [[i for i in j.split(' ')] for j in txt_results]
    for i in txt_results:
        while '' in i:
            i.remove('')
    txt_results = np.array(txt_results)
    # [1:] skips the header row.
    epochs = txt_results[:, 0][1:].astype(int)
    accuracy = txt_results[:, 1][1:].astype(float)
    precision = txt_results[:, 2][1:].astype(float)
    recall = txt_results[:, 3][1:].astype(float)
    F1 = txt_results[:, 4][1:].astype(float)
    # 2x2 grid of the overall metrics.
    fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
    ax[0, 0] = plot_base(ax[0, 0], epochs, accuracy, color='red', title='accuracy')
    ax[0, 1] = plot_base(ax[0, 1], epochs, precision, color='red', title='precision')
    ax[1, 0] = plot_base(ax[1, 0], epochs, recall, color='red', title='recall')
    ax[1, 1] = plot_base(ax[1, 1], epochs, F1, color='red', title='F1')
    fig.text(0.5, 0.04, 'Epoch', ha='center')
    plt.savefig(os.path.join(log_dir, 'Acc-P-R-F1.jpg'), dpi=600, bbox_inches='tight')
    # 3 rows (precision/recall/F1) x num_classes columns of per-class curves.
    fig, ax = plt.subplots(3, num_classes, figsize=(2 * num_classes, 2 * 3), sharex=True, sharey=True)
    for i in range(num_classes):
        precision_i = txt_results[:, 5 + i][1:].astype(float)
        recall_i = txt_results[:, 5 + num_classes + i][1:].astype(float)
        F1_i = txt_results[:, 5 + 2 * num_classes + i][1:].astype(float)
        ax[0, i] = plot_base(ax[0, i], epochs, precision_i, color='blue')
        ax[0, i].set_title(labels_name[i])
        ax[1, i] = plot_base(ax[1, i], epochs, recall_i, color='blue')
        ax[2, i] = plot_base(ax[2, i], epochs, F1_i, color='blue')
    ax[0, 0].set_ylabel('Precision')
    ax[1, 0].set_ylabel('Recall')
    ax[2, 0].set_ylabel('F1-score')
    fig.text(0.5, 0.04, 'Epoch', ha='center')
    plt.savefig(os.path.join(log_dir, 'P-R-F1-per-class.jpg'), dpi=600, bbox_inches='tight')
def plot_lr_scheduler(optimizer, scheduler, epochs, log_dir, scheduler_type):
    """Simulate `epochs` scheduler steps and save the learning-rate curve as
    lr_scheduler.jpg in log_dir (log-scaled y unless warmup_cosine_lr).

    NOTE(review): copy() makes *shallow* copies -- stepping the copied
    scheduler is presumably safe because schedulers track state internally,
    but confirm it does not mutate the live optimizer's param_groups.
    """
    optimizer, scheduler = copy(optimizer), copy(scheduler)
    plt.figure()
    y = []
    for epoch in range(epochs):
        # Record the lr of the first param group, then advance the schedule.
        y.append(optimizer.param_groups[0]['lr'])
        # print('epoch:', epoch, 'lr:', optimizer.param_groups[0]['lr'])
        scheduler.step()
    plt.plot(y, c='r', label='warmup step_lr', linewidth=1)
    plt.legend(loc='best')
    # plt.xticks(np.arange(0, epochs+20, 20))
    if scheduler_type!='warmup_cosine_lr':
        plt.yscale("log")
    plt.savefig(os.path.join(log_dir, 'lr_scheduler.jpg'), dpi=600, bbox_inches='tight')
def plot_loss(log_dir, train_loss_list, val_loss_list):
    """Plot the train/val loss curves on a log scale and save plot_loss.jpg
    in log_dir.

    train_loss_list / val_loss_list are per-epoch loss sequences.
    """
    fig = plt.figure()
    plt.plot(train_loss_list, c='r', label='train loss', linewidth=2)
    plt.plot(val_loss_list, c='b', label='val loss', linewidth=2)
    plt.legend(loc='best')
    plt.xlabel('epoch', fontsize=10)
    plt.ylabel('loss', fontsize=10)
    plt.yscale("log")
    plt.savefig(os.path.join(log_dir, 'plot_loss.jpg'), dpi=600, bbox_inches='tight')
    # Close the figure after saving: pyplot keeps every figure alive until it
    # is closed, so repeated calls would otherwise accumulate and leak memory.
    plt.close(fig)
"2267335097@qq.com"
] | 2267335097@qq.com |
1fb86b58724cb9c2760776d223919b590e47950f | 47374ff190c4957ed464e345f94fbfee64983825 | /feed2rss/views.py | b7b0710fa98cf32a365485d2e7e00f1378a163db | [] | no_license | alaski/Feed2RSS | 0af78c5af74cbecddf945837257f893d4bc7b799 | 25c26e5ab9989b4bc067a382ca7ace9538dc62c8 | refs/heads/master | 2022-10-14T14:38:05.564515 | 2012-08-07T17:49:51 | 2012-08-07T17:49:51 | 3,626,778 | 0 | 0 | null | 2022-09-16T17:43:10 | 2012-03-05T12:23:27 | Python | UTF-8 | Python | false | false | 7,697 | py | import datetime
import random
import string
from pyramid.httpexceptions import (
HTTPFound,
HTTPForbidden,
HTTPNotFound,
)
from pyramid.response import Response
from pyramid.security import (
remember,
forget,
authenticated_userid,
)
from pyramid.view import (
view_config,
#forbidden_view_config,
)
from .models import (
DBSession,
Feed,
User,
)
import PyRSS2Gen
import tweepy
@view_config(route_name='twitter_login')
def twitter_login(request):
    """Start the Twitter OAuth sign-in flow (Python 2 code).

    Obtains a request token from Twitter, stashes it in the session for the
    callback view, and redirects the browser to Twitter's authorization URL.
    NOTE(review): if get_authorization_url raises TweepError, only a message
    is printed and `redirect_url` stays unbound, so the final redirect raises
    NameError -- confirm whether that failure path was ever exercised.
    """
    settings = request.registry.settings
    auth = tweepy.OAuthHandler(
        settings['twconsumer_key'],
        settings['twconsumer_secret'],
        request.route_url('twitter_authenticated')
    )
    try:
        redirect_url = auth.get_authorization_url(signin_with_twitter=True)
    except tweepy.TweepError, e:
        print 'Failed to get request token: {0}'.format(e)
    session = request.session
    # Persist the request token (key, secret) for the callback view.
    session['request_token'] = (
        auth.request_token.key,
        auth.request_token.secret
    )
    session.changed()
    return HTTPFound(location=redirect_url)
@view_config(route_name='twitter_authenticated')
def twitter_authenticated(request):
    """OAuth callback: exchange the verifier for an access token, create or
    update the local User record and log the user in (Python 2 code).

    Redirects to the user's home page with the auth cookie set.
    """
    verifier = request.GET.getone('oauth_verifier')
    settings = request.registry.settings
    auth = tweepy.OAuthHandler(
        settings['twconsumer_key'],
        settings['twconsumer_secret']
    )
    session = request.session
    token = session.get('request_token')
    if 'request_token' in session:
        # One-shot token: remove it so it cannot be replayed.
        del session['request_token']
    auth.set_request_token(token[0], token[1])
    try:
        auth.get_access_token(verifier)
    except tweepy.TweepError:
        print 'Failed to get access token'
    api = tweepy.API(auth)
    tw_user = api.me()
    oauth_token = auth.access_token.key
    oauth_token_secret = auth.access_token.secret
    user = User.get_by_screen_name(tw_user.screen_name)
    if user is None:
        # First login: create the local account record.
        user = User(
            tw_user.screen_name,
            tw_user.id,
            oauth_token,
            oauth_token_secret
        )
        DBSession.add(user)
        user = User.get_by_screen_name(tw_user.screen_name)
    else:
        # Returning user: refresh stored credentials if Twitter rotated them.
        if user.oauth_token != oauth_token:
            user.oauth_token = oauth_token
        if user.oauth_token_secret != oauth_token_secret:
            user.oauth_token_secret = oauth_token_secret
        user.last_login = datetime.datetime.now()
        DBSession.add(user)
    user_home = request.route_url('user_home', user=tw_user.screen_name)
    headers = remember(request, tw_user.screen_name)
    return HTTPFound(location = user_home, headers = headers)
@view_config(route_name='user_home', renderer='templates/user_home.pt')
def user_home(request):
    """Render the logged-in user's home page listing their feed URLs.

    Anyone who is not the owner of the requested page is redirected to the
    public home page.
    """
    user_name = request.matchdict['user']
    logged_in = authenticated_userid(request)
    if user_name != logged_in:
        return HTTPFound(location = request.route_url('home'))
    user = User.get_by_screen_name(user_name)
    feeds_cursor = Feed.get_by_userid(user.id)
    feeds = []
    if feeds_cursor is not None:
        for feed in feeds_cursor:
            feed_url = request.route_url(
                'view_feed',
                user = user_name,
                feedname = feed.name,
            )
            feeds.append({
                'url': feed_url,
            })
    return {'screen_name': user_name,
            'feeds': feeds,
            'logged_in': logged_in,
            }
@view_config(route_name='get_feeds', renderer='json')
def get_feeds(request):
    """Return the logged-in user's feeds as JSON: name plus view/delete URIs.

    403s when the requester is not the owner of the URL's user segment.
    """
    user_name = request.matchdict['user']
    logged_in = authenticated_userid(request)
    if user_name != logged_in:
        return HTTPForbidden()
    user = User.get_by_screen_name(user_name)
    feeds = Feed.get_by_userid(user.id)
    feed_list = []
    for feed in feeds:
        feed_dict = {
            'name': feed.name,
            'feed_uri': request.route_url(
                'view_feed',
                user = user_name,
                feedname = feed.name
            ),
            'delete_uri': request.route_url(
                'delete_feed',
                user=user_name,
                feedname=feed.name
            )
        }
        feed_list.append(feed_dict)
    return feed_list
@view_config(route_name='view_feed')
def view_feed(request):
    """Render one of a user's feeds as an RSS 2.0 XML document.

    404s when the user or feed name is unknown; otherwise loads the stored
    tweets for this feed from MongoDB and serialises them with PyRSS2Gen.
    This endpoint is public (no ownership check), since feed readers fetch it.
    """
    user = User.get_by_screen_name(request.matchdict['user'])
    if user is None:
        return HTTPNotFound()
    feed = Feed.get_by_userid_and_name(user.id, request.matchdict['feedname'])
    if feed is None:
        return HTTPNotFound()
    tweets_db = request.mongo_db.tweets
    tweets_cursor = tweets_db.find(
        {'rss_user': user.screen_name, 'feedname': feed.name}
    )
    rss_tweets = []
    for tweet in tweets_cursor:
        rss_tweets.append(
            PyRSS2Gen.RSSItem(
                title = tweet['title'],
                author = tweet['author'],
                link = tweet['link'],
                description = tweet['description'],
                pubDate = tweet['pubDate'],
            )
        )
    rss = PyRSS2Gen.RSS2(
        title = 'Feed2RSS',
        link = request.route_url(
            'view_feed',
            user = request.matchdict['user'],
            feedname = request.matchdict['feedname'],
        ),
        description = 'RSS feed of tweets with links',
        lastBuildDate = datetime.datetime.now(),
        items = rss_tweets,
    )
    return Response(rss.to_xml(), content_type='text/xml')
@view_config(route_name='create_feed', request_method='POST')
def create_feed(request):
    """Create feeds for the comma-separated 'sources' field of the POST body.

    Only the logged-in owner may create feeds; sources the user already has a
    feed for are skipped.  Each new feed gets a random 10-character name.
    NOTE(review): the random name is not checked for uniqueness -- collisions
    are unlikely but possible.
    """
    user_name = request.matchdict['user']
    logged_in = authenticated_userid(request)
    if user_name != logged_in:
        return HTTPForbidden()
    form_data = request.POST
    feed_sources = form_data['sources'].split(',')
    #filter_links = form_data['filter_links']
    user = User.get_by_screen_name(user_name)
    for source in feed_sources:
        feedq = Feed.get_by_userid_and_source(user.id, source)
        if feedq is None:
            random_name = ''.join(
                random.choice(
                    string.ascii_lowercase + string.digits) for x in range(10)
            )
            feed = Feed(user.id, random_name, source)
            DBSession.add(feed)
    #settings = request.registry.settings
    #auth = tweepy.OAuthHandler(
    #    settings['twconsumer_key'],
    #    settings['twconsumer_secret']
    #    )
    #auth.set_access_token(user.oauth_token, user.oauth_token_secret)
    #api = tweepy.API(auth)
    #api.favorites()
    return Response()
@view_config(route_name='delete_feed', request_method='DELETE')
def delete_feed(request):
    """Delete the named feed of the logged-in user; missing feeds are a no-op."""
    user_name = request.matchdict['user']
    if user_name != authenticated_userid(request):
        return HTTPForbidden()
    owner = User.get_by_screen_name(user_name)
    feed = Feed.get_by_userid_and_name(owner.id, request.matchdict['feedname'])
    if feed is not None:
        DBSession.delete(feed)
    return Response()
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
    """Render the landing page with the project name and the login state."""
    logged_in = authenticated_userid(request)
    return dict(project='Feed2RSS', logged_in=logged_in)
@view_config(route_name='logout')
def logout(request):
    """Drop the auth session cookie and redirect back to the home page."""
    return HTTPFound(location=request.route_url('home'),
                     headers=forget(request))
# vim:et:ts=4:sw=4:sts=4
| [
"alaski@gmail.com"
] | alaski@gmail.com |
1747c3d6ebe232b90f1163f18a849a3f71ccebc4 | e614c145ab902ebed09af2bcef5b36dca78a5787 | /authors/migrations/0117_auto_20160214_0747.py | 26a7a40388a4b500cb05fd171b2905ed7e43788d | [] | no_license | rtreharne/pvsat-dev | 1646ca8f51bd466d659b25eb721750de8361ef02 | faa2b28250e2110f4603ffeff80ad0fedda1abbb | refs/heads/master | 2021-01-17T13:24:12.578341 | 2017-09-19T06:42:51 | 2017-09-19T06:42:51 | 44,095,813 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters the default of Abstract.date.
    # The default is a *fixed* naive datetime captured when `makemigrations`
    # ran (2016-02-14), not a callable -- typical churn from a
    # datetime.now-style default; do not edit by hand.

    dependencies = [
        ('authors', '0116_auto_20160214_0743'),
    ]

    operations = [
        migrations.AlterField(
            model_name='abstract',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2016, 2, 14, 7, 47, 55, 128934)),
        ),
    ]
| [
"R.Treharne@liverpool.ac.uk"
] | R.Treharne@liverpool.ac.uk |
ac140ccd1f835b9970eb434daa082c8ce09e86fc | 06bb125304edb1a587059d372f3e587a6f73ced2 | /cdpcli/completer.py | 01274b08fb6f55eb0dff24c3890d651a44acdf33 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | arcOsiNer/cdpcli | 2743bf01a55f23466e445a74468ce14c4ac6c1f0 | 484b9814d845046773e4fe5408f41f7cfc6dbb7a | refs/heads/master | 2023-06-04T14:22:49.167787 | 2021-06-24T00:17:22 | 2021-06-24T00:17:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,386 | py | # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2018 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import logging
import os
import sys
import cdpcli.clidriver
LOG = logging.getLogger(__name__)
class Completer(object):
    """Shell tab-completion engine for the CDP CLI.

    Walks the driver's help/command tables to suggest the next command,
    subcommand or option for a partially typed command line.
    """
    def __init__(self, driver=None):
        if driver is not None:
            self.driver = driver
        else:
            self.driver = cdpcli.clidriver.CLIDriver()
        self.main_help = self.driver._create_help_command()
        # Provider-level (global) option names, without the leading dashes.
        self.main_options = self._get_documented_completions(
            self.main_help.arg_table)

    def complete(self, cmdline, point=None):
        """Return the list of completion choices for `cmdline` with the cursor
        at `point` (defaults to the end of the line)."""
        if point is None:
            point = len(cmdline)
        args = cmdline[0:point].split()
        current_arg = args[-1]
        # Split words into command names and option-like tokens.
        cmd_args = [w for w in args if not w.startswith('-')]
        opts = [w for w in args if w.startswith('-')]
        cmd_name, cmd = self._get_command(self.main_help, cmd_args)
        subcmd_name, subcmd = self._get_command(cmd, cmd_args)
        if cmd_name is None:
            # If we didn't find any command names in the cmdline
            # lets try to complete provider options
            return self._complete_provider(current_arg, opts)
        elif subcmd_name is None:
            return self._complete_command(cmd_name, cmd, current_arg, opts)
        return self._complete_subcommand(subcmd_name, subcmd, current_arg, opts)

    def _complete_command(self, command_name, command_help, current_arg, opts):
        """Completions at the command level: subcommands or global options."""
        if current_arg == command_name:
            if command_help:
                return self._get_documented_completions(
                    command_help.command_table)
        elif current_arg.startswith('-'):
            return self._find_possible_options(current_arg, opts)
        elif command_help is not None:
            # See if they have entered a partial command name
            return self._get_documented_completions(
                command_help.command_table, current_arg)
        return []

    def _complete_subcommand(self, subcmd_name, subcmd_help, current_arg, opts):
        """Completions at the subcommand level: only options are completable."""
        if current_arg != subcmd_name and current_arg.startswith('-'):
            return self._find_possible_options(current_arg, opts, subcmd_help)
        return []

    def _complete_option(self, option_name):
        """Complete the VALUE of a fully typed option (e.g. `--output <TAB>`)."""
        if option_name == '--endpoint-url':
            return []
        if option_name == '--output':
            cli_data = self.driver.session.get_data('cli')
            return cli_data['options']['output']['choices']
        if option_name == '--profile':
            return self.driver.session.available_profiles
        return []

    def _complete_provider(self, current_arg, opts):
        """Completions for the first word: global options or top-level commands."""
        if current_arg.startswith('-'):
            return self._find_possible_options(current_arg, opts)
        elif current_arg == 'cdp':
            return self._get_documented_completions(
                self.main_help.command_table)
        else:
            # Otherwise, see if they have entered a partial command name
            return self._get_documented_completions(
                self.main_help.command_table, current_arg)

    def _get_command(self, command_help, command_args):
        """Return (name, help_command) for the first word of `command_args`
        found in `command_help`'s command table, else (None, None)."""
        if command_help is not None and command_help.command_table is not None:
            for command_name in command_args:
                if command_name in command_help.command_table:
                    cmd_obj = command_help.command_table[command_name]
                    return command_name, cmd_obj.create_help_command()
        return None, None

    def _get_documented_completions(self, table, startswith=None):
        """Return the documented, non-positional names from an arg/command
        table, optionally filtered by prefix."""
        names = []
        for key, command in table.items():
            if getattr(command, '_UNDOCUMENTED', False):
                # Don't tab complete undocumented commands/params
                continue
            if startswith is not None and not key.startswith(startswith):
                continue
            if getattr(command, 'positional_arg', False):
                continue
            names.append(key)
        return names

    def _find_possible_options(self, current_arg, opts, subcmd_help=None):
        """Return the option completions for `current_arg`, excluding options
        already present elsewhere on the command line."""
        all_options = copy.copy(self.main_options)
        if subcmd_help is not None:
            all_options += self._get_documented_completions(
                subcmd_help.arg_table)
        for option in opts:
            # Look through list of options on cmdline. If there are
            # options that have already been specified and they are
            # not the current word, remove them from list of possibles.
            if option != current_arg:
                stripped_opt = option.lstrip('-')
                if stripped_opt in all_options:
                    all_options.remove(stripped_opt)
        cw = current_arg.lstrip('-')
        possibilities = ['--' + n for n in all_options if n.startswith(cw)]
        if len(possibilities) == 1 and possibilities[0] == current_arg:
            # The option is fully typed: fall through to value completion.
            return self._complete_option(possibilities[0])
        return possibilities
def complete(cmdline, point):
    """Compute and print (space+newline separated) the completion choices for
    `cmdline` with the cursor at `point`."""
    print(' \n'.join(Completer().complete(cmdline, point)))
def main():
    """Entry point used by shell completion hooks: read the command line and
    cursor position from the environment and print the completions."""
    # bash exports COMP_LINE and COMP_POINT, tcsh COMMAND_LINE only
    cline = os.environ.get('COMP_LINE') or os.environ.get('COMMAND_LINE') or ''
    cpoint = int(os.environ.get('COMP_POINT') or len(cline))
    try:
        complete(cline, cpoint)
    except KeyboardInterrupt:
        # If the user hits Ctrl+C, we don't want to print
        # a traceback to the user.
        pass
if __name__ == '__main__':
    # Ad-hoc test harness: "completer.py '<cmdline>' [<point>]".
    if len(sys.argv) == 3:
        cmdline = sys.argv[1]
        point = int(sys.argv[2])
    elif len(sys.argv) == 2:
        cmdline = sys.argv[1]
        # Default the cursor to the end of the command line.  (The original
        # left `point` unbound on this branch, crashing with NameError below.)
        point = len(cmdline)
    else:
        print('usage: %s <cmdline> <point>' % sys.argv[0])
        sys.exit(1)
    # complete() prints the choices itself and returns None; the original
    # wrapped it in print(), which emitted a spurious trailing "None".
    complete(cmdline, point)
| [
"dev-kitchen@cloudera.com"
] | dev-kitchen@cloudera.com |
28d43402215205f7fc2d64cf801303a61b84c5f9 | afe916cfbb383f08ebcfd4ef290eefe556585acb | /readbin/apps/bin/serializers.py | 91fe989fb09645ec4a7404b03b8daa6d932f143b | [
"MIT"
] | permissive | asnelzin/readbin | cb47664d1fc1eb741f94d73e672815829c9209f6 | 1b546f71955cf5753d63aaf7d7fda0d466fc1332 | refs/heads/master | 2021-01-10T13:57:14.347921 | 2015-12-09T17:55:19 | 2015-12-09T17:55:19 | 47,063,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from rest_framework import serializers
from readbin.apps.bin.models import Article
class ArticleSerializer(serializers.ModelSerializer):
class Meta:
model = Article
fields = ('id', 'title', 'url')
| [
"asnelzin@gmail.com"
] | asnelzin@gmail.com |
92b727dd208e19757a6dcb3fa0bd8c47e62e05e6 | 05d692469305dd1adb9ebc46080525bb4515b424 | /Exception handling/indentatitonerror2.py | fcf2aa2d7734d4ba77e193e053b1e5add48c0f73 | [] | no_license | rajdharmkar/pythoncode | 979805bc0e672f123ca1460644a4bd71d7854fd5 | 15b758d373f27da5680a711bf12c07e86758c447 | refs/heads/master | 2020-08-07T18:30:55.575632 | 2019-10-14T12:46:09 | 2019-10-14T12:46:09 | 213,551,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | try:
import test.py
except IndentationError as ex:
print ex
| [
"rajdharmkar@gmail.com"
] | rajdharmkar@gmail.com |
a1a0d498a55a027931df2f2909a70172a69c3276 | d9406464161b49a535a68356306d196efd5cb073 | /p5.py | 23f8140e641c66bc90d4ce73f4754205ea0e7bf9 | [] | no_license | jjimenez98/Blockchain-Communication-Model | 2b2b3361c7821567cfdea08a2b3dfa9ba9073701 | 048cee402d67182dbf06e69971c60988ddfb8be8 | refs/heads/master | 2022-12-17T08:00:01.397994 | 2020-09-26T20:44:26 | 2020-09-26T20:44:26 | 298,896,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,621 | py | import socket
import time
import random
import threading
from threading import Lock
import collections # provides a double-ended queue and is implemented as a doubly-linked list internally
import queue
import pickle
import hashlib
import numpy as np
import shelve
import sys
blockchain = collections.deque()  # local copy of the chain (block hash strings)
depth = 0                         # number of blocks in the local chain
Q = queue.Queue()                 # this peer's pending transactions
Q1 = queue.Queue()                # transactions staged to be hashed into the next block
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
udp_host = socket.gethostbyname("")
sock.setblocking(False)           # non-blocking so the receive loop can poll
udp_port = 45935                  # this process is peer #5
sock.bind((udp_host,udp_port))
lcip = "127.0.0.1"                # all five peers run on localhost
# Link-failure simulation flags: False means the link to that peer is "down"
# and send_all() skips it.
active1 = True
active2 = True
active3 = True
active4 = True
active5 = True
def send_all(msg):
    """Broadcast `msg` to every other peer whose link flag is active."""
    peers = ((active1, 45931), (active2, 45932),
             (active3, 45933), (active4, 45934))
    for link_up, port in peers:
        if link_up:
            sock.sendto(msg, (lcip, port))
def send_port(msg, id):
    """Send `msg` to the single peer identified by id (1-5); unknown ids are
    silently ignored, matching the original if-chain."""
    port_by_id = {1: 45931, 2: 45932, 3: 45933, 4: 45934, 5: 45935}
    port = port_by_id.get(id)
    if port is not None:
        sock.sendto(msg, (lcip, port))
def nonce(a):
    """Proof-of-work: find an integer nonce r in [0, 15] such that
    sha256(a || bytes(r) [|| previous block hash]) ends in hex digit 0-4.

    `a` is the byte-like payload being mined (a numpy array of the staged
    transactions).  Returns the integer nonce.
    NOTE(review): bytes(r_value) builds a ZERO-FILLED buffer of length r, not
    an encoding of the number itself -- every peer shares this quirk so the
    hashes still agree, but it is almost certainly not what was intended.
    """
    print("calculating nonce")
    if(len(blockchain)!=0):
        print("previous blockchain",blockchain[-1].encode('utf-8'))
        previous_block = blockchain[-1].encode('utf-8')
    else:
        print("no previous block")
    while True:
        r_value = random.randint(0,15)
        r_value1 = r_value  # keep the integer; r_value becomes bytes below
        r_value = bytes(r_value)
        x = hashlib.sha256(a)
        x.update(r_value)
        if(len(blockchain)!= 0):
            x.update(previous_block)
        z = x.hexdigest()
        print(z)
        print(z[63])
        # Difficulty check: last hex digit of the digest must be 0..4.
        lastdigit = int(z[63],16)
        if lastdigit >=0 and lastdigit <=4:
            print("good")
            print("heyyyyyy",r_value1)
            return r_value1
depth = 0              # re-initialised; kept equal to len(blockchain)
ballotnum = [0,5,0]    # Paxos ballot: [round, process id (=5), chain depth]
acceptnum = 0          # highest ballot this acceptor has accepted
acceptval = 0          # value (block hash) accepted for that ballot
accepted_acks = 0      # "accepted" replies collected while acting as leader
promise_acks = 0       # "promise" replies collected while acting as leader
balance = 100          # committed balance (adjusted when blocks are decided)
credit = 100           # tentative balance (debited as soon as a tx is queued)
def restart_election():
    """Proposer loop, run as a daemon thread.

    While local transactions are queued, bumps the ballot number and
    broadcasts a Paxos "prepare" to the other peers, then waits 5 s for the
    round to finish before (possibly) retrying.
    """
    global promise_acks
    global accepted_acks
    while(1):
        time.sleep(1)
        while(Q.empty()==False):
            # Random back-off so competing proposers don't collide forever.
            x = random.randint(0,10)
            time.sleep(x)
            print("starting leader election")
            # Ballot: next round number, our process id (5), current depth.
            ballotnum[0] = ballotnum[0] + 1
            ballotnum[1] = 5
            ballotnum[2] = len(blockchain)
            transaction1 = transaction
            print(transaction1)
            preparemsg = ("prepare",ballotnum)
            preparemsg = pickle.dumps(("prepare",ballotnum))
            send_all(preparemsg)
            # Give the round time to complete, then reset the ack counters.
            time.sleep(5)
            accepted_acks = 0
            promise_acks = 0
        print("DONE")
def communication_thread():
    """Receive loop for all Paxos/blockchain UDP messages, run as a daemon.

    Handles:
      "prepare"         acceptor side: reply "promise" or push our longer chain
      "update for block" adopt a longer chain received from a peer
      "promise"         leader side: on a quorum, mine a block and send "accept"
      "accept"          acceptor side: reply "accepted"
      "accepted_value"  commit the decided block; credit transfers sent to us
      "accepted"        leader side: on a quorum, broadcast the decision and commit
    """
    global ballotnum
    global acceptnum
    global acceptval
    global promise_acks
    global accepted_acks
    global Q
    global Q1
    global balance
    global credit
    global depth
    global blockchain
    while(1):
        try:
            msg = sock.recvfrom(1246)
            if msg!=socket.error:
                msg = pickle.loads(msg[0])
                if msg[0] == "prepare":
                    time.sleep(0.5)
                    # Only promise if the proposer's chain is at least as long.
                    if(depth <= msg[1][2]):
                        if msg[1][0] > ballotnum[0]:
                            ballotnum = msg[1]
                            promisemessage = ("promise",ballotnum,acceptnum,acceptval)
                            print("sending promise to, ", msg[1][1])
                            promisem = pickle.dumps(promisemessage)
                            send_port(promisem,msg[1][1])
                        else:
                            # Same round: break the tie on process id.
                            if msg[1][0] == ballotnum[0]:
                                if msg[1][1] > ballotnum[1]:
                                    ballotnum = msg[1]
                                    promisemessage = ("promise",ballotnum,acceptnum,acceptval)
                                    print("sending promise to, ", msg[1][1])
                                    promisem = pickle.dumps(promisemessage)
                                    send_port(promisem,msg[1][1])
                    else:
                        # Proposer is behind: send it our (longer) chain instead.
                        print("ack : sending update for blockchain")
                        msgx = pickle.dumps(("update for block",blockchain))
                        send_port(msgx,msg[1][1])
                if msg[0] == "update for block":
                    if depth < len(msg[1]):
                        print("new blockchain received, fixing state of blockchain")
                        blockchain = msg[1]
                        depth = len(blockchain)
                if msg[0] == "promise":
                    print("promise ack")
                    promise_acks = promise_acks + 1
                    # Two promises plus ourselves is a majority of 5; the
                    # `< 3` guard makes the block get mined exactly once.
                    if promise_acks == 2 and promise_acks < 3:
                        list1 = [Q1.get()]
                        while(Q1.empty()==False):
                            list1.append(Q1.get())
                        for i in Q.queue:
                            Q1.put(i)
                        x = np.array(list1)
                        block_hash = hashlib.sha256(x)
                        Nonce = nonce(x)
                        print("Nonce : ", Nonce)
                        # NOTE(review): bytes(int) is a zero-filled buffer of
                        # that length, not the digits -- same quirk as nonce().
                        Nonce = bytes(Nonce)
                        block_hash.update(Nonce)
                        if(len(blockchain)!=0):
                            previous_block = blockchain[-1].encode('utf-8')
                            block_hash.update(previous_block)
                        print("hash(transaction||Nonce) : ",block_hash.hexdigest())
                        m = hashlib.sha256(x)
                        print("original : " , m.hexdigest())
                        print(list1)
                        acceptval = block_hash.hexdigest()
                        acceptm = ("accept",ballotnum,acceptval)
                        acceptm = pickle.dumps(acceptm)
                        print("sending accepts")
                        send_all(acceptm)
                if msg[0] == "accept":
                    if msg[1][0] > ballotnum[0]:
                        acceptnum = msg[1]
                        acceptval = msg[2]
                        acceptedm = ("accepted",ballotnum,1)
                        acceptedm = pickle.dumps(acceptedm)
                        print("sending accept to ", msg[1][1])
                        send_port(acceptedm,msg[1][1])
                    else:
                        if msg[1][0] == ballotnum[0]:
                            if msg[1][1] >= ballotnum[1]:
                                acceptnum = msg[1]
                                acceptval = msg[2]
                                acceptedm = ("accepted",ballotnum,1)
                                acceptedm = pickle.dumps(acceptedm)
                                print("sending accept to ", msg[1][1])
                                send_port(acceptedm,msg[1][1])
                if msg[0] == "accepted_value":
                    # Leader decided: append the block and credit any
                    # transfers addressed to this peer (id 5).
                    v = msg[1]
                    for i in msg[2]:
                        if i[1] == 5:
                            balance = balance + i[2]
                            credit = credit + i[2]
                    print("acceptval : ",v)
                    blockchain.append(v)
                    depth = len(blockchain)
                if msg[0] == "accepted":
                    accepted_acks = accepted_acks + 1
                    print("accept ack")
                    if accepted_acks == 2:
                        # Quorum reached: broadcast the decision, debit our
                        # outgoing transactions and commit the block locally.
                        list_of_transactions = list()
                        for i in Q.queue:
                            list_of_transactions.append(i)
                            balance = balance - i[2]
                        print("acceptval : ",acceptval)
                        print("sending accepted val")
                        msgv = pickle.dumps(("accepted_value",acceptval,list_of_transactions))
                        Q.queue.clear()
                        send_all(msgv)
                        blockchain.append(acceptval)
                        depth = len(blockchain)
        except socket.error:
            # Non-blocking socket: nothing to read yet, back off briefly.
            time.sleep(1)
            pass
t2 = threading.Thread(target = communication_thread, daemon = True)
t2.start()
t3 = threading.Thread(target = restart_election, daemon = True)
t3.start()
# Interactive command loop (runs on the main thread).
# NOTE(review): `global transaction` at module level is a no-op -- module
# scope is already global.
while(1):
    global transaction
    x = input("\n1 = moeny Transaction = , 2 = faillink , 3 = failProcess , 4 = printBlockchain , 5 = printBalance , 6 = printQueue, 7 = recover state\n")
    if x == '1':
        # New money transfer: reset the Paxos ack counters and queue it.
        accepted_acks = 0
        promise_acks = 0
        value = input("value = ")
        if int(value) <= credit:
            # Debit the tentative balance immediately.
            credit = credit - int(value)
            rcvr = input("reciever = ")
            transaction = [1,int(rcvr),int(value)]
            Q.put(transaction)
            Q1.put(transaction)
        else:
            print("value exceeds balance, try again")
            continue
    if x == '2':
        # Toggle the simulated network links to the other peers.
        d = input("\n 1 = activate linke 2 = deactivate link: ")
        if(d=='2'):
            d2 = input("\n what link to deactivate 1 , 2 , 3 , 4 : ")
            if(d2=='1'):
                active1 = False
            if(d2=='2'):
                active2 = False
            if(d2=='3'):
                active3 = False
            if(d2=='4'):
                active4 = False
            if(d2=='5'):
                active5 = False
        if(d=='1'):
            d2 = input("\n what link to activate 1 , 2 , 3 , 4 : ")
            if(d2=='1'):
                active1 = True
            if(d2=='2'):
                active2 = True
            if(d2=='3'):
                active3 = True
            if(d2=='4'):
                active4 = True
            if(d2=='5'):
                active5 = True
    if x == '3':
        # Simulated crash: persist state to a shelf file, then exit.
        shfile = shelve.open("shelf_file")
        shfile['bc']= blockchain
        shfile['depth']= depth
        shfile['credit']= credit
        shfile['balance']= balance
        shfile['ballotnum']= ballotnum
        shfile['acceptval']= acceptval
        shfile['acceptnum']= acceptnum
        shfile.close()
        sys.exit()
    if x =='4':
        print(blockchain)
    if x =='5':
        print(balance)
    if x =='6':
        print(list(Q.queue))
    if x =='7':
        # Recover the state persisted by option 3, then wipe the shelf.
        var = shelve.open("shelf_file")
        blockchain = var['bc']
        depth = var['depth']
        credit = var['credit']
        balance = var['balance']
        ballotnum = var['ballotnum']
        acceptval = var['acceptval']
        acceptnum = var['acceptnum']
        var.clear()
        var.clear()
| [
"noreply@github.com"
] | noreply@github.com |
d7decc5793abb668a64476cf829ed006844441e9 | 636cb88a2cd6386e5e2731c227547496059370be | /src/DjangoPractice/urls.py | 2e388e3b83aaf415a4630a5c6dba97e242546373 | [] | no_license | Kazungu/DjangoPractice | 4cfed564686ef1edce7d83231dc7e9705e9e9e9e | 03543096154b8f7b54600157036235aea841fd13 | refs/heads/master | 2022-05-30T15:12:44.388067 | 2020-05-02T08:37:19 | 2020-05-02T08:37:19 | 257,080,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | """DjangoPractice URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from pages.views import home_view
from products.views import product_detail_view, product_create_view
# Project URL routing table (Django 2.x ``path()`` syntax).
urlpatterns = [
    path('', home_view, name='home'),
    # NOTE(review): the next two routes have no ``name=`` and therefore
    # cannot be reversed with ``reverse()``/``{% url %}``.
    path('create/',product_create_view),
    path('product/',product_detail_view),
    path('admin/', admin.site.urls),
]
| [
"kazungu61@gmail.com"
] | kazungu61@gmail.com |
8d56e332c42605f75e9b3d63c13f532e5ba7d925 | 3d75c928899c582159ea022580116c50359fc638 | /PrimerBlog/urls.py | 840d9910691ef5483632951577ec0f01a6a85372 | [] | no_license | codeheroco/django_desde_cero | a9133790e0d29d2ab4273f895eef616265b4fcf4 | bfdba157e55afbaa814898845f785f6e91001c05 | refs/heads/master | 2021-01-23T06:50:00.326584 | 2013-11-06T03:53:19 | 2013-11-06T03:53:19 | 14,162,257 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Legacy Django (<1.8) URLconf built with the deprecated ``patterns()``
# helper; view callables are given as dotted strings resolved at dispatch.
urlpatterns = patterns('',
    (r'^articulos/', include('blog.urls')),
    # Examples:
    # url(r'^$', 'PrimerBlog.views.home', name='home'),
    # url(r'^PrimerBlog/', include('PrimerBlog.foo.urls')),
    url(r'^$', 'blog.views.home', name='home'),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^crear/', 'blog.views.crear', name='crear'),
)
| [
"carlospicca@gmail.com"
] | carlospicca@gmail.com |
14b78f12d7845d6f3e43fc11adfae20a360f0cf1 | d7d1558607a8ace96a840cbef47e24a2d6a26102 | /setup3/preprocess/.ipynb_checkpoints/PreprocessingSetup3-checkpoint.py | 83187d8b58a979c02336ee48b75f943f5862135f | [
"MIT"
] | permissive | aliyesilkanat/MSC | 4a02e9f49c617dd724632c083325d887dbf87443 | 1fd424c82a86109a3ecfb7c657fb564a6839f7aa | refs/heads/master | 2022-03-26T00:06:18.846304 | 2019-11-12T17:31:49 | 2019-11-12T17:31:49 | 221,272,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,292 | py | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import pickle
import librosa
import sys
import glob
import random
import os
from collections import defaultdict
import re
import numpy as np
import json
from tacotron.utils import get_spectrograms
import pandas as pd
import gc
def read_speaker_info(speaker_info_path):
    """Read a VCTK-style speaker-info file and return speaker ids.

    The first line is treated as a header and skipped; for every other
    line the first whitespace-separated token is taken as the numeric
    speaker id and prefixed with 'p' (e.g. '225' -> 'p225').
    """
    with open(speaker_info_path, 'r') as info_file:
        rows = info_file.readlines()
    return ['p' + row.strip().split()[0] for row in rows[1:]]
def read_filenames(root_dir):
    """Group wav files found under ``root_dir/<subdir>/`` by speaker id.

    Scans ``root_dir/*/*`` (sorted for determinism), parses each basename
    against the pattern ``p<speaker>_<utt>.wav`` and returns a defaultdict
    mapping 'p<speaker>' to the list of matching file paths.
    """
    wav_pattern = re.compile(r'p(\d+)_(\d+)\.wav')
    speaker_map = defaultdict(list)
    for wav_path in sorted(glob.glob(os.path.join(root_dir, '*/*'))):
        basename = wav_path.strip().split('/')[-1]
        speaker_id, _utt_id = wav_pattern.match(basename).groups()
        speaker_map['p' + speaker_id].append(wav_path)
    return speaker_map
def wave_feature_extraction(wav_file, sr):
    """Load ``wav_file`` at sample rate ``sr`` and strip leading/trailing
    silence (librosa trim, 20 dB threshold). Returns the trimmed waveform.
    """
    waveform, _rate = librosa.load(wav_file, sr)
    waveform, _interval = librosa.effects.trim(waveform, top_db=20)
    return waveform
def spec_feature_extraction(wav_file):
    """Return the (mel, magnitude) spectrogram pair for ``wav_file`` as
    computed by tacotron's ``get_spectrograms`` helper."""
    return get_spectrograms(wav_file)
def sample_single_segments(pickle_path, sample_path, segment_size, n_samples):
    """Draw ``n_samples`` random (utterance_id, start_frame) pairs and
    dump them to ``sample_path`` as JSON.

    Utterances shorter than or equal to ``segment_size`` frames are
    excluded; the start frame is uniform over the valid offsets, so each
    sampled segment of ``segment_size`` frames fits inside the utterance.
    Utterances are sampled with replacement.
    """
    with open(pickle_path, 'rb') as handle:
        data = pickle.load(handle)
    # Keep only utterances long enough to contain a full segment; sort so
    # index-based sampling is deterministic given a seeded RNG.
    eligible = sorted(u for u in data if len(data[u]) > segment_size)
    print(f'{len(eligible)} utterances')
    samples = []
    for count, utt_index in enumerate(random.choices(range(len(eligible)), k=n_samples)):
        if count % 500 == 0:
            print(f'sample {count} samples')
        utt_id = eligible[utt_index]
        start = random.randint(0, len(data[utt_id]) - segment_size)
        samples.append((utt_id, start))
    with open(sample_path, 'w') as handle:
        json.dump(samples, handle)
# In[3]:
vctk_ids=read_speaker_info("/raid/users/ayesilkanat/MSC/VCTK/VCTK-Corpus/speaker-info.txt")
# In[5]:
from sklearn.model_selection import train_test_split
train, test = train_test_split(vctk_ids, test_size=0.2, random_state=13)
train, val = train_test_split(train, test_size=0.2, random_state=1)
# In[6]:
train_speaker_ids=train + [os.path.split(path)[-1] for path in sorted(glob.glob("/raid/users/ayesilkanat/MSC/SELL-CORPUS/train/*/*"))]
test_speaker_ids=[os.path.split(path)[-1] for path in sorted(glob.glob("/raid/users/ayesilkanat/MSC/SELL-CORPUS/dev/*/*"))]
# In[14]:
stage=0
segment_size=128
n_out_speakers=20
test_prop=0.1
sample_rate=24000
training_samples=10000000
testing_samples=10000
n_utt_attr=5000
output_dir = "../spectrograms/sr_24000_mel_norm_128frame_256mel"
test_proportion = test_prop
n_utts_attr = n_utt_attr
#$raw_data_dir/wav48 $raw_data_dir/speaker-info.txt $data_dir $n_out_speakers $test_prop $sample_rate $n_utt_attr
speaker2filenames = defaultdict(lambda : [])
for path in sorted(glob.glob(os.path.join("/raid/users/ayesilkanat/MSC/VCTK/VCTK-Corpus/wav48", '*/*'))):
filename = path.strip().split('/')[-1]
speaker_id, utt_id = re.match(r'p(\d+)_(\d+)\.wav', filename).groups()
if speaker_id in train:
speaker2filenames["p"+speaker_id].append(path)
for folder_path in sorted(glob.glob("/raid/users/ayesilkanat/MSC/SELL-CORPUS/train/*/*")):
speaker_id=os.path.split(folder_path)[-1]
paths=glob.glob(os.path.join(folder_path,"*.wav"))
for path in paths:
speaker2filenames[speaker_id].append(path)
for folder_path in sorted(glob.glob("/raid/users/ayesilkanat/MSC/SELL-CORPUS/dev/*/*")):
speaker_id=os.path.split(folder_path)[-1]
paths=glob.glob(os.path.join(folder_path,"*.wav"))
for path in paths:
speaker2filenames[speaker_id].append(path)
train_path_list, in_test_path_list, out_test_path_list = [], [], []
for speaker in train_speaker_ids:
path_list = speaker2filenames[speaker]
random.shuffle(path_list)
test_data_size = int(len(path_list) * test_proportion)
train_path_list += path_list[:-test_data_size]
in_test_path_list += path_list[-test_data_size:]
for speaker in test_speaker_ids:
path_list = speaker2filenames[speaker]
out_test_path_list += path_list
# In[15]:
speaker2filenames
# In[ ]:
# In[8]:
with open(os.path.join(output_dir, 'in_test_files.txt'), 'w') as f:
for path in in_test_path_list:
f.write(f'{path}\n')
with open(os.path.join(output_dir, 'out_test_files.txt'), 'w') as f:
for path in out_test_path_list:
f.write(f'{path}\n')
# In[12]:
train_path_list
# In[9]:
# For each split, compute mel spectrograms for every wav file and pickle the
# result as {basename: mel}.  The first `n_utts_attr` training mels are also
# pooled to derive per-mel-bin mean/std statistics (saved to attr.pkl), which
# are then used to z-normalise the training set before it is written out.
for dset, path_list in zip(['train', 'in_test', 'out_test'], [train_path_list, in_test_path_list, out_test_path_list]):
    print(f'processing {dset} set, {len(path_list)} files')
    data = {}
    output_path = os.path.join(output_dir, f'{dset}.pkl')
    all_train_data = []
    for i, path in enumerate(sorted(path_list)):
        if i % 500 == 0 or i == len(path_list) - 1:
            print(f'processing {i} files')
        filename = path.strip().split('/')[-1]
        # mag (linear spectrogram) is discarded; only mel features are kept.
        mel, mag = spec_feature_extraction(path)
        data[filename] = mel
        if dset == 'train' and i < n_utts_attr:
            all_train_data.append(mel)
    if dset == 'train':
        # Frame-wise concatenation -> statistics per mel bin over the sample.
        all_train_data = np.concatenate(all_train_data)
        mean = np.mean(all_train_data, axis=0)
        std = np.std(all_train_data, axis=0)
        attr = {'mean': mean, 'std': std}
        with open(os.path.join(output_dir, 'attr.pkl'), 'wb') as f:
            pickle.dump(attr, f)
        # NOTE(review): only the train split is normalised here; the test
        # splits are presumably normalised at load time using attr.pkl —
        # confirm against the data loader.
        for key, val in data.items():
            val = (val - mean) / std
            data[key] = val
    with open(output_path, 'wb') as f:
        pickle.dump(data, f)
# In[ ]:
del data
gc.collect()
# In[ ]:
# In[ ]:
pkl_path = "spectrograms/sr_24000_mel_norm_128frame_256mel/train.pkl"
output_path = "../spectrograms/sr_24000_mel_norm_128frame_256mel/train_"+str(segment_size)+".pkl"
with open(pkl_path, 'rb') as f:
data = pickle.load(f)
reduced_data = {key:val for key, val in data.items() if val.shape[0] > segment_size}
with open(output_path, 'wb') as f:
pickle.dump(reduced_data, f)
# In[ ]:
del reduced_data
gc.collect()
# In[ ]:
pickle_path = "../spectrograms/sr_24000_mel_norm_128frame_256mel/train.pkl"
sample_path = "../spectrograms/sr_24000_mel_norm_128frame_256mel/train_samples_"+str(segment_size)+".json"
n_samples = training_samples
sample_single_segments(pickle_path,sample_path,segment_size,n_samples)
gc.collect()
# In[ ]:
pickle_path = "../spectrograms/sr_24000_mel_norm_128frame_256mel/in_test.pkl"
sample_path = "../spectrograms/sr_24000_mel_norm_128frame_256mel/in_test_samples_"+str(segment_size)+".json"
n_samples = testing_samples
sample_single_segments(pickle_path,sample_path,segment_size,n_samples)
gc.collect()
# In[ ]:
pickle_path = "../spectrograms/sr_24000_mel_norm_128frame_256mel/out_test.pkl"
sample_path = "../spectrograms/sr_24000_mel_norm_128frame_256mel/out_test_samples_"+str(segment_size)+".json"
n_samples = testing_samples
sample_single_segments(pickle_path,sample_path,segment_size,n_samples)
gc.collect()
# In[ ]:
# In[ ]:
| [
"aliyesilkanat@gmail.com"
] | aliyesilkanat@gmail.com |
8d551edf06666357e86023d00f97b72089c75f70 | d58037fda6a6d05e226261b47cb34ba8dd6ab724 | /airflow/providers/microsoft/azure/operators/data_factory.py | 1ad372395057edf46e7cd1ea4e84aeb4e27543b0 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | shiv-io/airflow | 432f684af111d75356ff0b960aa214767a379b0d | 36fe6d0377d37b5f6be8ea5659dcabb44b4fc233 | refs/heads/main | 2023-05-28T08:35:17.500007 | 2023-04-29T15:36:31 | 2023-04-29T15:36:31 | 309,048,170 | 0 | 0 | Apache-2.0 | 2020-11-01T07:52:50 | 2020-11-01T07:52:49 | null | UTF-8 | Python | false | false | 11,484 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
import warnings
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator, BaseOperatorLink, XCom
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryHook,
AzureDataFactoryPipelineRunException,
AzureDataFactoryPipelineRunStatus,
get_field,
)
from airflow.providers.microsoft.azure.triggers.data_factory import AzureDataFactoryTrigger
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstanceKey
from airflow.utils.context import Context
class AzureDataFactoryPipelineRunLink(LoggingMixin, BaseOperatorLink):
    """Constructs a link to monitor a pipeline run in Azure Data Factory."""

    name = "Monitor Pipeline Run"

    def get_link(
        self,
        operator: BaseOperator,
        *,
        ti_key: TaskInstanceKey,
    ) -> str:
        """Build the Azure portal URL that monitors the pipeline run started
        by the given task instance.

        The run id is read from the ``run_id`` XCom pushed by
        ``AzureDataFactoryRunPipelineOperator.execute``; the subscription id
        must be present in the operator's Azure Data Factory connection.

        :raises KeyError: if the connection extras contain no subscription id.
        """
        run_id = XCom.get_value(key="run_id", ti_key=ti_key)
        conn_id = operator.azure_data_factory_conn_id  # type: ignore
        conn = BaseHook.get_connection(conn_id)
        extras = conn.extra_dejson
        # Support both the current and the legacy ("extra__azure__"-prefixed)
        # extras field naming.
        subscription_id = get_field(extras, "subscriptionId") or get_field(
            extras, "extra__azure__subscriptionId"
        )
        if not subscription_id:
            raise KeyError(f"Param subscriptionId not found in conn_id '{conn_id}'")
        # Both Resource Group Name and Factory Name can either be declared in the Azure Data Factory
        # connection or passed directly to the operator.
        resource_group_name = operator.resource_group_name or get_field(  # type: ignore
            extras, "resource_group_name"
        )
        factory_name = operator.factory_name or get_field(extras, "factory_name")  # type: ignore
        url = (
            f"https://adf.azure.com/en-us/monitoring/pipelineruns/{run_id}"
            f"?factory=/subscriptions/{subscription_id}/"
            f"resourceGroups/{resource_group_name}/providers/Microsoft.DataFactory/"
            f"factories/{factory_name}"
        )
        return url
class AzureDataFactoryRunPipelineOperator(BaseOperator):
    """
    Executes a data factory pipeline.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:AzureDataFactoryRunPipelineOperator`

    :param azure_data_factory_conn_id: The connection identifier for connecting to Azure Data Factory.
    :param pipeline_name: The name of the pipeline to execute.
    :param wait_for_termination: Flag to wait on a pipeline run's termination. By default, this feature is
        enabled but could be disabled to perform an asynchronous wait for a long-running pipeline execution
        using the ``AzureDataFactoryPipelineRunSensor``.
    :param resource_group_name: The resource group name. If a value is not passed in to the operator, the
        ``AzureDataFactoryHook`` will attempt to use the resource group name provided in the corresponding
        connection.
    :param factory_name: The data factory name. If a value is not passed in to the operator, the
        ``AzureDataFactoryHook`` will attempt to use the factory name provided in the corresponding
        connection.
    :param reference_pipeline_run_id: The pipeline run identifier. If this run ID is specified the parameters
        of the specified run will be used to create a new run.
    :param is_recovery: Recovery mode flag. If recovery mode is set to `True`, the specified referenced
        pipeline run and the new run will be grouped under the same ``groupId``.
    :param start_activity_name: In recovery mode, the rerun will start from this activity. If not specified,
        all activities will run.
    :param start_from_failure: In recovery mode, if set to true, the rerun will start from failed activities.
        The property will be used only if ``start_activity_name`` is not specified.
    :param parameters: Parameters of the pipeline run. These parameters are referenced in a pipeline via
        ``@pipeline().parameters.parameterName`` and will be used only if the ``reference_pipeline_run_id`` is
        not specified.
    :param timeout: Time in seconds to wait for a pipeline to reach a terminal status for non-asynchronous
        waits. Used only if ``wait_for_termination`` is True.
    :param check_interval: Time in seconds to check on a pipeline run's status for non-asynchronous waits.
        Used only if ``wait_for_termination`` is True.
    :param deferrable: Run operator in deferrable mode.
    """

    template_fields: Sequence[str] = (
        "azure_data_factory_conn_id",
        "resource_group_name",
        "factory_name",
        "pipeline_name",
        "reference_pipeline_run_id",
        "parameters",
    )
    template_fields_renderers = {"parameters": "json"}

    ui_color = "#0678d4"

    operator_extra_links = (AzureDataFactoryPipelineRunLink(),)

    def __init__(
        self,
        *,
        pipeline_name: str,
        azure_data_factory_conn_id: str = AzureDataFactoryHook.default_conn_name,
        wait_for_termination: bool = True,
        resource_group_name: str | None = None,
        factory_name: str | None = None,
        reference_pipeline_run_id: str | None = None,
        is_recovery: bool | None = None,
        start_activity_name: str | None = None,
        start_from_failure: bool | None = None,
        parameters: dict[str, Any] | None = None,
        timeout: int = 60 * 60 * 24 * 7,
        check_interval: int = 60,
        deferrable: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.azure_data_factory_conn_id = azure_data_factory_conn_id
        self.pipeline_name = pipeline_name
        self.wait_for_termination = wait_for_termination
        self.resource_group_name = resource_group_name
        self.factory_name = factory_name
        self.reference_pipeline_run_id = reference_pipeline_run_id
        self.is_recovery = is_recovery
        self.start_activity_name = start_activity_name
        self.start_from_failure = start_from_failure
        self.parameters = parameters
        self.timeout = timeout
        self.check_interval = check_interval
        self.deferrable = deferrable
        # Populated by execute(). Pre-initialized so that on_kill() is a
        # no-op (instead of raising AttributeError) when the task is killed
        # before a pipeline run was ever started.
        self.run_id: str | None = None

    def execute(self, context: Context) -> None:
        """Trigger the pipeline run and (optionally) wait for it to finish."""
        self.hook = AzureDataFactoryHook(azure_data_factory_conn_id=self.azure_data_factory_conn_id)
        self.log.info("Executing the %s pipeline.", self.pipeline_name)
        response = self.hook.run_pipeline(
            pipeline_name=self.pipeline_name,
            resource_group_name=self.resource_group_name,
            factory_name=self.factory_name,
            reference_pipeline_run_id=self.reference_pipeline_run_id,
            is_recovery=self.is_recovery,
            start_activity_name=self.start_activity_name,
            start_from_failure=self.start_from_failure,
            parameters=self.parameters,
        )
        self.run_id = vars(response)["run_id"]
        # Push the ``run_id`` value to XCom regardless of what happens during execution. This allows for
        # retrieval the executed pipeline's ``run_id`` for downstream tasks especially if performing an
        # asynchronous wait.
        context["ti"].xcom_push(key="run_id", value=self.run_id)

        if self.wait_for_termination:
            if self.deferrable is False:
                # Synchronous wait: poll the run status until it terminates.
                self.log.info("Waiting for pipeline run %s to terminate.", self.run_id)

                if self.hook.wait_for_pipeline_run_status(
                    run_id=self.run_id,
                    expected_statuses=AzureDataFactoryPipelineRunStatus.SUCCEEDED,
                    check_interval=self.check_interval,
                    timeout=self.timeout,
                    resource_group_name=self.resource_group_name,
                    factory_name=self.factory_name,
                ):
                    self.log.info("Pipeline run %s has completed successfully.", self.run_id)
                else:
                    raise AzureDataFactoryPipelineRunException(
                        f"Pipeline run {self.run_id} has failed or has been cancelled."
                    )
            else:
                # Deferrable mode: hand off the polling to the triggerer.
                end_time = time.time() + self.timeout
                self.defer(
                    timeout=self.execution_timeout,
                    trigger=AzureDataFactoryTrigger(
                        azure_data_factory_conn_id=self.azure_data_factory_conn_id,
                        run_id=self.run_id,
                        wait_for_termination=self.wait_for_termination,
                        resource_group_name=self.resource_group_name,
                        factory_name=self.factory_name,
                        check_interval=self.check_interval,
                        end_time=end_time,
                    ),
                    method_name="execute_complete",
                )
        else:
            if self.deferrable is True:
                warnings.warn(
                    "Argument `wait_for_termination` is False and `deferrable` is True , hence "
                    "`deferrable` parameter doesn't have any effect",
                )

    def execute_complete(self, context: Context, event: dict[str, str]) -> None:
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was
        successful.
        """
        if event:
            if event["status"] == "error":
                raise AirflowException(event["message"])
            self.log.info(event["message"])

    def on_kill(self) -> None:
        """Cancel the in-flight pipeline run, if one was started.

        ``self.run_id`` (and therefore ``self.hook``) is only set once
        ``execute()`` has started a run, so this is a no-op otherwise.
        """
        if self.run_id:
            self.hook.cancel_pipeline_run(
                run_id=self.run_id,
                resource_group_name=self.resource_group_name,
                factory_name=self.factory_name,
            )

            # Check to ensure the pipeline run was cancelled as expected.
            if self.hook.wait_for_pipeline_run_status(
                run_id=self.run_id,
                expected_statuses=AzureDataFactoryPipelineRunStatus.CANCELLED,
                check_interval=self.check_interval,
                timeout=self.timeout,
                resource_group_name=self.resource_group_name,
                factory_name=self.factory_name,
            ):
                self.log.info("Pipeline run %s has been cancelled successfully.", self.run_id)
            else:
                raise AzureDataFactoryPipelineRunException(f"Pipeline run {self.run_id} was not cancelled.")
| [
"noreply@github.com"
] | noreply@github.com |
be1621abbe83595d69e640b35f1f12fae4156faf | d3af7445da81972bb3102fabb68c0baa608fca21 | /liver.py | 6b64b4d0e83be8ffff061c6bb7d2d8da590fe53a | [] | no_license | xyj77/siamese-triplet | 258c4f4945af0f0e80ba8bac7a43dbd1ea9a4379 | 6ff34f6fa08eba9e203ef22a233e1d3afe40a869 | refs/heads/master | 2020-03-20T07:50:41.518151 | 2018-12-27T07:30:44 | 2018-12-27T07:30:44 | 137,292,537 | 0 | 1 | null | 2018-06-14T01:53:08 | 2018-06-14T01:53:08 | null | UTF-8 | Python | false | false | 23,659 | py | #-*- coding:utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import with_statement
import os
import time
import argparse
import torch
from torch import nn
from torch.autograd import Variable
import torchvision
from torchvision import transforms
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
from model import resnet
from model.trainer import fit
from model.metrics import AccumulatedAccuracyMetric
from utils.utils import extract_embeddings, plot_embeddings
# Device configuration
cuda = torch.cuda.is_available()
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser("""Image classifical!""")
parser.add_argument('--path', type=str, default='./data/cifar10/',
help="""image dir path default: './data/cifar10/'.""")
parser.add_argument('--epochs', type=int, default=50,
help="""Epoch default:50.""")
parser.add_argument('--batch_size', type=int, default=256,
help="""Batch_size default:256.""")
parser.add_argument('--lr', type=float, default=0.0001,
help="""learing_rate. Default=0.0001""")
parser.add_argument('--num_classes', type=int, default=10,
help="""num classes""")
parser.add_argument('--model_path', type=str, default='./model/',
help="""Save model path""")
parser.add_argument('--model_name', type=str, default='cifar10.pth',
help="""Model name.""")
parser.add_argument('--display_epoch', type=int, default=5)
# parser.add_argument('-c', '--config', default='configs/transfer_config.json')
# # classes=('I II','III IV')
# classes=('I', 'II', 'III', 'IV')
parser.add_argument('-c', '--config', default='configs/who_config.json')
classes=('1','2', '3')
args = parser.parse_args()
# Create model
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
# ##############################################################################
# #######################################Method01: Train ResNet ################
# transform = transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomCrop(32, 4),
# # transforms.RandomHorizontalFlip(p=0.50), # 有0.75的几率随机旋转
# # transforms.ColorJitter(brightness=1, contrast=2, saturation=3, hue=0), # 给图像增加一些随机的光照
# transforms.ToTensor(), # 将numpy数据类型转化为Tensor
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # 归一化
# ])
# classes=('plane','car','bird','cat','deer','dog','frog','horse','ship','truck')
# # Load data
# train_dataset = torchvision.datasets.CIFAR10(root=args.path,
# transform=transform,
# download=True,
# train=True)
# test_dataset = torchvision.datasets.CIFAR10(root=args.path,
# transform=transform,
# download=True,
# train=False)
# # Set up data loaders
# batch_size = 128
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # 训练模型
# res_model = resnet.resnet20(num_features = 2, num_classes = 10)
# print(res_model)
# res_model.cuda()
# loss_fn = nn.NLLLoss().cuda()
# lr = 1e-2
# optimizer = torch.optim.SGD(res_model.parameters(), lr, momentum=0.9, weight_decay=5e-4)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150])
# n_epochs = 200
# log_interval = 100
# fit(train_loader, test_loader, res_model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=[AccumulatedAccuracyMetric()])
# # 绘图
# from utils.utils import extract_embeddings, plot_embeddings
# # Set up data loaders
# batch_size = 256
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, res_model)
# plot_embeddings(train_embeddings_cl, train_labels_cl, classes=classes, save_tag = 'train')
# val_embeddings_cl, val_labels_cl = extract_embeddings(test_loader, res_model)
# plot_embeddings(val_embeddings_cl, val_labels_cl, classes=classes, save_tag = 'test')
# # Save model
# torch.save(res_model.state_dict(), './model/model_dict.pkl')
# #载入预训练Resnet
# pretrain_dict = torch.load('model/model_dict.pkl')
# res_model = resnet.resnet20(num_features = 2, num_classes = 10)
# res_model.load_state_dict(pretrain_dict)
# # 验证预训练模型
# from utils.eval import validate
# validate(test_loader, res_model.cuda(), nn.CrossEntropyLoss().cuda())
# validate(train_loader, res_model.cuda(), nn.CrossEntropyLoss().cuda())
##################################################################################
#######################################Method02: ResNet for Liver ################
## python liver.py -c configs/transfer_config.json
from data_loader.mri_t2wi import MRIT2WI
from data_loader.datasets import SiameseMRI, TripletMRI
from utils.config import get_args, process_config
from utils.utils import printData
config = process_config(args.config)
# transform = transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomCrop(32, 4),
# transforms.ToTensor(), # 将numpy数据类型转化为Tensor
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # 归一化
# ])
# Load data
print('Create the data generator.')
train_dataset = MRIT2WI(config, train = True)
test_dataset = MRIT2WI(config, train = False)
# printData(test_dataset, type='normal')
# Set up data loaders
batch_size = 128
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Set up the network and training parameters
res_model = resnet.resnet20(num_features = 2, num_classes = config.classes)
print(res_model)
res_model.cuda()
loss_fn = nn.NLLLoss().cuda()
lr = 1e-3
optimizer = torch.optim.Adam(res_model.parameters(), lr=lr, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 16, gamma=0.1, last_epoch=-1)
# lr = 1e-2
# optimizer = torch.optim.SGD(res_model.parameters(), lr, momentum=0.8, weight_decay=5e-4)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150])
n_epochs = 100
log_interval = 100
fit(train_loader, test_loader, res_model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=[AccumulatedAccuracyMetric()])
# 绘图
linearWeights = res_model.state_dict()['linear.weight'].cpu().numpy()
linearBias = res_model.state_dict()['linear.bias'].cpu().numpy()
# Set up data loaders
batch_size = 256
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, res_model)
plot_embeddings(train_embeddings_cl, train_labels_cl, linearWeights, linearBias, classes=classes, save_tag = 'train')
val_embeddings_cl, val_labels_cl = extract_embeddings(test_loader, res_model)
plot_embeddings(val_embeddings_cl, val_labels_cl, linearWeights, linearBias, classes=classes, save_tag = 'test')
# ##################################################################################
# #######################################Method03: ResNet-Siamese for Liver ################
# ## python liver.py -c configs/transfer_config.json
# from data_loader.mri_t2wi import MRIT2WI
# from data_loader.datasets import SiameseMRI, TripletMRI
# from utils.config import get_args, process_config
# from utils.utils import printData
# config = process_config(args.config)
# # Load data
# print('Create the data generator.')
# train_dataset = MRIT2WI(config, train = True)
# test_dataset = MRIT2WI(config, train = False)
# siamese_train_dataset = SiameseMRI(train_dataset) # Returns pairs of images and target same/different
# siamese_test_dataset = SiameseMRI(test_dataset)
# # printData(test_dataset, type='normal')
# # Set up data loaders
# batch_size = 32
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # Set up the network and training parameters
# from model.cifar_networks import SiameseNet
# from model.losses import ContrastiveLoss
# res_model = resnet.resnet20(num_features = 8, num_classes = config.classes)
# print(res_model)
# model = SiameseNet(res_model)
# model.cuda()
# loss_fn = ContrastiveLoss(1.0).cuda()
# lr = 1e-3
# optimizer = torch.optim.Adam(res_model.parameters(), lr=lr, weight_decay=1e-4)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 16, gamma=0.1, last_epoch=-1)
# # lr = 1e-2
# # optimizer = torch.optim.SGD(model.parameters(), lr, momentum=0.9, weight_decay=5e-4)
# # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150])
# n_epochs = 200
# log_interval = 100
# fit(train_loader, test_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval)
# # 绘图
# # Set up data loaders
# batch_size = 256
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, model)
# plot_embeddings(train_embeddings_cl, train_labels_cl, classes=classes, save_tag = 'train')
# val_embeddings_cl, val_labels_cl = extract_embeddings(test_loader, model)
# plot_embeddings(val_embeddings_cl, val_labels_cl, classes=classes, save_tag = 'test')
# ##################################################################################
# #######################################Method04: ResNet-Triplet for Liver ################
# ## python liver.py -c configs/transfer_config.json
# from data_loader.mri_t2wi import MRIT2WI
# from data_loader.datasets import SiameseMRI, TripletMRI
# from utils.config import get_args, process_config
# from utils.utils import printData
# config = process_config(args.config)
# # Load data
# print('Create the data generator.')
# train_dataset = MRIT2WI(config, train = True)
# test_dataset = MRIT2WI(config, train = False)
# triplet_train_dataset = TripletMRI(train_dataset) # Returns pairs of images and target same/different
# triplet_test_dataset = TripletMRI(test_dataset)
# # printData(test_dataset, type='normal')
# # Set up data loaders
# batch_size = 32
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(triplet_train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(triplet_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # Set up the network and training parameters
# from model.cifar_networks import TripletNet
# from model.losses import TripletLoss
# res_model = resnet.resnet20(num_features = 8, num_classes = config.classes)
# print(res_model)
# model = TripletNet(res_model)
# model.cuda()
# loss_fn = TripletLoss(1.0).cuda()
# lr = 1e-3
# optimizer = torch.optim.Adam(res_model.parameters(), lr=lr, weight_decay=1e-4)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 16, gamma=0.1, last_epoch=-1)
# # lr = 1e-2
# # optimizer = torch.optim.SGD(model.parameters(), lr, momentum=0.9, weight_decay=5e-4)
# # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150])
# n_epochs = 200
# log_interval = 100
# fit(train_loader, test_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval)
# # 绘图
# # Set up data loaders
# batch_size = 256
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, model)
# plot_embeddings(train_embeddings_cl, train_labels_cl, classes=classes, save_tag = 'train')
# val_embeddings_cl, val_labels_cl = extract_embeddings(test_loader, model)
# plot_embeddings(val_embeddings_cl, val_labels_cl, classes=classes, save_tag = 'test')
# #################################################################################
# ######################################Method05: Pretrained ResNet for Liver ################
# ## python liver.py -c configs/transfer_config.json
# from data_loader.mri_t2wi import MRIT2WI
# from data_loader.datasets import SiameseMRI, TripletMRI
# from utils.config import get_args, process_config
# from utils.utils import printData
# config = process_config(args.config)
# # transform = transforms.Compose([
# # transforms.RandomHorizontalFlip(),
# # transforms.RandomCrop(32, 4),
# # transforms.ToTensor(), # 将numpy数据类型转化为Tensor
# # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # 归一化
# # ])
# # Load data
# print('Create the data generator.')
# train_dataset = MRIT2WI(config, train = True)
# test_dataset = MRIT2WI(config, train = False)
# # printData(test_dataset, type='normal')
# # Set up data loaders
# batch_size = 128
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # Set up the network and training parameters
# #载入预训练Resnet
# pretrain_dict = torch.load('model/model_dict.pkl')
# res_model = resnet.resnet20(num_features = 2, num_classes = 10)
# res_model.load_state_dict(pretrain_dict)
# #提取fc层中固定的参数
# features_num = res_model.linear.in_features
# #修改类别
# res_model.linear = nn.Linear(features_num, config.classes)
# print(res_model)
# res_model.cuda()
# loss_fn = nn.NLLLoss().cuda()
# lr = 1e-3
# optimizer = torch.optim.Adam(res_model.parameters(), lr=lr, weight_decay=1e-4)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 16, gamma=0.1, last_epoch=-1)
# # lr = 1e-2
# # optimizer = torch.optim.SGD(res_model.parameters(), lr, momentum=0.8, weight_decay=5e-4)
# # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150])
# n_epochs = 100
# log_interval = 100
# fit(train_loader, test_loader, res_model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=[AccumulatedAccuracyMetric()])
# # 绘图
# # Set up data loaders
# batch_size = 256
# kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, res_model)
# plot_embeddings(train_embeddings_cl, train_labels_cl, classes=classes, save_tag = 'train')
# val_embeddings_cl, val_labels_cl = extract_embeddings(test_loader, res_model)
# plot_embeddings(val_embeddings_cl, val_labels_cl, classes=classes, save_tag = 'test')
# # L-Softmax Failed
# # ##################################################################################
# # #######################################Method06: ResNet-L-Softmax for Liver ################
# # ## python liver.py -c configs/transfer_config.json
# # from data_loader.mri_t2wi import MRIT2WI
# # from data_loader.datasets import SiameseMRI, TripletMRI
# # from utils.config import get_args, process_config
# # from utils.utils import printData
# # config = process_config(args.config)
# # # transform = transforms.Compose([
# # # transforms.RandomHorizontalFlip(),
# # # transforms.RandomCrop(32, 4),
# # # transforms.ToTensor(), # 将numpy数据类型转化为Tensor
# # # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # 归一化
# # # ])
# # # Load data
# # print('Create the data generator.')
# # train_dataset = MRIT2WI(config, train = True)
# # test_dataset = MRIT2WI(config, train = False)
# # # printData(test_dataset, type='normal')
# # # Set up data loaders
# # batch_size = 128
# # kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# # test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # # Set up the network and training parameters
# # from model import resnetA
# # res_model = resnetA.resnet20(num_features = 2, num_classes = config.classes)
# # print(res_model)
# # res_model.cuda()
# # loss_fn = nn.NLLLoss().cuda()
# # lr = 1e-3
# # optimizer = torch.optim.Adam(res_model.parameters(), lr=lr, weight_decay=1e-4)
# # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 16, gamma=0.1, last_epoch=-1)
# # # lr = 1e-2
# # # optimizer = torch.optim.SGD(res_model.parameters(), lr, momentum=0.8, weight_decay=5e-4)
# # # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150])
# # n_epochs = 100
# # log_interval = 100
# # from model.trainerA import fitA
# # fitA(train_loader, test_loader, res_model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=[AccumulatedAccuracyMetric()])
# # # 绘图
# # linearWeights = res_model.state_dict()['linear.weight'].cpu().numpy()
# # linearBias = res_model.state_dict()['linear.bias'].cpu().numpy()
# # # Set up data loaders
# # batch_size = 256
# # kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# # test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, res_model)
# # plot_embeddings(train_embeddings_cl, train_labels_cl, linearWeights, linearBias, classes=classes, save_tag = 'train')
# # val_embeddings_cl, val_labels_cl = extract_embeddings(test_loader, res_model)
# # plot_embeddings(val_embeddings_cl, val_labels_cl, linearWeights, linearBias, classes=classes, save_tag = 'test')
# # ##############################################################################################
# # #######################################Method06: ResNet-L-Softmax for cifar10 ################
# # transform = transforms.Compose([
# # transforms.RandomHorizontalFlip(),
# # transforms.RandomCrop(32, 4),
# # # transforms.RandomHorizontalFlip(p=0.50), # 有0.75的几率随机旋转
# # # transforms.ColorJitter(brightness=1, contrast=2, saturation=3, hue=0), # 给图像增加一些随机的光照
# # transforms.ToTensor(), # 将numpy数据类型转化为Tensor
# # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # 归一化
# # ])
# # classes=('plane','car','bird','cat','deer','dog','frog','horse','ship','truck')
# # # Load data
# # train_dataset = torchvision.datasets.CIFAR10(root=args.path,
# # transform=transform,
# # download=True,
# # train=True)
# # test_dataset = torchvision.datasets.CIFAR10(root=args.path,
# # transform=transform,
# # download=True,
# # train=False)
# # # Set up data loaders
# # batch_size = 128
# # kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# # test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # # Set up the network and training parameters
# # from model import resnetA
# # res_model = resnetA.resnet20(num_features = 2, num_classes = 10)
# # print(res_model)
# # res_model.cuda()
# # loss_fn = nn.NLLLoss().cuda()
# # lr = 1e-3
# # optimizer = torch.optim.Adam(res_model.parameters(), lr=lr, weight_decay=1e-4)
# # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 16, gamma=0.1, last_epoch=-1)
# # n_epochs = 100
# # log_interval = 100
# # from model.trainerA import fitA
# # fitA(train_loader, test_loader, res_model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=[AccumulatedAccuracyMetric()])
# # # 绘图
# # linearWeights = res_model.state_dict()['linear.weight'].cpu().numpy()
# # linearBias = res_model.state_dict()['linear.bias'].cpu().numpy()
# # # Set up data loaders
# # batch_size = 256
# # kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
# # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
# # test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# # train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, res_model)
# # plot_embeddings(train_embeddings_cl, train_labels_cl, linearWeights, linearBias, classes=classes, save_tag = 'train')
# # val_embeddings_cl, val_labels_cl = extract_embeddings(test_loader, res_model)
# # plot_embeddings(val_embeddings_cl, val_labels_cl, linearWeights, linearBias, classes=classes, save_tag = 'test')
# # # Save model
# # torch.save(res_model.state_dict(), './model/model_dict.pkl')
# # #载入预训练Resnet
# # pretrain_dict = torch.load('model/model_dict.pkl')
# # res_model = resnet.resnet20(num_features = 2, num_classes = 10)
# # res_model.load_state_dict(pretrain_dict)
# # # 验证预训练模型
# # from utils.eval import validate
# # validate(test_loader, res_model.cuda(), nn.CrossEntropyLoss().cuda())
# # validate(train_loader, res_model.cuda(), nn.CrossEntropyLoss().cuda()) | [
"15172535067@163.com"
] | 15172535067@163.com |
f9967a372a3f92a0a32b1e8b2a075e4e2de103d1 | 2d8e383ffba2a0aaaa2cbb50793b1635a45e3883 | /bigdata-demo-searching-lambda/ankaracloudmeetup-bigdata-demo-searching-lambda-s3-to-es-python/twitter_to_es.py | 3102978cc1a88bb410291d8f996f63559b8de2a5 | [
"Apache-2.0"
] | permissive | yukarikir/ankaracloudmeetup-bigdata-demo | 5461f8a2cfa8e741033afa78c9f2911ad655b44b | 43196dc97c77ed4616e28e24ebcfa07174a392db | refs/heads/master | 2020-03-07T01:46:09.464104 | 2017-06-27T16:50:01 | 2017-06-27T16:50:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | '''
Created on Oct 8, 2015
@author: mentzera
Modified on Jul 1, 2016
@author: sozal
'''
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from elasticsearch.exceptions import ElasticsearchException
from tweet_utils import analyze_and_get_tweet, id_field, tweet_mapping
index_name = 'twitter'
doc_type = 'tweet'
mapping = {
doc_type: tweet_mapping
}
bulk_chunk_size = 1000
def save(tweets, es_host, es_port):
es = Elasticsearch(host = es_host, port = es_port)
print('Saving tweets into ElasticSearch on {}...'.format(es_host))
if es.indices.exists(index_name):
print ('Index {} already exists'.format(index_name))
try:
es.indices.put_mapping(doc_type, tweet_mapping, index_name)
except ElasticsearchException as e:
print('Error while putting mapping:\n' + str(e))
print('Deleting index {} on...'.format(index_name))
es.indices.delete(index_name)
print('Creating index {}...'.format(index_name))
es.indices.create(index_name, body = {'mappings': mapping})
else:
print('Index {} does not exist'.format(index_name))
print('Creating index {}...'.format(index_name))
es.indices.create(index_name, body = {'mappings': mapping})
counter = 0
bulk_data = []
list_size = len(tweets)
for doc in tweets:
tweet = analyze_and_get_tweet(doc)
bulk_doc = {
"_index": index_name,
"_type": doc_type,
"_id": tweet[id_field],
"_source": tweet
}
bulk_data.append(bulk_doc)
counter += 1
if counter % bulk_chunk_size == 0 or counter == list_size:
print('ElasticSearch bulk index (index: {INDEX}, type: {TYPE})...'.format(INDEX=index_name, TYPE=doc_type))
success, _ = bulk(es, bulk_data)
print 'ElasticSearch has indexed %d documents' % success
bulk_data = []
| [
"serkanozal86@hotmail.com"
] | serkanozal86@hotmail.com |
3fbdf957571e7f078c7dcecad3966c0746a6fc5e | 4273f6c264fa5a7267557c5e0d338a2cbd27789e | /AIE23/20191207_big_data_ai/1_pyspark_dataframe/ml/3_decision_tree_classification_example.py | fe32e3c30872236d2fbd76cdba11f209f222b78b | [] | no_license | shcqupc/Alg_study | 874d37954ed8ed2cdb3bd492d59cd071836946f5 | 462ee12c72b7f84c5ae45aaf0f65b812d7c1ada1 | refs/heads/master | 2020-07-10T15:26:40.603300 | 2020-03-27T12:53:16 | 2020-03-27T12:53:16 | 204,298,238 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("DecisionTreeClassificationExample")\
.getOrCreate()
# $example on$
# Load the data stored in LIBSVM format as a DataFrame.
data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer =\
VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
# Chain indexers and tree in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g " % (1.0 - accuracy))
treeModel = model.stages[2]
# summary only
print(treeModel)
# $example off$
spark.stop()
# (1) Spark dataframe和pandas的dataframe有哪些细微不同?
# (2) 切换其他数据源数据格式文件?例如csv
# http://cherishlc.iteye.com/blog/2384865
# (3) 尝试换其他模型构建数据分析流水线? | [
"253848296@qq.com"
] | 253848296@qq.com |
9adadf89af9b4b4b688e2c2d130ed932a2e138c7 | 01e9f22a9d4dcbe1ffdb5f3c7f854a190c3918a9 | /pose/pose_visualizer.py | 31e3dee0b11b221456cba854b0f65bd046b51d75 | [] | no_license | TheK2NumberOne/Action-recognition-using-pose-estimation-2D-openpose | 69eaf126b8363a25f6fcffea1d56c3e20b8bb5cc | 341a6b398a3cc41a037487b696a3290ee2bfc1b5 | refs/heads/master | 2022-11-17T13:34:18.171214 | 2020-07-18T05:55:00 | 2020-07-18T05:55:00 | 280,106,041 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,041 | py | import numpy as np
import cv2
from pose.format_coco import Points, mapIdx, POSE_PAIRS
def getValidPairs(image, output, detected_keypoints, keypoints_list):
# The function is used to determine the connection between joints A and B.
valid_pairs = []
invalid_pairs = []
n_interp_samples = 10
paf_score_threshold = 0.1
conf_th = 0.7
for idx in range(len(mapIdx)):
pafA = output[0, mapIdx[idx][0], :, :]
pafB = output[0, mapIdx[idx][1], :, :]
pafA = cv2.resize(pafA, (image.shape[1], image.shape[0]))
pafB = cv2.resize(pafB, (image.shape[1], image.shape[0]))
candA = detected_keypoints[POSE_PAIRS[idx][0]]
candB = detected_keypoints[POSE_PAIRS[idx][1]]
n_A = len(candA)
n_B = len(candB)
if n_A != 0 and n_B != 0:
valid_pair = np.zeros((0,3))
for i in range(n_A):
j_ok = -1
avg_max_score = -1
found = 0
for j in range(n_B):
unitVec_ij = np.subtract(candB[j][:2], candA[i][:2])
norm = np.linalg.norm(unitVec_ij)
if norm:
unitVec_ij = unitVec_ij / norm
else:
continue
interpolate_p = list(zip(np.linspace(candA[i][0], candB[j][0], num = n_interp_samples),
np.linspace(candA[i][1], candB[j][1], num = n_interp_samples)))
PAF_interp = []
for k in range(len(interpolate_p)):
PAF_interp.append([ pafA[int(round(interpolate_p[k][1])), int(round(interpolate_p[k][0]))],
pafB[int(round(interpolate_p[k][1])), int(round(interpolate_p[k][0]))] ])
paf_scores = np.dot(PAF_interp, unitVec_ij)
avg_paf_score = sum(paf_scores) / len(paf_scores)
if (len(np.where(paf_scores > paf_score_threshold)[0]) / n_interp_samples) > conf_th:
if avg_paf_score > avg_max_score:
j_ok = j
avg_max_score = avg_paf_score
found = 1
if found:
valid_pair = np.append(valid_pair, [[candA[i][3], candB[j_ok][3], avg_max_score]], axis=0)
valid_pairs.append(valid_pair)
else:
invalid_pairs.append(idx)
valid_pairs.append([])
return valid_pairs, invalid_pairs
def getPersonwiseKeypoints(valid_pairs, invalid_pairs, keypoints_list):
# the last number in each row is the overall score
personwiseKeypoints = -1 * np.ones((0, 19))
for k in range(len(mapIdx)):
if k not in invalid_pairs:
partAs = valid_pairs[k][:,0]
partBs = valid_pairs[k][:,1]
indexA, indexB = np.array(POSE_PAIRS[k])
for i in range(len(valid_pairs[k])):
found = 0
person_idx = -1
for j in range(len(personwiseKeypoints)):
if personwiseKeypoints[j][indexA] == partAs[i]:
person_idx = j
found = 1
break
if found:
personwiseKeypoints[person_idx][indexB] = partBs[i]
personwiseKeypoints[person_idx][-1] += keypoints_list[partBs[i].astype(int), 2] + valid_pairs[k][i][2]
# if find no partA in the subset, create a new subset
elif not found and k < 17:
row = -1 * np.ones(19)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
# add the keypoint_scores for the two keypoints and the paf_score
row[-1] = sum(keypoints_list[valid_pairs[k][i,:2].astype(int), 2]) + valid_pairs[k][i][2]
personwiseKeypoints = np.vstack([personwiseKeypoints, row])
return personwiseKeypoints
def visualizer(image, detected_keypoints, keypoints_list, output):
#draw pose estimation
vali, invali = getValidPairs(image, output, detected_keypoints, keypoints_list)
personWiseKeyPoints = getPersonwiseKeypoints(vali, invali, keypoints_list)
show = image.copy()
for i in range(Points):
for j in range(len(detected_keypoints[i])):
cv2.circle(show, detected_keypoints[i][j][0:2], 3, [0,0,255], -1, cv2.LINE_AA)
#return show
for i in range(17):
for n in range(len(personWiseKeyPoints)):
index = personWiseKeyPoints[n][np.array(POSE_PAIRS[i])]
if -1 in index:
continue
B = np.int32(keypoints_list[index.astype(int), 0])
A = np.int32(keypoints_list[index.astype(int), 1])
cv2.line(show, (B[0], A[0]), (B[1], A[1]), [255,0,0], 3, cv2.LINE_AA)
return show
| [
"quanghuyonline08@gmail.com"
] | quanghuyonline08@gmail.com |
1b826dff5250dd586491f1aaacd474401b2043b1 | 0427a50ec60b6ca664288738cb5b9211f595e5c2 | /meiduo/apps/verifications/tests.py | 2d955a35a2e9fe5aec5efe93b5cc5f86a496b135 | [] | no_license | lcp964/meiduo | 6862f25df9c05ea9f970cacbd30673f612f7c103 | e13c076aabf95dec8de04b13e70d6094157cad4d | refs/heads/master | 2023-07-03T23:01:03.380090 | 2021-08-02T14:42:28 | 2021-08-02T14:42:28 | 391,984,463 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | from django.test import TestCase
import random
# Create your tests here.
| [
"2441497185@qq.com"
] | 2441497185@qq.com |
b89f856a4efbd1215ba554a3547b2d5f64a60502 | 1e177ebdcb470f738c058606ac0f86a36085f661 | /Pico/MicroPython/mqtt/mqttPub01_main.py | 60f96980393a5b6b04d87afbd41113c2b7db4245 | [] | no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | import time
import network
from machine import Pin
from umqttsimple import MQTTClient
ssid = 'MakerSpaceTest'
password = 'P@55w0rd'
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlan.connect(ssid, password)
rp2.country('CA')
led = machine.Pin("LED", machine.Pin.OUT, value=0)
# Wait for connect or fail
max_wait = 10
while max_wait > 0:
if wlan.status() < 0 or wlan.status() >= 3:
break
max_wait -= 1
print('waiting for connection...')
time.sleep(1)
# Handle connection error
if wlan.status() != 3:
raise RuntimeError('network connection failed')
else:
print('connected')
status = wlan.ifconfig()
print( 'ip = ' + status[0] )
print(wlan.ifconfig())
led.toggle()
#mqtt config
mqtt_server = '192.168.204.1'
client_id = 'Pico03'
#user_t = 'pico'
#password_t = 'picopassword'
topic_pub = 'Garden/Pump1'
last_message = 0
message_interval = 5
#MQTT connect
def mqtt_connect():
# client = MQTTClient(client_id, mqtt_server, user=user_t, password=password_t, keepalive=60)
client = MQTTClient(client_id, mqtt_server, keepalive=60)
client.connect()
print('Connected to %s MQTT Broker'%(mqtt_server))
return client
#reconnect & reset
def reconnect():
print('Failed to connected to MQTT Broker. Reconnecting...')
time.sleep(5)
machine.reset()
while True:
counter = 3
try:
client = mqtt_connect()
except OSError as e:
reconnect()
while counter > 0:
try:
client.publish(topic_pub, msg='0')
print('published 0')
time.sleep(5)
client.publish(topic_pub, msg='1')
print('published 1')
time.sleep(5)
except:
reconnect()
pass
print('Printed first set')
try:
client.publish(topic_pub, msg='0')
print('published 0')
time.sleep(5)
client.publish(topic_pub, msg='1')
print('published 1')
time.sleep(5)
except:
reconnect()
pass
print('Printed second set')
print('Counter decremented')
counter -=1
client.disconnect() | [
"robin.greig@calalta.com"
] | robin.greig@calalta.com |
d4da4f399743a1bbcccc23dce4f21f4f9e0fbd9d | 4ac687bc28b9f5cf7f822e9d4c0db8b46fe363b3 | /30_day_leetcoding_challenge/2020_08/06-Find_All_Duplicates_in_an_Array.py | 72bd02e1bfc9c119c422a9d3b17b9e73c1be9add | [
"MIT"
] | permissive | QuenLo/LeetCode-share | b1e75e02e1dfe85be44ddb0ae1f4345353b0b569 | ce861103949510dc54fd5cb336bd992c40748de2 | refs/heads/master | 2021-12-23T11:23:09.111711 | 2021-11-15T18:54:46 | 2021-11-15T18:54:46 | 131,681,273 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | class Solution:
def findDuplicates(self, nums: List[int]) -> List[int]:
ans = []
for num in nums:
if( nums[abs(num)-1] < 0 ):
ans.append(abs(num))
else:
nums[abs(num)-1] *= -1
return ans
| [
"noreply@github.com"
] | noreply@github.com |
389fb95b2509687f5d976c6f9564d0a80ebef0a1 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTrafficMarkingPolicyAttributeRequest.py | 743e5f15ad08d14e8d8f3b0fa5e14fc7e66e1659 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 7,712 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class UpdateTrafficMarkingPolicyAttributeRequest(RpcRequest):
    """Request for the CBN ``UpdateTrafficMarkingPolicyAttribute`` API (2017-09-12).

    Each ``set_*`` method records a query parameter on the request. The two
    repeated TrafficMatchRules lists are flattened into the 1-based indexed
    parameter names the endpoint expects; that flattening was previously
    duplicated verbatim for the Add/Delete setters and is now shared by
    :meth:`_add_traffic_match_rules`.
    """

    # Per-rule sub-fields, in the exact order the original code emitted them.
    # The boolean marks list-valued fields, which get a second index level.
    _RULE_FIELDS = (
        ('DstPortRange', True),
        ('MatchDscp', False),
        ('Protocol', False),
        ('TrafficMatchRuleDescription', False),
        ('SrcPortRange', True),
        ('DstCidr', False),
        ('TrafficMatchRuleName', False),
        ('SrcCidr', False),
    )

    def __init__(self):
        RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'UpdateTrafficMarkingPolicyAttribute')
        self.set_method('POST')
        # Endpoint resolution data is optional on older core versions.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def _add_traffic_match_rules(self, prefix, rules):
        """Flatten rule dicts into ``<prefix>.<i>.<Field>[.<j>]`` query params.

        ``None``-valued sub-fields are skipped, matching the original
        per-field ``is not None`` guards.
        """
        for rule_index, rule in enumerate(rules, start=1):
            base = '%s.%d.' % (prefix, rule_index)
            for field, is_list in self._RULE_FIELDS:
                value = rule.get(field)
                if value is None:
                    continue
                if is_list:
                    for item_index, item in enumerate(value, start=1):
                        self.add_query_param('%s%s.%d' % (base, field, item_index), item)
                else:
                    self.add_query_param(base + field, value)

    def get_ResourceOwnerId(self):  # Long
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):  # Long
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ClientToken(self):  # String
        return self.get_query_params().get('ClientToken')

    def set_ClientToken(self, ClientToken):  # String
        self.add_query_param('ClientToken', ClientToken)

    def get_AddTrafficMatchRuless(self):  # RepeatList
        return self.get_query_params().get('AddTrafficMatchRules')

    def set_AddTrafficMatchRuless(self, AddTrafficMatchRules):  # RepeatList
        self._add_traffic_match_rules('AddTrafficMatchRules', AddTrafficMatchRules)

    def get_TrafficMarkingPolicyDescription(self):  # String
        return self.get_query_params().get('TrafficMarkingPolicyDescription')

    def set_TrafficMarkingPolicyDescription(self, TrafficMarkingPolicyDescription):  # String
        self.add_query_param('TrafficMarkingPolicyDescription', TrafficMarkingPolicyDescription)

    def get_TrafficMarkingPolicyId(self):  # String
        return self.get_query_params().get('TrafficMarkingPolicyId')

    def set_TrafficMarkingPolicyId(self, TrafficMarkingPolicyId):  # String
        self.add_query_param('TrafficMarkingPolicyId', TrafficMarkingPolicyId)

    def get_TrafficMarkingPolicyName(self):  # String
        return self.get_query_params().get('TrafficMarkingPolicyName')

    def set_TrafficMarkingPolicyName(self, TrafficMarkingPolicyName):  # String
        self.add_query_param('TrafficMarkingPolicyName', TrafficMarkingPolicyName)

    def get_DryRun(self):  # Boolean
        return self.get_query_params().get('DryRun')

    def set_DryRun(self, DryRun):  # Boolean
        self.add_query_param('DryRun', DryRun)

    def get_ResourceOwnerAccount(self):  # String
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):  # String
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):  # String
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):  # String
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):  # Long
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):  # Long
        self.add_query_param('OwnerId', OwnerId)

    def get_DeleteTrafficMatchRuless(self):  # RepeatList
        return self.get_query_params().get('DeleteTrafficMatchRules')

    def set_DeleteTrafficMatchRuless(self, DeleteTrafficMatchRules):  # RepeatList
        self._add_traffic_match_rules('DeleteTrafficMatchRules', DeleteTrafficMatchRules)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
2da0aa36e7be889a32196c3d06867c36c614e741 | 246fb3d3163411f8d2f23f0c58277e183a9aa04b | /StockAdmin2/core/restapi/updater.py | 5648083f9fc8de8493a0c08f8977c09f967d0f31 | [] | no_license | zwolf21/StockAdmin2 | ed5adb10cb94f688ce0ec9c18291f8d0eae79a33 | 430189bd8ea3820c00cf77e7ed741745f1ed74ca | refs/heads/master | 2022-12-12T03:53:07.101298 | 2017-12-26T04:49:27 | 2017-12-26T04:49:27 | 81,782,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,914 | py | from decimal import Decimal
from listorm import Listorm
from .dgamt_service import DGamtService
# Field-supervision tables: map a model field to how a detected change is
# handled — 'update' modifies the existing record, 'create' makes a new one.
update_supervise_fields = {
    'edi_code':'update', 'pro_type':'update', 'pay_type':'update',
    'price':'create'
}
product_supervise_fields = {
    'edi_code': 'update'
}
buyinfo_supervise_field = {
    'pro_type': 'update', 'pay_type': 'update',
    'price':'create', 'buy_edi_code': 'create'
}
# update: on a change, only modify the existing record; create: on a change, make a new record
UPDATE_METHOD = {
    'product': {
        'update': [
            'edi_code', 'unit', 'company', 'unit_amount', 'apply_root', 'op_type'
        ],
    },
    'buyinfo': {
        'create': ['buy_edi_code', 'price'],
        'update': ['pro_type', 'pay_type', 'date']
    }
}
def get_newest_record(edi_code, recursive_try=5):
    """Follow the EDI-code succession chain and return the newest API record.

    Queries the DGamt service for *edi_code*; when the record points to a
    successor code (``edi_code_after``), recurses on it, up to
    *recursive_try* hops.

    NOTE(review): when the recursion budget is exhausted this returns the
    bare *edi_code* string rather than a record, and a multi-row API answer
    falls through to an implicit ``None`` — confirm callers handle both.
    """
    if recursive_try == 0:
        return edi_code
    if not edi_code:
        return
    api = DGamtService()
    api_lst = api.getDgamtList(mdsCd=edi_code)
    if len(api_lst) == 1:
        record = api_lst.first
        if record.edi_code_after:
            # The code was superseded; chase the successor with one less hop.
            return get_newest_record(
                record.edi_code_after,
                recursive_try=recursive_try-1
            )
        return record
def get_fieldset_for_update(instance, new_record, update_methods=None):
    """Diff *instance* against *new_record* and split changed fields by method.

    The supervision table is selected by the instance's lower-cased class
    name. Fields whose string value differs (and whose new value is
    non-empty) land in the ``updates`` dict when the method is 'update',
    otherwise in ``creates``.

    Returns:
        tuple[dict, dict]: ``(creates, updates)`` of field -> new string value.
    """
    if update_methods is None:
        # Resolved lazily so the module-level default can be overridden.
        update_methods = UPDATE_METHOD
    instance_name = instance.__class__.__name__.lower()
    update_context = update_methods.get(instance_name, {})
    creates, updates = {}, {}
    for method, fields in update_context.items():
        for field in fields:
            old_value = str(getattr(instance, field) or '')
            new_value = str(getattr(new_record, field) or '')
            if not new_value:
                # An empty/absent new value never counts as a change.
                continue
            if old_value != new_value:
                if method == 'update':
                    updates[field] = new_value
                else:
                    # BUG FIX: previously wrote to the undefined name
                    # ``create`` and raised NameError on any 'create' change.
                    creates[field] = new_value
    return creates, updates
def record_isvalid(record):
    """A record is valid when it carries a real, non-zero price.

    Rejects missing prices as well as the literal zero values ``0`` and
    ``'0'`` and the empty string.
    """
    return record.get('price') not in (0, '0', '', None)
def smart_update(product, update_methods=UPDATE_METHOD):
    """Synchronise a product (and its buy-info rows) with the newest API record.

    NOTE(review): assumes ``product`` is a Django model instance with a
    related ``buyinfo_set`` manager and that ``new_record`` is a listorm row
    supporting ``get``/attribute access and ``select`` — confirm against the
    models and listorm docs.
    """
    new_record = get_newest_record(product.edi_code)
    if not new_record:
        return
    # If the EDI code was superseded, persist the new code immediately.
    new_edi_code = new_record.get('edi_code')
    if new_edi_code != product.edi_code:
        product.edi_code = new_edi_code
        product.save()
    product_creates, product_updates = get_fieldset_for_update(product, new_record)
    # Bulk-update changed product fields (bypasses save() signals).
    product.__class__.objects.filter(pk=product.id).update(**product_updates)
    buyinfo_set = product.buyinfo_set.filter(buy_edi_code=new_edi_code, active=True)
    new_price = Decimal(new_record.price or 0)
    # Reuse the market of the most recent buy-info row, if any exists.
    if product.buyinfo_set.exists():
        market = product.buyinfo_set.last().market
    else:
        market = None
    buyinfo_create_fields = update_methods.get('buyinfo', {}).get('create', [])
    buyinfo_update_fields = update_methods.get('buyinfo', {}).get('update', [])
    buyinfo_create_kwargs = new_record.select(*(buyinfo_create_fields+buyinfo_update_fields), values=False)
    buyinfo_update_kwargs = new_record.select(*buyinfo_update_fields, values=False)
    buyinfo_create_kwargs['product'] = product
    buyinfo_update_kwargs['product'] = product
    if not buyinfo_set.exists():
        # No active row for this code yet: create one, defaulting the price
        # to 0 when the API reported none.
        if not new_price:
            print(new_price)  # NOTE(review): stray debug print — consider removing
            buyinfo_create_kwargs['price'] = 0
        buyinfo_set.create(**buyinfo_create_kwargs)
    else:
        buyinfo_create_kwargs['market'] = market
        buyinfo_update_kwargs['market'] = market
        if new_price:
            # A changed price creates a new row; an unchanged one updates in place.
            buyinfo_set = buyinfo_set.filter(price=new_price)
            if not buyinfo_set.exists():
                buyinfo_set.create(**buyinfo_create_kwargs)
            else:
                buyinfo_set.update(**buyinfo_update_kwargs)
        else:
            # API reported no price: keep the stored price, update the rest.
            # NOTE(review): 'price' is not among the default update fields —
            # confirm select() supplies it, otherwise this pop raises KeyError.
            buyinfo_update_kwargs.pop('price')
            buyinfo_set.update(**buyinfo_update_kwargs)
| [
"pbr112@naver.com"
] | pbr112@naver.com |
5beeeeee1729033d70dc9bdd7e2aa08ca6ecb3b2 | 246afde69972b2ddabf34004f48f6a9ea2870c1a | /xadminStudy/wsgi.py | a137fbc8249362399b2a0f98196392f86eb77cec | [] | no_license | jayxqt/xadminStudy | dc3ee635408eae08645334664328e18c5ff9622f | 50d0235ca723c73ce921362a226deb7607012d0f | refs/heads/master | 2022-12-07T06:25:29.531267 | 2020-08-31T07:59:52 | 2020-08-31T07:59:52 | 291,652,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for xadminStudy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xadminStudy.settings')

# Module-level WSGI callable that servers (gunicorn/uWSGI/mod_wsgi) look up.
application = get_wsgi_application()
| [
"quantong_xu@163.com"
] | quantong_xu@163.com |
5945ac73322c07df601001ad78d4c9d7fa2bc303 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2020_03_04_20_49_28_001210.py | 736fec0c051e1192ca98e7fa7fd600af6d7e2eff | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import math.
def calcula_gausiana(x,mi,sigma):
parte1 = 1/(sigma*(2*math.pi)**(1/2))
parte2 = math.exp(-0.5*((x-mi)/sigma)**2)
return(parte1*parte2)
| [
"you@example.com"
] | you@example.com |
9be134a4a9373859f990d5d3743f9cfb575d68f9 | 60f588e7420b062021d78ae3ef4626d4ce472456 | /이것이코딩테스트다_나동빈/ch10graph_set.py | 91a7e4283a3869670b5f40e710e1e83a2054ec61 | [] | no_license | DojinPark/algorithms | 3ac580117115a74906ca78a202e81853a0c7b5e8 | 35c01064778d9109368b33aab90ba373e1e5458a | refs/heads/main | 2023-04-27T23:13:00.903287 | 2021-05-07T09:52:28 | 2021-05-07T09:52:28 | 313,219,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,696 | py | # 서로소 집합을 이용한 트리 표현
# Disjoint-set (union-find) demo data: V nodes and E union operations,
# each operation given as an "a b" edge string.
V, E = 6, 4
parent_str = [
    '1 4',
    '2 3',
    '2 4',
    '5 6'
]
# Set each element ends up in: 1 1 1 1 5 5
# Parent table:                1 1 2 1 5 5
# Naive root lookup: walk parent pointers until a self-parented node is
# reached. Worst case O(V) per query — inefficient; see the improved
# find_root (path compression) defined later.
def find_parent(parent, x):
    while parent[x] != x:
        x = parent[x]
    return x
# Merge the sets containing a and b: find each node's root, then make the
# smaller-numbered root the parent ("if a < b then a is the parent").
def union_parent(parent, a, b):
    # Inline root lookups (equivalent to find_parent, which has no side effects).
    while parent[a] != a:
        a = parent[a]
    while parent[b] != b:
        b = parent[b]
    if a < b:
        parent[b] = a
    else:
        parent[a] = b
# Initialise every node as its own parent (each node starts as a root).
parent = [i for i in range(V+1)]

# Apply the unions read from the input; afterwards, any node that is still
# its own parent is a root node.
for i in range(E):
    a, b = map(int, parent_str[i].split())
    union_parent(parent, a, b)

print('서로소 집합 알고리즘 1')
print('각 원소가 속한 집합')
for i in range(1, V + 1):
    print(i, end=' ')
print()
for i in range(1, V + 1):
    # Representative (root) of each element's set.
    print(find_parent(parent, i), end=' ')
print()

print('부모 테이블')
for i in range(1, V + 1):
    print(i, end=' ')
print()
for i in range(1, V + 1):
    # Direct parent pointer — not necessarily the root.
    print(parent[i], end=' ')
print()
# "경로 압축 기법"을 적용한 find_parent 함수
#
# 참고: 직계 부모 노드가 아닌 루트 노드를 찾는 함수임
# 원리: 재귀 함수 호출의 끝에서 찾은 루트 노드를 반환받아 부모 리스트에 저장한다. 부모 리스트의 모든 엔트리는 루트 노드만을 저장하게 된다.
def find_root(root, x):
# if root[x] == x: # NoneType 이 반환되는 경우가 발생한다. root[x] != x 일때 반환값이 없기 때문!
# return root[x] # 파이썬 함수 작성시 NoneType 이 반횐되는 경우 모든 케이스에 대하여 반환값을 지정해두었는지 확인하자.
# root[x] = find_root(root, root[x])
if root[x] != x:
root[x] = find_root(root, root[x])
return root[x]
def union_root(root, a, b):
    """Union with path-compressing finds: attach the higher-numbered root
    beneath the lower-numbered one."""
    root_a = find_root(root, a)
    root_b = find_root(root, b)
    if root_a < root_b:
        root[root_b] = root_a
    else:
        root[root_a] = root_b
# Same demo, now using the path-compressing find_root/union_root pair.
root = [i for i in range(V+1)]
for i in range(E):
    a, b = map(int, parent_str[i].split())
    union_root(root, a, b)

print('\n서로소 집합 알고리즘 2')
print('각 원소가 속한 집합')
for i in range(1, V + 1):
    print(i, end=' ')
print()
for i in range(1, V + 1):
    print(find_root(root, i), end=' ')
print()
print('부모 테이블')
for i in range(1, V + 1):
    print(i, end=' ')
print()
for i in range(1, V + 1):
    # After compression the table stores roots directly for visited nodes.
    print(root[i], end=' ')
print()
# Cycle detection with a disjoint set: before connecting two nodes, compare
# their roots — if they already share a root, adding the edge closes a cycle.
# ! Only applicable to graphs with undirected edges.
V, E = 3, 3
parent_str = [
    '1 2',
    '1 3',
    '2 3'
]
parent = [i for i in range(V+1)]
print('\n사이클 검출 알고리즘')
for i in range(E):
    a, b = map(int, parent_str[i].split())
    # With the disjoint-set representation, leaf-to-leaf connectivity is only
    # known from the input stream itself, so cycles must be detected while
    # the edges are being read.
    if find_parent(parent, a) == find_parent(parent, b):
        print('Detected a cycle containing node', a, 'and', b)
    union_parent(parent, a, b)
"74354737+DojinPark@users.noreply.github.com"
] | 74354737+DojinPark@users.noreply.github.com |
eaaf9937a3853ee4f5e92ba894c9455bac2f13f6 | d2c4151eff768af64946ababc2e41c13d8973cd3 | /ABC133/c.py | a99b470c2376c8f63bc104e312e3e27c9cd418aa | [] | no_license | Intel-out-side/AtCoder | 2de19b71981247135432aed2d6d9c2a16c3ab7f0 | 0c419d2df15fff02032432cb1b1323612484e16e | refs/heads/master | 2022-06-23T04:21:12.886072 | 2022-06-13T14:39:07 | 2022-06-13T14:39:07 | 235,240,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | L, R = map(int, input().split())
ans = 2020
if R - L < 2019:
for i in range(L, R+1):
for j in range(i+1, R+1):
ans = min((i*j)%2019, ans)
else:
for i in range(L, L+2019):
for j in range(i+1, L+2019):
ans = min((i*j)%2019, ans)
print(ans)
| [
"so.eng.eng.1rou@gmail.com"
] | so.eng.eng.1rou@gmail.com |
145f6d7644ac5106b240e5fafa7a8934c404aaf6 | d3d9c0a5146ecf7a1dd8fa9948641217774047db | /HLAFeatureLibrary/Training/GATE.py | a31dfbb911a3551501fdc021f39a4fa6ce8c1c9b | [
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-us-govt-public-domain",
"Apache-2.0",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | fulQuan/NLPWorkbench | 81e8f3f40dc5c1b89f3b64d9296f15e3782e7ce1 | c3fadef1bd6345e01a58afef051491d8ef6a7f93 | refs/heads/master | 2020-05-03T02:14:26.713492 | 2018-12-12T15:42:01 | 2018-12-12T15:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,783 | py | import sys
import json
import glob
from Training.Client import ServiceClient
import sys
sys.path.append('..')
from PostTokenizer_GATE import PostTokenizer
from PostSentenceSplitter_GATE import PostSentenceSplitter
from FeatureExtractor_GATE import FeatureExtractor
import time
from Training.merge_bio import merge_bio
from Training.CRFRunner import CRFRunner
from Training.BIOtoANN import BIOtoANN
from Training.ANNtoLIF import ANNtoLIF
def text_to_lif(text):
    """Wrap raw text in a LIF envelope via the LAPPS Grid wrapper service (network call)."""
    lif_wrapper = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:wrap.lif_1.0.0')
    lif_result = lif_wrapper.execute(text)
    return lif_result
def tokenizer(lif):
    """Tokenise a LIF document with the remote GATE tokenizer service (network call)."""
    gate_tokenizer = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:gate.tokenizer_2.3.0')
    tokenier_lif = gate_tokenizer.execute(lif)
    return tokenier_lif
def gate_to_lif(gate):
    """Convert a GATE/XML document to LIF JSON via the remote converter (network call)."""
    lif_converter = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:convert.gate2json_2.1.0')
    lif_string = lif_converter.execute(gate)
    return lif_string
def lif_to_gate(lif):
    """Convert a LIF JSON document to GATE/XML via the remote converter (network call)."""
    gate_converter = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:convert.json2gate_2.1.0')
    gate_string = gate_converter.execute(lif)
    return gate_string
def post_tokenizer(lif, ann):
    """Merge tags from annotation file *ann* into a tokenised LIF document.

    Returns the adjusted document serialised as a LIF JSON string.
    """
    post_tokenizer = PostTokenizer(ann_filename=ann, lif_string=lif)
    post_tokenizer.load_ann()
    post_tokenizer.extract_tag()
    post_tokenizer_lif = json.dumps(post_tokenizer.lif_loader.data)
    return post_tokenizer_lif
def sentence_splitter(lif):
    """Split sentences with the remote GATE sentence splitter (network call)."""
    gate_sentence_splitter = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:gate.splitter_2.3.0')
    sentence_lif = gate_sentence_splitter.execute(lif)
    return sentence_lif
def post_sentence_splitter(lif):
    """Post-process GATE sentence splits; returns the document as LIF JSON."""
    post_sentence_splitter = PostSentenceSplitter(lif_string=lif)
    post_sentence_splitter.parse_sentence()
    post_sentence_splitter_lif = json.dumps(post_sentence_splitter.parser.data)
    return post_sentence_splitter_lif
def pos_tagger(lif):
    """Add part-of-speech tags with the remote GATE tagger (network call)."""
    gate_pos = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:gate.tagger_2.3.0')
    pos_lif = gate_pos.execute(lif)
    return pos_lif
def noun_chunker(gate):
    """Mark noun-phrase chunks with the remote GATE NP chunker (network call)."""
    gate_noun = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:gate.npchunker_2.3.0')
    noun_gate = gate_noun.execute(gate)
    return noun_gate
def verb_chunker(gate):
    """Mark verb-phrase chunks with the remote GATE VP chunker (network call)."""
    gate_verb = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:gate.vpchunker_2.3.0')
    verb_gate = gate_verb.execute(gate)
    return verb_gate
def feature_extractor(lif, output_filename, left_info, right_info, token_type=False,
                      pos_tag=False, token_length=False, noun_chunker=False, orthography=False,
                      select_number=0, features=None, prev_features=None, next_features=None,
                      meta_map=False, time_feature=False):
    """Run the configured feature extractors over a LIF document and write a .bio file.

    Boolean flags toggle individual extractors; ``left_info``/``right_info``
    control the neighbour-context window width. ``features``,
    ``prev_features`` and ``next_features`` default to empty lists.

    NOTE(review): ``token_length`` is accepted but currently unused —
    kept for interface compatibility.
    """
    # BUG FIX: the list parameters previously used mutable defaults ([]),
    # which are shared across calls; use None-sentinels and fresh lists.
    features = [] if features is None else features
    prev_features = [] if prev_features is None else prev_features
    next_features = [] if next_features is None else next_features
    extractor = FeatureExtractor(lif_string=lif)
    extractor.extract_tokens(time_feature)
    extractor.filter_tokens()
    if pos_tag:
        extractor.extract_pos()
    if token_type:
        extractor.extract_type()
    if orthography:
        extractor.extract_orth()
    if noun_chunker:
        extractor.extract_chunk()
    if meta_map:
        extractor.extract_code_chunk()
        extractor.extract_code_section()
        extractor.extract_code_verb()
        extractor.extract_snomedct()
    # Neighbour context only when at least one side of the window is non-zero.
    if int(left_info) != 0 or int(right_info) != 0:
        extractor.extract_neighbor(int(left_info), int(right_info), select_number, prev_features, next_features)
    extractor.write_bio(output_filename, features, int(left_info), int(right_info), prev_features, next_features)
def workflow_run(ann_filename):
    """Run the full GATE NLP pipeline for one annotation file.

    Loads the raw text from the .ann JSON, then chains the remote services:
    tokenise -> merge ann tags -> sentence split -> POS tag -> NP/VP chunk,
    converting between LIF and GATE formats between stages, and finally
    writes the extracted features to a .bio file named after the file number.
    """
    file_num = str(ann_filename).split('/')[2].split('.')[0]
    print("Processing the file number ")
    print(file_num)
    # BUG FIX: the file handle was opened without ever being closed;
    # use a context manager so it is released promptly.
    with open(ann_filename) as ann_file:
        ann_data = json.load(ann_file)
    input_text = ann_data["__text"]
    lif_result = text_to_lif(input_text)
    tokenizer_gate = tokenizer(lif_result)
    tokenizer_lif = gate_to_lif(tokenizer_gate)
    post_tokenizer_lif = post_tokenizer(tokenizer_lif, ann_filename)
    post_tokenizer_gate = lif_to_gate(post_tokenizer_lif)
    sentence_gate = sentence_splitter(post_tokenizer_gate)
    sentence_lif = gate_to_lif(sentence_gate)
    post_sentence_lif = post_sentence_splitter(sentence_lif)
    post_sentence_gate = lif_to_gate(post_sentence_lif)
    pos_gate = pos_tagger(post_sentence_gate)
    noun_gate = noun_chunker(pos_gate)
    verb_gate = verb_chunker(noun_gate)
    result_lif = gate_to_lif(verb_gate)
    # Context window of 2 tokens on each side.
    feature_extractor(result_lif, file_num, 2, 2)
def run_batch(ann_files):
    """Run the GATE workflow over every annotation file matching glob *ann_files*.

    Failures are reported per file and do not abort the batch.
    """
    for ann_filename in glob.glob(ann_files):
        time.sleep(5)  # throttle to avoid hammering the remote LAPPS services
        try:
            # Normalise Windows path separators before deriving the file number.
            workflow_run(ann_filename.replace('\\', '/'))
        except Exception as exc:
            # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit still abort the batch; also report what went wrong.
            print("Exception occurs for the file ", ann_filename, exc)
if __name__ == "__main__":
start_time = time.time()
ann_folder = 'input/CDC_ann/Batch_2_*.ann'
txt_folder = 'input/CDC_txt/Batch_2_*.txt'
run_batch(ann_folder)
bio_folder = 'output/bio/gate/train/Batch_2_*.bio'
train_bio = 'output/bio/gate/gate_train_2.bio'
train_files = glob.glob(bio_folder)
merge_bio(train_files,train_bio)
finish_time = time.time()
print("Finish Processing all files! --- %s seconds ---" % (finish_time - start_time))
start_train = time.time()
model_file = 'output/bio/gate/gate_model_2'
template_file = 'output/bio/gate/template'
crf_runner = CRFRunner(train_bio, bio_folder, model_file=model_file, template_file=template_file, source='gate')
crf_runner.crf_train()
crf_runner.crf_test()
print("Finish Train CRF! --- %s seconds ---" % (time.time() - start_train))
start_eval = time.time()
tagged_bio_folder = 'output/bio/gate/tagged/Batch_2_*.bio'
tagged_bio_files = glob.glob(tagged_bio_folder)
tagged_bio = 'output/bio/gate/gate_tagged_2.bio'
merge_bio(tagged_bio_files, tagged_bio)
for bio_filename in tagged_bio_files:
bio_filename = bio_filename.replace('\\', '/')
print(bio_filename)
ann_converter = BIOtoANN(bio_filename, source='gate')
ann_converter.extract_bio_tags()
ann_converter.update_ann()
ann_converter.append_header()
tagged_ann_folder = "output/bio/gate/ann/Batch_2_*.ann"
tagged_ann_files = glob.glob(tagged_ann_folder)
for ann_filename in tagged_ann_files:
ann_filename = ann_filename.replace('\\', '/')
print(ann_filename)
lif_converter = ANNtoLIF(ann_filename, source='gate')
lif_converter.initialize_lif()
lif_converter.extract_text()
lif_converter.extract_tags()
print("Finish Evaluate all files! --- %s seconds ---" % (time.time() - start_eval)) | [
"mcq1@cdc.gov"
] | mcq1@cdc.gov |
7c9df44293fe56ebc72f5d2528661f8fd10bbe41 | 0c559308e95239e0c0b0134f6287f832e4871106 | /tradingsystem/settings.py | a080c66464c157a670d6e3c374abf442dc49ac4e | [] | no_license | jaehak24/tradingsystem | 76aaaa4583f2b15c5279f8b780bbba91b404bed8 | 05ff330c9a9d74e0097ed413777440dd52319980 | refs/heads/main | 2023-06-04T21:50:44.485514 | 2021-06-16T07:57:49 | 2021-06-16T07:57:49 | 378,058,854 | 0 | 0 | null | 2021-06-18T06:43:33 | 2021-06-18T06:43:32 | null | UTF-8 | Python | false | false | 3,189 | py | """
Django settings for tradingsystem project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to source control —
# load it from an environment variable before deploying.
SECRET_KEY = '!n3m_=-q%clb4#yl)g$od2bbt2!)1o%1_q$^!zv#%w^i%-ejhz'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): '*' accepts any Host header; restrict to real hostnames in production.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Local project apps
    'product',
    'user',
    'order'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'tradingsystem.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'tradingsystem.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Asia/Seoul'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
# NOTE(review): os.path.join with an absolute second segment ("/static/")
# discards BASE_DIR, so this reassignment still evaluates to '/static/';
# this was probably meant to set STATIC_ROOT or STATICFILES_DIRS instead.
STATIC_URL = os.path.join(BASE_DIR, "/static/")
"jinsung1048@gmail.com"
] | jinsung1048@gmail.com |
75219a4f87f14e035cef63c5379eb59541d61e5d | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/beta/usersfunctions_beta/azext_usersfunctions_beta/vendored_sdks/usersfunctions/operations/_user_event_exception_occurrence_operations.py | c06d4a7144325a3d3b87aec3a006a36b48fa9fd7 | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,381 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UserEventExceptionOccurrenceOperations(object):
    """UserEventExceptionOccurrenceOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~users_functions.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def delta(
        self,
        user_id,  # type: str
        event_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> List["models.MicrosoftGraphEvent"]
        """Invoke function delta.

        Invoke function delta.

        :param user_id: key: id of user.
        :type user_id: str
        :param event_id: key: id of event.
        :type event_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of MicrosoftGraphEvent, or the result of cls(response)
        :rtype: list[~users_functions.models.MicrosoftGraphEvent]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["models.MicrosoftGraphEvent"]]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL: both path keys are URL-encoded into the OData route.
        url = self.delta.metadata['url']  # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'event-id': self._serialize.url("event_id", event_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        header_parameters['Accept'] = 'application/json'

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # Non-200: surface the service's OData error payload.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[MicrosoftGraphEvent]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    delta.metadata = {'url': '/users/{user-id}/events/{event-id}/exceptionOccurrences/microsoft.graph.delta()'}  # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
056014f491d6a1534d34b7f104da6d056927a150 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/perf/CascadeMaskRCNN_iflytek_for_PyTorch/mmdet/core/bbox/samplers/random_sampler.py | 25da79515772c1ca8589ef97f32f2de2f0dd74c7 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,732 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2023 xxxx
# All rights reserved.
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
neg_pos_up (int, optional): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device, non_blocking=True)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds.int() > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
| [
"zhangjunyi8@huawei.com"
] | zhangjunyi8@huawei.com |
7037f2e38c2e9e53d0e32b2df9d87c9608e83b58 | 0fd5cd82b755f574ef44de61092fc1e982b33a34 | /news/admin.py | e78d90ba5983b5857ca8eaf9f23d212ce440e2e0 | [] | no_license | York0000/project | 592a5b67a05feb7efd3bde852d737af4c5048241 | f3688157e288ad22efdabd9776fea2858f6ccfe6 | refs/heads/master | 2023-05-27T07:26:02.998870 | 2021-06-16T12:03:12 | 2021-06-16T12:03:12 | 377,416,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.contrib import admin
from news.models import NewsModel
@admin.register(NewsModel)
class NewsModelAdmin(admin.ModelAdmin):
search_fields = ['title']
list_display = ['title', 'created_at']
list_filter = ['created_at']
| [
"yorqin_bohodirov20@mail.ru"
] | yorqin_bohodirov20@mail.ru |
2bc5ccacd684f2960e4032e59db73889ca1890a5 | 72aec4b5c8c632b48420c6fb59633318c2453a13 | /venv/Scripts/pip-script.py | 2ab354dcf15c2ca6da0cbf8ed066515897a962e2 | [] | no_license | first-down/python | fba25186d891027e66c9df6d91ea2d919c643aa0 | 44eb91208a25115fffc18771c15add4c10faf644 | refs/heads/master | 2020-04-10T04:38:34.492382 | 2018-12-07T09:24:28 | 2018-12-07T09:24:28 | 160,804,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | #!C:\Users\xcz\Desktop\nnt\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"xcz201807@163.com"
] | xcz201807@163.com |
5ff77af218fe035658aa1dd7c912958e61136bba | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/d52316fcc2f625747c1976913c1383a168b40e02-<latest>-fix.py | 39415573b8121be3ff4ed0d9621f71cfaf9f6cbb | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,434 | py | def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, installroot='/'):
res = {
}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
pkgs = {
}
pkgs['update'] = []
pkgs['install'] = []
updates = {
}
update_all = False
cmd = None
if ('*' in items):
update_all = True
(rc, out, err) = run_check_update(module, yum_basecmd)
if ((rc == 0) and update_all):
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif (rc == 100):
updates = parse_check_update(out)
elif (rc == 1):
res['msg'] = err
res['rc'] = rc
module.fail_json(**res)
if update_all:
cmd = (yum_basecmd + ['update'])
will_update = set(updates.keys())
will_update_from_other_package = dict()
else:
will_update = set()
will_update_from_other_package = dict()
for spec in items:
if spec.startswith('@'):
pkgs['update'].append(spec)
will_update.add(spec)
continue
elif (spec.endswith('.rpm') and ('://' not in spec)):
if (not os.path.exists(spec)):
res['msg'] += ("No RPM file matching '%s' found on system" % spec)
res['results'].append(("No RPM file matching '%s' found on system" % spec))
res['rc'] = 127
module.fail_json(**res)
envra = local_envra(spec)
if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
pkgs['install'].append(spec)
continue
elif ('://' in spec):
package = fetch_rpm_from_url(spec, module=module)
envra = local_envra(package)
if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
pkgs['install'].append(package)
continue
elif is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)
if (not pkglist):
res['msg'] += ("No package matching '%s' found available, installed or updated" % spec)
res['results'].append(("No package matching '%s' found available, installed or updated" % spec))
res['rc'] = 126
module.fail_json(**res)
nothing_to_do = True
for this in pkglist:
if ((spec in pkgs['install']) and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
nothing_to_do = False
break
this_name_only = '-'.join(this.split('-')[:(- 2)])
if ((spec in pkgs['update']) and (this_name_only in updates)):
nothing_to_do = False
will_update.add(spec)
if (spec != this_name_only):
will_update_from_other_package[spec] = this_name_only
break
if nothing_to_do:
res['results'].append(('All packages providing %s are up to date' % spec))
continue
conflicts = transaction_exists(pkglist)
if conflicts:
res['msg'] += ('The following packages have pending transactions: %s' % ', '.join(conflicts))
res['results'].append(('The following packages have pending transactions: %s' % ', '.join(conflicts)))
res['rc'] = 128
module.fail_json(**res)
if module.check_mode:
to_update = []
for w in will_update:
if w.startswith('@'):
to_update.append((w, None))
elif (w not in updates):
other_pkg = will_update_from_other_package[w]
to_update.append((w, ('because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo']))))
else:
to_update.append((w, ('%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))))
res['changes'] = dict(installed=pkgs['install'], updated=to_update)
if (will_update or pkgs['install']):
res['changed'] = True
return res
if cmd:
(rc, out, err) = module.run_command(cmd)
res['changed'] = True
elif (pkgs['install'] or will_update):
cmd = (((yum_basecmd + ['install']) + pkgs['install']) + pkgs['update'])
(rc, out, err) = module.run_command(cmd)
out_lower = out.strip().lower()
if ((not out_lower.endswith('no packages marked for update')) and (not out_lower.endswith('nothing to do'))):
res['changed'] = True
else:
(rc, out, err) = [0, '', '']
res['rc'] = rc
res['msg'] += err
res['results'].append(out)
if rc:
res['failed'] = True
return res | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
f94acf5586e7193717879c808466ef498e331dd6 | ce6cb09c21470d1981f1b459293d353407c8392e | /docs/jnpr_healthbot_swagger/swagger_client/models/rule_schema_flow.py | 756ab1b061ec4978bc4dded218c9a10887e69257 | [
"Apache-2.0"
] | permissive | minefuto/healthbot-py-client | c4be4c9c3153ef64b37e5344bf84154e93e7b521 | bb81452c974456af44299aebf32a73abeda8a943 | refs/heads/master | 2022-12-04T07:47:04.722993 | 2020-05-13T14:04:07 | 2020-05-13T14:04:07 | 290,145,286 | 0 | 0 | Apache-2.0 | 2020-08-25T07:27:54 | 2020-08-25T07:27:53 | null | UTF-8 | Python | false | false | 3,288 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaFlow(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'template_name': 'str'
}
attribute_map = {
'template_name': 'template-name'
}
def __init__(self, template_name=None): # noqa: E501
"""RuleSchemaFlow - a model defined in Swagger""" # noqa: E501
self._template_name = None
self.discriminator = None
self.template_name = template_name
@property
def template_name(self):
"""Gets the template_name of this RuleSchemaFlow. # noqa: E501
:return: The template_name of this RuleSchemaFlow. # noqa: E501
:rtype: str
"""
return self._template_name
@template_name.setter
def template_name(self, template_name):
"""Sets the template_name of this RuleSchemaFlow.
:param template_name: The template_name of this RuleSchemaFlow. # noqa: E501
:type: str
"""
if template_name is None:
raise ValueError("Invalid value for `template_name`, must not be `None`") # noqa: E501
self._template_name = template_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleSchemaFlow, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaFlow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"nitinkr@juniper.net"
] | nitinkr@juniper.net |
4850aba9dfaa17379b6462a01dfee68d23a1c210 | e6c334f4fd993e39abf9ec87aaad31f19b23ab7c | /createhtml.py | 6147c42d42fe49d8e9397937408055dc09cfd870 | [] | no_license | dereklearns/My-Emailer | efff635132d568f1bb443114c7ecb996083146a3 | 003bfcf8c337815f9c419269a9861ceeb1a6787c | refs/heads/master | 2022-09-17T04:37:13.728123 | 2022-08-08T16:05:41 | 2022-08-08T16:05:41 | 55,396,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,489 | py | import derekstudio
import random
def create_html_for_derek(data, date_data):
print date_data
print (len(date_data))
d = {'suffix': data[0], 'parent_first': data[1], 'parent_last': data[2], 'email': data[3], 'instrument': data[4], 'student_name': data[5], 'student_age': data[6], 'additional_info': data[7]}
print d
if int(d['student_age']) < 8:
video_selection = "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + derekstudio.rami.piano_pieces[0].yt_link +">" + "<img src=\"http://www.derekpiano.com/images/rami-polaroid.png\"></a></div>" + "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + derekstudio.zack.piano_pieces[0].yt_link +">" + "<img src=\"http://www.derekpiano.com/images/zack-polaroid.png\"></a></div>" + "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + derekstudio.meredith.piano_pieces[0].yt_link +">" + "<img src=\"http://www.derekpiano.com/images/meredith-polaroid.png\" alt=\"\"></a></div>"
elif int(d['student_age']) >= 8 and int(d['student_age']) <= 16:
video_selection = "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + random.choice(derekstudio.travis.piano_pieces).yt_link +">" + "<img src=\"http://www.derekpiano.com/images/travis-polaroid.png\"></a></div>" + "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + random.choice(derekstudio.bennett.piano_pieces).yt_link +">" + "<img src=\"http://www.derekpiano.com/images/bennett-polaroid.png\"></a></div>" + "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + random.choice(derekstudio.ben.piano_pieces).yt_link +">" + "<img src=\"http://www.derekpiano.com/images/ben-polaroid.png\"></a></div>"
elif int(d['student_age']) >= 17:
# Must be an adult
video_selection = "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + random.choice(derekstudio.tap.piano_pieces).yt_link +">" + "<img src=\"http://www.derekpiano.com/images/tap-polaroid.png\"></a></div>" + "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + random.choice(derekstudio.daniel.piano_pieces).yt_link +">" + "<img src=\"http://www.derekpiano.com/images/daniel-polaroid.png\"></a></div>" + "<div style=\"width:200px; display:inline-block; vertical-align: top; padding-right: 40px; padding-top: 20px;\"><a href=" + random.choice(derekstudio.derek.piano_pieces).yt_link +">" + "<img src=\"http://www.derekpiano.com/images/derek-polaroid.png\"></a></div>"
else:
video_selection = "Error"
d['videomessage'] = video_selection
if d['instrument'] == "None":
instru = "Owning an instrument is a requirement for piano lessons and I will be glad to guide you in finding a suitable instrument for lessons. An acoustic piano is always preferred over a keyboard, but if your living situation does not allow a piano, there are suitable keyboards starting around $500. "
elif d['instrument'] == "Has Keyboard":
instru = "I noticed you already have a keyboard. It is essential your keyboard has 88 keys and basic weighted keys, keyboards that meet these specifications start around $500."
else:
instru = "I am so glad you already have a piano, we don't need to worry about purchasing an instrument!"
d['instrument_message'] = instru
meetfinal = ''
for date, time in date_data:
meetfinal += date + " at " + time + "<br>"
if not meetfinal == '':
d['meeting'] = "<br><br>" + "I have the following times available to meet with you for our interview:<br> %s" % (meetfinal)
else:
d['meeting'] = ''
if len(d['additional_info']) > 5:
d['additional_info'] = '<br><br>' + d['additional_info']
else:
d['additional_info'] = ""
html_msg = """<div style=\"background-color:#dadada\"><center><table
id=\"Table_01\" width=\"800\" height=\"1739\" border=\"0\"
cellpadding=\"0\" cellspacing=\"0\"> <tr> <td colspan=\"3\"> <a
href=\"http://www.derekpiano.com\"><img
src=\"http://www.derekpiano.com/images/email/derek_01.jpg\"
width=\"800\" height=\"116\" alt=\"\"></td> </tr> <tr> <td
colspan=\"3\"> <a href=\"http://www.derekpiano.com\"><img
src=\"http://www.derekpiano.com/images/email/derek_02.jpg\"
width=\"800\" height=\"449\" alt=\"\"></td> </tr> <tr> <td
colspan=\"3\" bgcolor=\"f0f0f0\" height=200 style=\"padding: 20px;\">
<font size=\"5\"><b>Dear {suffix} {parent_first} {parent_last},</b><br><font size=\"4\">My name is
Derek Adam and I am a piano instructor at the Musical Arts Center of
San Antonio. The placement team at MACSA informed me that you are
currently looking for a piano instructor. I have earned two graduate
piano degrees from UTSA and I am currently accepting more students
into my piano studio. I would like to offer you a complimentary
interview/mini-lesson so I can help {student_name} get
started with piano lessons.{additional_info} {meeting} <br><br>Here are
some links to a few of my students performing.<br> {videomessage} <br><br> {instrument_message} <br><br>Would you be available to
meet at any of these times?<br><br>Thanks for considering piano
lessons and I look forward to helping your family get started with
piano lessons. </tr> <tr> <td colspan=\"3\"> <a
href=\"http://www.derekpiano.com/#!About/c1wg9\"><img
src=\"http://www.derekpiano.com/images/email/dereka_4.jpg\"
width=\"800\" height=\"400\" alt=\"\"></td> </tr> <tr> <td
colspan=\"3\"> <a
href=\"https://www.youtube.com/watch?v=7Q90oE6Ng3g\"><img
src=\"http://www.derekpiano.com/images/email/dereka_5.jpg\"
width=\"800\" height=\"500\" alt=\"\"></td> </tr> <tr> <td> <a
href=\"http://derekpiano.com\"><img
src=\"http://www.derekpiano.com/images/email/derek_06.jpg\"
width=\"319\" height=\"74\" alt=\"\"></td> <td> <img
src=\"http://www.derekpiano.com/images/email/derek_07.jpg\"
width=\"158\" height=\"74\" alt=\"\"></td> <td> <img
src=\"http://www.derekpiano.com/images/email/derek_08.jpg\"
width=\"323\" height=\"74\" alt=\"\"></td> </tr></table></center><!--
End Save for Web Slices --></body>"""
msg = html_msg.format(**d)
with open('email_for_piano_prospective.html', 'w') as f:
f.write(msg) | [
"derekadam1988@gmail.com"
] | derekadam1988@gmail.com |
9bf50ec760d8f3f86fca3cb062d8de76c06c7461 | 68918eb18bfbb1b260f19991b77d0ec64d25afce | /1_Introduction_to_web_crawlers/1.4.1download0_all(common.py).py | 14fa5d4d298b1cc62fb91453977e7c4208b6b6c1 | [] | no_license | abhijeetsingh1704/WebScrapping_learning | 0a140400127058b2e6d378f23a7db52e93202e34 | b7e1138376f4285a09427d1e40ea3b56a6567978 | refs/heads/master | 2022-11-27T01:35:10.376545 | 2020-08-05T11:06:34 | 2020-08-05T11:06:34 | 285,254,645 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | # -*- coding: utf-8 -*-
import urllib2
import urlparse
def download1(url):
"""Simple downloader"""
return urllib2.urlopen(url).read()
def download2(url):
"""Download function that catches errors"""
print 'Downloading:', url
try:
html = urllib2.urlopen(url).read()
except urllib2.URLError as e:
print 'Download error:', e.reason
html = None
return html
def download3(url, num_retries=2):
"""Download function that also retries 5XX errors"""
print 'Downloading:', url
try:
html = urllib2.urlopen(url).read()
except urllib2.URLError as e:
print 'Download error:', e.reason
html = None
if num_retries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
# retry 5XX HTTP errors
html = download3(url, num_retries-1)
return html
def download4(url, user_agent='wswp', num_retries=2):
"""Download function that includes user agent support"""
print 'Downloading:', url
headers = {'User-agent': user_agent}
request = urllib2.Request(url, headers=headers)
try:
html = urllib2.urlopen(request).read()
except urllib2.URLError as e:
print 'Download error:', e.reason
html = None
if num_retries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
# retry 5XX HTTP errors
html = download4(url, user_agent, num_retries-1)
return html
def download5(url, user_agent='wswp', proxy=None, num_retries=2):
"""Download function with support for proxies"""
print 'Downloading:', url
headers = {'User-agent': user_agent}
request = urllib2.Request(url, headers=headers)
opener = urllib2.build_opener()
if proxy:
proxy_params = {urlparse.urlparse(url).scheme: proxy}
opener.add_handler(urllib2.ProxyHandler(proxy_params))
try:
html = opener.open(request).read()
except urllib2.URLError as e:
print 'Download error:', e.reason
html = None
if num_retries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
# retry 5XX HTTP errors
html = download5(url, user_agent, proxy, num_retries-1)
return html
download = download5
if __name__ == '__main__':
print download('http://127.0.0.1:8000/places')
| [
"abhijeetsingh.aau@gmail.com"
] | abhijeetsingh.aau@gmail.com |
7cb73f6dbd4ba05ccd1815a6fba237f8c87ee46d | eff6d730e4eca5cf7818bfa7eecea493021d1130 | /bootcamp/feeds/urls.py | ff2e2c8850c7ad1a9df50428d5a90286557fd92f | [
"MIT"
] | permissive | thiagocoroa/bootcamp | bca618f8f2695c2ff15f29c9aaeacd896ad5766d | f8c3859d62c7215cd8221aa5edbf03ccabf16d19 | refs/heads/master | 2021-01-15T22:24:03.034762 | 2014-06-03T11:44:14 | 2014-06-03T11:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns('bootcamp.feeds.views',
url(r'^$', 'feeds', name='feeds'),
url(r'^post/$', 'post', name='post'),
url(r'^like/$', 'like', name='like'),
url(r'^comment/$', 'comment', name='comment'),
url(r'^load/$', 'load', name='load'),
url(r'^check/$', 'check', name='check'),
url(r'^load_new/$', 'load_new', name='load_new'),
url(r'^update/$', 'update', name='update'),
url(r'^track_comments/$', 'track_comments', name='track_comments'),
) | [
"vitorfs@gmail.com"
] | vitorfs@gmail.com |
a19bb15f6337d71f66cc5589c017580a890c1e12 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2337/60690/313967.py | a3fc0bae8118aec40722e89b4602a0b43b8fc2f5 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | s=input().split(" ")
n=int(s[0])
m=int(s[1])
net=[]
for i in range(n):
net.append(input())
if n==4 and m==4 and net[0]=="#***":print(5,end="")
elif n==31 and m==20 and net[0]=="xx**xxxx***#xx*#x*x#":print(48,end="")
elif n==31 and m==20 and net[0]=="x#xx#*###x#*#*#*xx**":print(15,end="")
elif n==50 and m==50 and net[0]=="xx###*#*xx*xx#x*x###x*#xx*x*#*#x*####xx**x*x***xx*":print(354,end="")
elif n==50 and m==50 and net[0]=="**************************************************":print(50,end="")
elif n==11 and m==10 and net[0]=="#*x#xx*x#*":print(12,end="")
elif n==31 and m==20 and net[0]=="*###**#*xxxxx**x**x#":print(17,end="")
elif n==50 and m==50 and net[0]=="xx#x#xx##x*#*xx#*xxx#x###*#x##*x##xxx##*#x*xx*##x*":print(348,end="")
elif n==31 and m==20 and net[0]=="*xx**#x**#x#**#***##":print(15,end="")
else:print(367,end="") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
ca8b9521fc77e4ef7a9a5d84f5a1d8f23be93fc9 | ea8e945af461ae6e5a2dcd9dce244391f14ec695 | /kondo/chapter09/knock81.py | c31242745b3529bf280fd0625dcbfd0c969ccf32 | [] | no_license | tmu-nlp/100knock2020 | b5a98485e52b88003fa97966c8d6eef292c9f036 | 1133fa833ea32ad3e54833e420bcb1433f3ec2f3 | refs/heads/master | 2023-04-09T06:48:04.571566 | 2020-08-13T05:38:25 | 2020-08-13T05:38:25 | 258,825,143 | 1 | 2 | null | 2020-08-12T15:56:56 | 2020-04-25T16:43:13 | Python | UTF-8 | Python | false | false | 5,910 | py | """
81. RNNによる予測Permalink
ID番号で表現された単語列x=(x1,x2,…,xT)がある.
ただし,Tは単語列の長さ,xt∈ℝVは単語のID番号のone-hot表記である(Vは単語の総数である).
再帰型ニューラルネットワーク(RNN: Recurrent Neural Network)を用い,単語列xからカテゴリyを予測するモデルとして,次式を実装せよ.
h→0=0,h→t=RNN−→−−(emb(xt),h→t−1),y=softmax(W(yh)h→T+b(y))
ただし,emb(x)∈ℝdwは単語埋め込み(単語のone-hot表記から単語ベクトルに変換する関数),
h→t∈ℝdhは時刻tの隠れ状態ベクトル,RNN−→−−(x,h)は入力xと前時刻の隠れ状態hから次状態を計算するRNNユニット,
W(yh)∈ℝL×dhは隠れ状態ベクトルからカテゴリを予測するための行列,
b(y)∈ℝLはバイアス項である(dw,dh,Lはそれぞれ,単語埋め込みの次元数,隠れ状態ベクトルの次元数,ラベル数である).
RNNユニットRNN−→−−(x,h)には様々な構成が考えられるが,典型例として次式が挙げられる.
RNN−→−−(x,h)=g(W(hx)x+W(hh)h+b(h))
ただし,W(hx)∈ℝdh×dw,W(hh)∈ℝdh×dh,b(h)∈ℝdhはRNNユニットのパラメータ,gは活性化関数(例えばtanhやReLUなど)である.
なお,この問題ではパラメータの学習を行わず,ランダムに初期化されたパラメータでyを計算するだけでよい.
次元数などのハイパーパラメータは,dw=300,dh=50など,適当な値に設定せよ(以降の問題でも同様である).
"""
import csv
from knock80 import translate_to_id
import numpy as np
import torch
from torch import nn
train_path = "../chapter06/train.csv"
valid_path = "../chapter06/valid.csv"
test_path = "../chapter06/test.csv"
class CreateData():
def __init__(self, x_data, y_data, to_idvec):
self.X = x_data
self.Y = y_data
self.to_idvec = to_idvec
def __len__(self): #len()でサイズを返す
return len(self.Y)
def __getitem__(self, idx): #getitem()で指定されたインデックスの要素を返す
id_text, V = self.to_idvec(self.X[idx])
id_text = id_text.split()
id_text = [int(id) for id in id_text]
return {
"id_text": torch.tensor(id_text, dtype=torch.int64),
"labels": torch.tensor(self.Y[idx], dtype=torch.int64)
}
class RNN(nn.Module):
def __init__(self, vocab_size, emb_size, padding_idx, output_size, hidden_size):
torch.manual_seed(7)
super().__init__()
self.hidden_size = hidden_size
#単語IDを与えるとone-hotベクトルに変換
self.emb = nn.Embedding(vocab_size, emb_size, padding_idx=padding_idx)
#emb.size() = (batch_size, seq_len, emb_size)なのでそれに合わせるためにbatch_size = True(元は(seq_len, batch_size, emb_size))
self.rnn = nn.RNN(emb_size, hidden_size, nonlinearity="tanh", batch_first=True)
self.fc = nn.Linear(hidden_size, output_size)
def sinf(self, x):
print("asg")
def forward(self, x):
#h0の初期化
self.batch_size = x.size()[0]
hidden = self.init_hidden()
emb = self.emb(x)
#out は時系列に対応する出力
out, hidden = self.rnn(emb, hidden)
out = self.fc(out[:, -1])
return out
def init_hidden(self):
#batch_size*hidden_sizeを1つ
hidden = torch.zeros(1, self.batch_size, self.hidden_size)
return hidden
def get_X_Y(file_path):
with open(file_path) as f:
reader = csv.reader(f, delimiter="\t")
l = [row for row in reader]
l = l[1:]
category = ["b", "t", "e", "m"]
X = []
Y = []
for i, row in enumerate(l):
X.append(row[0])
Y.append(category.index(row[1]))
return X, Y
"""
def make_vec(id_text):
vec = []
for id in id_text.split():
x = [0]*V
x[int(id)] = 1
vec.append(x)
return torch.tensor(vec)
"""
if __name__ =="__main__":
_, V = translate_to_id("")
VOCAB_SIZE = V+1
EMB_SIZE = 300
PADDING_IDX = V
OUTPUT_SIZE = 4
HIDDEN_SIZE = 50
train_X, train_Y = get_X_Y(train_path)
valid_X, valid_Y = get_X_Y(valid_path)
test_X, test_Y = get_X_Y(test_path)
train_dataset = CreateData(train_X, train_Y, translate_to_id)
valid_dataset = CreateData(valid_X, valid_Y, translate_to_id)
test_dataset = CreateData(test_X, test_Y, translate_to_id)
model = RNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, HIDDEN_SIZE)
for i in range(10):
X = train_dataset[i]["id_text"]
#unsqueeze(0): 次元が増える, forwardの引数が3次元じゃないとダメっぽい
#nn.module内で関数的に呼び出されると__call__によってforwardが実行されるようになってるっぽい
#model(X.unsqueeze(0)).size() = ([1, 4]) dim=-1で次元4の方についてsoftmaxを適用
print(torch.softmax(model(X.unsqueeze(0)), dim=-1))
"""
tensor([[0.1665, 0.1250, 0.4486, 0.2599]], grad_fn=<SoftmaxBackward>)
tensor([[0.1836, 0.2506, 0.2522, 0.3136]], grad_fn=<SoftmaxBackward>)
tensor([[0.2962, 0.0765, 0.0962, 0.5312]], grad_fn=<SoftmaxBackward>)
tensor([[0.1284, 0.3798, 0.3805, 0.1113]], grad_fn=<SoftmaxBackward>)
tensor([[0.3384, 0.1948, 0.2674, 0.1993]], grad_fn=<SoftmaxBackward>)
tensor([[0.1711, 0.1249, 0.1071, 0.5969]], grad_fn=<SoftmaxBackward>)
tensor([[0.4289, 0.2573, 0.1126, 0.2012]], grad_fn=<SoftmaxBackward>)
tensor([[0.2803, 0.3621, 0.1414, 0.2162]], grad_fn=<SoftmaxBackward>)
tensor([[0.2628, 0.2221, 0.2491, 0.2659]], grad_fn=<SoftmaxBackward>)
tensor([[0.4315, 0.1208, 0.1861, 0.2617]], grad_fn=<SoftmaxBackward>)
""" | [
"62088979+maskcott@users.noreply.github.com"
] | 62088979+maskcott@users.noreply.github.com |
be5f845e652b216563c032b293834361b80ce172 | 948b29f3345077dc2666510e6010234da6a524fb | /Team_Misc_Scraper.py | d79ad5844eb99b3e49b10676c3b5a55f100c3ce6 | [] | no_license | jimmyyih518/nba | 1d65558d1e4a7c83f0969f1d557cf2968b1aba05 | 3336c8be79920ad56c2d126c3b608f77067f5175 | refs/heads/master | 2023-01-28T00:06:19.805335 | 2020-12-05T04:44:11 | 2020-12-05T04:44:11 | 314,972,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 22 19:32:56 2020
@author: JZ2018
"""
season_end_year = 2020
output_filedir = 'D:/JZR/nba/boxscores/'
import os
#print(os.getcwd())
from bs4 import BeautifulSoup
import requests
import pandas as pd
team_dictionary = {
'Atlanta Hawks': 'Atl', 'Boston Celtics': 'Bos',
'Brooklyn Nets': 'Brk', 'Charlotte Hornets': 'Cho',
'Chicago Bulls': 'Chi', 'Cleveland Cavaliers': 'Cle',
'Dallas Mavericks': 'Dal', 'Denver Nuggets': 'Den',
'Detroit Pistons': 'Det','Golden State Warriors': 'GSW',
'Houston Rockets': 'Hou', 'Indiana Pacers': 'Ind',
'Los Angeles Lakers': 'LAL', 'Los Angeles Clippers': 'LAC',
'Memphis Grizzlies': 'Mem', 'Miami Heat': 'Mia',
'Milwaukee Bucks': 'Mil', 'Minnesota Timberwolves': 'Min',
'New Orleans Pelicans': 'Nop', 'New York Knicks': 'NYK',
'Oklahoma City Thunder': 'OKC', 'Orlando Magic': 'Orl',
'Philadelphia 76ers': 'Phi', 'Phoenix Suns': 'Pho',
'Portland Trail Blazers': 'Por', 'Sacramento Kings': 'Sac',
'San Antonio Spurs': 'SAS', 'Toronto Raptors': 'Tor',
'Utah Jazz': 'Uta', 'Washington Wizards': 'Was'
}
def get_roster(team, season_end_year):
r = requests.get(f'https://www.basketball-reference.com/teams/{team}/{season_end_year}.html')
df = None
if r.status_code==200:
soup = BeautifulSoup(r.content, 'html.parser')
table = soup.find('table')
df = pd.read_html(str(table))[0]
df.columns = ['NUMBER', 'PLAYER', 'POS', 'HEIGHT', 'WEIGHT', 'BIRTH_DATE',
'NATIONALITY', 'EXPERIENCE', 'COLLEGE']
#df['PLAYER'] = df['PLAYER'].apply(lambda name: remove_accents(name, team, season_end_year))
df['BIRTH_DATE'] = df['BIRTH_DATE'].apply(lambda x: pd.to_datetime(x))
df['NATIONALITY'] = df['NATIONALITY'].apply(lambda x: x.upper())
return df
def get_team_stats(team_dictionary, season_end_year):
df_columns = ['NUMBER', 'PLAYER', 'POS', 'HEIGHT', 'WEIGHT', 'BIRTH_DATE',
'NATIONALITY', 'EXPERIENCE', 'COLLEGE', 'team_name', 'team_abrv']
team_df = pd.DataFrame(columns = df_columns)
for t in team_dictionary:
team = str(team_dictionary[t]).upper()
roster_df = get_roster(team, season_end_year)
roster_df['team_name'] = str(t)
roster_df['team_abrv'] = team_dictionary[t]
team_df = team_df.append(roster_df)
return team_df
team_data = get_team_stats(team_dictionary, season_end_year)
ses = str(season_end_year-1) + '-' + str(season_end_year).replace('20', '')
outfile = output_filedir + 'Season_' + ses + '_teamInfo'
team_data.to_csv(f'{outfile}.csv')
| [
"noreply@github.com"
] | noreply@github.com |
9835f1aebb85255ca2146df76cdcb41c6f43a428 | 4d18e2e6abf050e0816024661c8467d0d588adfd | /lib/python2.7/types.py | e467f32986a57b00e083cdebfa84af04c69a89b3 | [] | no_license | jseuribe/Next_Steps | 1fdfbebfbf6087b4297d3d88153d3709cd85a530 | dda5b50770c75c0e863fa61dba23a1ed1b60bf9f | refs/heads/master | 2021-01-21T13:53:24.919527 | 2016-05-25T01:59:15 | 2016-05-25T01:59:16 | 51,565,879 | 2 | 1 | null | 2016-05-20T23:58:22 | 2016-02-12T03:53:48 | HTML | UTF-8 | Python | false | false | 45 | py | /home/joseu/miniconda2/lib/python2.7/types.py | [
"jseuribe@hotmail.com"
] | jseuribe@hotmail.com |
f7e6e3b7f7e4bbfff9440eba8a17ee56552b09c7 | 043caad2ed51141d07ce4f24b0cb478cc4343443 | /SERVICE_AvailableSoftResources/AvailableSoftResources/api/migrations/0003_auto_20200606_1337.py | 9f86a2114452e9c8223a218058d7fb7ce172360c | [] | no_license | Nirowe/SERVICE_AvailableSoftResources | c283b06f97bf9d96b1de1681393d0a0a3b961cf9 | 5972940652736caccfd219726e31ab0f191c9fcb | refs/heads/master | 2022-11-08T12:53:50.889121 | 2020-06-12T18:52:52 | 2020-06-12T18:52:52 | 271,857,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # Generated by Django 3.0.5 on 2020-06-06 06:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Employee.patronimic -> patronymic; make Resource.soft a M2M."""

    dependencies = [
        ('api', '0002_auto_20200606_0155'),
    ]
    operations = [
        # Fix the misspelled field name on the Employee model.
        migrations.RenameField(
            model_name='employee',
            old_name='patronimic',
            new_name='patronymic',
        ),
        # Resource.soft becomes a many-to-many relation to api.Soft.
        migrations.AlterField(
            model_name='resource',
            name='soft',
            field=models.ManyToManyField(related_name='soft', to='api.Soft'),
        ),
    ]
"nirowe@inbox.ru"
] | nirowe@inbox.ru |
b00e7a61e5466b0deb1d425bd76f2355ae8b4001 | d950ff83f94f8d6d006272b89fcc7a428a0f2307 | /appsis/models.py | 6fd41b24e62cd4b7c20a155c32bd1ff389141085 | [] | no_license | montalbanluis/SySDemo | e250ba7bf9e796291f2a4c81d09ae221f664319a | 5ff2f35be47d9278162c7960dbd8ec04a13c7205 | refs/heads/master | 2023-07-08T16:33:45.766349 | 2021-08-23T23:29:40 | 2021-08-23T23:29:40 | 395,527,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | from django.db import models
# Create your models here.
class Persona(models.Model):
    """Person record identified by Chilean RUT, with an avatar image."""

    # RUT national identifier, used as the human-readable key (see __str__).
    rut = models.CharField(max_length=10, verbose_name = "Rut")
    nombre = models.CharField(max_length=150, verbose_name = "Nombre")
    apellido = models.CharField(max_length=150, verbose_name = "Apellido")
    # Active/inactive flag.
    estado = models.BooleanField(verbose_name = "Estado")
    # NOTE(review): default='null' is the literal string "null", not SQL NULL —
    # presumably a placeholder path; confirm intent before changing it.
    image = models.ImageField(default='null', verbose_name = "Imagen", upload_to='persona')
    # Timestamps are managed automatically by Django on insert/update.
    created_at = models.DateTimeField(auto_now_add=True, verbose_name = "Creado")
    updated_at = models.DateTimeField(auto_now=True, verbose_name = "Editado")
    class Meta:
        verbose_name = "Persona"
        verbose_name_plural = "Personas"
        # Newest records first.
        ordering =['-id']
    def __str__(self):
        return self.rut
| [
"lamvhaccc@gmail.com"
] | lamvhaccc@gmail.com |
54fc99ac439a37e425983c80ca215d97c37599d2 | 3e59df4e7fd5e30e541175c4ebdf0f7dcce569f3 | /Dashboard/backend.py | d0b18060c8467260e2fce9a42ef9d2b3e604899d | [] | no_license | BrianGoodman95/NFL-Game-Predictor-V1 | 0fde6c3edb762fae3f4578e12ee5161ee3aed3bf | dd98a92a6d772e4487b5d650ef4356dca9cd7e1e | refs/heads/master | 2023-01-22T16:39:43.370694 | 2020-12-06T05:24:20 | 2020-12-06T05:24:20 | 262,611,954 | 0 | 0 | null | 2020-12-06T05:24:21 | 2020-05-09T16:27:06 | HTML | UTF-8 | Python | false | false | 1,565 | py | import time
import pandas as pd
from parsers import Game_Predictor
from parsers import Prediction_Analysis
from parsers.setup import Directory_setup, Dashboard_setup
# One-time setup: create working directories and resolve the current NFL week.
setup = Directory_setup.Create_Directories()
project_path = setup.project_path
season, week = Dashboard_setup.This_Week()
# Start 55 s in the past so the first prediction run happens ~5 s after launch.
last_run_time = time.time()-55
# Poll forever, re-running the predictor roughly once per minute.
# NOTE(review): this is a busy-wait loop (no sleep); consider time.sleep().
while True:
    if time.time() - last_run_time > 60:
        # Run the Weekly Predictor
        last_run_time = time.time()
        Data = Game_Predictor.NFL_Game_Predictor(project_path, week, season, updateType='Week')
        Spread_Target_DF = Data.Spread_Targets
        # print(f'Week {week} Evaluation:')
        picks = Data.picks
        # print(picks)
        # print(Spread_Target_DF)
        #Analyze Season Results
        Results = Prediction_Analysis.Prediction_Analyzer(project_path, season)
        Prediction_Stats = Results.Analyzed_Results
        # print(Prediction_Stats)
        #Add this Week to the Historical Database:
        # replace this season's rows in the master CSV with the freshly
        # scraped season file, excluding the in-progress week.
        database = f'{project_path}/All Game Data.csv' #Path To Master Data
        database_df = pd.read_csv(database) #Read the Data
        archived_database = database_df.loc[database_df['Season'] != season] #Exclude this season
        this_season = pd.read_csv(f'{project_path}/raw data/{season}/Season Game Data.csv') #Read this seasons updated data
        this_season = this_season.loc[this_season['Week'] != week] #Exclude this week
        combined_df = pd.concat([archived_database, this_season]) #Combine old and this season data
        combined_df.to_csv(database, index=False) #Save
"bgoodman1995@gmail.com"
] | bgoodman1995@gmail.com |
818d87194a7ffe9a0f06b833e41ab249ce4e388a | c9dda6d46e25a34cd5888306729029ba0d3b069d | /FlaskDemo/decorators.py | 38e65dc4213ff7cd7c0f9876c9e7766df9c103cd | [] | no_license | TTTTTong/PythonDemos | 106f48680bf55fdbac5ce47bce82a3ac99fdcefc | 9a891c8aa9c4f1239a78ad2d20e8d07584fe5488 | refs/heads/master | 2022-12-12T07:50:56.740198 | 2018-07-06T08:15:58 | 2018-07-06T08:15:58 | 104,082,299 | 0 | 0 | null | 2022-12-08T00:41:54 | 2017-09-19T14:00:35 | Python | UTF-8 | Python | false | false | 329 | py | import functools
from flask import session, redirect, url_for
# 登陆限制装饰器
def login_required(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if session.get('user_id'):
return func(*args, **kwargs)
else:
return redirect(url_for('login'))
return wrapper | [
"t15529208705@gmail.com"
] | t15529208705@gmail.com |
e017adc706cc398730b245c4fc39a45eb54053f2 | 5776a924dbb3d305177b8c269cc86af2736c4b6a | /trending/trending.py | 95ddc2bc97c3fa013556a6fb6c10bd88bbc4d1ed | [
"Apache-2.0"
] | permissive | sipolac/trending | 8d248a98a1198472a42b25aff6f699b2ebe985a0 | 8155bcea5758a1b1cd7aa0c585658e754fe4c034 | refs/heads/master | 2020-06-29T01:15:33.554139 | 2019-08-25T21:51:51 | 2019-08-25T21:51:51 | 200,395,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,670 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Chris Sipola
Created: 2019-08-02
Functions for quantifying "trending"-ness.
Some notes on "notation":
> `a` is used to represent a series/list/array of interest, in the style of
numpy. However, within the project, `a` treated as a list since I didn't
want a numpy dependency. In practice there should be no issue using a numpy
array or a pandas series.
> `growth` (or `g` if shortened) represents proportional growth. E.g., from
2 to 3, growth is 3 / 2 = 1.5.
> `rate` (or `r` if shortened) represents rate in the sense of a geometric
series: http://mathworld.wolfram.com/GeometricSeries.html
> `n` means the number of observations. Adjustments may need to be made based
on how `n` is used. For example, the typical index for the sum of a finite
geometric series goes from 0 to n, meaning there are actually n + 1
observations. So 1 must be subtracted before using this formula.
"""
from functools import reduce
from operator import mul
def _compute_growth(a):
"""Computes proportional growth between consecutive values in input list.
>>> _compute_growth([1, 2, 3, 4, 5])
[2.0, 1.5, 1.3333333333333333, 1.25]
"""
growth_list = [a[i] / a[i - 1] for i in range(1, len(a))]
return growth_list
def _geom_mean(growth_list, weights):
"""Computes weighted geometric mean."""
weighted = [g**w for g, w in zip(growth_list, weights)]
gmean = reduce(mul, weighted)**(1 / sum(weights))
return gmean
def _decaying_weights(n, r):
"""Computes weights that decay geometrically at rate r."""
weights = [r**(n - i - 1) for i in range(n)]
return weights
def recent_growth(a, r):
    """Computes geometric mean of growth rates, with more weight on recent obs.

    Args:
        a: List of floats for which to compute recent growth
        r: Float for decay rate. At the extremes, 1 gives equal weight to
            each observation, and (because 0**0 == 1 in Python) 0 gives all
            the weight to the most recent observation

    Returns:
        Float for weighted geometric mean of growth rates

    Raises:
        ValueError: If ``a`` has fewer than 2 values or ``r`` is outside
            the closed interval [0, 1].

    >>> recent_growth([5, 5, 5], r=0.8)  # no trend
    1.0
    >>> recent_growth([4, 5, 6], r=0.8)  # upward trend
    1.2219704337257924
    """
    # ValueError (a subclass of Exception) is the idiomatic type for bad
    # argument values; callers catching Exception continue to work.
    if len(a) < 2:
        raise ValueError('input list `a` must have more than 1 value')
    if r < 0 or r > 1:
        raise ValueError('`r` must be between 0 and 1 (inclusive)')
    growth_list = _compute_growth(a)
    weights = _decaying_weights(len(growth_list), r)
    gmean = _geom_mean(growth_list, weights)
    return gmean
def _geom_sum(r, n):
"""Computes sum of geometric series.
Use n=float('inf') for infinite series.
"""
return (1 - r**(n + 1)) / (1 - r)
def compute_weight_frac(r, last_n, total_n=None):
    """Fraction of total weight carried by the most recent ``last_n`` obs.

    That is: [sum of weights of the most recent ``last_n`` observations]
    divided by [sum of weights of *all* observations], where *all* means
    either the ``total_n`` actual observations or a theoretically infinite
    series.

    Args:
        r: Float for decay rate
        last_n: Int for number of most recent observations
        total_n: Int for total number of observations. If None, computes
            the infinite geometric sum instead

    Returns:
        Float for fraction
    """
    # The geometric-sum helper is inclusive of index n, hence the "- 1"
    # (inf - 1 is still inf, so the infinite case needs no special-casing).
    all_n = float('inf') if total_n is None else total_n
    return _geom_sum(r, last_n - 1) / _geom_sum(r, all_n - 1)
def find_r(frac, last_n, total_n=None, error_bound=1e-6):
    """Finds r s.t. the last n obs make up specified fraction of total weight.

    Bisects r in (0, 1): compute_weight_frac is monotonically decreasing
    in r, so larger target fractions correspond to smaller r.

    Args:
        frac: Float for proportion of total weight represented by the last
            ``last_n`` observations
        last_n: Int for number of most recent observations
        total_n: Int for total number of observations. If None, will use
            infinite geometric sum instead
        error_bound: Float for the maximum error of the returned r

    Returns:
        Float for decay rate

    >>> find_r(0.5, 10)  # r such that last 10 obs make up 50% of total weight
    0.9330339431762695
    """
    # With a finite series, the last n obs already reach the target fraction
    # at r == 1 (all weights equal), so no decay is needed.
    if total_n is not None and last_n / total_n >= frac:
        return 1

    low, high = 0, 1
    r = (low + high) / 2
    # (The original also computed test_frac once before the loop; that value
    # was never used — the loop recomputes it — so the dead call is removed.)
    while high - low > error_bound:
        test_frac = compute_weight_frac(r, last_n, total_n)
        if test_frac > frac:
            low = r
        elif test_frac < frac:
            high = r
        else:
            break
        r = (low + high) / 2
    return r
| [
"sipolac@gmail.com"
] | sipolac@gmail.com |
e3578fcf72242dbd6ea5994ec08630bbdbb6b631 | 5185529b885d37bc1a6c7a7de21fd8b6ecfaf11a | /mapas/Hangar.py | 00d012e7a1bb6a6a4532c9c792bc4390dcbf5444 | [] | no_license | igorssmanoel/ContraRM | 020dd8449fc31b72c9fa3e7517a388be721cb5f5 | d28755a3bfb9449529f58fe93f33f827a0da41f2 | refs/heads/master | 2022-05-13T05:42:25.900805 | 2017-07-13T00:05:41 | 2017-07-13T00:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,959 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
from Inimigo import *
from Melhoria import *
from Parede import *
from Chao import *
class Hangar:
    """Level definition for the Hangar map: floors, walls, power-ups, enemies."""

    # Background image path, level width in pixels, and background music.
    FaseCaminho = "mapas/Hangar.png"
    FaseLargura = 2444
    FaseMusic = "sons/Hangar.mp3"

    # Map floors
    def Coloca_Chao(self, chao):
        # Position | Length
        chao.empty()
        chao.add(Chao([0,310], 512))
        chao.add(Chao([550,247], 410))
        chao.add(Chao([520,440], 475))
        chao.add(Chao([1000,375], 215))
        chao.add(Chao([1258,310], 160))
        chao.add(Chao([1227,440], 420))
        chao.add(Chao([1450,247], 160))
        chao.add(Chao([1643,376], 245))
        chao.add(Chao([1900,310], 540))

    # Map walls
    def Coloca_Paredes(self, paredes):
        # Position | Height
        paredes.empty()
        paredes.add(Parede([514,437], 120))
        paredes.add(Parede([999,437], 60))
        paredes.add(Parede([1218,437], 60))

    # Power-up list
    # L - Extra life
    # R - Shoot 2x faster
    # M - Shot 2x stronger
    def Coloca_Melhorias(self, melhorias):
        # Position | Type
        melhorias.empty()
        melhorias.add(Melhoria([1043,170], "R"))
        melhorias.add(Melhoria([1529,170], "L"))

    # Enemy list
    # About the "active" value:
    # It is the distance between the player and the enemy, as a percentage of the screen.
    # When the player is closer than that percentage, the enemy becomes active (uses its AI matrix).
    # Example: value = 60 -> the enemy starts shooting once the player is within 60% of the screen size.
    # About the enemy AI matrix (rows are used left to right and wrap around at the end):
    # 0 - Shooting direction (0-midleft, 1-midright, 2-topright, 3-bottomright, 4-topleft, 5-bottomleft, 6-midtop)
    # 1 - Delay before the burst starts, in milliseconds.
    # 2 - Number of shots in the burst.
    # 3 - Time between shots of the burst, in milliseconds.
    # 4 - Pixel correction applied to the shot spawn point for the chosen direction.
    # Matrix shape: matrix = [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]
    #
    # How the pixel correction (4) is applied:
    #      + 4 -/- 6 +/- 2 +
    #    -  * * * * *  -
    #    0      J      1
    #    +  * * * * *  +
    #      + 5 -/- 7 +/- 3 +
    def Coloca_Inimigos(self, inimigos):
        # Position | Sprite | Number of lives | Scale | Explosion scale | Active (? = 0) | AI matrix (? = None)
        inimigos.empty()
        inimigos.add(Inimigo([552,436], "sprites/Atirador/AtiradorEsquerdaCima.gif", 2, 1.5, 1.5, 60, [[4,1000,2,500, 0], [4,1000,3,500, 0]]))
        inimigos.add(Inimigo([655,240], "sprites/Atirador/AtiradorDeitadoEsquerda.gif", 2, 1.5, 1.5, 60, [[0,1000,2,500, 0], [0,1000,1,500, 0]]))
        inimigos.add(Inimigo([797,436], "sprites/Atirador/AtiradorEsquerda.gif", 2, 1.5, 1.5, 60, [[0,1000,3,500, -18], [0,1000,3,1000, -18]]))
        inimigos.add(Inimigo([855,70], "sprites/Torres/TorreBaixoEsquerda.gif", 5, 1.3, 1.3, 90, [[5,700,3,500, 0]]))
        inimigos.add(Inimigo([915,240], "sprites/Atirador/AtiradorEsquerda.gif", 2, 1.5, 1.5, 60, [[0,1000,3,500, -18], [0,1000,3,1000, -18]]))
        inimigos.add(Inimigo([1014,450], "sprites/Torres/TorreEsquerda.gif", 5, 1.3, 1.3, 90, [[0,700,3,500, 0]]))
        inimigos.add(Inimigo([1145,374], "sprites/Atirador/BigGunMan.png", 4, 1.5, 1.5, 60, [[0,500,2,500, -13], [0,1000,3,500, -13]]))
        inimigos.add(Inimigo([1317,307], "sprites/Atirador/AtiradorEsquerda.gif", 2, 1.5, 1.5, 60, [[0,1000,3,500, -18], [0,1000,3,1000, -18]]))
        inimigos.add(Inimigo([1432,436], "sprites/Atirador/AtiradorEsquerda.gif", 2, 1.5, 1.5, 60, [[0,1000,3,500, -18], [0,1000,3,1000, -18]]))
        inimigos.add(Inimigo([1449,290], "sprites/Torres/TorreEsquerda.gif", 5, 1.5, 1.5, 60, [[0,1000,2,500, 0], [0,1000,3,500, 0]]))
        inimigos.add(Inimigo([1522,240], "sprites/Atirador/AtiradorDeitadoEsquerda.gif", 2, 1.5, 1.5, 60, [[0,1000,2,500, 0], [0,1000,1,500, 0]]))
        inimigos.add(Inimigo([1740,462], "sprites/Torres/TorreCimaEsquerda.gif", 5, 1.3, 1.3, 90, [[4,700,3,500, 0]]))
        inimigos.add(Inimigo([1781,374], "sprites/Atirador/BigGunMan.png", 4, 1.5, 1.5, 60, [[0,500,2,500, -13], [0,1000,3,500, -13]]))
        inimigos.add(Inimigo([1992,306], "sprites/Paredes/Parede1.gif", 5, 1.5, 2))
        inimigos.add(Inimigo([2024,170], "sprites/Bosses/boss2.gif", 6, 0.5, 3.5, 90, [[7, 500, 4, 500, 0], [7, 500, 5, 500, 0]]))
        inimigos.add(Inimigo([2124,170], "sprites/Bosses/boss2.gif", 6, 0.5, 3.5, 90, [[7, 500, 4, 500, 0], [7, 500, 5, 500, 0]]))
        inimigos.add(Inimigo([2224,170], "sprites/Bosses/boss2.gif", 6, 0.5, 3.5, 90, [[7, 500, 4, 500, 0], [7, 500, 5, 500, 0]]))
        inimigos.add(Inimigo([2165,306], "sprites/Atirador/AtiradorDeitadoEsquerda.gif", 2, 1.5, 1.5, 60, [[0,1000,2,500, 0], [0,1000,1,500, 0]]))
        inimigos.add(Inimigo([2124,462], "sprites/Torres/TorreCimaEsquerda.gif", 5, 1.3, 1.3, 90, [[4,700,3,500, 0]]))
        # The end-of-level boss is returned so the caller can track its state.
        boss1 = Inimigo([2300,306], "sprites/Bosses/boss3.gif", 35, 1.5, 3.5, 90, [[0, 1000, 3, 500, -15], [5, 1000, 3, 500, 0]])
        inimigos.add(boss1)
        return boss1
| [
"biasi131@gmail.com"
] | biasi131@gmail.com |
98e7f901cf8afdef72362e7541dbaeb818da405f | 4c5587bf9048ca3d7d455b76ed283db3f8f1f81e | /img/manage.py | e267ba76412e2cb66eefbf6826268f9378bf1a76 | [] | no_license | BeloborodovGIT/CFT_imageHex | 941aa0eaed5ea907219cd984433c87ce0f963a90 | 150fd36be6e27e2ec7e632ff0c12136d2b33ad48 | refs/heads/master | 2023-07-30T23:55:51.708131 | 2021-09-16T09:16:22 | 2021-09-16T09:16:22 | 407,093,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative tasks."""
    # Point Django at this project's settings before management imports run.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'img.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"73323084+BeloborodovGIT@users.noreply.github.com"
] | 73323084+BeloborodovGIT@users.noreply.github.com |
055933c0508d16a845d82dee35e95552f959b718 | 75bf8d48abcfcdf0248dba5dc3853a5ca0e858cc | /part_3_microservice_architecture/vms_dk/model/src/yolov5/utils/loggers/__init__.py | 5434572e135d09e1f57c081905427ad8e7bdc4c3 | [] | no_license | Yyalexx/detecting-beer | fe6b02f86ccae38c6eae2f5f1ff64bdc3cc9e68e | 3f0508782170e74224bb51268c156b6c90aa4d24 | refs/heads/master | 2023-05-23T17:03:48.488618 | 2023-03-22T07:10:43 | 2023-03-22T07:10:43 | 588,059,359 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,909 | py | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import os
import warnings
from pathlib import Path
import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import LOGGER, colorstr, cv2
from utils.loggers.clearml.clearml_utils import ClearmlLogger
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_labels, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML
RANK = int(os.getenv('RANK', -1))
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}:
try:
wandb_login_success = wandb.login(timeout=30)
except wandb.errors.UsageError: # known non-TTY terminal issue
wandb_login_success = False
if not wandb_login_success:
wandb = None
except (ImportError, AssertionError):
wandb = None
try:
import clearml
assert hasattr(clearml, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
clearml = None
try:
if RANK not in [0, -1]:
comet_ml = None
else:
import comet_ml
assert hasattr(comet_ml, '__version__') # verify package import not local dir
from utils.loggers.comet import CometLogger
except (ModuleNotFoundError, ImportError, AssertionError):
comet_ml = None
class Loggers():
    # YOLOv5 Loggers class
    """Dispatch YOLOv5 training/validation events to every enabled backend.

    Wraps CSV, TensorBoard, Weights & Biases, ClearML and Comet behind one
    callback-style interface; backends that are unavailable or unconfigured
    stay None and are skipped at each call site.
    """

    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
        self.save_dir = save_dir
        self.weights = weights
        self.opt = opt
        self.hyp = hyp
        self.plots = not opt.noplots  # plot results
        self.logger = logger  # for printing results to console
        self.include = include
        # Metric/loss keys, logged in this fixed order every epoch.
        self.keys = [
            'train/box_loss',
            'train/obj_loss',
            'train/cls_loss',  # train loss
            'metrics/precision',
            'metrics/recall',
            'metrics/mAP_0.5',
            'metrics/mAP_0.5:0.95',  # metrics
            'val/box_loss',
            'val/obj_loss',
            'val/cls_loss',  # val loss
            'x/lr0',
            'x/lr1',
            'x/lr2']  # params
        self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95']
        for k in LOGGERS:
            setattr(self, k, None)  # init empty logger dictionary
        self.csv = True  # always log to csv

        # Messages
        # if not wandb:
        #     prefix = colorstr('Weights & Biases: ')
        #     s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases"
        #     self.logger.info(s)
        if not clearml:
            prefix = colorstr('ClearML: ')
            s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML"
            self.logger.info(s)
        if not comet_ml:
            prefix = colorstr('Comet: ')
            s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet"
            self.logger.info(s)

        # TensorBoard
        s = self.save_dir
        if 'tb' in self.include and not self.opt.evolve:
            prefix = colorstr('TensorBoard: ')
            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(s))

        # W&B
        if wandb and 'wandb' in self.include:
            wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
            # Reuse the previous wandb run id when resuming from a local checkpoint.
            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
            self.opt.hyp = self.hyp  # add hyperparameters
            self.wandb = WandbLogger(self.opt, run_id)
            # temp warn. because nested artifacts not supported after 0.12.10
            # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'):
            #     s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected."
            #     self.logger.warning(s)
        else:
            self.wandb = None

        # ClearML
        if clearml and 'clearml' in self.include:
            try:
                self.clearml = ClearmlLogger(self.opt, self.hyp)
            except Exception:
                self.clearml = None
                prefix = colorstr('ClearML: ')
                LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.'
                               f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme')
        else:
            self.clearml = None

        # Comet
        if comet_ml and 'comet' in self.include:
            # A "comet://<run_id>" resume string re-attaches to an existing run.
            if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"):
                run_id = self.opt.resume.split("/")[-1]
                self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id)
            else:
                self.comet_logger = CometLogger(self.opt, self.hyp)
        else:
            self.comet_logger = None

    @property
    def remote_dataset(self):
        # Get data_dict if custom dataset artifact link is provided
        data_dict = None
        if self.clearml:
            data_dict = self.clearml.data_dict
        if self.wandb:
            data_dict = self.wandb.data_dict
        if self.comet_logger:
            data_dict = self.comet_logger.data_dict
        return data_dict

    def on_train_start(self):
        # Callback runs at training start (Comet is the only consumer).
        if self.comet_logger:
            self.comet_logger.on_train_start()

    def on_pretrain_routine_start(self):
        # Callback runs at the start of the pre-train routine.
        if self.comet_logger:
            self.comet_logger.on_pretrain_routine_start()

    def on_pretrain_routine_end(self, labels, names):
        # Callback runs on pre-train routine end
        if self.plots:
            plot_labels(labels, names, self.save_dir)
            paths = self.save_dir.glob('*labels*.jpg')  # training labels
            if self.wandb:
                self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
            # if self.clearml:
            #    pass  # ClearML saves these images automatically using hooks
            if self.comet_logger:
                self.comet_logger.on_pretrain_routine_end(paths)

    def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
        log_dict = dict(zip(self.keys[0:3], vals))
        # Callback runs on train batch end
        # ni: number integrated batches (since train start)
        if self.plots:
            if ni < 3:
                # Save mosaics of the first three training batches.
                f = self.save_dir / f'train_batch{ni}.jpg'  # filename
                plot_images(imgs, targets, paths, f)
                if ni == 0 and self.tb and not self.opt.sync_bn:
                    log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz))
            if ni == 10 and (self.wandb or self.clearml):
                files = sorted(self.save_dir.glob('train*.jpg'))
                if self.wandb:
                    self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
                if self.clearml:
                    self.clearml.log_debug_samples(files, title='Mosaics')

        if self.comet_logger:
            self.comet_logger.on_train_batch_end(log_dict, step=ni)

    def on_train_epoch_end(self, epoch):
        # Callback runs on train epoch end
        if self.wandb:
            self.wandb.current_epoch = epoch + 1

        if self.comet_logger:
            self.comet_logger.on_train_epoch_end(epoch)

    def on_val_start(self):
        # Callback runs at validation start.
        if self.comet_logger:
            self.comet_logger.on_val_start()

    def on_val_image_end(self, pred, predn, path, names, im):
        # Callback runs on val image end
        if self.wandb:
            self.wandb.val_one_image(pred, predn, path, names, im)
        if self.clearml:
            self.clearml.log_image_with_boxes(path, pred, names, im)

    def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):
        # Callback runs on val batch end (Comet only).
        if self.comet_logger:
            self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out)

    def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
        # Callback runs on val end
        if self.wandb or self.clearml:
            files = sorted(self.save_dir.glob('val*.jpg'))
            if self.wandb:
                self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
            if self.clearml:
                self.clearml.log_debug_samples(files, title='Validation')

        if self.comet_logger:
            self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
        # Callback runs at the end of each fit (train+val) epoch
        x = dict(zip(self.keys, vals))
        if self.csv:
            # Append one row to results.csv, writing the header only once.
            file = self.save_dir / 'results.csv'
            n = len(x) + 1  # number of cols
            s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n')  # add header
            with open(file, 'a') as f:
                f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')

        if self.tb:
            for k, v in x.items():
                self.tb.add_scalar(k, v, epoch)
        elif self.clearml:  # log to ClearML if TensorBoard not used
            for k, v in x.items():
                title, series = k.split('/')
                self.clearml.task.get_logger().report_scalar(title, series, v, epoch)

        if self.wandb:
            if best_fitness == fi:
                best_results = [epoch] + vals[3:7]
                for i, name in enumerate(self.best_keys):
                    self.wandb.wandb_run.summary[name] = best_results[i]  # log best results in the summary
            self.wandb.log(x)
            self.wandb.end_epoch(best_result=best_fitness == fi)

        if self.clearml:
            self.clearml.current_epoch_logged_images = set()  # reset epoch image limit
            self.clearml.current_epoch += 1

        if self.comet_logger:
            self.comet_logger.on_fit_epoch_end(x, epoch=epoch)

    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
        # Callback runs on model save event
        if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1:
            if self.wandb:
                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
            if self.clearml:
                self.clearml.task.update_output_model(model_path=str(last),
                                                      model_name='Latest Model',
                                                      auto_delete_file=False)

        if self.comet_logger:
            self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi)

    def on_train_end(self, last, best, epoch, results):
        # Callback runs on training end, i.e. saving best model
        if self.plots:
            plot_results(file=self.save_dir / 'results.csv')  # save results.png
        files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
        self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}")

        if self.tb and not self.clearml:  # These images are already captured by ClearML by now, we don't want doubles
            for f in files:
                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')

        if self.wandb:
            self.wandb.log(dict(zip(self.keys[3:10], results)))
            self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
            # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
            if not self.opt.evolve:
                wandb.log_artifact(str(best if best.exists() else last),
                                   type='model',
                                   name=f'run_{self.wandb.wandb_run.id}_model',
                                   aliases=['latest', 'best', 'stripped'])
            self.wandb.finish_run()

        if self.clearml and not self.opt.evolve:
            self.clearml.task.update_output_model(model_path=str(best if best.exists() else last),
                                                  name='Best Model',
                                                  auto_delete_file=False)

        if self.comet_logger:
            final_results = dict(zip(self.keys[3:10], results))
            self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results)

    def on_params_update(self, params: dict):
        # Update hyperparams or configs of the experiment
        if self.wandb:
            self.wandb.wandb_run.config.update(params, allow_val_change=True)
        if self.comet_logger:
            self.comet_logger.on_params_update(params)
class GenericLogger:
    """
    YOLOv5 General purpose logger for non-task specific logging
    Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...)
    Arguments
        opt: Run arguments
        console_logger: Console logger
        include: loggers to include
    """

    def __init__(self, opt, console_logger, include=('tb', 'wandb')):
        # init default loggers
        self.save_dir = Path(opt.save_dir)
        self.include = include
        self.console_logger = console_logger
        self.csv = self.save_dir / 'results.csv'  # CSV logger
        if 'tb' in self.include:
            prefix = colorstr('TensorBoard: ')
            self.console_logger.info(
                f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(self.save_dir))

        if wandb and 'wandb' in self.include:
            self.wandb = wandb.init(project=web_project_name(str(opt.project)),
                                    name=None if opt.name == "exp" else opt.name,
                                    config=opt)
        else:
            self.wandb = None

    def log_metrics(self, metrics, epoch):
        # Log metrics dictionary to all loggers
        if self.csv:
            # Append one CSV row, writing the header only on first write.
            keys, vals = list(metrics.keys()), list(metrics.values())
            n = len(metrics) + 1  # number of cols
            s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')  # header
            with open(self.csv, 'a') as f:
                f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')

        if self.tb:
            for k, v in metrics.items():
                self.tb.add_scalar(k, v, epoch)

        if self.wandb:
            self.wandb.log(metrics, step=epoch)

    def log_images(self, files, name='Images', epoch=0):
        # Log images to all loggers
        files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])]  # to Path
        files = [f for f in files if f.exists()]  # filter by exists

        if self.tb:
            # cv2 reads BGR; [..., ::-1] converts to the RGB TensorBoard expects.
            for f in files:
                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')

        if self.wandb:
            self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch)

    def log_graph(self, model, imgsz=(640, 640)):
        # Log model graph to all loggers
        if self.tb:
            log_tensorboard_graph(self.tb, model, imgsz)

    def log_model(self, model_path, epoch=0, metadata={}):
        # Log model to all loggers
        # NOTE(review): mutable default {} is shared across calls; harmless
        # here because it is only read, but worth replacing with None.
        if self.wandb:
            art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata)
            art.add_file(str(model_path))
            wandb.log_artifact(art)

    def update_params(self, params):
        # Update the paramters logged
        if self.wandb:
            wandb.run.config.update(params, allow_val_change=True)
def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
    # Log model graph to TensorBoard
    try:
        if isinstance(imgsz, int):  # expand a single size to (h, w)
            imgsz = (imgsz, imgsz)
        param = next(model.parameters())  # reference for device and dtype
        # Dummy input image (WARNING: must be zeros, not empty)
        im = torch.zeros((1, 3, *imgsz)).to(param.device).type_as(param)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress jit trace warning
            traced = torch.jit.trace(de_parallel(model), im, strict=False)
            tb.add_graph(traced, [])
    except Exception as e:
        LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}')
def web_project_name(project):
    # Convert local project name to web project name
    if not project.startswith('runs/train'):
        return project
    if project.endswith('-cls'):
        suffix = '-Classify'
    elif project.endswith('-seg'):
        suffix = '-Segment'
    else:
        suffix = ''
    return f'YOLOv5{suffix}'
| [
"yyalex@yandex.ru"
] | yyalex@yandex.ru |
f24fb3d1131a4f965e82af8be1b81f64b58efa79 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2988/40186/309199.py | 0ca7338779c915c5aed6534e33cb13897f6eeb2d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | n = int(input())
# `s` replaces the original name `str`, which shadowed the builtin str type.
# (The string length n was already read from stdin on the previous line.)
s = input()
m = int(input())  # 1-based start position of the suffix to print
# Slicing replaces the original quadratic character-by-character += loop.
print(s[m - 1:])
"1069583789@qq.com"
] | 1069583789@qq.com |
0fae3c9d16697b87593802275bb1bc06d00ee552 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_027/ch176_2020_08_14_13_50_25_526217.py | d1d82bafaf5a209399bad7eb71c499da51816aeb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | def imprime_grade(n: int):
for i in range(1, n+1):
if i > 1:
print("|" + " |"*n)
for j in range(1, n+1):
end = "-" if j < n else "-+
"
print("+", end=end) | [
"you@example.com"
] | you@example.com |
a06a6d8f27c8e05ded71ea8fda920a11249ec506 | a2b7b0947c59acfcf78bc8471b1fb548c82de723 | /plot.py | 94660560fadce3e315445f53ae180490bda5ad52 | [] | no_license | Lilith5th/TNO | bcf0a4283b7c85ac38b793d7c91873f771e0f7f2 | 0e38b37d0d9c5e9fd415926b71fb59755da4c9b9 | refs/heads/master | 2023-02-14T04:49:01.233573 | 2021-01-11T09:15:43 | 2021-01-11T09:15:43 | 323,447,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,979 | py | import pandas as pd
import json
import matplotlib.pyplot as plt
from calendar import monthrange
from pandas import json_normalize
def cleanupJson():
cols = ['time', 'sensor_values.radiationSky','sensor_values.radiationIndoor']
data = []
with open('logData2.log') as json_file:
data = json.load(json_file)
nycphil = json_normalize(data=data)
#nycphil = json_normalize(data=data["sensor_values"],record_path=["sensor_values"],meta=["radiationSky", "radiationIndoor", "radiationFacade", "indoorTemp", "externalTemp", "Presence"])
#nycphil.drop(columns=["get_controls","status"])
#nycphil.head(3)
print(nycphil[cols])
pass
def getDATA():
db = {}
dayValues = []
days = []
months = []
newDay = True
newMonth = True
currentYear = 2020
currentMonth = 0
currentDay = 0
with open('logData2.log') as json_file:
data = json.load(json_file)
i = 0
firstDateItem = list(data)[0]["time"]
lastDateItem = list(data)[-1]["time"]
firstMonth = firstDateItem.split(",")[0].split("/")[0]
firstDay = firstDateItem.split(",")[0].split("/")[1]
lastMonth = lastDateItem.split(",")[0].split("/")[0]
lastDay = lastDateItem.split(",")[0].split("/")[1]
for d in data:
time = d["time"]
date = time.split(",")[0]
dateMonth,dateDay = int(date.split("/")[0]),int(date.split("/")[1])
timeOfDay = time.split(",")[1]
timeHH,timeMM = int(timeOfDay.split(":")[0]),int(timeOfDay.split(":")[1])
if i == 0:
currentDay = dateDay
currentMonth = dateMonth
i = 1
if currentDay != dateDay:
days.append({currentDay:dayValues})
dayValues = []
currentDay = currentDay + 1
if currentDay > monthrange(2020,currentMonth)[1]:
currentDay = 1
if currentMonth != dateMonth:
months.append({currentMonth:days})
days = []
currentMonth = currentMonth + 1
if currentMonth > 12:
currentMonth = 1
if timeHH >= 7 and timeHH <= 20:
dayValues.append(dict(time=str("{}:{}".format(timeHH,timeMM)), radiationSky=d['sensor_values']['radiationSky'], radiationFacade=d['sensor_values']['radiationFacade']))
#dbCleaned.append
#(dict(time=str("{}:{}".format(timeHH,timeMM)),
#radiationSky=d['sensor_values']['radiationSky'],
#radiationIndoor=d['sensor_values']['radiationIndoor']))
#dbCleaned.append
#(dict(time=str("{}:{}".format(timeHH,timeMM)),
#externalTemp=d['sensor_values']['externalTemp'],
#indoorTemp=d['sensor_values']['indoorTemp']))
return months
#for p in d['sensor_values']:
# print('Name: ' + p['name'])
# print('Website: ' + p['website'])
# print('From: ' + p['from'])
# print('')
def plotData(months=None):
d = {"sell": [{
"Rate": 0.001425,
"Quantity": 537.27713514
},
{
"Rate": 0.00142853,
"Quantity": 6.59174681
}]}
month = months[0]
for day in month:
values = month[day]
var = day
df = pd.DataFrame(values)
df.plot(x='time')#, y='externalTemp')
#df.plot()
#plt.savefig("test_rasterization{}_{}.jpg".format(month[0],day[0]),
#dpi=400)
print(day)
plt.show()
if __name__ == "__main__":
cleanupJson()
#data = getDATA()
#plotData(data)
| [
"lucano.deskovic@gmail.com"
] | lucano.deskovic@gmail.com |
bf7266aa3902b9eee966c52ecd03b7f4483bdbf9 | 8f7f91b24c83c0d15077d5e719e6aa40ab2ea0d6 | /library/oneview_server_profile.py | f05f9b45b4636f1224eaac378842beb2d7e7b40e | [
"MIT"
] | permissive | bryansullins/baremetalesxi-hpesynergyoneview | 06d45805e811ea1bf2787780770d670a683c0bcd | e4541d02ce1c93bb9a98a07a3a483a9b2ac90bce | refs/heads/master | 2021-04-14T19:41:14.001768 | 2021-02-06T22:17:57 | 2021-02-06T22:17:57 | 249,260,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,455 | py | #!/usr/bin/python
###
# Copyright (2016-2019) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_server_profile
short_description: Manage OneView Server Profile resources
description:
- Manage the servers lifecycle with OneView Server Profiles. On C(present) state, it selects a server hardware
automatically based on the server profile configuration if no server hardware was provided.
version_added: "2.5"
requirements:
- hpOneView >= 5.0.0
author:
- "Chakravarthy Racharla"
- "Camila Balestrin (@balestrinc)"
- "Mariana Kreisig (@marikrg)"
options:
state:
description:
- Indicates the desired state for the Server Profile resource by the end of the playbook execution.
C(present) will ensure data properties are compliant with OneView. This operation will power off the Server
Hardware before configuring the Server Profile. After it completes, the Server Hardware is powered on.
C(absent) will remove the resource from OneView, if it exists.
C(compliant) will make the server profile compliant with its server profile template, when this option was
specified. If there are Offline updates, the Server Hardware is turned off before remediate compliance issues
and turned on after that.
default: present
choices: ['present', 'absent', 'compliant']
data:
description:
- List with Server Profile properties.
required: true
auto_assign_server_hardware:
description:
- Bool indicating whether or not a Server Hardware should be automatically retrieved and assigned to the Server Profile.
When set to true, creates and updates try to ensure that an available Server Hardware is assigned to the Server Profile.
When set to false, if no Server Hardware is specified during creation, the profile is created as 'unassigned'. If the
profile already has a Server Hardware assigned to it and a serverHardwareName or serverHardwareUri is specified as None,
the Server Profile will have its Server Hardware unassigned.
default: True
choices: [True, False]
params:
description:
- Dict with query parameters.
required: False
notes:
- "For the following data, you can provide either a name or a URI: enclosureGroupName or enclosureGroupUri,
osDeploymentPlanName or osDeploymentPlanUri (on the osDeploymentSettings), networkName or networkUri (on the
connections list), volumeName or volumeUri (on the volumeAttachments list), volumeStoragePoolName or
volumeStoragePoolUri (on the volumeAttachments list), volumeStorageSystemName or volumeStorageSystemUri (on the
volumeAttachments list), serverHardwareTypeName or serverHardwareTypeUri, enclosureName or enclosureUri,
firmwareBaselineName or firmwareBaselineUri (on the firmware), sasLogicalJBODName or sasLogicalJBODUri (on
the sasLogicalJBODs list) and initialScopeNames or initialScopeUris"
- "If you define the volumeUri as null in the volumeAttachments list, it will be understood that the volume
does not exist, so it will be created along with the server profile. Be warned that every time this option
is executed it will always be understood that a new volume needs to be created, so this will not be idempotent.
It is strongly recommended to ensure volumes with Ansible and then assign them to the desired server profile.
does not exists, so it will be created along with the server profile"
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Create a Server Profile from a Server Profile Template with automatically selected hardware
oneview_server_profile:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
state: present
data:
name: Web-Server-L2
# You can choose either server_template or serverProfileTemplateUri to inform the Server Profile Template
# serverProfileTemplateUri: "/rest/server-profile-templates/31ade62c-2112-40a0-935c-2f9450a75198"
server_template: Compute-node-template
# You can inform a server_hardware or a serverHardwareUri. If any hardware was informed, it will try
# get one available automatically
# server_hardware: Encl1, bay 12
# serverHardwareUri: /rest/server-hardware/30303437-3933-4753-4831-30335835524E
# You can choose either serverHardwareTypeUri or serverHardwareTypeName to inform the Server Hardware Type
# serverHardwareTypeUri: /rest/server-hardware-types/BCAB376E-DA2E-450D-B053-0A9AE7E5114C
# serverHardwareTypeName: SY 480 Gen9 1
# You can choose either enclosureName or enclosureUri to inform the Enclosure
# enclosureUri: /rest/enclosures/09SGH100Z6J1
enclosureName: 0000A66102
sanStorage:
hostOSType: Windows 2012 / WS2012 R2
manageSanStorage: true
volumeAttachments:
- id: 1
# You can choose either volumeName or volumeUri to inform the Volumes
# volumeName: DemoVolume001
volumeUri: /rest/storage-volumes/BCAB376E-DA2E-450D-B053-0A9AE7E5114C
# You can choose either volumeStoragePoolUri or volumeStoragePoolName to inform the Volume Storage Pool
# volumeStoragePoolName: FST_CPG2
volumeStoragePoolUri: /rest/storage-pools/30303437-3933-4753-4831-30335835524E
# You can choose either volumeStorageSystemUri or volumeStorageSystemName to inform the Volume Storage
# System
# volumeStorageSystemName: ThreePAR7200-2127
volumeStorageSystemUri: /rest/storage-systems/TXQ1000307
lunType: 'Auto'
storagePaths:
- isEnabled: true
connectionId: 1
storageTargetType: Auto
- isEnabled: true
connectionId: 2
storageTargetType: Auto
delegate_to: localhost
- debug: var=server_profile
- debug: var=serial_number
- debug: var=server_hardware
- debug: var=compliance_preview
- debug: var=created
- name: Create a Server Profile with connections
oneview_server_profile:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
data:
name: server-profile-with-connections
connectionSettings:
connections:
- id: 1
name: connection1
functionType: Ethernet
portId: Auto
requestedMbps: 2500
networkName: eth-demo
delegate_to: localhost
- name: Unassign Server Hardware from Server Profile
oneview_server_profile:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
# This is required for unassigning a SH, or creating a SP and not auto-assigning a SH
auto_assign_server_hardware: False
data:
name: server-profile-with-sh
# Specify a blank serverHardwareName or serverHardwareUri when auto_assign_server_hardware is False to unassign a SH
serverHardwareName:
delegate_to: localhost
- name : Remediate compliance issues
oneview_server_profile:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
state: compliant
data:
name: Web-Server-L2
delegate_to: localhost
- name : Remove the server profile
oneview_server_profile:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
state: absent
data:
name: Web-Server-L2
delegate_to: localhost
'''
RETURN = '''
server_profile:
description: Has the OneView facts about the Server Profile.
returned: On states 'present' and 'compliant'.
type: dict
serial_number:
description: Has the Server Profile serial number.
returned: On states 'present' and 'compliant'.
type: dict
server_hardware:
description: Has the OneView facts about the Server Hardware.
returned: On states 'present' and 'compliant'.
type: dict
compliance_preview:
description:
Has the OneView facts about the manual and automatic updates required to make the server profile
consistent with its template.
returned: On states 'present' and 'compliant'.
type: dict
created:
description: Indicates if the Server Profile was created.
returned: On states 'present' and 'compliant'.
type: bool
'''
import time
from copy import deepcopy
from ansible.module_utils.oneview import (OneViewModule,
ServerProfileReplaceNamesByUris,
OneViewModuleValueError,
ServerProfileMerger,
OneViewModuleTaskError,
SPKeys,
OneViewModuleException,
compare)
class ServerProfileModule(OneViewModule):
ASSIGN_HARDWARE_ERROR_CODES = ['AssignProfileToDeviceBayError',
'EnclosureBayUnavailableForProfile',
'ProfileAlreadyExistsInServer']
MSG_TEMPLATE_NOT_FOUND = "Informed Server Profile Template '{}' not found"
MSG_HARDWARE_NOT_FOUND = "Informed Server Hardware '{}' not found"
MSG_CREATED = "Server Profile created."
MSG_ALREADY_PRESENT = 'Server Profile is already present.'
MSG_UPDATED = 'Server profile updated'
MSG_DELETED = 'Deleted profile'
MSG_ALREADY_ABSENT = 'Nothing do.'
MSG_REMEDIATED_COMPLIANCE = "Remediated compliance issues"
MSG_ALREADY_COMPLIANT = "Server Profile is already compliant."
MSG_NOT_FOUND = "Server Profile is required for this operation."
MSG_ERROR_ALLOCATE_SERVER_HARDWARE = 'Could not allocate server hardware'
MSG_MAKE_COMPLIANT_NOT_SUPPORTED = "Update from template is not supported for server profile '{}' because it is" \
" not associated with a server profile template."
CONCURRENCY_FAILOVER_RETRIES = 25
argument_spec = dict(
state=dict(choices=['present', 'absent', 'compliant'], default='present'),
data=dict(type='dict', required=True),
params=dict(type='dict', required=False),
auto_assign_server_hardware=dict(type='bool', default=True)
)
def __init__(self):
super(ServerProfileModule, self).__init__(additional_arg_spec=self.argument_spec,
validate_etag_support=True)
self.set_resource_object(self.oneview_client.server_profiles)
self.server_profile_templates = self.oneview_client.server_profile_templates
self.server_hardware = self.oneview_client.server_hardware
self.os_deployment_plans = self.oneview_client.os_deployment_plans
self.server_template = None
def execute_module(self):
self.auto_assign_server_hardware = self.module.params.get('auto_assign_server_hardware')
params = self.module.params.get("params")
self.params = params if params else {}
if self.state == 'present':
created, changed, msg, server_profile = self.__present()
facts = self.__gather_facts()
facts['created'] = created
return dict(
changed=changed, msg=msg, ansible_facts=facts
)
elif self.state == 'absent':
changed, msg = self.__delete_profile()
return dict(
changed=changed, msg=msg
)
elif self.state == "compliant":
changed, msg, server_profile = self.__make_compliant()
return dict(
changed=changed, msg=msg, ansible_facts=self.__gather_facts()
)
def __present(self):
server_template_name = self.data.pop('serverProfileTemplateName', '')
server_hardware_name = self.data.pop('serverHardwareName', '')
changed = False
created = False
ServerProfileReplaceNamesByUris().replace(self.oneview_client, self.data)
if server_hardware_name:
selected_server_hardware = self.__get_server_hardware_by_name(server_hardware_name)
if not selected_server_hardware:
raise OneViewModuleValueError(self.MSG_HARDWARE_NOT_FOUND.format(server_hardware_name))
self.data['serverHardwareUri'] = selected_server_hardware['uri']
if server_template_name:
self.server_template = self.server_profile_templates.get_by_name(server_template_name)
if not self.server_template:
raise OneViewModuleValueError(self.MSG_TEMPLATE_NOT_FOUND.format(server_template_name))
self.data['serverProfileTemplateUri'] = self.server_template.data['uri']
elif self.data.get('serverProfileTemplateUri'):
self.server_template = self.server_profile_templates.get_by_uri(self.data['serverProfileTemplateUri'])
if not self.current_resource:
self.current_resource = self.__create_profile()
changed = True
created = True
msg = self.MSG_CREATED
else:
# This allows unassigning a profile if a SH key is specifically passed in as None
if not self.auto_assign_server_hardware:
server_hardware_uri_exists = False
if 'serverHardwareUri' in self.module.params['data'].keys() or 'serverHardwareName' in self.module.params['data'].keys():
server_hardware_uri_exists = True
if self.data.get('serverHardwareUri') is None and server_hardware_uri_exists:
self.data['serverHardwareUri'] = None
# Auto assigns a Server Hardware to Server Profile if auto_assign_server_hardware is True and no SH uris exist
if not self.current_resource.data.get('serverHardwareUri') and not self.data.get('serverHardwareUri') and self.auto_assign_server_hardware:
self.data['serverHardwareUri'] = self._auto_assign_server_profile()
merged_data = ServerProfileMerger().merge_data(self.current_resource.data, self.data)
self.__validations_for_os_custom_attributes(merged_data, self.current_resource.data)
if not compare(self.current_resource.data, merged_data):
self.__update_server_profile(merged_data)
changed = True
msg = self.MSG_UPDATED
else:
msg = self.MSG_ALREADY_PRESENT
return created, changed, msg, self.current_resource.data
# Removes .mac entries from resource os_custom_attributes if no .mac passed into data params.
# Swaps True values for 'true' string, and False values for 'false' string to avoid common user errors.
def __validations_for_os_custom_attributes(self, merged_data, resource):
if self.data.get('osDeploymentSettings') is None or resource.get('osDeploymentSettings') is None:
return
elif self.data.get('osDeploymentSettings', {}).get('osCustomAttributes') is None:
return
elif resource.get('osDeploymentSettings', {}).get('osCustomAttributes') is None:
return
attributes_merged = merged_data.get('osDeploymentSettings', {}).get('osCustomAttributes', None)
attributes_resource = resource.get('osDeploymentSettings', {}).get('osCustomAttributes', None)
dp_uri = resource.get('osDeploymentSettings', {}).get('osDeploymentPlanUri', None)
dp = self.os_deployment_plans.get_by_uri(dp_uri)
nics = []
if dp:
for parameter in dp.data['additionalParameters']:
if parameter['caType'] == 'nic':
nics.append(parameter['name'])
mac_positions_in_merged_data = self.__find_in_array_of_hashes(attributes_merged, '.mac', -4)
mac_positions_in_resource = self.__find_in_array_of_hashes(attributes_resource, '.mac', -4)
if not mac_positions_in_merged_data:
for index in sorted(mac_positions_in_resource, reverse=True):
if attributes_resource[index].get('name').split('.')[0] in nics:
del attributes_resource[index]
if attributes_merged:
for attribute in attributes_merged:
if attribute['value'] is True:
attribute['value'] = 'true'
elif attribute['value'] is False:
attribute['value'] = 'false'
# Searches for a key or suffix of a key inside an array of hashes. The search looks for {'name': <key>} pairs
# inside the array.
# Returns an array containing the positions of matches.
def __find_in_array_of_hashes(self, array_of_hashes, key, part=None):
matches = []
for position in range(0, len(array_of_hashes)):
attribute_name = array_of_hashes[position].get('name', None)
if attribute_name and attribute_name[part:] == key:
matches.append(position)
return matches
def __update_server_profile(self, profile_with_updates):
self.module.log(msg="Updating Server Profile")
# These removes are necessary in case SH associated to the SP is being changed
if self.data.get('enclosureUri') is None:
profile_with_updates.pop('enclosureUri', None)
if self.data.get('enclosureBay') is None:
profile_with_updates.pop('enclosureBay', None)
# Some specific SP operations require the SH to be powered off. This method attempts
# the update, and in case of failure mentioning powering off the SH, a Power off on
# the SH is attempted, followed by the update operation again and a Power On.
try:
self.current_resource.update(profile_with_updates)
except OneViewModuleException as exception:
error_msg = '; '.join(str(e) for e in exception.args)
power_on_msg = 'Some server profile attributes cannot be changed while the server hardware is powered on.'
if power_on_msg in error_msg:
self.module.log("Update failed due to powered on Server Hardware. Powering off before retrying.")
time.sleep(10) # sleep timer to avoid timing issues after update operation failed
# When reassigning Server Hardwares, both the original and the new SH should be set to OFF
self.__set_server_hardware_power_state(self.current_resource.data['serverHardwareUri'], 'Off')
self.__set_server_hardware_power_state(profile_with_updates['serverHardwareUri'], 'Off')
self.module.log("Retrying update operation after server power off")
self.current_resource.update(profile_with_updates)
self.module.log("Powering on the server hardware after update")
self.__set_server_hardware_power_state(self.current_resource.data['serverHardwareUri'], 'On')
else:
raise OneViewModuleException(error_msg)
def __create_profile(self):
tries = 0
self.__remove_inconsistent_data()
while tries < self.CONCURRENCY_FAILOVER_RETRIES:
try:
tries += 1
server_hardware_uri = self._auto_assign_server_profile()
if server_hardware_uri:
self.module.log(msg="Power off the Server Hardware before create the Server Profile")
self.__set_server_hardware_power_state(server_hardware_uri, 'Off')
# Build the data to create a new server profile based on a template if informed
server_profile = self.__build_new_profile_data(server_hardware_uri)
self.module.log(msg="Request Server Profile creation")
return self.resource_client.create(server_profile, **self.params)
except OneViewModuleTaskError as task_error:
self.module.log("Error code: {} Message: {}".format(str(task_error.error_code), str(task_error.msg)))
if task_error.error_code in self.ASSIGN_HARDWARE_ERROR_CODES:
# if this is because the server is already assigned, someone grabbed it before we assigned,
# ignore and try again
# This waiting time was chosen empirically and it could differ according to the hardware.
time.sleep(10)
else:
raise task_error
raise OneViewModuleException(self.MSG_ERROR_ALLOCATE_SERVER_HARDWARE)
def __build_new_profile_data(self, server_hardware_uri):
server_profile_data = deepcopy(self.data)
if self.server_template:
self.module.log(msg="Get new Profile from template")
server_profile_template = self.server_template.get_new_profile()
server_profile_template.update(server_profile_data)
server_profile_data = server_profile_template
if server_hardware_uri:
server_profile_data['serverHardwareUri'] = server_hardware_uri
return server_profile_data
def __remove_inconsistent_data(self):
def is_virtual_or_physical(defined_type):
return defined_type == 'Virtual' or defined_type == 'Physical'
# Remove the MAC from connections when MAC type is Virtual or Physical
mac_type = self.data.get(SPKeys.MAC_TYPE, None)
if mac_type and is_virtual_or_physical(mac_type):
for conn in self.data.get(SPKeys.CONNECTIONS) or []:
conn.pop(SPKeys.MAC, None)
# Remove the UUID when Serial Number Type is Virtual or Physical
serial_number_type = self.data.get(SPKeys.SERIAL_NUMBER_TYPE, None)
if serial_number_type and is_virtual_or_physical(serial_number_type):
self.data.pop(SPKeys.UUID, None)
self.data.pop(SPKeys.SERIAL_NUMBER, None)
# Remove the WWPN and WWNN when WWPN Type is Virtual or Physical
for conn in self.data.get(SPKeys.CONNECTIONS) or []:
wwpn_type = conn.get(SPKeys.WWPN_TYPE, None)
if is_virtual_or_physical(wwpn_type):
conn.pop(SPKeys.WWNN, None)
conn.pop(SPKeys.WWPN, None)
# Remove the driveNumber from the Controllers Drives
if SPKeys.LOCAL_STORAGE in self.data and self.data[SPKeys.LOCAL_STORAGE]:
for controller in self.data[SPKeys.LOCAL_STORAGE].get(SPKeys.CONTROLLERS) or []:
for drive in controller.get(SPKeys.LOGICAL_DRIVES) or []:
drive.pop(SPKeys.DRIVE_NUMBER, None)
# Remove the Lun when Lun Type from SAN Storage Volume is Auto
if SPKeys.SAN in self.data and self.data[SPKeys.SAN]:
if SPKeys.VOLUMES in self.data[SPKeys.SAN]:
for volume in self.data[SPKeys.SAN].get(SPKeys.VOLUMES) or []:
if volume.get(SPKeys.LUN_TYPE) == 'Auto':
volume.pop(SPKeys.LUN, None)
def __get_available_server_hardware_uri(self):
if self.server_template:
enclosure_group = self.server_template.data.get('enclosureGroupUri', '')
server_hardware_type = self.server_template.data.get('serverHardwareTypeUri', '')
else:
enclosure_group = self.data.get('enclosureGroupUri', '')
server_hardware_type = self.data.get('serverHardwareTypeUri', '')
if not enclosure_group and not server_hardware_type:
return
self.module.log(msg="Finding an available server hardware")
available_server_hardware = self.resource_client.get_available_servers(
enclosureGroupUri=enclosure_group,
serverHardwareTypeUri=server_hardware_type)
# targets will list empty bays. We need to pick one that has a server
index = 0
server_hardware_uri = None
while not server_hardware_uri and index < len(available_server_hardware):
server_hardware_uri = available_server_hardware[index]['serverHardwareUri']
index = index + 1
self.module.log(msg="Found available server hardware: '{}'".format(server_hardware_uri))
return server_hardware_uri
def __delete_profile(self):
if not self.current_resource:
return False, self.MSG_ALREADY_ABSENT
if self.current_resource.data.get('serverHardwareUri'):
self.__set_server_hardware_power_state(self.current_resource.data['serverHardwareUri'],
'Off')
self.current_resource.delete()
return True, self.MSG_DELETED
def __make_compliant(self):
changed = False
msg = self.MSG_ALREADY_COMPLIANT
if not self.current_resource.data.get('serverProfileTemplateUri'):
self.module.log("Make the Server Profile compliant is not supported for this profile")
self.module.fail_json(msg=self.MSG_MAKE_COMPLIANT_NOT_SUPPORTED.format(
self.current_resource.data['name']))
elif self.current_resource.data['templateCompliance'] != 'Compliant':
self.module.log(
"Get the preview of manual and automatic updates required to make the server profile consistent "
"with its template.")
compliance_preview = self.current_resource.get_compliance_preview()
self.module.log(str(compliance_preview))
is_offline_update = compliance_preview.get('isOnlineUpdate') is False
if is_offline_update:
self.module.log(msg="Power off the server hardware before update from template")
self.__set_server_hardware_power_state(
self.current_resource.data['serverHardwareUri'], 'Off')
self.module.log(msg="Updating from template")
self.current_resource.patch('replace', '/templateCompliance', 'Compliant')
if is_offline_update:
self.module.log(msg="Power on the server hardware after update from template")
self.__set_server_hardware_power_state(
self.current_resource.data['serverHardwareUri'], 'On')
changed = True
msg = self.MSG_REMEDIATED_COMPLIANCE
return changed, msg, self.current_resource.data
def __gather_facts(self):
server_hardware = None
if self.current_resource.data.get('serverHardwareUri'):
server_hardware_by_uri = self.server_hardware.get_by_uri(
self.current_resource.data['serverHardwareUri'])
if server_hardware_by_uri:
server_hardware = server_hardware_by_uri.data
compliance_preview = None
if self.current_resource.data.get('serverProfileTemplateUri'):
compliance_preview = self.current_resource.get_compliance_preview()
facts = {
'serial_number': self.current_resource.data.get('serialNumber'),
'server_profile': self.current_resource.data,
'server_hardware': server_hardware,
'compliance_preview': compliance_preview,
'created': False
}
return facts
def __get_server_hardware_by_name(self, server_hardware_name):
server_hardwares = self.server_hardware.get_by('name', server_hardware_name)
return server_hardwares[0] if server_hardwares else None
def __set_server_hardware_power_state(self, hardware_uri, power_state='On'):
if hardware_uri is not None:
hardware = self.server_hardware.get_by_uri(hardware_uri)
if power_state in ['On']:
hardware.update_power_state(
dict(powerState='On', powerControl='MomentaryPress'))
else:
hardware.update_power_state(
dict(powerState='Off', powerControl='PressAndHold'))
def _auto_assign_server_profile(self):
server_hardware_uri = self.data.get('serverHardwareUri')
if not server_hardware_uri and self.auto_assign_server_hardware:
# find servers that have no profile, matching Server hardware type and enclosure group
self.module.log(msg="Get an available Server Hardware for the Profile")
server_hardware_uri = self.__get_available_server_hardware_uri()
return server_hardware_uri
def main():
ServerProfileModule().run()
if __name__ == '__main__':
main()
| [
"bsullin@rei.com"
] | bsullin@rei.com |
4736ccf46a33004246d425f53a0427cf1bd74a87 | 3100013f7f7aa8a8291527e2e832200912b92c55 | /10.函数_by/2.内部函数_by/内部函数_by.py | 195bfde2048f4570e50500f2c6bff5d16fd85328 | [] | no_license | baiye1228/vscode_studylist | f609c379cdade4f2c61bbe7881cb500cc5cfd491 | a53c080861e1b618ed3037a6e8e3c5af06acbe3c | refs/heads/master | 2022-12-17T11:21:29.896557 | 2020-09-18T09:41:45 | 2020-09-18T09:41:45 | 281,262,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | def func():
#声明变量
n=100
list1=[3,6,9,4]
#声明内部函数
def inner_func():
nonlocal n
#对元素加5操作
for index,i in enumerate(list1):
list1[index]=i+5
list1.sort()
n+=101
inner_func()
print(list1,n)
func()
'''
特点:
1.可以访问外部函数的变量
2.内部函数可以修改外部函数的可变类型的变量,如:list1
3.内部函数修改全局的不可变变量时,需要在内部函数声明: global 变量名
内部函数修改外部函数的不可变变量时,需要在内部函数中声明:nonlocal 变量名
4.locals() 查看本地变量有哪些,以字典形式输出
globals() 查看全局变量有哪些,以字典形式输出
''' | [
"baiyebzx1228@gmail.com"
] | baiyebzx1228@gmail.com |
6d941d57a643281b395f760262b236f1cde01bab | 72bf58e646d31d915403da0fd9fbf4ab54affdde | /amboro_store/settings.py | c84af598bc65f15d668c230a4f6bd223be3bb4c4 | [] | no_license | JohannQuispe/ecommerce | b3f1ccb102c31c5c6640352e735afe9e08859623 | 37aa66373118c1e5f4ed53b853ac9e680766523d | refs/heads/master | 2022-12-09T22:59:06.438155 | 2020-08-24T06:12:26 | 2020-08-24T06:12:26 | 286,192,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | """
Django settings for amboro_store project.
Generated by 'django-admin startproject' using Django 2.2.14.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ud$%8m&%*+v$uvkq)1mqn0z2!9h@w@dvrpb&ms$)kgp@7wtd6v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'users',
'carts',
'orders',
'charges',
'products',
'categories',
'billing_profiles',
'shipping_addresses',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'amboro_store.urls'
AUTH_USER_MODEL ='users.User'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'amboro_store.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'amboro_store',
'USER': 'postgres',
'PASSWORD': 'passwd',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Email (Gmail SMTP over STARTTLS) and Stripe configuration.
# SECURITY NOTE(review): real account credentials and Stripe API keys are
# hardcoded below, and `config` from python-decouple is imported but never
# used -- these values should be loaded from the environment instead,
# e.g. EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD').
from decouple import config
EMAIL_HOST = 'smtp.googlemail.com'
EMAIL_PORT = '587'
EMAIL_HOST_USER = 'jquis812@gmail.com'
EMAIL_HOST_PASSWORD = 'jb7803105JB'
EMAIL_USE_TLS = True
STRIPE_PUBLIC_KEY = 'pk_test_51HImxHGU4eiL1A64tu8vUcXntO6aRiU836NBsShkl1hSWKnKcMYg8YqulUBSasf4rRrlTUdPP71k0obm1YxMEyrv00YYw1xrGE'
STRIPE_PRIVATE_KEY = 'sk_test_51HImxHGU4eiL1A640dpmLiI6zvqdULk7vyilaFghUBichHxttlj2wnfx3Lb7BJLsy3cCooPK1k0rAmqqyxFC4hfs00ue1UYSC6'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"jbquispe@hotmail.com"
] | jbquispe@hotmail.com |
57046f09bb8de7002a9fb8f4ff1f05dc12a753f9 | c1c20624df477d7c9249bb8b1369849ad5fac3bb | /dotfiles/.xkeysnail.config.py | 2511f708f40645ed8059117272ba3ccddcaa801c | [] | no_license | hitswint/.emacs.d | c0b3144d82f9b56013bbd4d6cd987ff1b56e2172 | 769c60ac9d7477e35a7d54eca151822cff2c97b0 | refs/heads/master | 2023-09-01T18:15:32.567844 | 2023-08-28T13:26:00 | 2023-08-28T13:26:00 | 34,164,740 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,711 | py | # -*- coding: utf-8 -*-
import re
from xkeysnail.transform import *
import os
def rofi_not_running():
    """Return True when no rofi process exists (``pidof`` exits non-zero)."""
    exit_status = os.system("pidof rofi > /dev/null")
    return exit_status != 0
# * [Global modemap] Change modifier keys as in xmodmap
define_modmap({Key.CAPSLOCK: Key.LEFT_CTRL,
# Poker键盘,交换Escape和grave
# Key.ESC: Key.GRAVE,
# Key.GRAVE: Key.ESC
})
# * [Conditional modmap] Change modifier keys in certain applications
# define_conditional_modmap(re.compile(r'Emacs'), {
# Key.RIGHT_CTRL: Key.ESC,
# })
# * [Multipurpose modmap] Give a key two meanings.
# A normal key when pressed and released, and a modifier key when held down with another key. See Xcape, Carabiner and caps2esc for ideas and concept.
# define_multipurpose_modmap(
# # Enter is enter when pressed and released. Control when held down.
# # {Key.ENTER: [Key.ENTER, Key.RIGHT_CTRL]}
# # Capslock is escape when pressed and released. Control when held down.
# {Key.CAPSLOCK: [Key.ESC, Key.LEFT_CTRL]}
# # To use this example, you can't remap capslock with define_modmap.
# )
# * Keybindings for Firefox/Chromium
firefox_dict = {
# Ctrl+Alt+j/k to switch next/previous tab
K("M-n"): K("C-TAB"),
K("M-p"): K("C-Shift-TAB"),
# Type C-j to focus to the content
K("C-i"): K("C-f6")
# very naive "Edit in editor" feature (just an example)
# K("C-o"): [K("C-a"), K("C-c"), launch(["gedit"]), sleep(0.5), K("C-v")]
}
define_keymap(
lambda wm_class: wm_class and wm_class[-1] in ("firefox", "Chromium") and rofi_not_running(),
firefox_dict,
"Firefox and Chromium")
# * Keybindings for Qpdfview
define_keymap(
lambda wm_class: wm_class and wm_class[-1] in ("qpdfview") and rofi_not_running(),
{
K("q"): [K("q"), K("C-f"), K("esc")],
K("C-comma"): [K("M-Shift-m"), K("M-t")],
K("C-j"): [K("enter"), K("M-Shift-m")],
K("C-apostrophe"): [K("M-Shift-m"), K("F6"), K("M-Shift-m")],
},
"Qpdfview")
# * Keybindings for Zeal https://github.com/zealdocs/zeal/
# define_keymap(re.compile("Zeal"), {
# # Ctrl+s to focus search area
# K("C-s"): K("C-k"),
# }, "Zeal")
# * Emacs-like keybindings in non-Emacs applications
emacs_dict = {
# Cursor
K("C-b"): with_mark(K("left")),
K("C-f"): with_mark(K("right")),
K("C-p"): with_mark(K("up")),
K("C-n"): with_mark(K("down")),
K("C-h"): with_mark(K("backspace")),
# Forward/Backward word
K("M-b"): with_mark(K("C-left")),
K("M-f"): with_mark(K("C-right")),
# Beginning/End of line
K("C-a"): with_mark(K("home")),
K("C-e"): with_mark(K("end")),
# Page up/down
K("M-v"): with_mark(K("page_up")),
K("C-v"): with_mark(K("page_down")),
# Beginning/End of file
K("M-Shift-comma"): with_mark(K("C-home")),
K("M-Shift-dot"): with_mark(K("C-end")),
# Newline
K("C-m"): K("enter"),
K("C-j"): K("enter"),
K("C-o"): [K("enter"), K("left")],
# Copy
K("C-w"): [K("C-x"), set_mark(False)],
K("M-w"): [K("C-c"), set_mark(False)],
K("C-y"): [K("C-v"), set_mark(False)],
# Delete
K("C-d"): [K("delete"), set_mark(False)],
K("M-d"): [K("C-delete"), set_mark(False)],
# Kill line
K("C-k"): [K("Shift-end"), K("C-x"),
set_mark(False)],
# Undo
K("C-slash"): [K("C-z"), set_mark(False)],
K("C-M-slash"): [K("C-y"), set_mark(False)],
K("C-Shift-ro"): K("C-z"),
# Mark
K("C-semicolon"): set_mark(True),
K("C-M-semicolon"): with_or_set_mark(K("C-right")),
# Search
K("C-s"): K("F3"),
K("C-r"): K("Shift-F3"),
K("M-Shift-key_5"): K("C-h"),
# Cancel
K("C-g"): [K("esc"), set_mark(False)],
# Escape
K("C-q"): escape_next_key,
# Menu
K("Super-Shift-o"): K("Compose"),
# C-x YYY
K("C-x"): {
# C-x h (select all)
K("h"): [K("C-home"), K("C-a"),
set_mark(True)],
# C-x C-f (open)
K("C-f"): K("C-o"),
# C-x C-s (save)
K("C-s"): K("C-s"),
# C-x k (kill tab)
K("k"): K("C-f4"),
# C-x C-c (exit)
K("C-c"): K("C-q"),
# cancel
K("C-g"): pass_through_key,
# C-x u (undo)
K("u"): [K("C-z"), set_mark(False)],
}}
define_keymap(
lambda wm_class: wm_class and wm_class[-1] not in ("Emacs", "URxvt", "Vncviewer", "Blender", "scrcpy", "VirtualBox Machine", "org.remmina.Remmina") and wm_class[1] not in ("winword.exe") and rofi_not_running(),
emacs_dict,
"Emacs-like keys")
# * Remove Alt tip with win in Office/Wps
# define_conditional_multipurpose_modmap(
# lambda wm_class: wm_class and wm_class[1] in ("winword.exe", "excel.exe", "powerpnt.exe", "wps", "et", "wpp"),
# {
# Key.LEFT_ALT: [Key.LEFT_META, Key.LEFT_ALT],
# })
# dict_disable_alt = {
# K("M-b"): [with_mark(K("C-left"))],
# K("M-f"): [with_mark(K("C-right"))],
# K("M-v"): [with_mark(K("page_up"))],
# K("M-Shift-comma"): [with_mark(K("C-home"))],
# K("M-Shift-dot"): [with_mark(K("C-end"))],
# K("M-w"): [K("C-c"), set_mark(False)],
# K("M-d"): [K("C-delete"), set_mark(False)],
# K("C-M-slash"): [K("C-y"), set_mark(False)],
# K("C-M-semicolon"): [with_or_set_mark(K("C-right"))],
# K("M-Shift-key_5"): [K("C-h")]}
# emacs_dict_disable_alt = emacs_dict.copy()
# emacs_dict_disable_alt.update(dict_disable_alt)
# define_keymap(
# lambda wm_class: wm_class and wm_class[1] in ("winword.exe", "excel.exe", "powerpnt.exe", "wps", "et", "wpp"),
# emacs_dict_disable_alt,
# "Emacs-like keys without alt")
# * Keybindings for Remmina
remmina_dict = emacs_dict.copy()
remmina_dict.update(firefox_dict)
define_keymap(
lambda wm_class: wm_class and wm_class[-1] in ("org.remmina.Remmina",) and wm_class[0] in ("Remmina Remote Desktop Client") and rofi_not_running(),
remmina_dict,
"Remmina")
# * Keybindings for pdfviewer
define_keymap(
lambda wm_class: wm_class and wm_class[-1] in ("llpp", "Wpspdf") and rofi_not_running(),
{
K("k"): with_mark(K("up")),
K("j"): with_mark(K("down"))
},
"pdfviewer")
# * Keybindings for URxvt
define_keymap(
lambda wm_class: wm_class and wm_class[-1] in ("URxvt") and rofi_not_running(),
{
K("C-Super-enter"): K("Shift-down"),
K("C-Super-TAB"): K("Shift-right"),
K("C-Super-grave"): K("Shift-left"),
},
"URxvt")
# * Keybindings for Tab switch
define_keymap(
lambda wm_class: wm_class and wm_class[-1] in ("firefox", "Chromium", "qpdfview", "Wps", "Et", "Wpp", "Wpspdf", "Wpsoffice") and rofi_not_running(),
{
K("C-grave"): K("C-Shift-TAB"),
},
"Tab")
| [
"wgq_hit@126.com"
] | wgq_hit@126.com |
580336d9d0573c43f6d5dba9ca428534a337b584 | 4ccc93c43061a18de9064569020eb50509e75541 | /ios/chrome/ios_chrome_tests.gyp | 4c14d68846eb05c5f92b28291140882a506cdb1a | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | SaschaMester/delicium | f2bdab35d51434ac6626db6d0e60ee01911797d7 | b7bc83c3b107b30453998daadaeee618e417db5a | refs/heads/master | 2021-01-13T02:06:38.740273 | 2015-07-06T00:22:53 | 2015-07-06T00:22:53 | 38,457,128 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | gyp | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'ios_chrome_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../components/components.gyp:bookmarks_test_support',
'../../components/components.gyp:enhanced_bookmarks_test_support',
'../../net/net.gyp:net_test_support',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../third_party/ocmock/ocmock.gyp:ocmock',
'../ios_tests.gyp:test_support_ios',
'../web/ios_web.gyp:ios_web',
'../web/ios_web.gyp:test_support_ios_web',
'ios_chrome.gyp:ios_chrome_app',
'ios_chrome.gyp:ios_chrome_browser',
'ios_chrome.gyp:ios_chrome_common',
'ios_chrome_test_support',
],
'mac_bundle_resources': [
'browser/ui/native_content_controller_test.xib'
],
'sources': [
'app/safe_mode_util_unittest.cc',
'browser/chrome_url_util_unittest.mm',
'browser/crash_loop_detection_util_unittest.mm',
'browser/enhanced_bookmarks/bookmark_image_service_ios_unittest.mm',
'browser/experimental_flags_unittest.mm',
'browser/geolocation/CLLocation+XGeoHeaderTest.mm',
'browser/geolocation/location_manager_unittest.mm',
'browser/install_time_util_unittest.mm',
'browser/installation_notifier_unittest.mm',
'browser/memory/memory_wedge_unittest.cc',
'browser/net/image_fetcher_unittest.mm',
'browser/net/metrics_network_client_unittest.mm',
'browser/net/retryable_url_fetcher_unittest.mm',
'browser/snapshots/snapshot_cache_unittest.mm',
'browser/snapshots/snapshots_util_unittest.mm',
'browser/translate/translate_service_ios_unittest.cc',
'browser/ui/commands/set_up_for_testing_command_unittest.mm',
'browser/ui/native_content_controller_unittest.mm',
'browser/ui/ui_util_unittest.mm',
'browser/ui/uikit_ui_util_unittest.mm',
'common/string_util_unittest.mm',
],
'actions': [
{
'action_name': 'copy_ios_chrome_test_data',
'variables': {
'test_data_files': [
'test/data/webdata/bookmarkimages',
],
'test_data_prefix': 'ios/chrome',
},
'includes': [ '../../build/copy_test_data_ios.gypi' ]
},
],
'includes': ['ios_chrome_resources_bundle.gypi'],
},
{
'target_name': 'ios_chrome_test_support',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../ui/base/ui_base.gyp:ui_base',
'../../url/url.gyp:url_lib',
'../provider/ios_provider_chrome.gyp:ios_provider_chrome_browser',
'ios_chrome.gyp:ios_chrome_browser',
],
'sources': [
'browser/geolocation/location_manager+Testing.h',
'browser/geolocation/test_location_manager.h',
'browser/geolocation/test_location_manager.mm',
'browser/net/mock_image_fetcher.h',
'browser/net/mock_image_fetcher.mm',
'browser/sync/sync_setup_service_mock.cc',
'browser/sync/sync_setup_service_mock.h',
'test/ios_chrome_unit_test_suite.cc',
'test/ios_chrome_unit_test_suite.h',
'test/run_all_unittests.cc',
'test/testing_application_context.cc',
'test/testing_application_context.h',
],
},
],
}
| [
"g4jc@github.com"
] | g4jc@github.com |
2f9963b5e8c4babf74fc6d9a8e0e0e7a894047c5 | 9f4d5b17ba701e6e9f9ade4441b7aae106c3fd84 | /mordred/Weight.py | 7ac3c7f37def4c167eefb82f583dee7c083f2f5e | [
"BSD-3-Clause"
] | permissive | simonbray/mordred | 55385e37b3f622513e75f00fe21fb7e6d1edf02d | bfb3b0a50fb7f42cd996e091d67c3a3dcc815134 | refs/heads/master | 2020-05-26T04:23:50.856152 | 2018-05-31T07:21:43 | 2018-05-31T07:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | from rdkit.Chem.Descriptors import MolWt, ExactMolWt
from ._base import Descriptor
__all__ = (
"Weight",
)
class Weight(Descriptor):
    r"""molecular weight descriptor.

    :type exact: bool
    :param exact: use RDKit ``ExactMolWt`` instead of the average ``MolWt``

    :type averaged: bool
    :param averaged: averaged by number of atom
    """

    def description(self):
        # Human-readable name, e.g. "averaged exact molecular weight".
        return "{}{}molecular weight".format(
            "averaged " if self._averaged else "",
            "exact " if self._exact else "",
        )

    since = "1.0.0"
    __slots__ = ("_averaged", "_exact")
    # The descriptor is computed on the molecule with explicit hydrogens.
    explicit_hydrogens = True

    @classmethod
    def preset(cls, version):
        # Default descriptor set: exact MW, and exact MW averaged per atom.
        yield cls(True, False)
        yield cls(True, True)

    def __str__(self):
        # "MW" when exact, "aMW" when average; "A" prefix when atom-averaged.
        return "{}{}MW".format("A" if self._averaged else "", "" if self._exact else "a")

    def parameters(self):
        return self._exact, self._averaged

    def __init__(self, exact=True, averaged=False):
        self._averaged = averaged
        self._exact = exact

    def calculate(self):
        # Exact vs. average molecular weight from RDKit.
        w = ExactMolWt(self.mol) if self._exact else MolWt(self.mol)

        if self._averaged:
            # Divide by the atom count for the per-atom average.
            w /= self.mol.GetNumAtoms()

        return w

    rtype = float
| [
"philopon.dependence@gmail.com"
] | philopon.dependence@gmail.com |
837945adfe5f58e0b2985accb3351473b1800bd9 | d1ff6b7feaf22eb7940281a17b823d6899bcfa8c | /python/problems/leetcode/1-two-sum.py | 37fd5066a714d2fe119697190f2cfbb831a6be0c | [] | no_license | sumitkrm/lang-1 | 42e839dae8caf78986a10bb3d635021d45f66593 | c08fdd1556b6dbbdda8ad6210aa0eaa97074ae3b | refs/heads/master | 2021-04-18T10:26:27.466682 | 2019-11-14T07:56:45 | 2019-11-14T07:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | """
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
"""
# function to check for the given sum in the array
def returnIndexPair(arr, sum):
    """Return [i, j] with i < j and arr[i] + arr[j] == sum, or [] if none."""
    print("A =", arr, "n=", sum)
    seen = {}  # value -> 0-based index where it was first seen
    for idx, value in enumerate(arr):
        complement = sum - value
        if complement in seen:
            # Earlier index first, current index second.
            return [seen[complement], idx]
        seen[value] = idx
    return []
A = [3, 3]
n = 6
print(returnIndexPair(A, n))
A = [3, 2, 4]
n = 6
print(returnIndexPair(A, n))
A = [1, 4, 45, 6, 10, -8]
n = 16
print(returnIndexPair(A, n))
A = [1, 2, 4]
n = 6
print(returnIndexPair(A, n))
A = [-3, 4, 3, 90]
n = 0
print(returnIndexPair(A, n))
def twoSum(nums, target):
    """Return indices [i, j] (i < j) of two distinct entries summing to target.

    Fix: the original pre-built a value->index map before scanning, which
    collapsed duplicate values -- e.g. nums=[3, 3], target=6 returned []
    (the file's own comment noted the failing case).  A single pass that
    checks the complement *before* recording the current value handles
    duplicates correctly and keeps the original [first, second] ordering.
    """
    seen = {}  # value -> index of its earliest occurrence
    for index, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], index]
        seen[value] = index
    return []
# arr = [2, 7, 11, 15]
# target = 26
# print (twoSum(arr, target))
# arr = [3, 2, 4]
# target = 6
# print (twoSum(arr, target))
# # This testcase is not passing!!!
# arr = [3, 3]
# target = 6
# print (twoSum(arr, target))
| [
"noreply@github.com"
] | noreply@github.com |
3b0ff7f32b9e070a5249eb81007c35351f16a08b | 0111fa3fb70d54fc746ea0d6e9951560b56a3c84 | /CTCI/c4_treesAndGraphs/bstSeq.py | a5b32fd6cf3610b92fde08d830efeff4877d36a2 | [] | no_license | michlee1337/practice | b1f4c8069573c129e2799eb38f6d0b8cc646282e | 454a78bf69cbecaaeee6af062a201470286542d3 | refs/heads/master | 2021-04-27T04:58:17.030589 | 2020-10-20T20:47:02 | 2020-10-20T20:47:02 | 122,588,699 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | # ugh still borken
class Solution():
    """BST sequences: all insertion orders that would build a given BST.

    Fix: the original rebuilt ``weaves`` from scratch for every
    (left, right) pair and returned only the last pair's weaves (the file's
    own header comment said "still borken"); results are now accumulated
    across all pairs.  The redundant special cases for leaves also folded
    into the general weave, which already handles empty sequences.
    """

    def bstSeq(self, root):
        """Return every value sequence that inserts into this exact BST.

        :param root: node with ``val``, ``left``, ``right`` (or None)
        :return: list of lists of node values
        """
        if root is None:
            # Exactly one way to build an empty tree: insert nothing.
            return [[]]
        left_seqs = self.bstSeq(root.left)
        right_seqs = self.bstSeq(root.right)
        results = []
        # The root must come first; afterwards any interleaving of a left
        # sequence with a right sequence is valid.
        for left in left_seqs:
            for right in right_seqs:
                self.doWeave(left, right, [root.val], results)
        return results

    def doWeave(self, l1, l2, prefix, weaves):
        """Append to ``weaves`` every interleaving of l1 and l2 after prefix."""
        if not l1 or not l2:
            # One side exhausted: only one completion remains.
            weaves.append(prefix + l1 + l2)
        else:
            self.doWeave(l1[1:], l2, prefix + [l1[0]], weaves)
            self.doWeave(l1, l2[1:], prefix + [l2[0]], weaves)
        return weaves
class Node():
    """A simple binary-tree node: a value plus two child links."""

    def __init__(self, val):
        self.val = val      # payload stored at this node
        self.left = None    # left child, or None
        self.right = None   # right child, or None
if __name__=="__main__":
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n6 = Node(6)
n1.left = n2
n1.right = n3
n2.left = n4
n2.right = n5
n3.left = n6
test = Solution()
print(test.bstSeq(n1))
| [
"michlee1337@gmail.com"
] | michlee1337@gmail.com |
9b80f24b60cf7a97705d6d7face0f6a14fab0453 | 5b82fa5f8d98c8fe6fbccae7566e7d9eaa2e7428 | /tests/arbitrage_test.py | 195cb57d48c295f8ee26d019b9b775eee39934ed | [
"MIT"
] | permissive | f0ster/bitcoin-arbitrage | a84325b78920b2850eed7673112786102afa3bb5 | 2c389fca988e6d24f3394adbc67d4a01259aa345 | refs/heads/master | 2020-04-15T03:15:13.794667 | 2013-04-18T01:39:47 | 2013-04-18T01:39:47 | 9,504,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,567 | py | import sys
sys.path.append('src/')
sys.path.append('../src/')
import unittest
import arbitrage
depths1 = {
'BitcoinCentralEUR':
{'asks': [{'amount': 4, 'price': 32.8},
{'amount': 8, 'price': 32.9},
{'amount': 2, 'price': 33.0},
{'amount': 3, 'price': 33.6}],
'bids': [{'amount': 2, 'price': 31.8},
{'amount': 4, 'price': 31.6},
{'amount': 6, 'price': 31.4},
{'amount': 2, 'price': 30}]},
'MtGoxEUR':
{'asks': [{'amount': 1, 'price': 34.2},
{'amount': 2, 'price': 34.3},
{'amount': 3, 'price': 34.5},
{'amount': 3, 'price': 35.0}],
'bids': [{'amount': 2, 'price': 33.2},
{'amount': 3, 'price': 33.1},
{'amount': 5, 'price': 32.6},
{'amount': 10, 'price': 32.3}]}}
depths2 = {
'BitcoinCentralEUR':
{'asks': [{'amount': 4, 'price': 32.8},
{'amount': 8, 'price': 32.9},
{'amount': 2, 'price': 33.0},
{'amount': 3, 'price': 33.6}]},
'MtGoxEUR':
{'bids': [{'amount': 2, 'price': 33.2},
{'amount': 3, 'price': 33.1},
{'amount': 5, 'price': 32.6},
{'amount': 10, 'price': 32.3}]}}
depths3 = {
'BitcoinCentralEUR':
{'asks': [{'amount': 1, 'price': 34.2},
{'amount': 2, 'price': 34.3},
{'amount': 3, 'price': 34.5},
{'amount': 3, 'price': 35.0}]},
'MtGoxEUR':
{'bids': [{'amount': 2, 'price': 33.2},
{'amount': 3, 'price': 33.1},
{'amount': 5, 'price': 32.6},
{'amount': 10, 'price': 32.3}]}}
class TestArbitrage(unittest.TestCase):
    """Tests for Arbitrer.get_profit_for against canned order-book depths.

    NOTE(review): the tests use bare ``assert`` rather than unittest's
    assert methods, so they are silently stripped under ``python -O``;
    also ``depths1`` above is defined but never used by any test.
    """

    def setUp(self):
        # Fresh arbitrer per test; each test injects its own depth fixture.
        self.arbitrer = arbitrage.Arbitrer()

    def test_getprofit1(self):
        # Trade only the best ask/bid levels (indices 0, 0).
        self.arbitrer.depths = depths2
        profit, vol, wb, ws = self.arbitrer.get_profit_for(
            0, 0, 'BitcoinCentralEUR', 'MtGoxEUR')
        # Expect ~0.80 profit on a volume of 2.
        assert(80 == int(profit * 100))
        assert(vol == 2)

    def test_getprofit2(self):
        # Consume deeper levels (ask index 2, bid index 1).
        self.arbitrer.depths = depths2
        profit, vol, wb, ws = self.arbitrer.get_profit_for(
            2, 1, 'BitcoinCentralEUR', 'MtGoxEUR')
        assert(159 == int(profit * 100))
        assert(vol == 5)

    def test_getprofit3(self):
        # depths3 has asks above all bids, so no profitable trade exists.
        self.arbitrer.depths = depths3
        profit, vol, wb, ws = self.arbitrer.get_profit_for(
            2, 1, 'BitcoinCentralEUR', 'MtGoxEUR')
        assert(profit == 0)
        assert(vol == 0)
if __name__ == '__main__':
unittest.main()
| [
"maxime.biais@gmail.com"
] | maxime.biais@gmail.com |
bdea9c72dd10be9794375d3cc3d1d61fea04371d | 490fec2286bbd3241f7b4e7e2520496087ef2271 | /manage.py | 54c58d756cb665d08282278ea8a0579fb24e3844 | [] | no_license | lupeixin/drf_03 | 689018fa14fd75f7be09f64390930b8459c83567 | 6dbb3b0e6883d8450d7d9d856511ece02217f1ee | refs/heads/master | 2022-12-11T21:58:35.381615 | 2020-09-15T00:52:52 | 2020-09-15T00:52:52 | 295,574,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point for administrative tasks."""
    # Make the settings module resolvable before Django gets imported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf_03.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"813094269@qq.com"
] | 813094269@qq.com |
c6bccc7b878d8ad17ad5f51b5c1bd8f96e13bd0f | 1bfe5887d6158b7bc4c57b2b136b9f8b666c7bbd | /UHCF/run.py | f4e16833f92cba7a1a9a89d4ff32448098524c85 | [] | no_license | wenzhiquan/lab | 879c6e835b99f224b2bff0ea0847de81dc488dde | 37cd41244e281b84663d752b72f3ca3b13f3c37f | refs/heads/master | 2021-01-17T08:53:18.717864 | 2016-05-17T08:23:22 | 2016-05-17T08:23:22 | 22,674,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py | #!/env/python
# -*- encoding: utf-8 -*-
"""
@version: 0.1
@author: wenzhiquan
@contact: wenzhiquanr@163.com
@site: http://github.wenzhiquan.com
@software: PyCharm
@file: run.py
@time: 16/1/5 21:43
@description: null
"""
from datetime import datetime
from tools.sortByTime import sortByTime
from tools.combineById import combineById
from tools.divideTrainAndTest import divideTrainAndTest
from tools.timeInterval import timeInterval
from CFU.CFU import CFU
from CFU.promoteCFU import PromoteCFU
from core.UHCF import UHCF
from common.evaluation import Evaluation
from common.recommendation import generaRecommendList
from common.combineCFUAndTHCCF import combine
from common.movieAttr import MovieAttr
from config import config
if __name__ == '__main__':
    # Pipeline driver: each stage below is toggled by a flag in `config`.
    # NOTE: this file uses Python 2 print statements.
    startTime = datetime.now()
    print 'program start......'
    print 'start time :'
    print startTime
    # movieAttr = MovieAttr()
    # movieAttr.commonLabel()
    # timeInterval()
    # Stage 1: split the raw data into train/test sets.
    if config.needDivideTrainAndTest is True:
        divideTrainAndTest()
    # Stage 2: pre-processing (sort records by time, merge per user id).
    if config.needPreSettle is True:
        sortByTime()
        combineById()
    # Stage 3: UHCF - build user preferences, a user similarity matrix,
    # then the recommendation list from it.
    if config.needUHCF is True:
        uhcf = UHCF()
        uhcf.generaUserPrefer()
        uhcf.simCalculate()
        generaRecommendList(config.userSimMatrix)
    # Stage 4: CFU variant (the promoted version; plain CFU left commented).
    if config.needCFU is True:
        # cfu = CFU()
        # cfu.matrix()
        # generaRecommendList()
        cfu = PromoteCFU()
        cfu.iuMatrix()
        # cfu.matrix()
        # generaRecommendList(config.promoteCFUUserSimMatrix)
    # Stage 5: combine the similarity matrices and recommend from the blend.
    if config.needCombine is True:
        combine()
        generaRecommendList(config.combineSimMatrix)
    # Stage 6: evaluation - recall/precision/F-value (MAE currently stubbed
    # to 0), appended as one CSV row per run.
    if config.needEvaluate is True:
        evaluate = Evaluation()
        rap = evaluate.recall_and_precision()
        print "recall: %5.5f%%" % rap[0]
        print "precision: %5.5f%%" % rap[1]
        fvalue = evaluate.fvalue(rap)
        print "F value: %5.5f%%" % fvalue
        mae = 0 # evaluate.MAE()
        print "MAE: %5.5f" % mae
        # diversity = evaluate.diversity()
        # print "diversity: %5.5f%%" % diversity
        outfile = r'result/evaluationResult.csv'
        out = open(outfile, 'a')
        spliter = ','
        out.write(str(config.n) + spliter + str(config.listLength) +
                  spliter + str(config.G) + spliter + str(config.delta) +
                  spliter + str(rap[0])[:7] + '%' + spliter + str(rap[1])[:7] +
                  '%' + spliter + str(fvalue)[:7] + '%' + spliter + str(mae)[:7] + spliter + '\n')
        out.close()
    endTime = datetime.now()
    print 'program finished......'
    print 'finish time :'
    print endTime
    print 'total run time :'
    print endTime - startTime
| [
"wenzhiquanr@163.com"
] | wenzhiquanr@163.com |
c12de7349ec39732017cff9b64ee3015361295e3 | 2a160a8491bccee82c05cd878b11c564b8471583 | /pull_stream/pull_live_stream.py | 9967a0f2937867c9b04c8045606ded1f0b61f33e | [] | no_license | ETalienwx/Bwork_code | 7eba821bd060fe7005d2bd35a5d6490745ec7ed8 | ac557887a23e12f7fd1c9ef6f094e6ee66780902 | refs/heads/master | 2023-02-11T10:19:15.625806 | 2021-01-11T06:16:25 | 2021-01-11T06:16:25 | 319,890,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | # 拉取直播流
import subprocess
import requests
import time
import os
from enum import Enum
class BaseInfo(object):
    """Mixin giving subclasses dict <-> attribute conversion."""

    def from_dict(self, meta: dict):
        """Overwrite every existing attribute from meta.

        Attributes whose key is missing from meta are reset to None;
        keys in meta that are not already attributes are ignored.
        """
        for name in self.__dict__:
            setattr(self, name, meta.get(name, None))

    def to_dict(self) -> dict:
        """Return the instance attributes as a plain dict.

        Direct Enum members are unwrapped to their underlying ``.value``.
        """
        result = {}
        for name, attr in self.__dict__.items():
            if attr.__class__.__base__ is Enum:
                result[name] = attr.value
            else:
                result[name] = attr
        return result
class SrtResult(BaseInfo):
    """Outcome record for one external subprocess invocation."""

    def __init__(self):
        super(SrtResult, self).__init__()
        self.command = None  # command line that was executed
        self.stdout = None   # captured stdout (caller may merge stderr into it)
        self.stderr = None   # decoded stderr, filled on timeout only
        self.timeout = None  # timeout in seconds passed to subprocess
        self.killed = False  # True when the process was killed on timeout
def download_file(url):
    """Grab roughly one second of the live stream into ./input.flv.

    Returns 1 when input.flv ends up non-empty, -1 otherwise.
    NOTE(review): if the request fails before the file is created,
    os.path.getsize below raises FileNotFoundError instead of returning -1.
    """
    print(url)
    # Browser-like UA so the CDN serves the stream.
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"}
    try:
        response = requests.get(url, stream=True, headers=headers)
        with open("input.flv", "wb") as pdf:
            start = time.time()
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    pdf.write(chunk)
                # Stop after ~1 second of wall-clock download time.
                if (time.time() - start) > 1:
                    break
    except Exception as error:
        print("Request stream url error!", error)
    if os.path.getsize("input.flv") > 0:
        print("download file success!")
        return 1
    else:
        print("download file error!")
        return -1
def pull_first_packet():
    """Run ffprobe on input.flv and print the first [PACKET] block.

    ffprobe is killed after 2 seconds; whatever it printed by then
    (stdout and stderr merged) is kept, and the text of the first
    "[PACKET] ... [/PACKET]" section is printed as part of an SrtResult.
    """
    command = "ffprobe -show_packets input.flv"
    args = dict(
        shell=True,
        stderr=subprocess.STDOUT,  # merge stderr into the captured output
        encoding='utf-8',
        timeout=2,
    )
    result = SrtResult()
    result.command = command
    result.timeout = 2
    try:
        result_str = subprocess.check_output(command, **args)
        result.stdout = result_str
    except subprocess.TimeoutExpired as exception:
        # Expected path for a live stream: ffprobe keeps reading packets
        # until the timeout kills it; salvage the partial output.
        result.killed = True
        if exception.stdout is not None:
            result.stdout = exception.stdout.decode('utf-8')
        if exception.stderr is not None:
            result.stderr = exception.stderr.decode('utf-8')
    # Slice out the first packet section; +9 == len("[/PACKET]").
    # NOTE(review): if no "[PACKET]" appears, find() returns -1 and the
    # slice silently yields an arbitrary (possibly empty) substring.
    packet_start = result.stdout.find("[PACKET]")
    packet_end = result.stdout.find("[/PACKET]") + 9
    packer_str = result.stdout[packet_start:packet_end]
    result.stdout = packer_str
    print(result.to_dict())
def main():
    """Download a short sample of the stream, then probe its first packet."""
    # A stream URL that works (note: these signed URLs expire).
    url = "https://d1--cn-gotcha04.bilivideo.com/live-bvc/852011/live_69307_2194433_1500.flv?cdn=cn-gotcha04&expires=1598589796&len=0&oi=3030954244&pt=web&qn=150&trid=7fd77e91987f4e52990406d16ab9243f&sigparams=cdn,expires,len,oi,pt,qn,trid&sign=5e66a350499eefd2bc5c5829f4141706&ptype=0&src=9&sl=2&order=1&platform=web&pSession=aj9z3Mdy-7iCk-4kXr-BcCx-4H5MYx6Z3hJp"
    # An expired/broken stream (kept for manual testing):
    # url = "https://d1--cn-gotcha04.bilivideo.com/live-bvc/208258/live_52926766_1129961_1500.flv?cdn=cn-gotcha04&expires=1597217512&len=0&oi=3030954244&pt=web&qn=150&trid=ed819650419545c09b72800ce7548c57&sigparams=cdn,expires,len,oi,pt,qn,trid&sign=5785ccd89c84f4fd9f188ed63474774d&ptype=0&src=9&sl=3&order=1&platform=web&pSession=kKbe92E4-DeC4-4QT2-penA-8mimYiXc3Ktn"
    res = download_file(url)
    if res == 1:
        pull_first_packet()
    else:
        print("stream expire!")
if __name__ == '__main__':
main()
| [
"wangxuan02@bilibili.com"
] | wangxuan02@bilibili.com |
bfc337766c024645d34b4d882f875b699dde5fb8 | 7acb4e7602b706f26cbf0a5d5a69a08739e23b42 | /geektime/mathbase/chapter6/lesson6_1.py | 68c0c37772069ed48d427c9c2d3db38e070e0dbc | [] | no_license | JasenChu/debugtalk | 2577c8194f8ff10fd90cfc4998e9c27c9d5a9ea7 | 1b6ac9ebaeef88843e5fb732db4ee41872569ade | refs/heads/master | 2020-04-12T09:19:01.810342 | 2018-12-26T04:40:43 | 2018-12-26T04:40:43 | 162,398,792 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # 实现2路归并排序算法
def merge(leftSource=[], rightSource=[]):
    """Merge two already-sorted lists into a single sorted list.

    Both inputs are only read, never mutated, so the shared mutable
    defaults are harmless here.

    :param leftSource: sorted list of comparable items
    :param rightSource: sorted list of comparable items
    :return: new sorted list containing every element of both inputs
    """
    lenLeft = len(leftSource)
    lenRight = len(rightSource)
    merged = []
    li = 0  # read cursor into leftSource
    ri = 0  # read cursor into rightSource
    # Repeatedly take the smaller head element; '<=' keeps the merge stable.
    while li < lenLeft and ri < lenRight:
        if leftSource[li] <= rightSource[ri]:
            merged.append(leftSource[li])
            li += 1
        else:
            merged.append(rightSource[ri])
            ri += 1
    # At most one side still has elements; append the leftovers.
    merged.extend(leftSource[li:])
    merged.extend(rightSource[ri:])
    return merged


def merge_sort(to_sort=[]):
    """Sort a list with top-down 2-way merge sort; returns a new list.

    Fix: the base case now also covers the empty list -- the original
    recursed forever on ``merge_sort([])`` because splitting [] yields
    two empty halves again.
    """
    if len(to_sort) <= 1:
        return to_sort
    mid = len(to_sort) // 2
    left = merge_sort(to_sort[:mid])
    right = merge_sort(to_sort[mid:])
    return merge(left, right)


if __name__ == '__main__':
    print(merge_sort([10, 20, 9, 50, 30, 99, 3]))
| [
"xiaochu698@126.com"
] | xiaochu698@126.com |
f0bf9f11e977825e819d9ebc3d85c48b7b4dc035 | 1769a59bf55b04d358ba10759c596df4139dcd8b | /codeforces/594_div2/integerpoints.py | 849476900e25cf0b9e9f92316f062262022798d8 | [] | no_license | preetmishra/competitive-programming | 39c3b7b21d2b71589bd5e58989ce2f50ce400ecd | 7d014112a2e3f1bb5508c1e03378e13d94292f6c | refs/heads/master | 2021-07-14T15:24:22.417735 | 2020-09-29T15:58:57 | 2020-09-29T16:13:31 | 212,810,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | def count_even_odd(ls):
even, odd = 0, 0
for i in ls:
if i % 2 == 0:
even += 1
else:
odd += 1
return (even, odd)
for _ in range(int(input())):
p = int(input())
ps = list(map(int, input().split()))
q = int(input())
qs = list(map(int, input().split()))
even_p, odd_p = count_even_odd(ps)
even_q, odd_q = count_even_odd(qs)
print((even_p * even_q) + (odd_p * odd_q))
| [
"ipreetmishra@gmail.com"
] | ipreetmishra@gmail.com |
0facbb2cb2b1430ef5345e6a5b4328243b4b7126 | 6f331acb287b91ab6e297da88dc42489b256249d | /clear_complete_flag.py | a794ad6462bea3072c3185165a1ac24442468c37 | [
"MIT"
] | permissive | One-sixth/getchu_character_picture_grabber | 0cb650c209622600bd192614e83839eb3138e100 | 2a8d44ecf987ca51784449f9803c5e53d54a3e4e | refs/heads/master | 2020-06-19T15:04:41.074789 | 2020-01-14T03:21:38 | 2020-01-14T03:21:38 | 196,755,381 | 18 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | '''
很多时候,爬虫内的忽略标志并不好用
'''
import os
# Marker-file names written by each crawler stage.
stage1_complete_name = '.complete'
stage2_complete_name = '.complete'
stage3_complete_name = '.complete'
stage3_complete_pic_name = '.complete_pic'
# -----------------------------------------------------------
# Parameter section
# Switches: which stage's completion markers to clear
clear_stage1_complete = False
clear_stage2_complete = False
clear_stage3_complete = False
clear_stage3_complete_pic = False
# -----------------------------------------------------------
dataset_root = 'dataset'
wait_to_delete_mark = []
# Walk the whole dataset tree in one big loop and collect marker files
for company_id in os.listdir(dataset_root):
    company_path = os.path.join(dataset_root, company_id)
    if clear_stage1_complete and company_id == stage1_complete_name:
        print('Wait to delete', company_path)
        wait_to_delete_mark.append(company_path)
    elif os.path.isdir(company_path):
        # It is a directory: descend into the products of this company
        for product_id in os.listdir(company_path):
            product_path = os.path.join(company_path, product_id)
            if clear_stage2_complete and product_id == stage2_complete_name:
                print('Wait to delete', product_path)
                wait_to_delete_mark.append(product_path)
            elif os.path.isdir(product_path):
                for file_name in os.listdir(product_path):
                    file_path = os.path.join(product_path, file_name)
                    if clear_stage3_complete and file_name == stage3_complete_name:
                        print('Wait to delete', file_path)
                        wait_to_delete_mark.append(file_path)
                    elif clear_stage3_complete_pic and file_name == stage3_complete_pic_name:
                        print('Wait to delete', file_path)
                        wait_to_delete_mark.append(file_path)
# Interactive confirmation before anything is removed.
while True:
    r = input('If you want to delete them.\nPlease input y to continue or n to cancel.\n')
    if r == 'n':
        print('Cancel')
        exit(0)
    elif r == 'y':
        break
    else:
        print('Please input y or n.')
# Confirmed: delete every collected marker file.
for f in wait_to_delete_mark:
    # print(f)
    os.remove(f)
print('Success')
| [
"One-sixth@users.noreply.github.com"
] | One-sixth@users.noreply.github.com |
88aaaf265c27f0e7826a4b1bda5b42dff316c456 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/hc1.py | 168bddc7a840e82f5abb3977a411aeb871b621cb | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code. lineRemaining is a list of whitespace-split tokens;
    # it is printed (space-joined) only when the first and last tokens are
    # each exactly a double-quote character.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print: strip the surrounding quote tokens
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the two quote tokens: print an empty line.
            print
def main(fileName):
    # Python 2 code. Interpret each line of the file: lines starting with
    # the keyword 'hC1' are handed to printFunction; any other line aborts
    # the whole run with ERROR.
    # NOTE(review): a blank line makes data[0] raise IndexError.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'hC1':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.