blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8234cbe836cb2e4d9efcf0914b8cd6810e01c590 | 71217d0679438a49749f7e1a0dda2d0aab8f6c00 | /sdk/python/kfp/cli/cli.py | 3c3f497dfbac037f01f99c66ea4da57808cda171 | [
"Apache-2.0"
] | permissive | RedbackThomson/pipelines | 65e8c7411e32419404d9c0729798a8bf63c3280d | a5b3e7e3f00feb8dc908b84db4158409c20aa594 | refs/heads/master | 2023-01-30T00:46:02.086217 | 2020-04-28T16:42:06 | 2020-04-28T16:42:06 | 255,702,063 | 1 | 0 | Apache-2.0 | 2020-04-14T19:11:53 | 2020-04-14T19:11:52 | null | UTF-8 | Python | false | false | 1,978 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import logging
import sys
from .._client import Client
from .run import run
from .pipeline import pipeline
from .diagnose_me_cli import diagnose_me
@click.group()
@click.option('--endpoint', help='Endpoint of the KFP API service to connect.')
@click.option('--iap-client-id', help='Client ID for IAP protected endpoint.')
@click.option('-n', '--namespace', default='kubeflow', help='Kubernetes namespace to connect to the KFP API.')
@click.option('--other-client-id', help='Client ID for IAP protected endpoint to obtain the refresh token.')
@click.option('--other-client-secret', help='Client ID for IAP protected endpoint to obtain the refresh token.')
@click.pass_context
def cli(ctx, endpoint, iap_client_id, namespace, other_client_id, other_client_secret):
    """kfp is the command line interface to KFP service."""
    # diagnose_me runs without an API client, so only build the client (and
    # remember the namespace) for every other subcommand.
    if ctx.invoked_subcommand != 'diagnose_me':
        ctx.obj['client'] = Client(endpoint, iap_client_id, namespace, other_client_id, other_client_secret)
        ctx.obj['namespace'] = namespace
def main():
    """Wire up the subcommands and dispatch to click, exiting 1 on failure."""
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    for command in (run, pipeline):
        cli.add_command(command)
    cli.add_command(diagnose_me, 'diagnose_me')
    try:
        cli(obj={}, auto_envvar_prefix='KFP')
    except Exception as e:
        logging.error(e)
        sys.exit(1)
| [
"k8s-ci-robot@users.noreply.github.com"
] | k8s-ci-robot@users.noreply.github.com |
0e599945ce4472d58f9866a53ab0704c0f221798 | e811a08b8b653da94e516ca147ec49b534f74a62 | /inflearn/selenium/youtube_crawling.py | 5ee1344531250a2e894b10b021c4dc663b4896ac | [] | no_license | HoYaStudy/Python_Study | 0feb4a9ba7e68ebea6b2db15b20a3680f979a4de | 59c2cc093ae8ae87c8e07365cc432d87ded29ccc | refs/heads/master | 2023-02-07T23:40:16.135565 | 2023-01-24T06:17:58 | 2023-01-24T06:17:58 | 200,445,372 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,630 | py | import time
import urllib.request as req
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from io import BytesIO
import xlsxwriter
# --- Browser setup ------------------------------------------------------
# Headless, muted Chrome so the crawl can run without a display or sound.
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--mute-audio')
browser = webdriver.Chrome('./chromedriver.exe', options=chrome_options)
browser.implicitly_wait(2)
browser.set_window_size(1920, 1280)

# --- Load the video page and trigger comment lazy-loading ---------------
browser.get('https://www.youtube.com/watch?v=oS8f7fbMHbI')
time.sleep(2)
# A PAGE_DOWN once the document exists makes YouTube start loading comments.
WebDriverWait(browser, 2).until(
    EC.presence_of_element_located((By.TAG_NAME, 'html'))
).send_keys(Keys.PAGE_DOWN)
time.sleep(2)

# Keep scrolling until the document height stops growing, i.e. every
# lazy-loaded comment is in the DOM.
scroll_pause_time = 2
last_height = browser.execute_script('return document.documentElement.scrollHeight')
while True:
    browser.execute_script('window.scrollTo(0, document.documentElement.scrollHeight)')
    time.sleep(scroll_pause_time)
    new_height = browser.execute_script('return document.documentElement.scrollHeight')
    print('Last Height: {}, Current Height: {}'.format(last_height, new_height))
    if new_height == last_height:
        break
    last_height = new_height

# --- Parse the comments and export them to an Excel workbook ------------
workbook = xlsxwriter.Workbook('./result.xlsx')
worksheet = workbook.add_worksheet()
row = 2
soup = BeautifulSoup(browser.page_source, 'html.parser')
comment = soup.select('ytd-comment-renderer#comment')
for dom in comment:
    img_src = dom.select_one('#img').get('src')
    author = dom.select_one('#author-text > span').text.strip()
    content = dom.select_one('#content-text').text.strip()
    vote = dom.select_one('#vote-count-middle').text.strip()
    print('Thumbnail: {}'.format(img_src if img_src else 'None'))
    print('Author: {}'.format(author))
    print('Content: {}'.format(content))
    print('Vote: {}'.format(vote))
    worksheet.write('A%s' % row, author)
    worksheet.write('B%s' % row, content)
    # BUG FIX: the vote count was also written to column B, overwriting the
    # comment text just stored there; it belongs in its own column C.
    worksheet.write('C%s' % row, vote)
    if img_src:
        # Fetch the avatar and embed it in the sheet without a temp file.
        img_data = BytesIO(req.urlopen(img_src).read())
        worksheet.insert_image('D%s' % row, author, {'image_data': img_data})
    else:
        worksheet.write('D%s' % row, 'None')
    row += 1
browser.quit()
workbook.close()
| [
"hoya128@gmail.com"
] | hoya128@gmail.com |
06059aed5c5948562a19955d6daf315d91ff4e1f | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/test_20210719160759.py | 39a3c495535fc8cbd8e9c4ea3de97d83e654feef | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | print(("Good",) + (4,) + ("You",))
# Quiz exercises; the evaluated output of each print is noted inline.
print([["Solar", "Power"]] * 2)  # [['Solar', 'Power'], ['Solar', 'Power']] (both entries are the SAME inner list)
print("pitch perfect" in ["pitch", "perfect"])  # False -- `in` on a list matches whole elements only
print({print("scary"): "hours", 2: ";p"}[None])  # prints "scary" (key expr runs first), then "hours" (print() returns None)
print(( "Surf's Up", {"The": "Beach Boys"[2], "Beach": [2]} ) [1] ["The"] * 3)  # 'aaa' -- "Beach Boys"[2] is 'a'
# aList = ["Dolly Parton", "Arlo Guthrie", "Paul Simon"]
# cList = aList
# cList.append("Pete Seeger")
# bList = cList[:]
# bList.remove("Paul Simon")
# cList += "Elivis"
# print(cList)
aList = [["Everything", "Everything"], "jammin’", 2020]
bList = aList  # alias: bList IS aList
cList = bList[:]  # shallow copy: new outer list, but the inner list is shared
aList[2] = aList[2] + 1  # rebinds index 2 of aList/bList to 2021; cList[2] still 2020
cList[0][1] = "All the Time"  # mutates the shared inner list (visible through aList/bList too)
bList.append("vibes")  # grows aList/bList only, not the copy cList
cList[0] = "Fleet Foxes"  # rebinds cList[0] only
print(cList)  # ['Fleet Foxes', 'jammin’', 2020]
def festival(artistList):
    """Return the artists whose submitted song is rated above 7.

    Artists whose song is unknown print "not one of your artists";
    artists rated 7 or below print "not good enough".
    """
    songRating = {"Breezeblocks": 9, "Skinny Love": 9, "Riptide": 5, "Oxford Comma": 8, "Holland, 1946": 7}
    goodArtists = []
    for name in artistList:
        try:
            rating = songRating[artistList[name]]
        except Exception:
            print("not one of your artists")
            continue
        if rating > 7:
            goodArtists.append(name)
        else:
            print("not good enough")
    return goodArtists
artistList= {"alt—J": "Breezeblocks", "The Strokes": "Hard To Explain", "Bon Iver": "Skinny Love", "Vampire Weekend": "Oxford Comma"}
print(festival(artistList))
def noteFile(notes):
    """Write each note character of *notes* to sheet.txt, one per line."""
    with open('sheet.txt', 'w') as sheet:
        for letter in notes:
            sheet.write(letter + '\n')
music = open('sheet.txt')
one = music.readlines()
print(one[2][0])
notes = 'ABCAG'
noteFile(notes)
def concerts():
    """For each venue, append it to its rating bucket (or report a missing
    rating). Returns None; output happens via print."""
    ratings = {5.0: ["The Shins"], 4.5: ["The", "Beatles"]}
    venues = [(5.0, "infinite energy"), (2, "the loft")]
    for score, venue in venues:
        if score not in ratings:
            print("Add {} to ratings".format(score))
        else:
            print("Add {} to {}".format(venue, score))
            ratings[score].append(venue)
print(concerts())
def listen(platformDict):
    """Return (platform, user_count) for the platform with the most users.

    Ties keep the first platform seen; an empty dict yields ('', 0).
    """
    best = ('', 0)
    for platform, users in platformDict.items():
        if len(users) > best[1]:
            best = (platform, len(users))
    return best
print(listen({'spotify': ['c','k','e'], 'apple music': ['m', 'e'], 'soundcloud': ['c', 'b']}))
def bestSongs(totalDict):
    """Print every (song, rating) entry for every artist in *totalDict*.

    Fixes the original SyntaxError: the inner ``for`` statement was missing
    its colon. The unused ``newDict`` local was dropped.
    """
    for key in totalDict:
        for song in totalDict[key]:
            print(song)


music = {"Drake": [("What's Next", 7), ("POPSTAR", 8), ("Headlines", 9)], "The Weeknd": [("Save Your Tears", 9), ("Starboy", 8), ("After Hours", 10)]}
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
28a643bf09bcd27175187850ab9c60376afe9b41 | 3a22d9a1c4a8d5530208f4b9af004711bd620111 | /reinforcement_learning/rl_hvac_ray_energyplus/source/hvac_ray_launcher.py | 01a94ec321cd97d3d0981453d39be0e9264c3571 | [
"Apache-2.0"
] | permissive | jme3192/amazon-sagemaker-examples | 45fae7b1e2a8b8c0b8149eb1195caa8fd701a12a | 93cd954b6f57c8f905340479b92eaca17b2527ff | refs/heads/master | 2023-04-29T02:40:42.483934 | 2021-05-11T21:31:59 | 2021-05-11T21:31:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,722 | py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import ast
import os
import json
import subprocess
import ray
from ray.tune.tune import run_experiments, run, _make_scheduler
from sagemaker_rl.ray_launcher import SageMakerRayLauncher
from sagemaker_rl.tf_serving_utils import export_tf_serving, natural_keys
TERMINATION_SIGNAL = "JOB_TERMINATED"
MODEL_OUTPUT_DIR = "/opt/ml/model"
CHECKPOINTS_DIR = "/opt/ml/checkpoints"
def custom_sync_func(source, target):
    """Sync experiment artifacts from remote Ray nodes to the driver node
    using rsync over ssh with the cluster's private key."""
    command = 'rsync -havP --inplace --stats -e "ssh -i /root/.ssh/id_rsa" {source} {target}'.format(
        source=source, target=target
    )
    proc = subprocess.Popen(command, shell=True)
    proc.wait()
class HVACSageMakerRayLauncher(SageMakerRayLauncher):
    """Launcher class for Procgen experiments using Ray-RLLib.

    Customers should sub-class this, fill in the required methods, and
    call .train_main() to start a training process.

    Example::

        class MyLauncher(ProcgenSageMakerRayLauncher):
            def register_env_creator(self):
                register_env(
                    "stacked_procgen_env",  # This should be different from procgen_env_wrapper
                    lambda config: gym.wrappers.FrameStack(ProcgenEnvWrapper(config), 4)
                )

            def get_experiment_config(self):
                return {
                    "training": {
                        "env": "procgen_env_wrapper",
                        "run": "PPO",
                        ...
                    }
                }

        if __name__ == "__main__":
            MyLauncher().train_main()
    """

    def register_algorithms_and_preprocessors(self):
        # Subclasses must register any custom models/algorithms with RLlib here.
        raise NotImplementedError()

    def create_tf_serving_model(self, algorithm=None, env_string=None):
        """Restore the trained checkpoint and export it as a TF-Serving model.

        :param algorithm: RLlib algorithm name the agent was trained with.
        :param env_string: registered environment id to instantiate the agent on.
        """
        self.register_env_creator()
        self.register_algorithms_and_preprocessors()
        # The agent-class registry moved between Ray releases.
        if ray.__version__ >= "0.6.5":
            from ray.rllib.agents.registry import get_agent_class
        else:
            from ray.rllib.agents.agent import get_agent_class
        cls = get_agent_class(algorithm)
        # Re-load the exact training config saved next to the checkpoint.
        with open(os.path.join(MODEL_OUTPUT_DIR, "params.json")) as config_json:
            config = json.load(config_json)
        use_torch = config.get("use_pytorch", False)
        # TF-Serving export only applies to the TensorFlow code path.
        if not use_torch:
            if "callbacks" in config:
                # params.json stores the callbacks class as its repr string;
                # recover the bare class name from it.
                callback_cls_str = config["callbacks"]
                callback_cls = callback_cls_str.split("'")[-2].split(".")[-1]
                # NOTE(review): ast.literal_eval() is invoked with NO argument
                # and its (never produced) result is then called -- this line
                # raises TypeError at runtime. It presumably meant to resolve
                # callback_cls (a class-name string) back to a class object,
                # which literal_eval cannot do either. Needs a real fix;
                # confirm intended behavior against the upstream SageMaker RL
                # launcher before changing.
                config["callbacks"] = ast.literal_eval()(callback_cls)
            print("Loaded config for TensorFlow serving.")
            # Serve locally: no monitor, a single worker, CPU only.
            config["monitor"] = False
            config["num_workers"] = 1
            config["num_gpus"] = 0
            agent = cls(env=env_string, config=config)
            checkpoint = os.path.join(MODEL_OUTPUT_DIR, "checkpoint")
            agent.restore(checkpoint)
            export_tf_serving(agent, MODEL_OUTPUT_DIR)

    def find_checkpoint_path_for_spot(self, prefix):
        """Walk *prefix* and collect every directory named 'checkpoint*'.

        :param prefix: root directory to search (the local checkpoint mount).
        :return: tuple (parent dir of the first match or "", list of dir names)
        """
        ckpts = []
        ckpts_prefix = ""
        for root, directories, files in os.walk(prefix):
            for directory in directories:
                if directory.startswith("checkpoint"):
                    # Remember only the first parent seen; all checkpoint dirs
                    # found later are assumed to live under the same root.
                    if not ckpts_prefix:
                        ckpts_prefix = root
                    ckpts.append(directory)
        return ckpts_prefix, ckpts

    def find_checkpoint_file_for_spot(self, prefix):
        """Return the path of the newest checkpoint file under *prefix*, or "".

        Directories sort with natural_keys so "checkpoint_10" follows
        "checkpoint_9"; the file inside each directory uses a dash instead of
        an underscore ("checkpoint-10"), hence the replace().
        """
        ckpts_prefix, ckpts = self.find_checkpoint_path_for_spot(prefix)
        if not ckpts:
            return ""
        else:
            ckpts.sort(key=natural_keys)
            ckpt_name = ckpts[-1].replace("_", "-")
            return os.path.join(ckpts_prefix, ckpts[-1], ckpt_name)

    def launch(self):
        """Actual entry point into the class instance where everything happens."""
        self.register_env_creator()
        self.register_algorithms_and_preprocessors()
        experiment_config, args, verbose = self.get_experiment_config()
        # All worker nodes will block at this step during training
        ray_cluster_config = self.ray_init_config()
        # Only the master node continues past this point to drive training.
        if not self.is_master_node:
            return
        # Resource limits come from the CLI args parsed by the experiment config.
        ray_custom_cluster_config = {
            "object_store_memory": args.ray_object_store_memory,
            "memory": args.ray_memory,
            "redis_max_memory": args.ray_redis_max_memory,
            "num_cpus": args.ray_num_cpus,
            "num_gpus": args.ray_num_gpus,
        }
        all_workers_host_names = self.get_all_host_names()[1:]
        # Overwrite redis address for single instance job
        if len(all_workers_host_names) == 0:
            ray_custom_cluster_config.update({"address": args.ray_address})
        ray_cluster_config.update(ray_custom_cluster_config)
        # Start the driver on master node
        ray.init(**ray_cluster_config)
        # Spot instance is back: a non-empty local checkpoint dir means a
        # previous run was interrupted, so resume from its latest checkpoint.
        if os.path.exists(CHECKPOINTS_DIR) and os.listdir(CHECKPOINTS_DIR):
            print("Instance is back. Local checkpoint path detected.")
            checkpoint_file = self.find_checkpoint_file_for_spot(CHECKPOINTS_DIR)
            print("Setting checkpoint path to {}".format(checkpoint_file))
            if checkpoint_file:
                experiment_config["training"]["restore"] = checkpoint_file  # Overwrite
        experiment_config = self.customize_experiment_config(experiment_config)
        experiment_config = self.set_up_checkpoint(experiment_config)
        # Pull trial artifacts from remote workers back to the driver via rsync.
        experiment_config["training"]["sync_to_driver"] = custom_sync_func
        run_experiments(
            experiment_config,
            scheduler=_make_scheduler(args),
            queue_trials=args.queue_trials,
            resume=args.resume,
            verbose=verbose,
            concurrent=True,
        )
        # If distributed job, send TERMINATION_SIGNAL to all workers.
        if len(all_workers_host_names) > 0:
            self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)

    @classmethod
    def train_main(cls, args):
        """main function that kicks things off"""
        launcher = cls(args)
        launcher.launch()
| [
"noreply@github.com"
] | jme3192.noreply@github.com |
0429f7a6497c3ad8beb348791d38ddfb746c4c92 | 91dfd2193d73d4c0f547706bb1a954025dd8c9fd | /autolens/pipeline/phase/imaging/result.py | 925f183b193e70609d45b35b8d5a11c8055b42dc | [
"MIT"
] | permissive | FreeworkEarth/PyAutoLens | 13913d6a8b9696f225e85164e62dff3251aa7831 | 434f4bb329c93bcdc11b1f87962e7e2bd1097d9b | refs/heads/master | 2023-03-21T19:51:58.245105 | 2021-03-18T14:32:13 | 2021-03-18T14:32:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | from autoconf import conf
import autoarray as aa
import numpy as np
from autogalaxy.galaxy import galaxy as g
from autolens.pipeline.phase import dataset
class Result(dataset.Result):
    @property
    def max_log_likelihood_fit(self):
        """Fit of the maximum-log-likelihood tracer to the masked imaging,
        built with this instance's hyper image-sky and background-noise."""
        hyper_image_sky = self.analysis.hyper_image_sky_for_instance(
            instance=self.instance
        )
        hyper_background_noise = self.analysis.hyper_background_noise_for_instance(
            instance=self.instance
        )
        return self.analysis.masked_imaging_fit_for_tracer(
            tracer=self.max_log_likelihood_tracer,
            hyper_image_sky=hyper_image_sky,
            hyper_background_noise=hyper_background_noise,
        )

    @property
    def unmasked_model_image(self):
        """The unmasked blurred model image of the max-log-likelihood fit."""
        return self.max_log_likelihood_fit.unmasked_blurred_image

    @property
    def unmasked_model_image_of_planes(self):
        """Unmasked blurred model image, one entry per plane."""
        return self.max_log_likelihood_fit.unmasked_blurred_image_of_planes

    @property
    def unmasked_model_image_of_planes_and_galaxies(self):
        """Unmasked blurred model images broken down per plane and galaxy."""
        fit = self.max_log_likelihood_fit
        return fit.unmasked_blurred_image_of_planes_and_galaxies

    def image_for_galaxy(self, galaxy: g.Galaxy) -> np.ndarray:
        """
        Parameters
        ----------
        galaxy
            A galaxy used in this phase
        Returns
        -------
        ndarray or None
            A numpy arrays giving the model image of that galaxy
        """
        return self.max_log_likelihood_fit.galaxy_model_image_dict[galaxy]

    @property
    def image_galaxy_dict(self) -> {str: g.Galaxy}:
        """
        A dictionary associating galaxy names with model images of those galaxies
        """
        return {
            galaxy_path: self.image_for_galaxy(galaxy)
            for galaxy_path, galaxy in self.path_galaxy_tuples
        }

    @property
    def hyper_galaxy_image_path_dict(self):
        """
        A dictionary associating 1D hyper_galaxies galaxy images with their names.
        """
        # Pixels below this fraction of the image maximum are floored, so every
        # hyper image keeps a small positive signal everywhere.
        hyper_minimum_percent = conf.instance["general"]["hyper"][
            "hyper_minimum_percent"
        ]
        hyper_galaxy_image_path_dict = {}
        for path, galaxy in self.path_galaxy_tuples:
            galaxy_image = self.image_galaxy_dict[path]
            # An all-zero image (galaxy contributes nothing) is left untouched.
            if not np.all(galaxy_image == 0):
                minimum_galaxy_value = hyper_minimum_percent * max(galaxy_image)
                galaxy_image[galaxy_image < minimum_galaxy_value] = minimum_galaxy_value
            hyper_galaxy_image_path_dict[path] = galaxy_image
        return hyper_galaxy_image_path_dict

    @property
    def hyper_model_image(self):
        """Sum of all hyper-galaxy images on the sub-size-1 mask."""
        hyper_model_image = aa.Array2D.manual_mask(
            array=np.zeros(self.mask.mask_sub_1.pixels_in_mask),
            mask=self.mask.mask_sub_1,
        )
        # NOTE(review): hyper_galaxy_image_path_dict is a property, so it is
        # rebuilt on every loop iteration here — consider caching it locally.
        for path, galaxy in self.path_galaxy_tuples:
            hyper_model_image += self.hyper_galaxy_image_path_dict[path]
        return hyper_model_image
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
932d48399c77d21d7a6888b2120007a165bd202e | 38f9e6dc2643d955dbf04c4cfd5430c441f72b44 | /pyweb/css/__init__.py | b56e53aab520ef3401bc5eae0c36e5413fe1c3df | [
"MIT"
] | permissive | Dmunch04/PyWeb | 0119b3eaf4e456376603a3b43b17c1a4dc8eb5f2 | 459d3953e4a31a91619d1911d9eda2b2e14b721c | refs/heads/master | 2022-07-29T00:27:55.383733 | 2022-07-13T21:30:56 | 2022-07-13T21:30:56 | 177,850,308 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from pyweb.css.animation import *
from pyweb.css.backdropfilter import *
from pyweb.css.border import *
from pyweb.css.color import *
from pyweb.css.gradient import *
from pyweb.css.position import *
from pyweb.css.style_value import *
from pyweb.css.style import *
from pyweb.css.unit import *
| [
"daniellmunch@gmail.com"
] | daniellmunch@gmail.com |
84e7f1e74bf3f7a0b387804908de4724c54b6157 | 128d593efd591dc83a3aef2d4bfad39e73ee637e | /python_code/complete/no032 | f900a4b44d95b290e9c26d3ca7550214d7c86d20 | [] | no_license | jwan/ProjectEuler | 93be87d89cc58516d503dd5ed53bdbd706748cda | 65aec4f87b8899db6bad94a36412a28a4b4527e9 | refs/heads/master | 2021-01-17T08:21:46.654529 | 2011-05-02T23:11:35 | 2011-05-02T23:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | #!/usr/bin/env python
# an n-digit number is >= 10^(n-1)
# n*m >= 10^(n+m-2), must have at least n + m - 1 digits
#subsets of cardinality 5,6
from python_code.decorators import euler_timer
def all_orderings(list_):
    """Return every ordering (permutation) of the elements of *list_*,
    as a list of lists, choosing heads in the list's own order."""
    if len(list_) == 1:
        return [list_]

    def _without(seq, value):
        # Copy and drop the first occurrence of value, as the original did.
        trimmed = seq[:]
        trimmed.remove(value)
        return trimmed

    return [
        [head] + tail
        for head in list_
        for tail in all_orderings(_without(list_, head))
    ]
# Splits the digit list at every interior position, reads each side as an
# integer, and returns the product for each split point.
def possible_products(list_):
    def _as_number(digits):
        return int("".join(str(d) for d in digits))

    return [
        _as_number(list_[:cut]) * _as_number(list_[cut:])
        for cut in range(1, len(list_))
    ]
@euler_timer(32)
def main():
    """Project Euler 32: sum every product whose multiplicand/multiplier/
    product identity uses the digits 1-9 exactly once.

    Every ordering of the digits 1-9 is scanned; the trailing 4 (or 3)
    digits are tested against the products of all splits of the leading 5
    (or 6) digits.  Python 2 code: range() returns a list and print is a
    statement.
    """
    # A set de-duplicates products that arise from several digit arrangements.
    products = set()
    candidates = all_orderings(range(1,10))
    for candidate in candidates:
        # Case: 5 leading digits split into two factors, 4-digit product.
        prods = possible_products(candidate[:5])
        last4 = candidate[-4:]
        last4 = int("".join([str(elt) for elt in last4]))
        if last4 in prods:
            products.add(last4)
        # Case: 6 leading digits split into two factors, 3-digit product.
        prods = possible_products(candidate[:6])
        last3 = candidate[-3:]
        last3 = int("".join([str(elt) for elt in last3]))
        if last3 in prods:
            products.add(last3)
    print sum(products)

if __name__ == "__main__":
    main()
"dan@counsyl.com"
] | dan@counsyl.com | |
0d3c14efd033d21a13c78aebe4b02a60e3327ca1 | 7fb469e93ff89b1c697d5a53a39188127e50d272 | /utils/migration_gitlab.py | d1c48ce75256887b1e71426bfbd77b6e43a5bfee | [] | no_license | seekplum/seekplum | fde98f93145a78fc030032a4499090583aba154a | 9e66f5e62214e566528003d434ef2b74877419fd | refs/heads/master | 2023-02-13T19:00:49.866130 | 2023-01-31T08:55:19 | 2023-02-02T04:33:45 | 182,075,292 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import logging.handlers
import os
import shutil
import subprocess
import uuid
from datetime import datetime
from multiprocessing import Process
def color(c, s):
    """Wrap *s* in an ANSI foreground-color escape (c=1 red, c=2 green)."""
    return "\033[3%sm%s\033[0m" % (c, s)


def red(s):
    """Render *s* in red."""
    return color(1, s)


def green(s):
    """Render *s* in green."""
    return color(2, s)
def print_ok(check_status):
    """Print *check_status* with a green "[ OK ]" prefix (Python 2 print)."""
    fmt = green("[ OK ] %s" % check_status)
    print fmt
def print_error(check_status):
    """Print *check_status* with a red "[ ERROR ]" prefix (Python 2 print)."""
    fmt = red("[ ERROR ] %s" % check_status)
    print fmt
def get_logger(level=None):
    """Build a logger named after this script, writing to a rotating
    <script>.log file (10 MB per file, 10 backups).

    :param level: logging level; defaults to logging.INFO
    """
    if level is None:
        level = logging.INFO
    base_name = os.path.basename(__file__).rsplit(".", 1)[0]
    new_logger = logging.getLogger(base_name)
    formatter = logging.Formatter(
        '[%(name)s %(levelname)s %(asctime)s %(module)s:%(lineno)d] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    handler = logging.handlers.RotatingFileHandler(
        "%s.log" % base_name, maxBytes=10 * 1024 * 1024, backupCount=10)
    handler.setFormatter(formatter)
    new_logger.addHandler(handler)
    new_logger.setLevel(level)
    return new_logger
# Module-wide logger plus the scratch directory used for clone checkouts.
logger = get_logger()
temp_dir = "/tmp"  # temporary working directory for cloned repositories
def run_cmd(cmd, force=True):
    """Run a shell command and return its stdout.

    :param cmd: str shell command line
    :param force: bool when True, errors are only logged; when False an
        Exception is raised if the command wrote to stderr
    :return: stdout produced by the command
    """
    logger.info("cmd: %s" % cmd)
    process = subprocess.Popen(cmd,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True)
    stdout, stderr = process.communicate()
    if not stderr:
        logger.info("cmd result: %s" % stdout)
        return stdout
    logger.error("cmd stderr: %s" % stderr)
    if not force:
        raise Exception("cmd: %s, stderr: %s" % (cmd, stderr))
    return stdout
def md5sum(file_name):
    """Return the md5 digest of *file_name* by shelling out to md5sum.

    :param file_name: str path of the file to hash
    """
    output = run_cmd("md5sum {}".format(file_name))
    return output.split(" ")[0].strip()
def get_time_str():
    """Current local time formatted as e.g. 2017-01-05_10-45-00.

    :return: str timestamp safe for use in file/directory names
    """
    return datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
def update(group, project):
    """Clone a repository from the old host and push it to the new one.

    1. Bare-clone the project from the old server (192.168.1.121).
    2. Mirror-push everything (all refs) to the new server
       (gitlab.woqutech.com).

    :param group: str group name
    :param project: str project name
    """
    # Local path the repository is cloned into; the timestamp makes it
    # unique per run.
    path = os.path.join(temp_dir, project, get_time_str())
    try:
        # Step 1: bare clone from the old server.
        cmd1 = "git clone --bare git@192.168.1.121:{group}/{project}.git {path}".format(project=project,
                                                                                       path=path,
                                                                                       group=group)
        run_cmd(cmd1)
        # Step 2: mirror-push (branches + tags) to the new server.
        cmd2 = "cd {path} && git push --mirror git@gitlab.woqutech.com:{group}/{project}.git".format(path=path,
                                                                                                    project=project,
                                                                                                    group=group)
        run_cmd(cmd2)
    except Exception as e:
        # NOTE: e.message is Python-2 only (matches the py2 prints elsewhere
        # in this script).
        print_error(e.message)
    else:
        # The checkout is removed only on success; failures keep it around
        # for debugging.
        if os.path.exists(path):
            shutil.rmtree(path, ignore_errors=False)
def check(group, project):
    """Verify log/branch/tag consistency across hosts.

    Redirects the output of `git log` / `git branch -a` / `git tag` into a
    file per host and compares the files' md5 digests: identical digests
    mean both hosts hold the same history.

    :param group: str group name
    :param project: str project name
    """
    check_cmd = [
        "git log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) "
        "%C(bold blue)<%an>%Creset' --abbrev-commit",
        "git branch -a",
        "git tag"
    ]
    # NOTE(review): hosts is empty, so the loop never runs, file_md5 stays
    # empty and the function always reports ERROR — fill in the two GitLab
    # hosts to compare before using this.
    hosts = [
    ]
    file_name = "{}_commit.txt".format(project)
    file_md5 = set()
    for host in hosts:
        # Local path the repository is cloned into for this host.
        path = os.path.join(temp_dir, "{}_{}_{}".format(project, host, get_time_str()))
        # Random placeholder digest so a failed clone still poisons the set.
        md5 = uuid.uuid4().hex
        try:
            cmd1 = "git clone git@{host}:{group}/{project}.git {path}".format(project=project,
                                                                              path=path,
                                                                              host=host,
                                                                              group=group
                                                                              )
            run_cmd(cmd1)
            file_path = os.path.join(path, file_name)
            # Redirect each check command's output into the comparison file.
            for cmd in check_cmd:
                cmd2 = "cd {} && {} >> {}".format(path, cmd, file_path)
                run_cmd(cmd2)
        except Exception as e:
            print_error(e.message)
        else:
            md5 = md5sum(file_path)
        finally:
            file_md5.add(md5)
            if os.path.exists(path):
                shutil.rmtree(path, ignore_errors=False)
    # Number of padding dots printed after the group/project label.
    count = 80 - (len(group) + len(project))
    count = count if count > 0 else 0
    text = count * "."
    # All hosts agree iff every file hashed to the same single digest.
    if len(file_md5) == 1:
        print_ok("{}/{} {}".format(group, project, text))
    else:
        print_error("{}/{} {}".format(group, project, text))
def run(group, project):
    """Process a single repository: verify it on the new host.

    :param group: str group name
    :param project: str project name
    """
    # update(group, project)  # migration push step, currently disabled
    check(group, project)
def main():
    """Spawn one process per (group, project) pair and wait for them all."""
    # Fill in the groups and their project lists to migrate/verify.
    projects = [
        {
            "group": "",
            "projects": [
            ]
        }
    ]
    process_list = []
    for info in projects:
        # NOTE(review): this rebinds the outer `projects` list — harmless
        # because the for-loop iterator was already created, but confusing;
        # consider a different local name.
        projects = info["projects"]  # project names
        group = info["group"]  # group name
        for project in projects:
            process = Process(target=run, args=(group, project,))
            process.start()
            process_list.append(process)
    # Wait for every worker process to finish.
    for process in process_list:
        process.join()

if __name__ == '__main__':
    main()
| [
"1131909224@qq.com"
] | 1131909224@qq.com |
acb8459364de29de9dc72b61525dc9fdfba1d32b | 94a2c4417c1fdd8577a75b09a17912ebae129e6c | /test/test_props.py | efa2d0c3525309dfcb01666e5b8b0fe8ed37bda3 | [
"MIT"
] | permissive | slavaGanzin/ramda.py | ad88a3cf6e7eb1461d4a09aad35ae1c18ca32db8 | 634bfbe0dcb300315ded327756cb3e33241589b8 | refs/heads/master | 2023-01-23T04:43:48.485314 | 2023-01-06T10:11:53 | 2023-01-06T10:11:53 | 142,413,822 | 68 | 7 | MIT | 2021-12-22T13:59:56 | 2018-07-26T08:43:31 | Python | UTF-8 | Python | false | false | 469 | py | from ramda.private.asserts import *
from ramda import *
def test_props():
    """Exercise ramda.props: plain lookup, missing keys, and composition."""
    assert_equal(props(["x", "y"], {"x": 1, "y": 2}), [1, 2])
    # Missing keys come back as None, preserving the requested order.
    assert_equal(props(["c", "a", "b"], {"b": 2, "a": 1}), [None, 1, 2])
    full_name = compose(join(" "), props(["first", "last"]))
    full_name({"last": "Bullet-Tooth", "age": 33, "first": "Tony"})
    assert_equal(
        full_name({"last": "Bullet-Tooth", "age": 33, "first": "Tony"}),
        "Tony Bullet-Tooth",
    )
| [
"slava.ganzin@gmail.com"
] | slava.ganzin@gmail.com |
f106117df3ad5eb8f234be7b240431f878d123cf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/75/usersdata/247/39815/submittedfiles/maiormenor.py | 214f114523f4a56308de11ca5ddbcce30d0dc097 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # -*- coding: utf-8 -*-
import math
# Read five integers and print the largest, then the smallest.
# BUG FIX: the original chain of duplicated "if a>b>c>d>e"-style comparisons
# only covered a handful of orderings, so most inputs printed nothing (or the
# wrong pair twice); max()/min() handle every ordering.
valores = []
for i in range(1, 6):
    valores.append(int(input('Digite o número %d: ' % i)))
print('%d' % max(valores))
print('%d' % min(valores))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b8ec637cf8622bb6e117563c1cb3f60b0272b2f9 | 1b36425f798f484eda964b10a5ad72b37b4da916 | /posthog/migrations/0163_insights_favorited_updatedat_tags.py | c5a40dcc94e66a2437a01cb833cff674ba5315e1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dorucioclea/posthog | 0408baa2a7ae98e5bea352c516f741ddc17c0a3e | 8848981baf237117fb22d28af0770a0165881423 | refs/heads/master | 2023-01-23T11:01:57.942146 | 2023-01-13T09:03:00 | 2023-01-13T09:03:00 | 241,222,000 | 0 | 0 | MIT | 2020-02-17T22:34:37 | 2020-02-17T22:34:36 | null | UTF-8 | Python | false | false | 902 | py | # Generated by Django 3.1.12 on 2021-08-05 12:24
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds `favorited`, `tags` and `updated_at` columns to DashboardItem."""

    dependencies = [
        ("posthog", "0162_organization_is_member_join_email_enabled"),
    ]
    operations = [
        # Flag marking an insight as a user favorite; off by default.
        migrations.AddField(
            model_name="dashboarditem",
            name="favorited",
            field=models.BooleanField(default=False),
        ),
        # Free-form tag list (Postgres array of short strings), empty by default.
        migrations.AddField(
            model_name="dashboarditem",
            name="tags",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(max_length=32), blank=True, default=list, size=None
            ),
        ),
        # Auto-maintained last-modified timestamp (auto_now).
        migrations.AddField(
            model_name="dashboarditem",
            name="updated_at",
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"noreply@github.com"
] | dorucioclea.noreply@github.com |
e148e7a991356e2098a861f3df4ded833d05c410 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/codec/models/cisco-ios-xr/Cisco-IOS-XR-ipv4-bgp-cfg/cd-encode-xr-ipv4-bgp-cfg-40-ydk.py | deae332d3af6766cbb63dddd700f23a1d621ed68 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 4,034 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Encode configuration for model Cisco-IOS-XR-ipv4-bgp-cfg.
usage: cd-encode-xr-ipv4-bgp-cfg-40-ydk.py [-h] [-v]
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CodecService
from ydk.providers import CodecServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ipv4_bgp_cfg \
as xr_ipv4_bgp_cfg
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ipv4_bgp_datatypes \
as xr_ipv4_bgp_datatypes
from ydk.types import Empty
import logging
def config_bgp(bgp):
"""Add config data to bgp object."""
# global configuration
instance = bgp.Instance()
instance.instance_name = "default"
instance_as = instance.InstanceAs()
instance_as.as_ = 0
four_byte_as = instance_as.FourByteAs()
four_byte_as.as_ = 65001
four_byte_as.bgp_running = Empty()
# global address family
global_af = four_byte_as.default_vrf.global_.global_afs.GlobalAf()
global_af.af_name = xr_ipv4_bgp_datatypes.BgpAddressFamily.ipv4_unicast
global_af.enable = Empty()
four_byte_as.default_vrf.global_.global_afs.global_af.append(global_af)
instance_as.four_byte_as.append(four_byte_as)
instance.instance_as.append(instance_as)
bgp.instance.append(instance)
# configure IBGP neighbor group
neighbor_groups = four_byte_as.default_vrf.bgp_entity.neighbor_groups
neighbor_group = neighbor_groups.NeighborGroup()
neighbor_group.neighbor_group_name = "IBGP"
neighbor_group.create = Empty()
# remote AS
neighbor_group.remote_as.as_xx = 0
neighbor_group.remote_as.as_yy = 65001
neighbor_group.update_source_interface = "Loopback0"
neighbor_groups.neighbor_group.append(neighbor_group)
# ipv4 unicast
neighbor_group_af = neighbor_group.neighbor_group_afs.NeighborGroupAf()
neighbor_group_af.af_name = xr_ipv4_bgp_datatypes.BgpAddressFamily.ipv4_unicast
neighbor_group_af.activate = Empty()
neighbor_group_afs = neighbor_group.neighbor_group_afs
neighbor_group_afs.neighbor_group_af.append(neighbor_group_af)
# configure IBGP neighbor
neighbor = four_byte_as.default_vrf.bgp_entity.neighbors.Neighbor()
neighbor.neighbor_address = "172.16.255.2"
neighbor.neighbor_group_add_member = "IBGP"
four_byte_as.default_vrf.bgp_entity.neighbors.neighbor.append(neighbor)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
args = parser.parse_args()
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create codec provider
provider = CodecServiceProvider(type="xml")
# create codec service
codec = CodecService()
bgp = xr_ipv4_bgp_cfg.Bgp() # create object
config_bgp(bgp) # add object configuration
# encode and print object
print(codec.encode(provider, bgp))
exit()
# End of script
| [
"saalvare@cisco.com"
] | saalvare@cisco.com |
2b1f0d7ff4f43be0da442ede0ce52cd16efbfa97 | 0ec8af8988245d864c63d923e5524403090cd7e0 | /policy_gov_mianyang/policy_gov_mianyang/mongo_work.py | fe86d89c1e9ee93962704279f654c52647018315 | [] | no_license | radtek/Spider | d26b685cb5e41c67c6a7ce0d632072f3cac5f061 | 5a419e8ec77915804d3e659631f09b19aa90a088 | refs/heads/master | 2022-10-30T13:13:29.736256 | 2020-06-11T03:34:58 | 2020-06-11T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # coding: utf-8
# Author:houszhou
# Date :2020/6/2 14:03
# Tool :PyCharm
import pymongo
import re
def obj_first(obj, error=''):
    """Return the first element of *obj*, or *error* when *obj* is empty/falsy."""
    if obj:
        return obj[0]
    return error
def format_file_type(doc_no: str) -> str:
    """Extract the leading "file type" prefix from a government document number.

    Tries, in order: the text before an opening bracket (〔 / [ / 【), the text
    before a 4-digit year, the text before '第', then the text before any digit.
    A prefix that itself starts with a digit is rejected and '' is returned.

    The original computed each fallback regex twice (once in the ``elif`` test
    and again in the branch body); each pattern is now evaluated exactly once.
    """
    bracket_prefix = obj_first(re.findall(r'^(.*?)[〔\[【]', doc_no))
    year_prefix = obj_first(re.findall(r'^(.*?)\d{4}', doc_no))
    digit_prefix = obj_first(re.findall(r'^(.*?)\d', doc_no))
    if bracket_prefix:
        file_type = bracket_prefix
    elif year_prefix:
        file_type = year_prefix
    elif '第' in doc_no:
        file_type = obj_first(re.findall('^(.*?)第', doc_no))
    elif digit_prefix:
        file_type = digit_prefix
    else:
        file_type = ''
    # A "prefix" that begins with a digit is not a real document-type label.
    return '' if re.findall(r'^\d', file_type) else file_type
def change():
    """One-off migration: recompute ``file_type`` for every 北大法宝 record.

    Scans the ``gov_info_data`` collection, derives the file type from each
    record's ``extension.doc_no`` and writes it back in place.
    NOTE(review): assumes every matching document has an ``extension``
    sub-document — ``data.get('extension')`` would be None otherwise and the
    following ``.get`` would raise; confirm against the data.
    """
    # Local MongoDB instance; database and collection are fixed for this job.
    client = pymongo.MongoClient(host='localhost', port=27017)
    db = client.pdsp_beta_db
    collection = db.gov_info_data
    result = collection.find({'website': '北大法宝'})
    for i, data in enumerate(result):
        # Progress heartbeat every 1000 documents.
        if i % 1000 == 0:
            print(i)
        id_ = data.get('_id')
        extension = data.get('extension')
        doc_no = extension.get('doc_no', '')
        # Empty doc_no means there is nothing to parse — store ''.
        file_type = format_file_type(doc_no) if doc_no else ''
        print('id: {}, doc_no: {}, new_file_type: {}'.format(id_, doc_no, file_type))
        collection.find_one_and_update({'_id': id_}, {'$set': {'file_type': file_type}})
if __name__ == '__main__':
    # Run the one-off Mongo migration only when executed as a script.
    change()
| [
"1733776802@qq.com"
] | 1733776802@qq.com |
e7d1c7da90d8fe9d28b8cfeebb52df3c85988b16 | d1c605e89fe3f33ba34bc3d29c4f34946cf4835c | /src/openfermion/utils/_trotter_error.py | 02b6d27bfa4935fdcc5921b98841bf874d436e1f | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | nkkchem/OpenFermion | 0bdf776fd2efb3ed19172ddccbc41ecfa15060e0 | 35ca4b389438a63eebf97cf492c135811c1923a6 | refs/heads/master | 2020-03-26T07:07:49.276600 | 2018-08-11T06:34:01 | 2018-08-11T06:34:01 | 144,637,497 | 1 | 0 | Apache-2.0 | 2018-08-13T21:51:34 | 2018-08-13T21:51:34 | null | UTF-8 | Python | false | false | 6,763 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to compute the second order Trotter error."""
from future.utils import iteritems
from math import sqrt, ceil
from scipy.linalg import expm
from openfermion.config import *
from openfermion.ops import QubitOperator
from openfermion.utils._operator_utils import normal_ordered
def commutator(op1, op2):
    """Return the commutator [op1, op2] = op1 * op2 - op2 * op1."""
    forward = op1 * op2
    reverse = op2 * op1
    return forward - reverse
def trivially_commutes(term_a, term_b):
    """Return True when two single-term QubitOperators trivially commute.

    Two Pauli products commute exactly when the number of qubits on which
    they carry *different* Pauli operators is even.
    """
    pauli_string_a, = term_a.terms.keys()
    pauli_string_b, = term_b.terms.keys()
    # Map qubit index -> Pauli action for the second term, then count the
    # shared qubits whose actions differ; an odd count means anticommuting.
    actions_b = dict(pauli_string_b)
    mismatches = sum(
        1 for qubit, action in pauli_string_a
        if qubit in actions_b and actions_b[qubit] != action)
    return mismatches % 2 == 0
def trivially_double_commutes(term_a, term_b, term_c):
    """Check if the double commutator [term_a, [term_b, term_c]] is zero.

    Args:
        term_a, term_b, term_c: Single-term QubitOperators.

    Notes:
        The double commutator is trivially zero when term_b and term_c
        trivially commute, or when term_a's support is disjoint from the
        union of term_b's and term_c's supports.
    """
    if trivially_commutes(term_b, term_c):
        return True
    # Collect the set of qubit indices each term acts on.
    supports = []
    for term in (term_a, term_b, term_c):
        pauli_string, = term.terms.keys()
        supports.append({qubit for qubit, _ in pauli_string})
    support_a, support_b, support_c = supports
    return not (support_a & (support_b | support_c))
def error_operator(terms, series_order=2):
    """Determine the difference between the exact generator of unitary
    evolution and the approximate generator given by Trotter-Suzuki
    to the given order.

    Args:
        terms: a list of QubitTerms in the Hamiltonian to be simulated.
        series_order: the order at which to compute the BCH expansion.
            Only the second order formula is currently implemented
            (corresponding to Equation 9 of the paper).

    Returns:
        The difference between the true and effective generators of time
        evolution for a single Trotter step.

    Raises:
        NotImplementedError: if series_order is not 2.

    Notes: follows Equation 9 of Poulin et al.'s work in "The Trotter Step
        Size Required for Accurate Quantum Simulation of Quantum Chemistry".
    """
    if series_order != 2:
        raise NotImplementedError(
            'Only the second-order (series_order=2) formula is implemented.')

    # Accumulator renamed from `error_operator`, which shadowed this function.
    error_op = QubitOperator()
    for beta in range(len(terms)):
        for alpha in range(beta + 1):
            for alpha_prime in range(beta):
                # Skip double commutators that are trivially zero.
                if not trivially_double_commutes(terms[alpha], terms[beta],
                                                 terms[alpha_prime]):
                    double_com = commutator(terms[alpha],
                                            commutator(terms[beta],
                                                       terms[alpha_prime]))
                    error_op += double_com
                    # Diagonal (alpha == beta) contributions enter with
                    # half weight in Equation 9.
                    if alpha == beta:
                        error_op -= double_com / 2.0

    return error_op / 12.0
def error_bound(terms, tight=False):
    """
    Numerically upper bound the error in the ground state energy
    for the second order Trotter-Suzuki expansion.

    Args:
        terms: a list of single-term QubitOperators in the Hamiltonian
            to be simulated.
        tight: whether to use the triangle inequality to give a loose
            upper bound on the error (default) or to calculate the
            norm of the error operator.

    Returns:
        A float upper bound on norm of error in the ground state energy.

    Notes: follows Poulin et al.'s work in "The Trotter Step Size
        Required for Accurate Quantum Simulation of Quantum
        Chemistry". In particular, Equation 16 is used for a loose
        upper bound, and the norm of Equation 9 is calculated for
        a tighter bound using the error operator from error_operator.

        Possible extensions of this function would be to get the
        expectation value of the error operator with the Hartree-Fock
        state or CISD state, which can scalably bound the error in
        the ground state but much more accurately than the triangle
        inequality.
    """
    zero = QubitOperator()
    error = 0.0

    if tight:
        # Tight bound: the 1-norm of the BCH error operator (Equation 9).
        error = sum(abs(coefficient)
                    for coefficient in error_operator(terms).terms.values())
    else:
        # Loose bound via the triangle inequality (Equation 16).
        # (`elif not tight:` was redundant — this branch is simply the else.)
        for alpha in range(len(terms)):
            term_a = terms[alpha]
            coefficient_a, = term_a.terms.values()
            if coefficient_a:
                error_a = 0.
                for beta in range(alpha + 1, len(terms)):
                    term_b = terms[beta]
                    coefficient_b, = term_b.terms.values()
                    # Only genuinely non-commuting pairs contribute.
                    if not (trivially_commutes(term_a, term_b) or
                            commutator(term_a, term_b) == zero):
                        error_a += abs(coefficient_b)
                error += 4.0 * abs(coefficient_a) * error_a ** 2
    return error
def trotter_steps_required(trotter_error_bound, time, energy_precision):
    """Determine the number of Trotter steps for accurate simulation.

    Args:
        trotter_error_bound (float): Upper bound on Trotter error in the
            state of interest.
        time (float): The total simulation time.
        energy_precision (float): Acceptable shift in state energy.

    Returns:
        The integer minimum number of Trotter steps required for
        simulation to the desired precision.

    Notes:
        The number of Trotter steps required is an upper bound on the
        true requirement, which may be lower.
    """
    # Steps scale with total time and with the square root of the ratio
    # between the error bound and the tolerated energy shift.
    error_ratio = trotter_error_bound / energy_precision
    raw_steps = time * sqrt(error_ratio)
    return int(ceil(raw_steps))
| [
"ryanbabbush@gmail.com"
] | ryanbabbush@gmail.com |
194b57951d3551b17b1e5082928f57cc5902ee37 | f072d766c00c0931b753a9cc50e36994400153c0 | /plot_lnumass_lhe.py | e0a85908f54d6c53307dd67989977f191b379d32 | [] | no_license | UniMiBAnalyses/Utils | 475208a32bc77dfc43c78048f1652a0c96144704 | d0375a5c8debcba4e39c7187ca7933c810d68357 | refs/heads/master | 2021-06-27T10:02:01.438794 | 2020-11-16T23:29:36 | 2020-11-16T23:29:36 | 177,773,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | import ROOT as R
import os
import sys
import utils
from math import cosh
from itertools import combinations
from operator import itemgetter
# Fill and draw the l-nu-j-j invariant mass from an LHE-level ROOT ntuple.
file = R.TFile("/afs/cern.ch/work/d/dvalsecc/private/CMSSW_10_2_0/src/LatinoTreesGEN/GenDumper/test/output_lhe.root", "READ")
tree = file.Get("Analyzer/myTree")
# Histogram: 100 bins over 0-200 GeV (title says "lnu mass" but the fill
# below uses the full lepton+neutrino+2-jet system — TODO confirm intent).
h_lnujjmass= R.TH1D("h_lnujjmass", "lnu mass", 100, 0, 200)
for iev, event in enumerate(tree):
    # Progress marker every 1000 events.
    if iev % 1000 == 0: print(".", end="")
    # Build lepton and neutrino four-vectors from pt/eta/phi; E = pt*cosh(eta)
    # (massless approximation).
    lep = R.TLorentzVector()
    nu = R.TLorentzVector()
    lep.SetPtEtaPhiE(event.lhept1, event.lheeta1, event.lhephi1, event.lhept1*cosh(event.lheeta1))
    nu.SetPtEtaPhiE(event.nu_lhept1, event.nu_lheeta1, event.nu_lhephi1, event.nu_lhept1*cosh(event.nu_lheeta1))
    # Collect the four LHE jets and their PDG ids.
    jets = []
    jetsids = []
    for i in range(1,5):
        jet = R.TLorentzVector()
        # print(getattr(event, f"lhejetpt{i}"), getattr(event, f"lhejeteta{i}"),
        # getattr(event, f"lhejetphi{i}"),getattr(event, f"lhejetpt{i}"))
        jet.SetPtEtaPhiE(getattr(event, f"lhejetpt{i}"), getattr(event, f"lhejeteta{i}"),
                getattr(event, f"lhejetphi{i}"),getattr(event, f"lhejetpt{i}")*cosh(getattr(event, f"lhejeteta{i}")))
        jets.append(jet)
        jetsids.append(getattr(event, f"lhejetpdgid{i}"))
    if (lep+nu).M() < 60:
        # Off-shell leptonic W: pick the jet pair closest to the W mass.
        good_pair = utils.nearest_mass_pair(jets, 80.375)
        W_jets = [j for ij, j in enumerate(jets) if ij in good_pair]
    else:
        # We are looking at WplusTo2J_WminusToLNu
        W_jets = ()
        # Quark-pair PDG id combinations compatible with a W+ decay.
        Wp = [(2,-1),(2,-3),(2,-5),(4,-1),(4,-3),(4,-5)]
        #print("ids", jetsids)
        masses = []
        for p1,p2 in combinations(range(len(jetsids)),2):
            #print((jetsids[p1],jetsids[p2]))
            if (jetsids[p1],jetsids[p2]) in Wp or (jetsids[p2],jetsids[p1]) in Wp:
                #W_jets = (jets[p1], jets[p2])
                masses.append((jets[p1],jets[p2], (jets[p1]+jets[p2]).M()))
                #print(jetsids[p1],jetsids[p2],(jets[p1]+jets[p2]).M())
        #print(list(map(itemgetter(2), masses)))
        # Now get the pair with the smaller mass
        # NOTE(review): W_jets is a (jet, jet, mass) tuple here but a 2-jet
        # list in the other branch; only indices [0] and [1] are used below.
        W_jets = sorted(masses, key=itemgetter(2))[0]
    lnujj = lep + nu + W_jets[0] + W_jets[1]
    #print((good_jets[0] + good_jets[1]).M())
    h_lnujjmass.Fill(lnujj.M())
c = R.TCanvas()
h_lnujjmass.Draw("hist")
c.SetLogy()
c.Draw()
| [
"davide.valsecchi@cern.ch"
] | davide.valsecchi@cern.ch |
a15f316975d4df0d503c6776c28a8a97f11bddd6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03229/s182952830.py | f7d0cdedb254735814e092d73da13fd79ac65aaf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | n = int(input())
a = [int(input()) for _ in range(n)]
a.sort()
front = a[n - 1]
back = a[n - 1]
i = 0
j = n - 2
ans = 0
while i <= j:
diff1 = abs(front - a[i])
diff2 = abs(back - a[i])
diff3 = abs(front - a[j])
diff4 = abs(back - a[j])
mx = max(diff1, diff2, diff3, diff4)
ans += mx
if mx == diff1:
front = a[i]
i += 1
elif mx == diff2:
back = a[i]
i += 1
elif mx == diff3:
front = a[j]
j -= 1
else:
back = a[j]
j -= 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
85aed72f0bd6e3d1dde024e704cac846d3c294a1 | 03c9cd5bd96874d6117fb17c37ac4d7450c15933 | /django-tutorial/chapter04/orm_intro_demo/book/models.py | fbfa5a5d79b26d829f5b15b1c5f0278bcae96c54 | [] | no_license | atiger808/opencv-tutorial | 603de35e97679d6beae104298ae355edfdd9036a | 2ea9bb3818284fb75f85697e36fde37b6479d1c6 | refs/heads/master | 2020-05-29T23:16:30.462022 | 2019-11-05T10:08:20 | 2019-11-05T10:08:20 | 189,425,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from django.db import models
# Create your models here.
class Book(models.Model):
    """Book record: title, author and price."""
    # Explicit auto-increment primary key (Django would add one implicitly).
    id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=100, null=False)
    author = models.CharField(max_length=100, null=False)
    # Price in currency units; defaults to 0.
    price= models.FloatField(null=False, default=0)
price= models.FloatField(null=False, default=0)
# 1. The `makemigrations` command generates the migration script files:
#    python manage.py makemigrations
# 2. The `migrate` command applies the generated migration scripts to the database:
#    python manage.py migrate
class Published(models.Model):
    """Publisher record: name and address."""
    name = models.CharField(max_length=100, null=False)
    address = models.CharField(max_length=100, null=False)
"atiger0614@163.com"
] | atiger0614@163.com |
3faa5711aeeb59f4ef00fa91833c41f63cacdad4 | 3027ca01be33d07d7acd3a08f8bc812fed71544c | /docs/source/conf.py | 6fddb100fbcd2cc38db5bc457f8c7508d827c92f | [] | no_license | hirune924/ayniy | 016c32e34bf61d074554b4bdd4339d76d14d718f | 10537ab50283144fa6267afd912b387e75f3790c | refs/heads/master | 2022-11-29T02:45:16.474280 | 2020-08-09T05:56:08 | 2020-08-09T05:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the repository root (three levels up from this file) importable so
# autodoc can find the package — presumably docs/source/conf.py; verify layout.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# -- Project information -----------------------------------------------------
project = 'Ayniy'
copyright = '2020, Shotaro Ishihara'
author = 'Shotaro Ishihara'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.  autodoc pulls API docs from docstrings; napoleon parses
# Google/NumPy-style docstrings.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The master toctree document.
master_doc = 'index'
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| [
"upura0@gmail.com"
] | upura0@gmail.com |
afb43707671fdb41caaf35d21da658269313c95c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02642/s476928460.py | 64de74dd01fb109dd7acd2a9fb2b1a7c2da06fd8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import collections
import heapq
import math
import random
import sys
# Fast stdin reading; note this shadows the builtin `input`.
input = sys.stdin.readline
sys.setrecursionlimit(500005)
# Shorthand readers: one int, a list of ints, a stripped string.
ri = lambda: int(input())
rl = lambda: list(map(int, input().split()))
rs = lambda: input().rstrip()
n = ri()
a = rl()
# Values are assumed to be <= 1,000,000 — TODO confirm problem constraints.
N = 1000000
f = [0] * (N + 10)
# f[v] starts as the multiplicity of v in a.
for v in a:
    f[v] += 1
# Sieve-style pass: propagate each value's count to all of its multiples,
# so f[x] > 1 whenever x occurs more than once or some element of a
# properly divides x.
for i in range(N, 0, -1):
    if f[i] == 0:
        continue
    j = i * 2
    while j <= N:
        f[j] += f[i]
        j += i
# Count elements that are unique in a and not divisible by any other element.
cnt = sum(f[i] == 1 for i in a)
print(cnt)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
83e951914a404df185985df975b14b4ab79ebab7 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/logic/v20160601/integration_account_assembly.py | 066e5da2149a6ba23cfac81ccc509fccdca4431d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 6,192 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['IntegrationAccountAssembly']
class IntegrationAccountAssembly(pulumi.CustomResource):
    """Pulumi resource for an Azure Logic Apps integration-account assembly.

    Generated by the Pulumi SDK generator; the constructor validates options,
    collects the input properties and registers the resource.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 assembly_artifact_name: Optional[pulumi.Input[str]] = None,
                 integration_account_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['AssemblyPropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        The assembly definition.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] assembly_artifact_name: The assembly artifact name.
        :param pulumi.Input[str] integration_account_name: The integration account name.
        :param pulumi.Input[str] location: The resource location.
        :param pulumi.Input[pulumi.InputType['AssemblyPropertiesArgs']] properties: The assembly properties.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
        """
        # Legacy keyword arguments kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource"; inputs are only
        # collected when creating a new one.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if assembly_artifact_name is None:
                raise TypeError("Missing required property 'assembly_artifact_name'")
            __props__['assembly_artifact_name'] = assembly_artifact_name
            if integration_account_name is None:
                raise TypeError("Missing required property 'integration_account_name'")
            __props__['integration_account_name'] = integration_account_name
            __props__['location'] = location
            if properties is None:
                raise TypeError("Missing required property 'properties'")
            __props__['properties'] = properties
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start as None and are filled by the engine.
            __props__['name'] = None
            __props__['type'] = None
        # Aliases let states created under other API-version types migrate here.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:logic/latest:IntegrationAccountAssembly"), pulumi.Alias(type_="azure-nextgen:logic/v20180701preview:IntegrationAccountAssembly"), pulumi.Alias(type_="azure-nextgen:logic/v20190501:IntegrationAccountAssembly")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(IntegrationAccountAssembly, __self__).__init__(
            'azure-nextgen:logic/v20160601:IntegrationAccountAssembly',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'IntegrationAccountAssembly':
        """
        Get an existing IntegrationAccountAssembly resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        return IntegrationAccountAssembly(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Gets the resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.AssemblyPropertiesResponse']:
        """
        The assembly properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Gets the resource type.
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        # Engine property names are camelCase; Python attributes are snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
e4a14f008c431ab5a0199aef5f80e6faa18074b2 | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/scaleform/daapi/view/lobby/prb_windows/switchperipherywindow.py | a61163d0f6e932c14da512b0c3b78e6e2d0e305f | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,904 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/SwitchPeripheryWindow.py
from ConnectionManager import connectionManager
import constants
from debug_utils import LOG_DEBUG
from gui.Scaleform.daapi.settings import BUTTON_LINKAGES
from gui.Scaleform.daapi.view.meta.SwitchPeripheryWindowMeta import SwitchPeripheryWindowMeta
from gui.Scaleform.locale.DIALOGS import DIALOGS
from gui.shared.formatters import text_styles
from helpers.i18n import makeString as _ms
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.Scaleform.genConsts.TEXT_ALIGN import TEXT_ALIGN
from predefined_hosts import g_preDefinedHosts
from gui import makeHtmlString
from gui import DialogsInterface
from gui import game_control
from adisp import process
class SwitchPeripheryWindow(SwitchPeripheryWindowMeta):
    """Dialog prompting the player to switch to another game periphery (server)."""
    # Fixed width of the footer buttons, in pixels.
    _BTN_WIDTH = 140
    # Action ids dispatched back from the Flash UI.
    _CLOSE_BTN_ACTION = 'closeAction'
    _SWITCH_BTN_ACTION = 'switchAction'

    def __init__(self, ctx):
        super(SwitchPeripheryWindow, self).__init__()
        # ctx supplies labels, forbidden peripheries and relogin chain steps.
        self.__ctx = ctx

    def onBtnClick(self, action):
        # Only the close action is handled here; the switch action is driven
        # by requestForChange from the UI.
        if action == self._CLOSE_BTN_ACTION:
            self.onWindowClose()

    def requestForChange(self, peripheryId):
        # Ignore requests for the periphery we are already connected to.
        if connectionManager.peripheryID != peripheryId:
            self.__relogin(peripheryId)
        else:
            LOG_DEBUG('Current server for relogin has been chosen: %s' % peripheryId)

    def onWindowClose(self):
        self.destroy()

    def _updateServersList(self):
        """Populate the server dropdown, excluding forbidden peripheries."""
        hostsList = g_preDefinedHosts.getSimpleHostsList(g_preDefinedHosts.hostsWithRoaming())
        serversList = []
        for key, name, csisStatus, peripheryID in hostsList:
            if peripheryID not in self.__ctx.getForbiddenPeripherieIDs():
                # Chinese builds wrap the server name in HTML markup.
                serversList.append({'label': name if not constants.IS_CHINA else makeHtmlString('html_templates:lobby/serverStats', 'serverName', {'name': name}),
                 'id': peripheryID,
                 'csisStatus': csisStatus,
                 'selected': True})
        label = _ms(self.__ctx.getSelectServerLabel())
        # With exactly one eligible server, show a direct "switch to X" label
        # and hide the dropdown below.
        if len(serversList) == 1:
            label = _ms(self.__ctx.getApplySwitchLabel(), server=text_styles.stats(serversList[0]['label']))
        self.as_setDataS({'label': label,
         'peripheries': serversList,
         'isServerDropdownMenuVisibility': len(serversList) > 1,
         'selectedIndex': 0})

    def _populate(self):
        super(SwitchPeripheryWindow, self)._populate()
        self.as_setImageS(RES_ICONS.MAPS_ICONS_WINDOWS_SWITCH_PERIPHERY_WINDOW_BG, 0)
        self.as_setWindowTitleS(_ms(DIALOGS.SWITCHPERIPHERYWINDOW_WINDOWTITLE))
        currentServer = connectionManager.serverUserName
        self.as_setTextS(_ms(self.__ctx.getHeader()), _ms(self.__ctx.getDescription(), server=text_styles.error(currentServer)))
        self._updateServersList()
        # Footer: focused "switch" button plus a secondary "cancel" button.
        self.as_setButtonsS([{'label': _ms(DIALOGS.SWITCHPERIPHERYWINDOW_BTNSWITCH),
          'btnLinkage': BUTTON_LINKAGES.BUTTON_NORMAL,
          'action': self._SWITCH_BTN_ACTION,
          'isFocused': True,
          'tooltip': ''}, {'label': _ms(DIALOGS.SWITCHPERIPHERYWINDOW_BTNCANCEL),
          'btnLinkage': BUTTON_LINKAGES.BUTTON_BLACK,
          'action': self._CLOSE_BTN_ACTION,
          'isFocused': False,
          'tooltip': ''}], TEXT_ALIGN.RIGHT, self._BTN_WIDTH)

    def _dispose(self):
        super(SwitchPeripheryWindow, self)._dispose()

    @process
    def __relogin(self, peripheryID):
        """Confirm with the user, then relogin to the chosen periphery (coroutine)."""
        # NOTE(review): this flag is only ever set here — confirm it is read
        # by the meta/base class.
        self.__isGuiUpdateSuppressed = True
        # Roaming peripheries use a dedicated confirmation dialog.
        if g_preDefinedHosts.isRoamingPeriphery(peripheryID):
            success = yield DialogsInterface.showI18nConfirmDialog('changeRoamingPeriphery')
        else:
            success = yield DialogsInterface.showI18nConfirmDialog('changePeriphery')
        if success:
            game_control.g_instance.relogin.doRelogin(peripheryID, extraChainSteps=self.__ctx.getExtraChainSteps())
| [
"info@webium.sk"
] | info@webium.sk |
8adc6e2f1aa9c4d7713b8cb7dd3b2fbb27f47ab4 | dde5bd0d7819a25751570de8d2b9cdd4337337b0 | /Password.py | e7c28aefd9ba7dd2001e72c9632274d43cc5ba5b | [] | no_license | Anubamagpljecs07/Codekata_Hunter | 01ba4a12d2447525d8b1729af1701b6202b5f00e | 06b1c9b272d505f54a98539413dc2d4824305509 | refs/heads/master | 2020-04-18T14:07:22.766109 | 2019-07-28T12:39:31 | 2019-07-28T12:39:31 | 167,580,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | r,s=map(str,input().split())
r=list(r)
s=list(s)
a=len(r)
b=len(s)
g=""
if a<b:
for i in range(1,(b-a)+1):
r.append(i)
else:
for i in range(1,(a-b)+1):
s.append(i)
for i in range(0,len(r)):
for j in range(0,len(s)):
if i==j:
g=g+str(r[i])
g=g+str(s[j])
print(g)
| [
"noreply@github.com"
] | Anubamagpljecs07.noreply@github.com |
fd59c3f1feb10e6b28954e5a34f2009dff2dd443 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-cloudapi/aliyunsdkcloudapi/request/v20160714/SetAppsAuthoritiesRequest.py | 7c04bda93500029462eeb32ea6b4059064120130 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetAppsAuthoritiesRequest(RpcRequest):
    """RPC request for CloudAPI ``SetAppsAuthorities`` (API version 2016-07-14).

    Generated-style request wrapper: each getter/setter maps directly to one
    query parameter of the HTTP request.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'CloudAPI', '2016-07-14', 'SetAppsAuthorities','apigateway')

    # NOTE(review): 'AuthVaildTime' (sic) mirrors the misspelled wire
    # parameter name; 'AuthValidTime' below is the corrected variant —
    # confirm against the Aliyun API reference before changing either.
    def get_AuthVaildTime(self):
        return self.get_query_params().get('AuthVaildTime')

    def set_AuthVaildTime(self,AuthVaildTime):
        self.add_query_param('AuthVaildTime',AuthVaildTime)

    def get_StageName(self):
        return self.get_query_params().get('StageName')

    def set_StageName(self,StageName):
        self.add_query_param('StageName',StageName)

    def get_AppIds(self):
        return self.get_query_params().get('AppIds')

    def set_AppIds(self,AppIds):
        self.add_query_param('AppIds',AppIds)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self,SecurityToken):
        self.add_query_param('SecurityToken',SecurityToken)

    def get_GroupId(self):
        return self.get_query_params().get('GroupId')

    def set_GroupId(self,GroupId):
        self.add_query_param('GroupId',GroupId)

    def get_Description(self):
        return self.get_query_params().get('Description')

    def set_Description(self,Description):
        self.add_query_param('Description',Description)

    def get_ApiId(self):
        return self.get_query_params().get('ApiId')

    def set_ApiId(self,ApiId):
        self.add_query_param('ApiId',ApiId)

    def get_AuthValidTime(self):
        return self.get_query_params().get('AuthValidTime')

    def set_AuthValidTime(self,AuthValidTime):
        self.add_query_param('AuthValidTime',AuthValidTime)
"1478458905@qq.com"
] | 1478458905@qq.com |
2522d3ebf8d60e701f772219533f915da1524c41 | 861fb6e46bfd1ef4ef94b82269babe8fd8e04da1 | /src/programy/parser/template/nodes/srai.py | 7c50ac234eb307b5badc810d202ce48547351ed6 | [
"MIT"
] | permissive | Doshmaku/program-y | 5b436b83f409feeddb9fe626c156a426e1170187 | 6c2e14eebaa92fb61b27deba1b28e6775090c327 | refs/heads/master | 2021-01-18T20:45:29.330015 | 2017-03-31T23:35:30 | 2017-03-31T23:35:30 | 86,988,411 | 0 | 0 | null | 2017-04-02T13:35:03 | 2017-04-02T13:35:03 | null | UTF-8 | Python | false | false | 1,832 | py | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes.base import TemplateNode
class TemplateSRAINode(TemplateNode):
def __init__(self):
TemplateNode.__init__(self)
def resolve(self, bot, clientid):
srai_text = self.resolve_children_to_string(bot, clientid)
logging.debug("[%s] SRAI Text [%s]"%(self.to_string(), srai_text))
resolved = bot.ask_question(clientid, srai_text, srai=True)
logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
return resolved
def to_string(self):
return "[SRAI]"
def to_xml(self, bot, clientid):
xml = "<srai>"
for child in self.children:
xml += child.to_xml(bot, clientid)
xml += "</srai>"
return xml
| [
"keith@keithsterling.com"
] | keith@keithsterling.com |
540d539597adb3706f95ff6300b78db3ff41ab95 | 0bfb4c9185743a5b2024816c4ef0f1c9031d8294 | /Ewaah/Ewaah/settings.py | a3d89991066848017ea4db1f7e7da3afc971a048 | [] | no_license | Husain-Jinia/Django-Ewaah | f0d9e114d0d1a3129a88f3fc0b9bb7a4f5c8c4e0 | 7862a1bfa984b101eb2a5e9da4bd6be65167befe | refs/heads/master | 2023-06-24T05:09:58.524651 | 2021-07-27T16:16:44 | 2021-07-27T16:16:44 | 388,485,460 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | """
Django settings for Ewaah project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=45g%1*rf&dty^7qiz9-x87krmvwh@*eh#w8$iba(mn7zq2f6f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'education.apps.EducationConfig',
'users.apps.UsersConfig',
'product.apps.ProductConfig',
'blog.apps.BlogConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Ewaah.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Ewaah.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = "/images/downloads/"
MEDIA_ROOT = BASE_DIR
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static')
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'product-home'
LOGIN_URL = 'login'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"husainjinia0201@gmail.com"
] | husainjinia0201@gmail.com |
635b68ce18964e25d7cbb14f15778925c773d5e3 | 1c452562b8899cc8d16fc074d660c5d736647144 | /update/test/process_my.py | 2b42cdf5822e181c5d63a449ae22847701d13e1a | [] | no_license | cash2one/autumn | 6ac6caec6f7eeec06cb622689aa5ec9ee3173321 | dc03c31beb3b331264ac83643cefee4839e456fc | refs/heads/master | 2020-05-23T12:58:09.635418 | 2016-09-28T01:25:30 | 2016-09-28T01:25:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | import multiprocessing
import time
def wait_for_event(e):
"""Wait for the event to be set before doing anything"""
print ('wait_for_event: starting')
e.wait()
print ('wait_for_event: e.is_set()->' + str(e.is_set()))
def wait_for_event_timeout(e, t):
"""Wait t seconds and then timeout"""
print ('wait_for_event_timeout: starting')
e.wait(t)
print ('wait_for_event_timeout: e.is_set()->' + str(e.is_set()))
if __name__ == '__main__':
e = multiprocessing.Event()
w1 = multiprocessing.Process(name='block',
target=wait_for_event,
args=(e,))
w1.start()
w2 = multiprocessing.Process(name='non-block',
target=wait_for_event_timeout,
args=(e, 2))
w2.start()
time.sleep(3)
e.set()
print ('main: event is set')
#the output is:
#wait_for_event_timeout: starting
#wait_for_event: starting
#wait_for_event_timeout: e.is_set()->False
#main: event is set
#wait_for_event: e.is_set()->True | [
"xutao.ding@chinascopefinancial.com"
] | xutao.ding@chinascopefinancial.com |
6d4b704f15fa273b6ad4e27a805bd1cd9d828c8f | 0600f0979fe17624d33aa74c739775f0f27a3bb5 | /docs/support/python2_module.py | 5a0f9c7411e1d5dc7f7e231dc98a1e38ad09a560 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | pmacosta/putil | 2c8177fb6b9be667b8d52b48bfd3272de8b0160d | 416cea52df8221981727e25d133e9b4e3f464798 | refs/heads/master | 2021-01-21T13:33:41.232773 | 2016-05-17T12:57:30 | 2016-05-17T12:57:30 | 44,289,408 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # python2_module.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,W0212
def _set_value(self, value):
self._value = value+2
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
bf2e6170279efa042ff2f01f06ad6eb43b661e82 | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4A/4A-4O_wat_20Abox/set_1ns_equi.py | ac6bea51cee9ca3647a26c47a2e7f61edbdeda9e | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import os
dir = '/mnt/scratch/songlin3/run/bace/L4A/wat_20Abox/ti_one-step/4A_4O/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../4A-4O_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
b9a19bc74f525432842757cd26bb159cadd3965a | fe6740673af5f093f41d9cfab5c12883aa6ebbb2 | /plata/contact/models.py | b1711efce2ce444b95b4e2a266df5032ee10a468 | [
"BSD-2-Clause"
] | permissive | chrisglass/plata | 7cb0a2697faff7e0482909aedc2c1b4d3fe8fb0d | fce185e5a1c528b0e059a875eaa5724292827bc7 | refs/heads/master | 2021-01-17T09:41:05.019482 | 2011-01-20T17:11:41 | 2011-01-20T17:11:41 | 1,320,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,466 | py | from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cldr_countries.fields import CountryField
from plata.fields import CurrencyField
class BillingShippingAddress(models.Model):
ADDRESS_FIELDS = ['company', 'first_name', 'last_name', 'address',
'zip_code', 'city', 'country']
billing_company = models.CharField(_('company'), max_length=100, blank=True)
billing_first_name = models.CharField(_('first name'), max_length=100)
billing_last_name = models.CharField(_('last name'), max_length=100)
billing_address = models.TextField(_('address'))
billing_zip_code = models.CharField(_('ZIP code'), max_length=50)
billing_city = models.CharField(_('city'), max_length=100)
billing_country = CountryField()
shipping_same_as_billing = models.BooleanField(_('shipping address equals billing address'),
default=True)
shipping_company = models.CharField(_('company'), max_length=100, blank=True)
shipping_first_name = models.CharField(_('first name'), max_length=100, blank=True)
shipping_last_name = models.CharField(_('last name'), max_length=100, blank=True)
shipping_address = models.TextField(_('address'), blank=True)
shipping_zip_code = models.CharField(_('ZIP code'), max_length=50, blank=True)
shipping_city = models.CharField(_('city'), max_length=100, blank=True)
shipping_country = CountryField(blank=True)
class Meta:
abstract = True
def addresses(self):
billing = dict((f, getattr(self, 'billing_%s' % f)) for f in self.ADDRESS_FIELDS)
if self.shipping_same_as_billing:
shipping = billing
else:
shipping = dict((f, getattr(self, 'shipping_%s' % f)) for f in self.ADDRESS_FIELDS)
return {'billing': billing, 'shipping': shipping}
class Contact(BillingShippingAddress):
user = models.OneToOneField(User, verbose_name=_('user'),
related_name='contactuser')
dob = models.DateField(_('date of birth'), blank=True, null=True)
created = models.DateTimeField(_('created'), default=datetime.now)
currency = CurrencyField(help_text=_('Preferred currency.'))
notes = models.TextField(_('notes'), blank=True)
class Meta:
verbose_name = _('contact')
verbose_name_plural = _('contacts')
def __unicode__(self):
return unicode(self.user)
| [
"mk@spinlock.ch"
] | mk@spinlock.ch |
dc65aff0849a6f9e7add17f702ee6edab4d723f5 | 61aa319732d3fa7912e28f5ff7768498f8dda005 | /src/arch/x86/isa/insts/simd128/floating_point/arithmetic/addition.py | 2f6ec4017b97d797e5b8083d54b8c9509e44e395 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | TeCSAR-UNCC/gem5-SALAM | 37f2f7198c93b4c18452550df48c1a2ab14b14fb | c14c39235f4e376e64dc68b81bd2447e8a47ff65 | refs/heads/main | 2023-06-08T22:16:25.260792 | 2023-05-31T16:43:46 | 2023-05-31T16:43:46 | 154,335,724 | 62 | 22 | BSD-3-Clause | 2023-05-31T16:43:48 | 2018-10-23T13:45:44 | C++ | UTF-8 | Python | false | false | 3,943 | py | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
microcode = '''
def macroop ADDSS_XMM_XMM {
maddf xmml, xmml, xmmlm, size=4, ext=Scalar
};
def macroop ADDSS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddf xmml, xmml, ufp1, size=4, ext=Scalar
};
def macroop ADDSS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddf xmml, xmml, ufp1, size=4, ext=Scalar
};
def macroop ADDSD_XMM_XMM {
maddf xmml, xmml, xmmlm, size=8, ext=Scalar
};
def macroop ADDSD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddf xmml, xmml, ufp1, size=8, ext=Scalar
};
def macroop ADDSD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddf xmml, xmml, ufp1, size=8, ext=Scalar
};
def macroop ADDPS_XMM_XMM {
maddf xmml, xmml, xmmlm, size=4, ext=0
maddf xmmh, xmmh, xmmhm, size=4, ext=0
};
def macroop ADDPS_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
maddf xmml, xmml, ufp1, size=4, ext=0
maddf xmmh, xmmh, ufp2, size=4, ext=0
};
def macroop ADDPS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
maddf xmml, xmml, ufp1, size=4, ext=0
maddf xmmh, xmmh, ufp2, size=4, ext=0
};
def macroop ADDPD_XMM_XMM {
maddf xmml, xmml, xmmlm, size=8, ext=0
maddf xmmh, xmmh, xmmhm, size=8, ext=0
};
def macroop ADDPD_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
maddf xmml, xmml, ufp1, size=8, ext=0
maddf xmmh, xmmh, ufp2, size=8, ext=0
};
def macroop ADDPD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
maddf xmml, xmml, ufp1, size=8, ext=0
maddf xmmh, xmmh, ufp2, size=8, ext=0
};
'''
| [
"sroger48@uncc.edu"
] | sroger48@uncc.edu |
579a5194f8865743d8b7c464ce393de43269a4ed | ad2704933de4502ae9de91e6d915f9dbe010b446 | /kurosawa/chapter02/knock16.py | b57dff9bb7454290f6f99303a1a306d21f23b3c4 | [] | no_license | tmu-nlp/100knock2017 | 266e68917d8d5a7f5d0c064f1bc2da5fa402a253 | 629bd1155d0fe78cd9302ae9a7cdf0922b778fe7 | refs/heads/master | 2021-01-19T17:36:53.328997 | 2017-07-24T07:09:54 | 2017-07-24T07:09:54 | 88,334,932 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | with open('hightemp.txt','r') as f:
n = int(input('N > '))
i = 0
for line in f:
if i % n ==0:
print()
print(line, end="")
i += 1
# split -l 10 hightemp.txt split_
| [
"michiki@Michiki-no-MacBook-Pro.local"
] | michiki@Michiki-no-MacBook-Pro.local |
e0311af79cc11245fd436e626060357ede26188e | 9f483a97f0ecf8c606c9695fab283e1d83f807de | /docs/source/conf.py | 55d81e87f24de5a9b7797699e0723307d6d730b7 | [] | no_license | AndreaCensi/geometric_saccade_detector | 306c75db79d4410713cb68806dabb75cb55726d9 | a83bb74c6f22c6d566481ebe6f57833a7449e095 | refs/heads/master | 2016-09-06T13:51:34.423876 | 2012-06-24T00:12:38 | 2012-06-24T00:12:38 | 949,846 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,232 | py | # -*- coding: utf-8 -*-
#
# RepRep documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 18 20:45:54 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxtogithub']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['my_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'The Geometric Saccade Detector'
copyright = u'2010, Andrea Censi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
html_theme = "haiku"
html_theme_options = { }
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['my_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'GSDdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'geometric_saccade_detector.tex', u'geometric_saccade_detector Documentation',
u'Andrea Censi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'The Geometric Saccade Detector', u'geometric_saccade_detector Documentation',
[u'Andrea Censi'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| [
"andrea@cds.caltech.edu"
] | andrea@cds.caltech.edu |
c44588f710820a05e261c2f28202de73c5a2db69 | c7967ec500b210513aa0b1f540144c931ca687ac | /알고리즘 스터디/개인공부/파이썬 문법/Chapter2/Chapter2_2_float.py | 9f2181e9034ed7925522325dd57519b48b641c3e | [] | no_license | sunminky/algorythmStudy | 9a88e02c444b10904cebae94170eba456320f8e8 | 2ee1b5cf1f2e5f7ef87b44643210f407c4aa90e2 | refs/heads/master | 2023-08-17T01:49:43.528021 | 2023-08-13T08:11:37 | 2023-08-13T08:11:37 | 225,085,243 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | a = 1.2
print(a.is_integer()) #a가 정수로 오차없이 표현가능한가?, False
a = 1.0
print(a.is_integer()) #a가 정수로 오차없이 표현가능한가?, True
import math as mm
print(mm.ceil(1.2)) #1.2보다 크거나 같은 정수
print(mm.floor(1.2)) #1.2보다 작거나 같은 정수
from fractions import Fraction #분수형태의 연산을 가능하게 해주는 모듈
print(Fraction('5/7') * Fraction('14/15')) # 5/7 * 14/15
from decimal import * #실수를 오차없이 계산하게 해주는 모듈
temp = 0
for i in range(100):
temp += Decimal('0.01') #0.01을 100번더함
print(temp)
del temp
x = y = 10
x1 = y1 = 11
print(x is y) #x와 y가 같은 객체인지, True
print(x is y1) #x와 y1가 같은 객체인지, False | [
"suns1502@gmail.com"
] | suns1502@gmail.com |
7aea6af0f1582ad8eb4db2522aea82debe1aa40a | 36959b56e506dbbe2d3c381cdccfe16965c14d24 | /alms/employee/forms.py | 1732062e713481d4df10db6ae09d1f6fe1a8ec33 | [] | no_license | Sathishkumar-M/Django | e2935fe0c69acb4cb39be2bc0504fd3d5619d002 | e54038ef70295274639b6207efe8e7e3939cbe36 | refs/heads/master | 2020-03-21T20:22:48.684770 | 2018-06-28T10:42:51 | 2018-06-28T10:42:51 | 139,003,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from django import forms
from django.contrib.auth.models import User
from employee.models import EmployeeProfileInfo
class EmployeeForm(forms.ModelForm):
class Meta():
model = User
fields = ('first_name','email',)
class EmployeeProfileInfoForm(forms.ModelForm):
class Meta():
model = EmployeeProfileInfo
fields = ('profile_pic','age','phone','address')
widgets = {
'phone':forms.TextInput(attrs={'class':'textinputclass'}),
'address':forms.Textarea(attrs={'class':'editable'})
}
| [
"sathishkumar.appiness@gmail.com"
] | sathishkumar.appiness@gmail.com |
c824fd1884b5d04e2dbd69645eafc7d1f0077a29 | 37f1563cdacf4b37b5b927b892538218aae79c77 | /medium/string/findAndReplacePattern.py | 10a8d751878274bf0b03f1f2f1c0571cf919e852 | [] | no_license | unsortedtosorted/elgoog | 9dee49a20f981305910a8924d86e8f2a16fe14c2 | 5be9fab24c0c1fd9d5dc7a7bdaca105f1ca873ee | refs/heads/master | 2020-04-15T00:51:12.114249 | 2019-05-19T04:37:24 | 2019-05-19T04:37:24 | 164,254,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | """
#890 Find and Replace pattern
1. Runtime : O(P + W*N )
"""
class Solution:
def findAndReplacePattern(self, words, pattern):
"""
:type words: List[str]
:type pattern: str
:rtype: List[str]
"""
p=""
pm={}
for i,x in enumerate(pattern):
if x in pm:
p=p+pm[x]
else:
p=p+str(i)
pm[x]=str(i)
r = []
for word in words:
w=""
wm={}
if len(word)!=len(pattern):
continue
for i,x in enumerate(word):
if x in wm:
w=w+wm[x]
else:
w=w+str(i)
wm[x]=str(i)
if w==p:
r.append(word)
return (r)
| [
"noreply@github.com"
] | unsortedtosorted.noreply@github.com |
e5e6e121285d7248c84fdf05f7584f8e05b9251f | 438d6a867e23e49fe84041d1dcb3456b71af8ebb | /Modulos/entradas/models.py | 54d3b94c457ff563b802ec8f071b7d5589d0c784 | [] | no_license | Nicko1722/inventario-is | 76d0108ecd2e01843c60292d80f6c27c39f53faa | b7ed35de235673ad896ffdcefcf6d6c9c08501c5 | refs/heads/master | 2021-01-01T03:56:34.485005 | 2016-04-24T21:35:07 | 2016-04-24T21:35:07 | 56,994,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from django.db import models
from Modulos.inventario.models import Producto
from django.contrib.auth.models import User
# Create your models here.
class Entrada(models.Model):
usuario = models.ForeignKey(User)
producto = models.ForeignKey(Producto)
fecha = models.DateField(auto_now_add=True)
unidades = models.PositiveIntegerField()
| [
"ioswxd@gmail.com"
] | ioswxd@gmail.com |
e346746ff6b18c3d47a4d0417aebf6bbe6593c32 | e9321204dfca38eaf12eca38f83476879c170441 | /bwi_my_ws/bwi_ws/devel/lib/python2.7/dist-packages/bwi_msgs/msg/_LEDControlFeedback.py | fa964812af45f7efee3aa6131e7b8cb289fbe085 | [] | no_license | jumperbeng/backup | 1d96d471e4aa1adc1179fa78db02b08ff944f7ab | 64e36db87446ddae132524e19fef45f2b1b01242 | refs/heads/master | 2021-07-14T08:27:34.831316 | 2017-10-17T04:09:31 | 2017-10-17T04:09:31 | 107,211,167 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,917 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from bwi_msgs/LEDControlFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
class LEDControlFeedback(genpy.Message):
  """Feedback message of the bwi_msgs/LEDControl action.

  Autogenerated by genpy from bwi_msgs/LEDControlFeedback.msg; the
  (de)serialization code must stay in sync with _md5sum, so regenerate
  from the .msg definition instead of editing by hand.
  Single field: time_running (a genpy.Duration).
  """
  _md5sum = "75824f08f05571689ccf412bca8194f3"
  _type = "bwi_msgs/LEDControlFeedback"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#feedback
duration time_running
"""
  __slots__ = ['time_running']
  _slot_types = ['duration']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       time_running
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(LEDControlFeedback, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.time_running is None:
        self.time_running = genpy.Duration()
    else:
      # No arguments given: initialize the single field to a zero duration.
      self.time_running = genpy.Duration()
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # Pack secs/nsecs as two little-endian 32-bit signed ints ("<2i").
      buff.write(_struct_2i.pack(_x.time_running.secs, _x.time_running.nsecs))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE: parameter name 'str' shadows the builtin (generated code, kept as-is).
    try:
      if self.time_running is None:
        self.time_running = genpy.Duration()
      end = 0
      _x = self
      start = end
      end += 8
      (_x.time_running.secs, _x.time_running.nsecs,) = _struct_2i.unpack(str[start:end])
      # canon() presumably normalizes the secs/nsecs pair -- see genpy.Duration.
      self.time_running.canon()
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_struct_2i.pack(_x.time_running.secs, _x.time_running.nsecs))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.time_running is None:
        self.time_running = genpy.Duration()
      end = 0
      _x = self
      start = end
      end += 8
      (_x.time_running.secs, _x.time_running.nsecs,) = _struct_2i.unpack(str[start:end])
      self.time_running.canon()
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Precompiled struct helpers shared by the (de)serialization methods above:
# "<2i" packs time_running as two little-endian 32-bit signed integers.
_struct_I = genpy.struct_I
_struct_2i = struct.Struct("<2i")
| [
"wangzixuan828@gmail.com"
] | wangzixuan828@gmail.com |
d68a1825942a601091c0bb9216565841509b6d29 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5690574640250880_0/Python/Hierynomus/MinesweeperMaster.py | 63f8968bba3f816d5a599a74f31c299ba2954136 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,993 | py | import sys
# Board dimensions (R rows, C columns) and mine count M for the current
# test case; set by solveOne() and read by all helpers below.
R = 0
C = 0
M = 0
def gridToString(grid):
    """Join a grid (list of rows, each a list of single-char cells)
    into newline-separated text."""
    return '\n'.join(map(''.join, grid))
def transposedGridToString(grid):
    """Render the transpose of the grid: C lines of R characters each.
    Reads the module globals R and C."""
    columns = []
    for x in range(C):
        columns.append(''.join(grid[y][x] for y in range(R)))
    return '\n'.join(columns)
def fillGrid(grid):
    """Place the M mines ('*') into grid, packing from the top row.

    Mines fill left-to-right, top-to-bottom; the last few mines are placed
    so the remaining empty region stays connected (solvable in one click).
    Mutates grid in place; reads the module globals R, C, M.
    """
    if M == 0:
        return
    x, y = (0, 0)
    m = M
    # Fill left to right until one but last row or 1 mine left
    while m > 1 and y < R - 2:
        grid[y][x] = '*'
        m -= 1
        x = x + 1
        if (x == C):
            x = 0
            y += 1
    # Fill top to bottom until one mine left
    while m > 1:
        grid[y][x] = '*'
        m -= 1
        if y == R - 2:
            y = y + 1
        else:
            y = y - 1
            x = x + 1
    # If last mine would be placed at one-but-last
    # And there are still 2 rows underneath empty
    # Move to first of new row
    if x == C - 2 and y < R - 2:
        grid[y + 1][0] = '*'
    else:
        grid[y][x] = '*'
    # # Last mine
    # if x + 1 == C - 1 and y + 1 < R:
    #     # Now in second to last column and not on last row
    #     grid[y + 1][0] = '*'
    # elif x == 1 and C > 2:
    #     # Let's place the last one at the end of the row
    #     grid[y][C - 1] = '*'
    # else:
    #     grid[y][x] = '*'
# Offsets of the 8 cells surrounding a cell (the cell itself excluded).
neig = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if not (i == j == 0)]
def get_adjacent_cells(grid, x, y):
    """Yield the values of every in-bounds neighbour of (x, y).
    Uses the module globals C (width) and R (height) for bounds checks."""
    for dx, dy in neig:
        if 0 <= (x + dx) < C and 0 <= y + dy < R:
            yield grid[y + dy][x + dx]
def cleanCells(grid, x, y):
    """Mark every in-bounds neighbour of (x, y) as revealed ('.'),
    leaving the click cell 'c' untouched. Reads globals R and C."""
    for off_x, off_y in neig:
        col = x + off_x
        row = y + off_y
        if not (0 <= col < C and 0 <= row < R):
            continue
        if grid[row][col] != 'c':
            grid[row][col] = '.'
def hasNeighbourMines(grid, x, y):
    """Return True if any in-bounds neighbour of (x, y) holds a mine '*'."""
    for cell in get_adjacent_cells(grid, x, y):
        if cell == '*':
            return True
    return False
def doPlacement(grid):
    """Choose the click cell and reveal the region it would open.

    Puts the click 'c' in the bottom-right corner (always mine-free when
    R*C > M), then sweeps from the bottom-right clearing every cell with
    no adjacent mines. Mutates grid in place; reads globals R, C, M.
    """
    # We can always place right-bottom
    if R * C > M:
        grid[R - 1][C - 1] = 'c'
    else:
        # Already done all are mines
        return
    for x in reversed(range(C)):
        for y in reversed(range(R)):
            if grid[y][x] in ['?', 'c', '.'] and not hasNeighbourMines(grid, x, y):
                cleanCells(grid, x, y)
            if '*' == grid[y][x]:
                break
def solveOne(file):
    """Read one test case (line "R C M") from file and return the board.

    Fills mines row-major and picks the click cell; if any '?' survives,
    retries on the transposed board (dimensions swapped) and renders it
    back transposed. Returns the board text or "Impossible".
    Mutates the module globals R, C, M. (Python 2 source: `print s`.)
    """
    global R, C, M
    R, C, M = map(lambda s: int(s), file.readline().split())
    print("R = ", R, "C = ", C, "M = ", M)
    grid = [['?' for x in range(C)] for y in range(R)]
    fillGrid(grid)
    doPlacement(grid)
    s = gridToString(grid)
    if '?' in s:
        # Transpose
        R, C = C, R
        grid = [['?' for x in range(C)] for y in range(R)]
        fillGrid(grid)
        doPlacement(grid)
        s = transposedGridToString(grid)
    print s
    return s if '?' not in s else "Impossible"
# Script entry point: argv[1] is the input file, argv[2] the output file.
f = open(sys.argv[1], 'r')
of = open(sys.argv[2], 'w')
T = int(f.readline())
print "Running [%s] Cases" % T
for t in range(T):
    res = solveOne(f)
    of.write("Case #%d:\n%s\n" % (t + 1, res))
f.close()
of.close()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
c40ab6abed19bb93242d6c71cdc7ea9bd555c69a | 7002ec859c1f8d59448dba5f091a3c1162cb4d92 | /App/forms.py | 093cabcf41ebe13a0e84cb356e1ed8cba847f25a | [] | no_license | shubhamjain31/Employee_Management_System | 39ad0226b6b6ca86b9ff3fc132d8467d0adbf802 | e80d09a8f9c93a65fc64c1179d21130059e23780 | refs/heads/main | 2023-08-06T00:53:57.498939 | 2021-09-16T16:29:08 | 2021-09-16T16:29:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | from django import forms
from App.models import Employee, Company
# This is for employee
class EmployeeForm(forms.ModelForm):
    """ModelForm for creating/editing an Employee.

    Exposes first/last name, company (options drawn from all Company rows,
    matched on cName), email and phone. Widget attrs carry the Bootstrap
    form-control classes used by the templates.
    """
    eFname = forms.CharField(
        label = 'First Name',
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'First Name',
            'rows': 1
        })
    )
    eLname = forms.CharField(
        label = 'Last Name',
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Last Name',
            'rows': 1
        })
    )
    eCompany = forms.ModelChoiceField(queryset=Company.objects.all(), empty_label='-----',
        to_field_name="cName", error_messages={
        'required' : 'Please Select Company Name',
        },
        widget=forms.Select(
            attrs={
                "placeholder" : "Company",
                "class" : "form-control form-control-alternative",
            }
        ))
    # NOTE(review): the required-message says "Company Email" but this is
    # the employee's email field -- confirm the intended wording.
    eEmail = forms.EmailField(error_messages={
        'required' : 'Please Enter Company Email',
        },
        widget=forms.EmailInput(
            attrs={
                "placeholder" : "Employee Email",
                "class" : "form-control form-control-alternative"
            }
        ))
    ePhone = forms.CharField(
        label = 'Employee Phone',
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Employee Phone',
            'rows': 1
        })
    )
    class Meta:
        model = Employee
        fields = ['eFname', 'eLname', 'eCompany', 'eEmail', 'ePhone']
# This is for company
class CompanyForm(forms.ModelForm):
    """ModelForm for creating/editing a Company (name, email, logo, URL)."""
    # NOTE(review): label 'Customer' on the name and URL fields looks like a
    # copy-paste leftover (placeholders say Company) -- confirm intent.
    cName = forms.CharField(
        label = 'Customer',
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Company Name',
            'rows': 1
        })
    )
    cEmail = forms.EmailField(error_messages={
        'required' : 'Please Enter Company Email',
        },
        widget=forms.EmailInput(
            attrs={
                "placeholder" : "Company Email",
                "class" : "form-control form-control-alternative"
            }
        ))
    # Logo upload is optional.
    cLogo = forms.FileField(required=False)
    cUrl = forms.CharField(
        label = 'Customer',
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Company URL',
            'rows': 1
        })
    )
    class Meta:
        model = Company
fields = ['cName', 'cEmail', 'cLogo', 'cUrl'] | [
"sj27754@gmail.com"
] | sj27754@gmail.com |
53116c468f023c8dc22f35757c12e5ea7a48855c | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/create_project_module_response.py | 62c7bbfa7375d683eb98a59a28db5cbf95bb5dc2 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,790 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateProjectModuleResponse(SdkResponse):
    """Response model of the "create project module" API (autogenerated SDK model).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'description': 'str',
        'module_name': 'str',
        'module_id': 'int',
        'owner': 'ModuleOwner'
    }
    attribute_map = {
        'description': 'description',
        'module_name': 'module_name',
        'module_id': 'module_id',
        'owner': 'owner'
    }
    def __init__(self, description=None, module_name=None, module_id=None, owner=None):
        """CreateProjectModuleResponse

        The model defined in huaweicloud sdk

        :param description: Module description
        :type description: str
        :param module_name: Module name
        :type module_name: str
        :param module_id: Module id
        :type module_id: int
        :param owner:
        :type owner: :class:`huaweicloudsdkprojectman.v4.ModuleOwner`
        """
        super(CreateProjectModuleResponse, self).__init__()
        self._description = None
        self._module_name = None
        self._module_id = None
        self._owner = None
        self.discriminator = None
        # Only assign fields that were actually provided, so the setters run.
        if description is not None:
            self.description = description
        if module_name is not None:
            self.module_name = module_name
        if module_id is not None:
            self.module_id = module_id
        if owner is not None:
            self.owner = owner
    @property
    def description(self):
        """Gets the description of this CreateProjectModuleResponse.

        Module description

        :return: The description of this CreateProjectModuleResponse.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this CreateProjectModuleResponse.

        Module description

        :param description: The description of this CreateProjectModuleResponse.
        :type description: str
        """
        self._description = description
    @property
    def module_name(self):
        """Gets the module_name of this CreateProjectModuleResponse.

        Module name

        :return: The module_name of this CreateProjectModuleResponse.
        :rtype: str
        """
        return self._module_name
    @module_name.setter
    def module_name(self, module_name):
        """Sets the module_name of this CreateProjectModuleResponse.

        Module name

        :param module_name: The module_name of this CreateProjectModuleResponse.
        :type module_name: str
        """
        self._module_name = module_name
    @property
    def module_id(self):
        """Gets the module_id of this CreateProjectModuleResponse.

        Module id

        :return: The module_id of this CreateProjectModuleResponse.
        :rtype: int
        """
        return self._module_id
    @module_id.setter
    def module_id(self, module_id):
        """Sets the module_id of this CreateProjectModuleResponse.

        Module id

        :param module_id: The module_id of this CreateProjectModuleResponse.
        :type module_id: int
        """
        self._module_id = module_id
    @property
    def owner(self):
        """Gets the owner of this CreateProjectModuleResponse.

        :return: The owner of this CreateProjectModuleResponse.
        :rtype: :class:`huaweicloudsdkprojectman.v4.ModuleOwner`
        """
        return self._owner
    @owner.setter
    def owner(self, owner):
        """Sets the owner of this CreateProjectModuleResponse.

        :param owner: The owner of this CreateProjectModuleResponse.
        :type owner: :class:`huaweicloudsdkprojectman.v4.ModuleOwner`
        """
        self._owner = owner
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask attributes declared sensitive in the dict view.
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateProjectModuleResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
4ab388d8d0e0d57aea8e2618360b21ac2695539d | 7c593f4cc70ee56106cc9cce105e6b9e7839431e | /tests/nn_moving_average.py | 668ad36ea26c9ff97d100fc9637d63a35adc5bc6 | [
"Apache-2.0"
] | permissive | google/objax | 84e397cafb70813a1e89467f745facf828ed24b8 | a2d025d9e1da8660a1883404207c41d4327d8c48 | refs/heads/master | 2023-09-02T07:04:26.801269 | 2023-06-12T22:12:53 | 2023-06-12T22:12:53 | 288,923,752 | 801 | 80 | Apache-2.0 | 2023-06-12T22:12:54 | 2020-08-20T06:20:40 | Python | UTF-8 | Python | false | false | 1,921 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for MovingAverage and ExponentialMovingAverage Layer."""
import unittest
import jax.numpy as jn
import numpy as np
import objax
class TestMovingAverage(unittest.TestCase):
def test_MovingAverage(self):
"""Test MovingAverage."""
x1 = jn.array([[0, 1, 2]])
x2 = jn.array([[0, 0, 0]])
x3 = jn.array([[-3, -4, 5]])
init_value = 100
shape = x1.shape
ma = objax.nn.MovingAverage(shape=shape, buffer_size=2, init_value=init_value)
x_ma1 = ma(x1)
x_ma2 = ma(x2)
x_ma3 = ma(x3)
np.testing.assert_allclose(x_ma1, np.array([[50, 50.5, 51]]))
np.testing.assert_allclose(x_ma2, np.array([[0, 0.5, 1]]))
np.testing.assert_allclose(x_ma3, np.array([[-1.5, -2, 2.5]]))
def test_ExponentialMovingAverage(self):
"""Test ExponentialMovingAverage."""
x1 = jn.array([[0, 1, 2]]) * 100
x2 = jn.array([[-3, -4, 5]]) * 100
init_value = 100
shape = x1.shape
ema = objax.nn.ExponentialMovingAverage(shape=shape, init_value=init_value, momentum=0.8)
x_ema1 = ema(x1)
x_ema2 = ema(x2)
np.testing.assert_allclose(x_ema1, np.array([[80, 100, 120]]))
np.testing.assert_allclose(x_ema2, np.array([[4, 0, 196]]))
if __name__ == '__main__':
unittest.main()
| [
"dberth@google.com"
] | dberth@google.com |
920da64de4037860257f877a1e15c50ee0aa9612 | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_cumsum/trend_poly/cycle_0/ar_12/test_artificial_128_cumsum_poly_0_12_100.py | fe8bd2b350a1675d404de1ea84265e5c035b44d2 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generated benchmark case: 128 daily points, polynomial trend, no cycle,
# cumsum transform, zero noise, 100 exogenous variables, AR order 12.
dataset = tsds.generate_random_TS(N = 128 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 0, transform = "cumsum", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
2f727ec747999ca318d678a2849754bc2c28fa40 | d49cfe38764aa35992ba5cf65655a6a45d9487c8 | /旋转矩阵.py | 12438a807352bdee830a20ba6fcda6d71748d2fc | [] | no_license | getabear/leetcode | fc0797f664ab4052aa2635341f4bbe40b74ec2b8 | 4af6608166f2e4cdfcfb0bbb92133b4a0f90ea34 | refs/heads/master | 2021-07-15T11:05:23.049235 | 2020-11-15T12:57:14 | 2020-11-15T12:57:14 | 224,601,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from typing import List
class Solution:
    """In-place 90-degree clockwise rotation of a matrix."""

    def rotate(self, matrix: List[List[int]]) -> None:
        """Rotate *matrix* 90 degrees clockwise, in place.

        Fixes/generalizes the original square-only version: the temporary
        grid now has the rotated shape, so any rectangular R x C matrix is
        supported (it becomes C x R); square input is unchanged in behavior.
        Empty input is a no-op. Returns None; *matrix* is mutated via slice
        assignment so callers holding a reference see the rotated contents.
        """
        if not matrix or not matrix[0]:
            # Nothing to rotate (the original crashed on [] via matrix[0]).
            return
        high = len(matrix)
        width = len(matrix[0])
        # Rotated cell (r, c) comes from original cell (high - 1 - c, r);
        # build the result first so reads never see partial writes.
        tmp = [[matrix[high - 1 - c][r] for c in range(high)]
               for r in range(width)]
        matrix[:] = tmp
"1874178998@qq.com"
] | 1874178998@qq.com |
19946adb6c4387a7e49170a912c4c7ecbfb5a0f0 | a0eceb74aeb933defece02c5c74d0872c35e9742 | /django_bootstrap3view/django_bootstrap3view_app/utils/render.py | e71eb23c53a48748e6bcf7f49450decae5308593 | [
"BSD-3-Clause"
] | permissive | bossiernesto/django-bootstrap3-classview | 77ab317a818521437b64a4599083ec5cdf762e29 | 25d5efdf321953578fd1aaa6f1e3dd6ee688f806 | refs/heads/master | 2021-01-21T21:47:38.735681 | 2016-04-20T00:44:43 | 2016-04-20T00:44:43 | 17,086,076 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.template.loader import get_template
from django.template import Context, Template
def render(template, context):
    """Look up *template* by name and render it with the given context dict."""
    return get_template(template).render(Context(context))
def render_string(string, context):
    """Render a template given directly as a source string with *context*."""
    template = Template(string)
    context = Context(context)
return template.render(context) | [
"bossi.ernestog@gmail.com"
] | bossi.ernestog@gmail.com |
f03fc963c85f85a748d2f0eaf1fbb2922fdb2816 | f2770a694a8aedb71c326ced49e76531d69d9f4f | /bin_SPIDERS_CLUSTERS/create_gaia_mask_CODEX.py | ed05458c277af7fc3b8f8f23cddfede122232c25 | [] | no_license | JohanComparat/makeSample | 258280ccb0f9dfa7dc13e934f453c520a1f416b8 | b15f942f51750f504150f6ec29eba857123c29c4 | refs/heads/master | 2021-06-07T18:21:27.559530 | 2020-01-22T08:46:04 | 2020-01-22T08:46:04 | 106,016,246 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,189 | py | import astropy.io.fits as fits
import os, sys, glob
from os.path import join
#import pymangle as mangle
import numpy as np
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
import astropy.units as u
import astropy.cosmology as cc
cosmo = cc.Planck13
from astropy.coordinates import SkyCoord
from sklearn.neighbors import BallTree, DistanceMetric
from astropy.table import Table,unique
from math import radians, cos, sin, asin, sqrt, pi
in_dir = '/data36s/comparat/CODEX_clustering/catalogs/'
out_dir = '/data36s/comparat/CODEX_clustering/angular_clustering/'
deg_to_rad = np.pi/180.
arcsec = 1. / 3600.
# Radial bin edges (log-spaced, expressed in degrees).
rs = 10**np.arange(-1,1.6,0.1) *arcsec
#rs = 10**np.arange(-1,2.6,0.1) *arcsec

# DATA
path_2_data_2rxs = join( in_dir, 'cat_spiders_masked_X.fits' )
data_file = path_2_data_2rxs
ra_name_data = 'RA'
dec_name_data = 'Dec'
hduD = fits.open(data_file)
ra_data = hduD[1].data[ra_name_data]
dec_data = hduD[1].data[dec_name_data]
z_data = hduD[1].data['z']
coords = SkyCoord(ra_data, dec_data, unit='deg', frame='icrs')
bb_data = coords.galactic.b.value
ll_data = coords.galactic.l.value
bb_ecl_data = coords.barycentrictrueecliptic.lat
selection_data = (z_data>0)
N_data = len(ra_data[selection_data])
# Cluster positions in radians, (dec, ra) order as used by the haversine
# metric of sklearn's BallTree. Constant across all GAIA tables.
agn_coordinates = deg_to_rad * np.transpose([dec_data[selection_data], ra_data[selection_data]])


def gaia_selection(ra_gaia, dec_gaia):
    """Geometric mask applied to every GAIA table: off the galactic plane
    (|b| > 20), |dec| < 80 and ecliptic latitude > -80."""
    coords_g = SkyCoord(ra_gaia, dec_gaia, unit='deg', frame='icrs')
    bb_gaia = coords_g.galactic.b.value
    bb_ecl_gaia = coords_g.barycentrictrueecliptic.lat
    return (abs(bb_gaia)>20)&(dec_gaia<80)&(dec_gaia>-80)&(bb_ecl_gaia.value>-80)


def save_pair_density(ra_gaia, dec_gaia, selection_gaia, out_name):
    """Count cluster-star pairs per radial annulus and write the density.

    Builds a haversine BallTree on the selected GAIA stars, counts
    neighbours around every selected cluster position for each radius in
    rs, differentiates the cumulative counts into annuli and normalizes by
    annulus area and both sample sizes. Writes two columns
    (radius, density) to out_dir/out_name.
    This replaces three copy-pasted instances of the same computation; the
    unused AGN-side BallTree the original rebuilt each time is dropped.
    NOTE(review): the query radii rs are in degrees while the coordinates
    are in radians -- confirm whether rs should also be multiplied by
    deg_to_rad (behavior kept exactly as in the original).
    """
    N_gaia = len(ra_gaia[selection_gaia])
    gaia_coordinates = deg_to_rad * np.transpose([dec_gaia[selection_gaia], ra_gaia[selection_gaia]])
    tree_gaia = BallTree(gaia_coordinates, metric='haversine')
    test_c = np.array([ tree_gaia.query_radius(agn_coordinates, r = rr, count_only=True) for rr in rs ])
    N_pairs_total = test_c.sum(axis=1)
    Delta_N_pairs = N_pairs_total[1:]-N_pairs_total[:-1]
    area = 4.*np.pi*(rs[1:]**2 - rs[:-1]**2)
    pair_density = Delta_N_pairs/(area*N_data*N_gaia)
    out_data = os.path.join(out_dir, out_name)
    np.savetxt(out_data, np.transpose([rs[1:], pair_density]), header='radius_arcsec density' )


# GAIA CATALOGS: one table per magnitude bin; process 5 of them.
gaia_dir = '/data44s/eroAGN_WG_DATA/DATA/photometry/catalogs/GAIA/DR2/'
gaia_table_list = np.array(glob.glob(os.path.join(gaia_dir, 'table_*.fits')))
gaia_table_list.sort()
for gaia_file in gaia_table_list[1:][::-1][:5]:
    print(gaia_file)
    hdu_G = fits.open(gaia_file)
    ra_gaia, dec_gaia = hdu_G[1].data['ra'], hdu_G[1].data['dec']
    save_pair_density(ra_gaia, dec_gaia, gaia_selection(ra_gaia, dec_gaia),
                      'cat_spiders_masked_X_GAIA_'+os.path.basename(gaia_file)+'.data')

# EXCEPTION for the first file, which has a broad magnitude range 1-5 that
# is re-cut by hand into two bins: 4 < g <= 5 and g <= 4.
gaia_file = gaia_table_list[0]
print(gaia_file)
hdu_G = fits.open(gaia_file)
ra_gaia, dec_gaia = hdu_G[1].data['ra'], hdu_G[1].data['dec']
g_mag = hdu_G[1].data['phot_g_mean_mag']
x_gal_gaia = gaia_selection(ra_gaia, dec_gaia)
save_pair_density(ra_gaia, dec_gaia, x_gal_gaia & (g_mag>4), 'cat_spiders_masked_X_GAIA_table_4_g_5.fits.data')
save_pair_density(ra_gaia, dec_gaia, x_gal_gaia & (g_mag<=4), 'cat_spiders_masked_X_GAIA_table_1_g_4.fits.data')
| [
"johan.comparat@gmail.com"
] | johan.comparat@gmail.com |
69428a8de65e3a9d7e128759819730afde9441e3 | 2b88e4385b2ed720005f9809e257ef0ed5b6b1d5 | /hashing/Permutations_of_A_in_B.py | aa12c64fb98d821e421e8a2807a9927423d7bf71 | [] | no_license | arnabs542/DataStructures_Algorithms | c5efbbbea9aafc3406b8cefa7bf4c33c4c3c6f2e | 431e7bddcb04f7a3df84fcd6ee823fd91ce317ad | refs/heads/master | 2023-06-16T13:11:25.378555 | 2021-07-08T13:29:00 | 2021-07-08T13:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | '''
Permutations of A in B
You are given two strings A and B of size N and M respectively.
You have to find the count of all permutations of A present in B as a substring. You can assume a string will have only lowercase letters.
Problem Constraints
1 <= N < M <= 105
Example Input
Input 1:
A = "abc"
B = "abcbacabc"
Input 2:
A = "aca"
B = "acaa"
Example Output
Output 1:
5
Output 2:
2
'''
# we will maintain two frequency hashmaps
# one map will have the pattern string and another will be the search window of the main string
# the window will be from i to j
# for each window check if the window hashmap is equal to the pattern map
class Solution:
# comparing maps
def cmp(self,win,map):
for key in map:
if key not in win or win[key] != map[key]:
return 0
return 1
def solve(self, a, b):
map = {}
window = {}
for i in range(len(a)):
if a[i] not in map:
map[a[i]] = 1
else:
map[a[i]] += 1
# initial window
for i in range(len(a)):
if b[i] not in window:
window[b[i]] = 1
else:
window[b[i]] += 1
i=0
j=len(a)
count = 0
if self.cmp(window,map) == 1:
count += 1
# create window from i to j of size len(a)
while j<len(b) and i<len(b):
if b[j] not in window:
window[b[j]] = 1
else:
window[b[j]] += 1
window[b[i]] -= 1
j += 1
i += 1
if self.cmp(window,map) == 1:
count += 1
return count
# TC O(m), m is the len(b)
# SC O(m), m is the len(b) | [
"nik.dhoot@gmail.com"
] | nik.dhoot@gmail.com |
c6e60e69fb58f34c5459db46cc74163d54e47571 | 2af4823ae83fbcc780ef538bd02fa5bf3a51208c | /ABC134/C.py | 4b2fa43a79291ed0783a652b320f75a2c2d0e7c0 | [] | no_license | jre233kei/procon-atcoder | 102420cc246a5123ac9774f8b28440f1d8b70b0f | c463c9c92d45f19fba32d0c8a25f97d73db67bc5 | refs/heads/master | 2022-12-04T09:30:39.271433 | 2020-08-14T11:38:46 | 2020-08-14T11:38:46 | 276,779,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | n = int(input())
a = []
maxa = 0
maxa1 = 0
maxcnt = 0
for i in range(n):
ai = int(input())
a.append(ai)
if maxa < ai:
maxa1 = maxa
maxa = ai
maxcnt = 0
elif maxa == ai:
maxcnt += 1
elif maxa1 < ai:
maxa1 = ai
for i in range(n):
if a[i] == maxa:
if maxcnt > 0:
print(maxa)
else:
print(maxa1)
else:
print(maxa)
| [
"jre233kei+github@gmail.com"
] | jre233kei+github@gmail.com |
f9e7743751a0b8047b216a8b84778e5fb3854cec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03503/s091731182.py | 541db4752b84f1d8e1675c7c44d0fe983000cf86 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | n=int(input())
f=[]
P=[]
# Read n rows of 10 flags into f (each f[i][j] is compared against p[j] below).
for i in range(n):
    a=list(map(int,input().split()))
    f.append(a)
# Read n score rows into P; row i is indexed by the overlap count with f[i].
for i in range(n):
    a=list(map(int, input().split()))
    P.append(a)
from itertools import product
ans=-(10**10)
# Brute-force every non-empty 10-bit selection pattern p.
for p in product([0, 1], repeat = 10):
    if sum(p)!=0:
        ret = 0
        for i in range(n):
            # cnt = number of positions where both p and f[i] are 1.
            cnt = 0
            for j in range(10):
                if p[j] == f[i][j] and p[j] == 1:
                    cnt += 1
            ret += P[i][cnt]
        ans = max(ans, ret)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4a8afc496c44532758c44e68adbf5ca79f4fc7d0 | 4e4bc85d0f17b6999db4cd62bb3016f96ed8f0ff | /on_convolution-12.py | 334aceb04aefbfa739d97be7ddb6e98902914f08 | [] | no_license | practical-neuroimaging/practical-neuroimaging.github.com | e6b06b6a83559cdb7527d5e34cc402e36cecd7c0 | 26a5830c7d6668ae3690f45da9792c71c1c7fad3 | refs/heads/master | 2020-04-23T18:55:02.058479 | 2017-02-15T19:07:54 | 2017-02-15T19:07:54 | 8,209,908 | 8 | 3 | null | 2015-02-11T19:53:00 | 2013-02-14T23:29:41 | JavaScript | UTF-8 | Python | false | false | 199 | py | bold_signal = np.convolve(neural_signal, hrf_signal)
plt.plot(times_and_tail, bold_signal)
plt.xlabel('time (seconds)')
plt.ylabel('bold signal')
plt.title('Our algorithm is the same as convolution') | [
"matthew.brett@gmail.com"
] | matthew.brett@gmail.com |
e92a0a2bd507628450b55e377eb3da19b6dba4a4 | 66c94b5e427c0b8f8f7101de9c17af1423f00682 | /keras/keras05_train_test2.py | f5c59354a7e50d47f591bc1843fa17a044e404ff | [] | no_license | NamkyuHan/bit_seoul | a34ea3c49666ee2183026e960e45092778643d55 | 3112eb576089cdf906c4f326337b4d2b5e5e4c29 | refs/heads/master | 2023-01-30T19:02:53.323592 | 2020-12-17T01:05:17 | 2020-12-17T01:05:17 | 311,277,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,588 | py | import numpy as np
#1. Data: simple 1-D regression; train on 1..10, test on 11..15,
#   then predict 16..18.
x_train = np.array([1,2,3,4,5,6,7,8,9,10])
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15])
y_test = np.array([11,12,13,14,15])
x_pred = np.array([16,17,18])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
#2. Model construction (hyperparameter-tuning practice).
# Sequential stacks layers; Dense is a fully connected (DNN) layer that
# wires every input unit to every output unit, so 3 inputs and 4 outputs
# give 3x4 = 12 weights.
model = Sequential()
model.add(Dense(300, input_dim=1))
# input_dim=1 means a single input feature (use 2 or 3 for wider inputs).
# First layer: 1 input -> 300 outputs.
model.add(Dense(500))
model.add(Dense(300))
model.add(Dense(70))
model.add(Dense(1))
#3. Compile and train.
# Loss: mean squared error (mse); optimizer: adam; metric: mean absolute
# error (mae).
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# fit runs the training loop for 1000 epochs.
# model.fit(x, y, epochs=10000, batch_size=1)
model.fit(x_train, y_train, epochs=1000)
#4. Evaluate and predict.
# loss, acc = model.evaluate(x,y, batch_size=1)
# loss, acc = model.evaluate(x,y)
loss = model.evaluate(x_test,y_test)
print("loss : ", loss)
# print("acc : ", acc)
y_pred = model.predict(x_pred)
print("결과물 : \n : ", y_pred)
| [
"rksh333@naver.com"
] | rksh333@naver.com |
9dd33882af864b4b5fc7ab3f5089bc4419d2ee5d | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/common/versionupdater.py | c3b4f0e5cda0cb664ce768974e6c7c47946eb77d | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,344 | py | # 2016.05.01 15:26:19 Střední Evropa (letní čas)
# Embedded file name: scripts/common/VersionUpdater.py
import sys
from debug_utils import LOG_DEBUG
class VersionUpdaterBase(object):
    """Base class for different types of version updaters. Suggested descendant implementation:
    @singleton
    class SomeVersionUpdater(VersionUpdaterBase):
        def __init__(self):
            super(self.__class__, self).__init__(UPDATE_FUNCTION_TEMPLATE, LATEST_VERSION)
        def updateVersion(self, logID, owner, data):
            self._updateToLatestVersion(lambda _, data: data['ver'], logID, owner, data)
    Usage:
        SomeVersionUpdater.updateVersion('Some, id:%d' % (owner.id,), owner, owner.data)
    """

    # NOTE: the decompiled source used `raise <cond> or AssertionError(...)`,
    # which raises `True` (a TypeError) whenever the condition HOLDS, i.e. on
    # the happy path. Those artifacts are restored to proper `assert`
    # statements below; behavior on assertion failure is unchanged.

    def __init__(self, template, latestVersion, moduleWithUpdaters = None):
        """template: printf-style name pattern with one or two %d slots
        (fromVersion[, toVersion]); latestVersion: version to update to;
        moduleWithUpdaters: module holding the updater functions (defaults
        to the module that defines the concrete subclass)."""
        self._startVersion = None
        self._updaters = None
        self._template = template
        self._latestVersion = latestVersion
        self._module = moduleWithUpdaters
        if moduleWithUpdaters is None:
            self._module = sys.modules[self.__module__]

    latestVersion = property(lambda self: self._latestVersion)

    def __buildUpdaters(self):
        """Builds and caches list of updater functions."""
        assert self._updaters is None, 'Build once is enough'
        self._updaters = []
        for fromVer in xrange(self._latestVersion):
            # One %d in the template means the function name encodes only the
            # source version; two %d encode source and target versions.
            args = (fromVer,) if self._template.count('%d') == 1 else (fromVer, fromVer + 1)
            funcName = self._template % args
            func = getattr(self._module, funcName, None)
            if func is not None:
                self._updaters.append(func)
                if self._startVersion is None:
                    self._startVersion = fromVer
            else:
                # A gap is only allowed before the first updater was found.
                assert self._startVersion is None, 'Sequence of updaters should be continuous, absentFunc=%s, ' % (funcName,)
        assert self._startVersion is not None
        assert len(self._updaters) == self._latestVersion - self._startVersion
        LOG_DEBUG('__buildUpdaters', self.__class__, self._startVersion, self._latestVersion)

    def __getUpdaters(self, startVersion):
        """Returns list of updaters from startVersion to self._latestVersion."""
        if startVersion == self._latestVersion:
            return []
        if self._updaters is None:
            self.__buildUpdaters()
        assert startVersion >= self._startVersion, '%s >= %s' % (startVersion, self._startVersion)
        assert startVersion <= self._latestVersion, '%s <= %s' % (startVersion, self._latestVersion)
        return enumerate(self._updaters[startVersion - self._startVersion:], start=startVersion)

    def _updateToLatestVersion(self, versionOrGetter, logID, *args):
        """Updates data to latest version by applying updaters [currentVersion..latestVersion).
        logID is a data owner identity, f.e. account.logID.
        versionOrGetter is current data version or function to get current data version from args.
        args is an argument list to be passed to every updater function. Also used by versionOrGetter.
        Returns updated args.
        Update algorithm depends on callable(versionOrGetter), True or False:
            for ver, updater: updater(args)
        or
            for ver, updater: ver, args = updater(args)
        """
        isCallable = callable(versionOrGetter)
        currentVersion = versionOrGetter(*args) if isCallable else versionOrGetter
        for fromVer, updater in self.__getUpdaters(currentVersion):
            LOG_DEBUG('_updateToLatestVersion', logID, fromVer)
            result = updater(*args)
            if isCallable:
                # The getter re-reads the version from the (mutated) args.
                resultVer = versionOrGetter(*args)
            else:
                # Non-callable mode: each updater returns (newVersion, *newArgs).
                resultVer, args = result[0], result[1:]
            # Every updater must advance the version by exactly one step.
            assert resultVer == fromVer + 1, 'resultVer=%s, ver=%s, updater=%s' % (resultVer, fromVer, updater.__name__)
        return args
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\versionupdater.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:26:19 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
1a3f22259ba23bd2b200afd33cef1fe3ec4a8fbe | 45467e07e77131f631d0865046dcc4d18f483601 | /src/Hackerearth/codearena/adam.py | 104e61f60ac19049ac1addef4cd3944a4194b77b | [] | no_license | bvsbrk/Algos | 98374e094bd3811579276d25a82bbf2c0f046d96 | cbb18bce92054d57c0e825069ef7f2120a9cc622 | refs/heads/master | 2021-09-25T10:01:59.323857 | 2018-10-20T16:07:07 | 2018-10-20T16:07:07 | 98,708,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from math import ceil as c
if __name__ == '__main__':
tc = int(input().strip())
for _ in range(tc):
n = int(input().strip())
arr = [int(i) for i in input().strip().split(" ")]
while True:
sm = min(arr)
d = arr.index(sm) + 1
de = []
if sm == 0:
for i in range(len(arr)):
arr[i] -= 1
if arr[i] < 0:
de.append(i)
else:
n = c((sm + d) / sm)
r = -n * d
for i in range(len(arr)):
arr[i] += r
if arr[i] < 0:
de.append(i)
co = 0
for i in de:
del arr[i - co]
co += 1
if len(arr) == 1:
print("Ladia")
break
elif len(arr) == 0:
print("Kushagra")
break
| [
"v.ramakoteswararao2015@vit.ac.in"
] | v.ramakoteswararao2015@vit.ac.in |
0c1e33f9aacaa50af3117320d47326b1f04c0b7f | 6206732b002f820d3664f8e3e689ed48a18ccc44 | /ex16/ex16_2.py | f249735f091e4a7c98328fc83ed1fc4d0ce00bea | [] | no_license | jamtot/LearnPythonTheHardWay | 4c0992f0bf7e6f9d130fe595c32483eec3a6e578 | b08409f4f501af91130a99fae7eacd6847d5691f | refs/heads/master | 2021-01-10T08:53:53.570473 | 2016-03-03T00:11:48 | 2016-03-03T00:11:48 | 51,035,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | #asks user for input as to whether they want to read or write a file
print "Would you like to read or write a file? Insert r or w"
readOrWrite = raw_input(" >")
#if the user types a word beginning with r or R do this
if readOrWrite[0] == 'r' or readOrWrite[0] == 'R':
print "Enter the name of the file you would like to read:"
filename = raw_input(' >')
#get the files name from input, and open the file into a var
txt = open(filename)
print "Here's your file %r:" % filename
#read the file
print txt.read()
#close the file
txt.close()
#if the user types a word beginning with w or W
elif readOrWrite[0] == 'w' or readOrWrite[0] == 'W':
print "Enter the name of the file you would like to create/edit:"
#get the files name from output
filename = raw_input(' >')
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
#open the file to be written
target = open(filename, 'w')
#remove everything from the file
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
#take in 3 lines to write to the file
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print "I'm going to write these to the file."
#write each line to the file
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print "And finally, we close it."
#close the file
target.close()
else:
#a catch for if the user types something
#other than a letter or word beginning with r or w
print "Yeah... that's not an 'r' or a 'w', buddy.",
print "You can even type in the whole word, because I only check the first letter!"
| [
"jongrammor@gmail.com"
] | jongrammor@gmail.com |
15dd4a1f748631efab3f33142b10c78e6df7b814 | 871f1ef6b9a476051b883ab551b44c7581284cdc | /tests/test_bert.py | 742b13fac25dd5614a2c91cc8207eb09f5fba37c | [
"MIT"
] | permissive | bradfox2/keras-bert | 6979a9fa69cba215930a2172de642bbc72a540e0 | 1ffd3f6beb1ffac08a4ee235b71e3bb2eed36923 | refs/heads/master | 2020-04-14T22:23:28.315133 | 2019-01-18T22:08:00 | 2019-01-18T22:08:00 | 164,160,370 | 0 | 1 | MIT | 2019-01-04T22:20:45 | 2019-01-04T22:20:44 | null | UTF-8 | Python | false | false | 3,975 | py | import unittest
import os
import tempfile
import random
import keras
import numpy as np
from keras_bert import gelu, get_model, get_custom_objects, get_base_dict, gen_batch_inputs
class TestBERT(unittest.TestCase):
def test_sample(self):
model = get_model(
token_num=200,
head_num=3,
transformer_num=2,
)
model_path = os.path.join(tempfile.gettempdir(), 'keras_bert_%f.h5' % random.random())
model.save(model_path)
model = keras.models.load_model(
model_path,
custom_objects=get_custom_objects(),
)
model.summary(line_length=200)
def test_fit(self):
current_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(current_path, 'test_bert_fit.h5')
sentence_pairs = [
[['all', 'work', 'and', 'no', 'play'], ['makes', 'jack', 'a', 'dull', 'boy']],
[['from', 'the', 'day', 'forth'], ['my', 'arm', 'changed']],
[['and', 'a', 'voice', 'echoed'], ['power', 'give', 'me', 'more', 'power']],
]
token_dict = get_base_dict()
for pairs in sentence_pairs:
for token in pairs[0] + pairs[1]:
if token not in token_dict:
token_dict[token] = len(token_dict)
token_list = list(token_dict.keys())
if os.path.exists(model_path):
model = keras.models.load_model(
model_path,
custom_objects=get_custom_objects(),
)
else:
model = get_model(
token_num=len(token_dict),
head_num=5,
transformer_num=12,
embed_dim=25,
feed_forward_dim=100,
seq_len=20,
pos_num=20,
dropout_rate=0.05,
attention_activation=gelu,
lr=1e-3,
)
model.summary()
def _generator():
while True:
yield gen_batch_inputs(
sentence_pairs,
token_dict,
token_list,
seq_len=20,
mask_rate=0.3,
swap_sentence_rate=1.0,
)
model.fit_generator(
generator=_generator(),
steps_per_epoch=1000,
epochs=1,
validation_data=_generator(),
validation_steps=100,
callbacks=[
keras.callbacks.ReduceLROnPlateau(monitor='val_MLM_loss', factor=0.5, patience=3),
keras.callbacks.EarlyStopping(monitor='val_MLM_loss', patience=5)
],
)
# model.save(model_path)
for inputs, outputs in _generator():
predicts = model.predict(inputs)
outputs = list(map(lambda x: np.squeeze(x, axis=-1), outputs))
predicts = list(map(lambda x: np.argmax(x, axis=-1), predicts))
batch_size, seq_len = inputs[-1].shape
for i in range(batch_size):
for j in range(seq_len):
if inputs[-1][i][j]:
self.assertEqual(outputs[0][i][j], predicts[0][i][j])
self.assertTrue(np.allclose(outputs[1], predicts[1]))
break
def test_get_layers(self):
def _custom_layers(x, trainable=True):
return keras.layers.LSTM(
units=768,
trainable=trainable,
name='LSTM',
)(x)
inputs, output_layer = get_model(
token_num=200,
embed_dim=768,
custom_layers=_custom_layers,
training=False,
)
model = keras.models.Model(inputs=inputs, outputs=output_layer)
model.compile(
optimizer='adam',
loss='mse',
metrics={},
)
model.summary()
self.assertTrue(model is not None)
| [
"CyberZHG@gmail.com"
] | CyberZHG@gmail.com |
2065478dacf11a2c9e50fda8a21efd7a7b02a512 | 0abbb442df5cab82aa02e91839f090e325626818 | /ppy_terminal/imports/setup_logging.py | 014843f03653447465ce647d7f1d0a603ec2cef5 | [
"MIT"
] | permissive | jimmyauyeung/generative_art | 6f5571d8f60e2aeb1ab64237d9a82d3e6c3baf6a | eb1730d90e438327edaa53375765a9690b968d2b | refs/heads/master | 2023-08-04T14:02:36.050556 | 2021-09-21T02:36:58 | 2021-09-21T02:36:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | import sys
import logging
logging.basicConfig(level=logging.INFO,
stream=sys.stdout,
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger(__name__)
| [
"aaronpenne@users.noreply.github.com"
] | aaronpenne@users.noreply.github.com |
fc83d9a049d663d70a7fd53e2c1f4a20d99684a5 | b42850bc3e36bbd1683070393582617f2b3cd8e6 | /Retake_22_08_2020/project/everland.py | 1356a49f74400af83924dd1bd987fe8f49f82ac0 | [] | no_license | marianidchenko/Python_OOP | aecca18be6df3850c0efbf2fa6d25bf3ff53ae96 | 547c12cbdad5b8c16fa55bba6c03b71db181ad2b | refs/heads/main | 2023-07-09T05:42:43.863681 | 2021-08-14T14:55:51 | 2021-08-14T14:55:51 | 381,572,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | from project.rooms.room import Room
class Everland:
def __init__(self):
self.rooms = []
def add_room(self, room: Room):
self.rooms.append(room)
def get_monthly_consumptions(self):
monthly_consumptions = 0
for room in self.rooms:
monthly_consumptions += room.expenses + room.room_cost
return f"Monthly consumption: {monthly_consumptions:.2f}$."
def pay(self):
result = []
rooms_to_remove = []
for room in self.rooms:
total_owed = room.expenses + room.room_cost
if room.budget >= total_owed:
room.budget -= total_owed
result.append(f"{room.family_name} paid {total_owed:.2f}$ and have {room.budget:.2f}$ left.")
else:
result.append(f"{room.family_name} does not have enough budget and must leave the hotel.")
rooms_to_remove.append(room)
self.rooms = [x for x in self.rooms if x not in rooms_to_remove]
return '\n'.join(result)
def status(self):
result = [f"Total population: {[x.members_count for x in self.rooms]}"]
for room in self.rooms:
result.append(f"{room.family_name} with {room.members_count} members. Budget: {room.budget:.2f}$, Expenses: {room.expenses:.2f}$")
for i, child in enumerate(room.children):
result.append(f"--- Child {i} monthly cost: {child.cost*30:.2f}$")
for appliance in room.appliances:
result.append(f"--- Appliances monthly cost: {appliance.get_monthly_expenses():.2f}$")
return '\n'.join(result)
| [
"marianidchenko@gmail.com"
] | marianidchenko@gmail.com |
72ad5a472eabffc145e2b07652c5f9b17d06717d | 1508b3e3f56e750e38db4334343beedcbb2f9c95 | /318/kel.py | 708fb80bd9f9411fded135560d22aed7500f9137 | [] | no_license | kellyseeme/pythonexample | 3bb325e31c677160c1abd6c3f314f7ef3af55daa | 3eab43cdfa5c59a0f4553de84c9de21e5ded44bb | refs/heads/master | 2021-01-21T13:52:43.076697 | 2016-05-30T06:32:37 | 2016-05-30T06:32:37 | 51,348,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | #!/usr/bin/env python
from multiprocessing import Pool
class calculate(object):
def run(self):
def f(x):
return x*x
p = Pool()
return p.map(f,[1,2,3])
c1 = calculate()
print c1.run()
| [
"root@python.(none)"
] | root@python.(none) |
5b05bd4fc2a322b216ae791dd0c8a890a6ad8f42 | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /numerical_analysis_backup/small-scale-multiobj/pareto2/arch5_pod100_old2/pareto3_0.py | f485b95797c5da4248e1e6822e5815b276913e33 | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch5_decomposition_new import Arch5_decompose
np.random.seed(2010)
num_cores=3
num_slots=80
i = 0
time_limit_routing = 1200 # 1000
time_limit_sa = 108 # 10800
filename = 'traffic_matrix_old_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
#%% arch2
betav = np.array([8e-4, 1e-3, 2e-3, 4e-3])
connection_ub = []
throughput_ub = []
obj_ub = []
connection_lb = []
throughput_lb = []
obj_lb = []
connection_he = []
throughput_he = []
obj_he = []
for beta in betav:
m = Arch5_decompose(tm, num_slots=num_slots, num_cores=num_cores,
alpha=1,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=1800,mipgap=0.01, method=2)
connection_ub.append(m.connection_ub_)
throughput_ub.append(m.throughput_ub_)
obj_ub.append(m.obj_ub_)
m.create_model_sa(mipfocus=1,timelimit=10800,mipgap=0.01, method=2,
SubMIPNodes=2000, heuristics=0.8)
connection_lb.append(m.connection_lb_)
throughput_lb.append(m.throughput_lb_)
obj_lb.append(m.obj_lb_)
m.write_result_csv('cnklist_lb_%d_%.2e.csv'%(i,beta), m.cnklist_lb)
connection_lb.append(0)
throughput_lb.append(0)
obj_lb.append(0)
m.heuristic()
connection_he.append(m.obj_heuristic_connection_)
throughput_he.append(m.obj_heuristic_throughput_)
obj_he.append(m.obj_heuristic_)
m.write_result_csv('cnklist_heuristic_%d_%.2e.csv'%(i,beta), m.cnklist_heuristic_)
result = np.array([betav,
connection_ub,throughput_ub,obj_ub,
connection_lb,throughput_lb,obj_lb,
connection_he,throughput_he,obj_he]).T
file_name = "result_pareto_arch5_old_3_{}.csv".format(i)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['beta', 'connection_ub', 'throughput_ub',
'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb',
'connection_he', 'throughput_he', 'obj_he'])
writer.writerows(result)
| [
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
240e2ace0249c7ce9fc65739b1d3a3f3488491cd | a94757526253f3a3ae4b65b662316e3b03b271e9 | /week10/Similar_String_Groups.py | fa1c9ac411bb77fca9663c66e116dfcec5876acd | [] | no_license | RRoundTable/CPPS | 3e30f4d9c48aa9c85d74db6696c6d458fa38523a | fb679053ee89c15e2844fda1d705d46778ea1c0d | refs/heads/master | 2021-07-06T07:19:43.806752 | 2020-12-15T10:59:08 | 2020-12-15T10:59:08 | 214,435,381 | 1 | 2 | null | 2020-02-06T14:53:13 | 2019-10-11T12:50:22 | Python | UTF-8 | Python | false | false | 2,608 | py | '''
link: https://leetcode.com/problems/similar-string-groups/
Two strings X and Y are similar if we can swap two letters (in different positions) of X, so that it equals Y.
For example, "tars" and "rats" are similar (swapping at positions 0 and 2), and "rats" and "arts" are similar, but "star" is not similar to "tars", "rats", or "arts".
Together, these form two connected groups by similarity: {"tars", "rats", "arts"} and {"star"}. Notice that "tars" and "arts" are in the same group even though they are not similar. Formally, each group is such that a word is in the group if and only if it is similar to at least one other word in the group.
We are given a list A of strings. Every string in A is an anagram of every other string in A. How many groups are there?
Example 1:
Input: A = ["tars","rats","arts","star"]
Output: 2
Constraints:
1 <= A.length <= 2000
1 <= A[i].length <= 1000
A.length * A[i].length <= 20000
All words in A consist of lowercase letters only.
All words in A have the same length and are anagrams of each other.
The judging time limit has been increased for this question.
'''
from collections import defaultdict
from itertools import combinations
class Solution:
def numSimilarGroups(self, A: List[str]) -> int:
'''
if N < W ^ 2:
O(N^2*W)/O(N)
else:
O(N*W^3)/O(N^2 W)
'''
A = list(set(A))
n, w = len(A), len(A[0])
def compare(a, b):
count = 0
for i in range(len(b)):
if count > 2: return False
if a[i] != b[i]:
count += 1
if count == 2: return True
def find(x):
while x != union[x]:
x = union[x]
return x
union = list(range(len(A)))
if n < w * w:
for i in range(len(A)):
for j in range(i+1, len(A)):
if find(i) == find(j):continue
if compare(A[i], A[j]):
union[find(i)] = find(j)
else:
d = defaultdict(lambda: set())
for i, word in enumerate(A):
L = list(word)
for l1, l2 in combinations(range(w), 2):
L[l1], L[l2] = L[l2], L[l1]
d["".join(L)].add(i)
L[l1], L[l2] = L[l2], L[l1]
for i1, word in enumerate(A):
for i2 in d[word]:
union[find(i1)] = find(i2)
union = [find(u) for u in union]
return len(set(union)) | [
"ryu071511@gmail.com"
] | ryu071511@gmail.com |
cab4f24e669a87cef9633c9f0867231e26c396d9 | 8051c8863119bc2b5e5b3107ce9c47ab12616e63 | /Python/Programmers/Level_2_더_맵게.py | bb3a43d9979c2aa6cdba8c853e1a403e60ff9b7a | [] | no_license | Sunghwan-DS/TIL | c01f6f4c84f3e2d85e16893bbe95c7f1b6270c77 | 6d29f6451f9b17bc0acc67de0e520f912dd0fa74 | refs/heads/master | 2021-07-11T16:23:58.077011 | 2021-03-12T02:28:26 | 2021-03-12T02:28:26 | 234,990,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | # def solution(scoville, K):
# if sum(scoville) < K:
# return -1
#
# scoville.sort()
# scoville.insert(0, 0)
# answer = 0
# print("초기조건:", scoville)
#
# while scoville[1] < K:
# scoville[1], scoville[-1] = scoville[-1], scoville[1]
# min_s = scoville.pop(-1)
# target_idx = 1
#
#
# # scoville[-1] = scoville[-1] * 2 + min_s
# # target_idx = len(scoville) - 1
# while True:
# if scoville[target_idx] > scoville[target_idx * 2] or scoville[target_idx] > scoville[target_idx * 2 + 1]:
# if scoville[target_idx * 2] >= scoville[target_idx * 2 + 1]:
# scoville[target_idx], scoville[target_idx * 2 + 1] = scoville[target_idx * 2 + 1], scoville[target_idx]
# target_idx = target_idx * 2 + 1
# else:
# scoville[target_idx], scoville[target_idx * 2] = scoville[target_idx * 2], scoville[target_idx]
# target_idx *= 2
# else:
# break
# answer += 1
# print(scoville)
# return answer
import heapq
def solution(scoville, K):
if sum(scoville) < K:
return -1
answer = 0
scoville.sort()
print(scoville)
while scoville[0] < K:
min_s = heapq.heappop(scoville)
min2_s = heapq.heappop(scoville)
heapq.heappush(scoville, min_s + min2_s * 2)
answer += 1
print(scoville)
return answer
print(solution([1, 3], 7)) | [
"jeonsung02@gmail.com"
] | jeonsung02@gmail.com |
385d36b5fee83969ac145aefc9965f04e2273e19 | d348cc04a8e4bd8b16e6663b19b1545da4ac6f05 | /average_link.py | ec4984257cd5a9ecee0157cef44578166f082b15 | [] | no_license | vam-sin/Agglomerative-Clustering | 1225254aeca1aae9812ad8e4286895c186349e45 | 1ccb58af7b4f729f08338faae0ca571c64d53d67 | refs/heads/master | 2020-05-04T11:08:24.565854 | 2019-04-02T15:09:38 | 2019-04-02T15:09:38 | 179,101,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,066 | py | import numpy as np
from Bio import SeqIO
import random
from collections import defaultdict
from Levenshtein import distance
import plotly.plotly as py
import plotly.figure_factory as ff
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as sch
import scipy.spatial as scs
import math
# Making the dataset
lists = []
for record in SeqIO.parse("DNASequences.fasta", "fasta"):
lists.append(record)
# Loading the proximity matrix
f = open("prox_first.bin", 'rb')
d = np.load(f)
f.close()
print("Distances Matrix Loaded")
# Functions
def min_in_matrix(X):
min = math.inf
min_i = 0
min_j = 0
for i in range(len(X)):
for j in range(len(X[i])):
if i != j and min > X[i][j]:
min = X[i][j]
min_i = i
min_j = j
return min_i,min_j,min
def check_inf(X):
for i in range(len(X)):
for j in range(len(X[i])):
if X[i][j]!=math.inf:
return 1
return 0
def marked(Z):
for i in range(len(Z)):
if Z[i]==0:
return 1
return 0
def average_link(X):
iterator=0
Z = [] # New Linkage matrix
clusters = {} # keeps track of the clusters
count_clusters = {}
for j in range(len(X)):
count_clusters[j] = 1
while check_inf(X):
min_i,min_j,min = min_in_matrix(X)
# Adding to the Linkage matrix
if min_i in clusters and min_j in clusters:
Z.append([clusters[min_i],clusters[min_j],min,count_clusters[clusters[min_i]]+count_clusters[clusters[min_j]]])
elif min_i in clusters:
a = count_clusters[clusters[min_i]]+1
Z.append([clusters[min_i],min_j,min,a])
elif min_j in clusters:
a = 1+count_clusters[clusters[min_j]]
Z.append([min_i,clusters[min_j],min,a])
else:
Z.append([min_i,min_j,min,2])
# Updating the rest of the array
for i in range(len(X)):
if i !=min_i and i!=min_j:
dist = 0
a = 0 # Total points in both clusters combined
# Find a
if min_i in clusters and min_j in clusters:
a = count_clusters[clusters[min_i]] + count_clusters[clusters[min_j]]
elif min_i in clusters:
a = count_clusters[clusters[min_i]] + count_clusters[min_j]
elif min_j in clusters:
a = count_clusters[min_i] + count_clusters[clusters[min_j]]
else:
a = 2
# Find total distance
# if check_inf_row(X[i]):
if min_i in clusters:
dist+=X[i][min_i]*count_clusters[clusters[min_i]]
else:
dist+=X[i][min_i]
if min_j in clusters:
dist+=X[min_j][i]*count_clusters[clusters[min_j]]
else:
dist+=X[min_j][i]
dist = dist/float(a)
X[min_j][i] = X[i][min_j] = float(dist)
# Updating the clusters counts and clusters
clusters[min_j] = len(X)+iterator
iterator+=1
count_clusters[clusters[min_j]] = 1
if min_i in clusters and min_j in clusters:
count_clusters[clusters[min_j]] = count_clusters[clusters[min_i]] + count_clusters[clusters[min_j]]
elif min_i in clusters:
count_clusters[clusters[min_j]] = count_clusters[clusters[min_i]] + 1
elif min_j in clusters:
count_clusters[clusters[min_j]] = count_clusters[clusters[min_j]] + 1
else:
count_clusters[clusters[min_j]] = 2
# removing one of the data points
for i in range(len(X)):
X[min_i][i]=X[i][min_i] = math.inf
return X,Z
for i in range(len(d)):
d[i][i] = math.inf
# average_link
d_average, Zdash_average = average_link(d)
Zdash_average = np.array(Zdash_average,dtype='float64')
sch.dendrogram(Zdash_average)
plt.savefig('average.png')
plt.show()
print("Average Generated")
| [
"vamsinallapareddy@gmail.com"
] | vamsinallapareddy@gmail.com |
174e9986a5f9a4ed60b9f16b51c97810e0abb5f5 | 4dda92393e178a45b4be36b4e322ac8d99ad19b4 | /src/generative_playground/utils/respawner.py | 6ecccaa5a6d1134d8c696e678d8304776a23ae18 | [
"MIT"
] | permissive | JGU-dev/generative_playground | 6567c7eaea4a09c95902c9a8dbc0af479294c8aa | 5c336dfbd14235e4fd97b21778842a650e733275 | refs/heads/master | 2022-03-25T06:11:37.121235 | 2019-12-29T16:00:21 | 2019-12-29T16:00:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | import sys, subprocess
print("respawner starting with ", sys.argv[1:])
counter = 0
while True:
print('spawning, iteration', counter)
retval = subprocess.run(sys.argv[1:])
if retval.returncode <0:
break
counter += 1 | [
"egor.kraev@gmail.com"
] | egor.kraev@gmail.com |
665a85b8cfcc9e0244dfb2ef9b10d85932d06f4b | d1236e327bc750a845b46edd78e6dae63c157b86 | /facade_pattern/screen.py | 1cbc5a4ed7a6ac4f1658fdcfd78d14b7741916fd | [] | no_license | EricMontague/Head-First-Design-Patterns-In-Python | 12cc14c592d8728c6fc5677d4d6e641a94df4e3f | 296edada143336f88e8b2d066f5fc99c95ecfcc7 | refs/heads/master | 2022-12-31T05:12:24.719241 | 2020-10-07T21:51:20 | 2020-10-07T21:51:20 | 298,710,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | class Screen:
def __init__(self, description):
self._description = description
def up(self):
print(f"{self._description} going up")
def down(self):
print(f"{self._description} going down")
def __str__(self):
return self._description
| [
"eric.g.montague@gmail.com"
] | eric.g.montague@gmail.com |
a6f52906d319e6ac298f0c74898e58f12778ca34 | 4d892dc51e2dda0fcce246ac608fc4e0ce98c52b | /FirstStepsInPython/Basics/Lab6 Nested Loops/03. Combinations.py | decabc50ed97c774f38521b558b51d2bf60acf67 | [
"MIT"
] | permissive | inovei6un/SoftUni-Studies-1 | 510088ce65e2907c2755a15e427fd156909157f0 | 3837c2ea0cd782d3f79353e61945c08a53cd4a95 | refs/heads/main | 2023-08-14T16:44:15.823962 | 2021-10-03T17:30:48 | 2021-10-03T17:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | num = int(input())
counter = 0
for x1 in range(0, num + 1):
for x2 in range(0, num + 1):
for x3 in range(0, num + 1):
if (x1 + x2 + x3) == num:
counter += 1
print(counter)
| [
"lazar_off@yahoo.com"
] | lazar_off@yahoo.com |
8e6dfd9314f2e7e039efeb9bd4b6210d0c8c14e0 | ae65873c3584cef7139066b224daad04410af6d2 | /NestedList.py | c2c204a177c6612160c5c7fd71373586bc565716 | [] | no_license | rajatkashyap/Python | 2240c7472d07803c460c7a55d570e20694b694f9 | f74c85c65b0e209a5f7ab25b653d42835222faaf | refs/heads/master | 2022-06-25T19:20:52.847498 | 2022-06-08T14:40:45 | 2022-06-08T14:40:45 | 145,714,257 | 0 | 0 | null | 2022-04-25T00:18:37 | 2018-08-22T13:39:14 | Python | UTF-8 | Python | false | false | 308 | py | i=int(raw_input())
l=[]
for x in range(i):
a=raw_input()
b=float(raw_input())
l.append([a,b])
#l1=[[input(),float(input())] for x in range(i)]
#print l1
scores = sorted({s[1] for s in l})
result = sorted(s[0] for s in l if s[1] == scores[1])
print '\n'.join(result)
#print result | [
"rajatkashyap@Rajats-MBP.T-mobile.com"
] | rajatkashyap@Rajats-MBP.T-mobile.com |
f752427116cf14f7650884c3dbad47ed6984b31c | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /tools/perf/page_sets/tough_texture_upload_cases.py | 033db97946706c90be33819ef2fa3d6d8b28aa75 | [
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-only",
"MIT",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 1,424 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ToughTextureUploadCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(
ToughTextureUploadCasesPage,
self).__init__(
url=url,
page_set=page_set,
name=url.split('/')[-1])
def RunPageInteractions(self, action_runner):
with action_runner.CreateInteraction('Animation'):
action_runner.Wait(10)
class ToughTextureUploadCasesPageSet(story.StorySet):
"""
Description: A collection of texture upload performance tests
"""
def __init__(self):
super(ToughTextureUploadCasesPageSet, self).__init__()
urls_list = [
'file://tough_texture_upload_cases/background_color_animation.html',
# pylint: disable=line-too-long
'file://tough_texture_upload_cases/background_color_animation_with_gradient.html',
'file://tough_texture_upload_cases/small_texture_uploads.html',
'file://tough_texture_upload_cases/medium_texture_uploads.html',
'file://tough_texture_upload_cases/large_texture_uploads.html',
'file://tough_texture_upload_cases/extra_large_texture_uploads.html',
]
for url in urls_list:
self.AddStory(ToughTextureUploadCasesPage(url, self))
| [
"jacob-chen@iotwrt.com"
] | jacob-chen@iotwrt.com |
662bcebd1c7043cefd580e5dda9e0b107b76de69 | 33f8e35b33d4f64d737b9b4239204d2c7d669f7e | /leapp/views/subcription.py | e1710c8afdf48fa2748a089da5cd8f146323080d | [] | no_license | Emad-ahmed/EmailSenderWebsite | 872fd6b8ed0766d9a5bfe2cc1afff2bfdbc071c7 | e6acc443211acdc53869510661b6178f63f2ad73 | refs/heads/main | 2023-06-25T17:48:35.765922 | 2021-07-08T06:31:59 | 2021-07-08T06:31:59 | 384,022,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.shortcuts import render
from django.views import View
# Create your views here.
class SubcriptionView(View):
def get(self, request):
mydata = {
'subscription': 'active'
}
return render(request, 'subscription.html', mydata)
| [
"amadahmed1234678@gmail.com"
] | amadahmed1234678@gmail.com |
ad99f3146a98393b269f70c8806e9022691ec639 | c57376701537dc6969939c3afb51d542d670db61 | /Numpy/10_Xu-li-data-su-dung-Numpy/9_Tuong-quan-Sepallength-Petallength.py | fa216982379d73b542d85191febe912c74f5e532 | [] | no_license | dangnam739/Learn_Python_Begin | d3f5f24504b3c703de4e981abb432f3734558e5d | 81764172475f26374a3e21d150395a99e8a183e6 | refs/heads/master | 2021-07-15T19:13:58.156215 | 2020-08-21T06:38:04 | 2020-08-21T06:38:04 | 200,081,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #Cau 9: tim moi tuong quan giuwa sepallength(cot 1) va petallength(cot 3)
import numpy as np
iris_2d = np.genfromtxt('iris.csv', delimiter=',', dtype=float, usecols=[0, 1, 2, 3])
#Cach 1: dung ham corrcoef tu numpy
out = np.corrcoef(iris_2d[:, 0], iris_2d[:, 2])[0,1]
print(out)
#Cach 2: dung ham pearsonr tu scipy
from scipy.stats.stats import pearsonr
out = pearsonr(iris_2d[:, 0], iris_2d[:, 2])[0]
print(out)
| [
"47108512+ChrisZangNam@users.noreply.github.com"
] | 47108512+ChrisZangNam@users.noreply.github.com |
98bbad055766dde226dadd52b65c212f7cd94bc9 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python-flask/generated/openapi_server/models/com_adobe_cq_social_calendar_servlets_time_zone_servlet_info.py | 4705252b57fa0690f21c882f27a71d10c9493c11 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 5,145 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.com_adobe_cq_social_calendar_servlets_time_zone_servlet_properties import ComAdobeCqSocialCalendarServletsTimeZoneServletProperties # noqa: F401,E501
from openapi_server import util
class ComAdobeCqSocialCalendarServletsTimeZoneServletInfo(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, pid: str=None, title: str=None, description: str=None, properties: ComAdobeCqSocialCalendarServletsTimeZoneServletProperties=None):  # noqa: E501
        """ComAdobeCqSocialCalendarServletsTimeZoneServletInfo - a model defined in OpenAPI

        :param pid: The pid of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.  # noqa: E501
        :type pid: str
        :param title: The title of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.  # noqa: E501
        :type title: str
        :param description: The description of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.  # noqa: E501
        :type description: str
        :param properties: The properties of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.  # noqa: E501
        :type properties: ComAdobeCqSocialCalendarServletsTimeZoneServletProperties
        """
        # Attribute name -> declared type; consumed by the generated
        # (de)serialization machinery in openapi_server.util.
        self.openapi_types = {
            'pid': str,
            'title': str,
            'description': str,
            'properties': ComAdobeCqSocialCalendarServletsTimeZoneServletProperties
        }

        # Attribute name -> JSON key used on the wire (identical here).
        self.attribute_map = {
            'pid': 'pid',
            'title': 'title',
            'description': 'description',
            'properties': 'properties'
        }

        self._pid = pid
        self._title = title
        self._description = description
        self._properties = properties

    @classmethod
    def from_dict(cls, dikt) -> 'ComAdobeCqSocialCalendarServletsTimeZoneServletInfo':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The comAdobeCqSocialCalendarServletsTimeZoneServletInfo of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.  # noqa: E501
        :rtype: ComAdobeCqSocialCalendarServletsTimeZoneServletInfo
        """
        return util.deserialize_model(dikt, cls)

    @property
    def pid(self) -> str:
        """Gets the pid of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :return: The pid of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :rtype: str
        """
        return self._pid

    @pid.setter
    def pid(self, pid: str):
        """Sets the pid of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :param pid: The pid of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :type pid: str
        """
        self._pid = pid

    @property
    def title(self) -> str:
        """Gets the title of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :return: The title of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :rtype: str
        """
        return self._title

    @title.setter
    def title(self, title: str):
        """Sets the title of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :param title: The title of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :type title: str
        """
        self._title = title

    @property
    def description(self) -> str:
        """Gets the description of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :return: The description of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description: str):
        """Sets the description of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :param description: The description of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :type description: str
        """
        self._description = description

    @property
    def properties(self) -> ComAdobeCqSocialCalendarServletsTimeZoneServletProperties:
        """Gets the properties of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :return: The properties of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :rtype: ComAdobeCqSocialCalendarServletsTimeZoneServletProperties
        """
        return self._properties

    @properties.setter
    def properties(self, properties: ComAdobeCqSocialCalendarServletsTimeZoneServletProperties):
        """Sets the properties of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.

        :param properties: The properties of this ComAdobeCqSocialCalendarServletsTimeZoneServletInfo.
        :type properties: ComAdobeCqSocialCalendarServletsTimeZoneServletProperties
        """
        self._properties = properties
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
471c7653b0647a0c5a036552877062a532af9262 | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/gui/shared/gui_items/dossier/achievements/handofdeathachievement.py | b4dde0ec5ab4cca88dd1f0162a9554600461124e | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 832 | py | # 2016.02.14 12:41:38 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/HandOfDeathAchievement.py
from dossiers2.ui.achievements import ACHIEVEMENT_BLOCK as _AB
from abstract import SeriesAchievement
class HandOfDeathAchievement(SeriesAchievement):
    """Dossier achievement backed by the kill-series counters."""

    def __init__(self, dossier, value = None):
        """Register the achievement under the single-block 'handOfDeath' key."""
        super(HandOfDeathAchievement, self).__init__('handOfDeath', _AB.SINGLE, dossier, value)

    def _getCounterRecordNames(self):
        """Return the dossier records backing the counter: current and best series."""
        current_series = (_AB.TOTAL, 'killingSeries')
        best_series = (_AB.TOTAL, 'maxKillingSeries')
        return (current_series, best_series)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\gui_items\dossier\achievements\handofdeathachievement.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:41:38 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
94ebcfc404c31b585404c1fe589095b751eceede | 2eb3487fc70b806674cc0b92e7113f63c54ade08 | /testing.py | 55681a7bb373c8d669592acc3b7d274282828b2b | [] | no_license | foryourselfand/informatics_lab4 | 34d4dfdb996c37cf20da3a5176e37077c2d15249 | 0adb16985c19795b7bbf2bdb6b265ebe716af80a | refs/heads/master | 2020-09-01T01:03:28.659326 | 2019-11-18T17:58:18 | 2019-11-18T17:58:18 | 218,835,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from inout import IO
from converter import FromJsonToProtoConverter
def main():
    """Read a timetable, convert it from JSON to proto, write it out, and
    print whether the written data round-trips to the converted value."""
    io_handler = IO()
    json_to_proto = FromJsonToProtoConverter()

    source_timetable = io_handler.get_input_timetable()
    converted_timetable = json_to_proto.convert(source_timetable)
    io_handler.write_output_timetable(converted_timetable)

    reloaded_timetable = io_handler.get_serialized_timetable()
    # True when the serialized output matches the freshly converted message.
    print(converted_timetable == reloaded_timetable)
    io_handler.speed_test()


if __name__ == '__main__':
    main()
| [
"foryourselfand@gmail.com"
] | foryourselfand@gmail.com |
5a7c2839089386462220c4728d49c8cc96fd0ffd | acac4b642cdaae8fa85cafbaf856212f9984bdf9 | /python_object_extractor/substitutions.py | a706af8f8b5c3f84314efca26a34d3b3d9000f17 | [
"MIT"
] | permissive | ali1rathore/python-object-extractor | bfa0f7cf3b3d1d5d7b81c7c54c2962d83657e517 | e66c47c840c8c99d9706e299ecf499df361926c2 | refs/heads/master | 2020-08-04T04:16:20.489242 | 2019-02-24T22:08:41 | 2019-02-24T22:08:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,753 | py | import inspect
from typing import Dict, List, Set, Optional, Iterable
from python_object_extractor.attributes import AttributesAccessChain
from python_object_extractor.attributes import extract_attributes_access_chains
from python_object_extractor.exceptions import PythonObjectExtractorException
from python_object_extractor.imports import ObjectImport
from python_object_extractor.imports import ObjectImportsGroupped
from python_object_extractor.modules import get_module_by_name
from python_object_extractor.references import ObjectReference
class NoAccessedModuleObjects(PythonObjectExtractorException):
    """Raised when an object imports a module but never accesses its members."""

    def __init__(self, imported_module_name: str, source: str):
        message = (
            f"object refers to imported module but does not access any of its "
            f"members, imported module: '{imported_module_name}', "
            f"object source:\n{source}"
        )
        super().__init__(message)
def _extract_imported_objects_access_chains(
    source: str,
    imported_objects_names: Set[str],
) -> List[AttributesAccessChain]:
    """Return only the access chains in ``source`` that start at an imported name."""
    return [
        chain
        for chain in extract_attributes_access_chains(source)
        if chain.object_name in imported_objects_names
    ]
def _find_first_non_module_index(
    module_name: str,
    object_name: str,
    access_chain: List[str],
) -> Optional[int]:
    """Depth (0-based) of the first non-module object along the dotted path
    ``module_name.object_name.<access_chain...>``.

    Returns ``0`` if ``object_name`` itself is not a module, ``None`` if every
    element of the chain resolves to a (sub)module.
    """
    if module_name == object_name:
        object_full_name = module_name
    else:
        object_full_name = f"{module_name}.{object_name}"

    module = get_module_by_name(module_name)
    try:
        the_object = getattr(module, object_name)
    except AttributeError:
        # Not an attribute of the parent module: try importing it as a
        # submodule under its full dotted path instead.
        the_object = get_module_by_name(object_full_name)

    if not inspect.ismodule(the_object):
        return 0

    if not access_chain:
        # Chain exhausted while still inside modules -> no non-module found.
        return

    # Recurse one level deeper; each level of recursion adds 1 to the index.
    subindex = _find_first_non_module_index(
        object_full_name,
        access_chain[0],
        access_chain[1:],
    )
    if subindex is not None:
        return subindex + 1
def substitute_accesses_to_imported_modules(
    source: str,
    imports: List[ObjectImport],
) -> List[ObjectImport]:
    """Rewrite module imports as imports of the members that ``source`` uses.

    For every attribute access chain that starts at an imported name, try to
    substitute the original import with an import of the deepest accessed
    module member. Substituted imports record their origin via ``substituted``.

    :param source: source code of the object being processed.
    :param imports: imports that are in scope for ``source``.
    :return: imports with substitutions applied (set-based, order not preserved).
    """
    alias_to_imports = {
        (x.alias or x.object_reference.object_name): x
        for x in imports
    }

    access_chains = _extract_imported_objects_access_chains(
        source=source,
        imported_objects_names=set(alias_to_imports.keys()),
    )

    results = set(imports)
    for access_chain in access_chains:
        original_import = alias_to_imports[access_chain.object_name]
        new_reference = maybe_make_import_substitution(
            imported_module_name=original_import.object_reference.module_name,
            imported_object_name=original_import.object_reference.object_name,
            access_chain=access_chain.sequence,
            source=source,
        )
        if new_reference:
            # Several access chains may start at the same imported alias.
            # ``discard`` (instead of ``remove``) keeps the second and later
            # substitutions of one import from raising KeyError once it has
            # already been dropped from ``results``.  Drop it *before*
            # mutating ``access_chain`` so the set lookup still sees the
            # object's original (hash-relevant) state.
            results.discard(original_import)
            original_import.access_chain = access_chain.sequence
            results.add(ObjectImport(
                object_reference=new_reference,
                alias=None,
                substituted=original_import,
            ))

    return list(results)
def maybe_make_import_substitution(
imported_module_name: str,
imported_object_name: str,
access_chain: List[str],
source: str,
) -> Optional[ObjectReference]:
idx = _find_first_non_module_index(
module_name=imported_module_name,
object_name=imported_object_name,
access_chain=access_chain,
)
if idx is None:
imported_module_name = "{}.{}.{}".format(
imported_module_name,
imported_object_name,
".".join(access_chain),
)
raise NoAccessedModuleObjects(imported_module_name, source)
if idx == 0:
return
idx -= 1
new_subpath = access_chain[:idx]
new_module_name_path = [imported_module_name, ]
if imported_module_name != imported_object_name:
new_module_name_path.append(imported_object_name)
new_module_name_path.extend(new_subpath)
new_module_name = ".".join(new_module_name_path)
new_object_name = access_chain[idx]
return ObjectReference(
module_name=new_module_name,
object_name=new_object_name,
)
def substitute_aliases_of_imports(
    imports: Iterable[ObjectImport],
    references_to_aliases: Dict[ObjectReference, str],
) -> List[ObjectImport]:
    """Return ``imports`` with aliases replaced per ``references_to_aliases``.

    Imports whose effective alias already matches (or that have no mapping)
    are passed through unchanged; the rest are wrapped in a new
    ``ObjectImport`` that records the original via ``substituted``.
    """
    results = []
    for original in imports:
        effective_alias = original.alias or original.object_reference.object_name
        desired_alias = references_to_aliases.get(original.object_reference)
        if desired_alias and effective_alias != desired_alias:
            original = ObjectImport(
                object_reference=original.object_reference,
                alias=desired_alias,
                substituted=original,
            )
        results.append(original)
    return results
def substitute_aliases_of_groupped_imports(
    groupped_imports: List[ObjectImportsGroupped],
    references_to_aliases: Dict[ObjectReference, str],
) -> None:
    """Apply alias substitutions in place to every section of each group."""
    for group in groupped_imports:
        for section in ('stdlib', 'third_party', 'project'):
            section_imports = getattr(group, section)
            if section_imports:
                setattr(group, section, substitute_aliases_of_imports(
                    section_imports,
                    references_to_aliases,
                ))
| [
"oblovatniy@gmail.com"
] | oblovatniy@gmail.com |
20e19af0cb679491df4f0179aacc547a6adb16b2 | 625f2f86f2b2e07cb35204d9b3232427bf462a09 | /HIRun2018PbPb/L1T/L1Ntuple_Pythia8_AllQCDPhoton15_Hydjet_Quenched_Cymbal5Ev8_SK1212/crabConfig.py | b64926fe63684d20622521fc7fa158ffd7313f4f | [] | no_license | ttrk/production | abb84c423a076fd9966276b7ed4350936c755e0b | f8a64c9c38de215802799365f0f7a99e1ee78276 | refs/heads/master | 2023-02-08T23:48:56.355141 | 2023-01-26T08:46:22 | 2023-01-26T08:46:22 | 52,877,406 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration: L1 ntuple production over a private PbPb MC sample.
config = Configuration()

config.section_("General")
config.General.requestName = "L1Ntuple_Pythia8_AllQCDPhoton15_Hydjet_Quenched_Cymbal5Ev8_SK1212"
config.General.transferLogs = False

config.section_("JobType")
config.JobType.pluginName = "Analysis"
config.JobType.psetName = "l1Ntuple_RAW2DIGI.py"
config.JobType.maxMemoryMB = 2500 # request high memory machines, 2500 is the maximum guaranteed number.
config.JobType.maxJobRuntimeMin = 2750 # request longer runtime, ~47 hours. 2750 is the maximum guaranteed number.

## Software
# CMSSW_10_2_1, l1t-integration-v99.0
# This integration contains the new L1 EG bypass flags : https://github.com/cms-l1t-offline/cmssw/pull/708
# https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideL1TStage2Instructions?rev=140#Environment_Setup_with_Integrati
## Driver
# https://twiki.cern.ch/twiki/bin/view/CMS/L1HITaskForce?rev=42#Offline_SW_setup
# l1Ntuple_RAW2DIGI.py is created after calling ./createConfigs.sh
# Samples are listed here : https://twiki.cern.ch/twiki/bin/view/CMS/PbPb5TeV2018PrivateMC?rev=19#Tunes

config.section_("Data")
config.Data.inputDataset = "/Pythia8_AllQCDPhoton15_Hydjet_Quenched_Cymbal5Ev8/clindsey-RAWSIM_20180630-d863108fee469c130ddd2763f36829bb/USER"
config.Data.inputDBS = "phys03"
config.Data.splitting = "FileBased"
config.Data.unitsPerJob = 120
config.Data.totalUnits = -1  # -1 = process the whole dataset
config.Data.publication = False
config.Data.outputDatasetTag = "L1Ntuple_egHOverEcut_EB1_EE1_egEtaCut24_SK1212"
config.Data.outLFNDirBase = "/store/user/katatar/HIRun2018PbPb/L1T/"

config.section_("Site")
config.Site.storageSite = "T2_US_MIT"
config.Site.whitelist = ["T2_US_MIT"]

config.section_("Debug")
config.Debug.extraJDL = ["+CMS_ALLOW_OVERFLOW=False"]
| [
"tatark@mit.edu"
] | tatark@mit.edu |
6cfb11fd5ed1ef1359d61db18fbbb8fce9c6937c | ee974d693ca4c4156121f8cb385328b52eaac07c | /insightface/src/align/align_megaface.py | 3c72359a2891be91019ca6823668870e9bf13c69 | [
"MIT"
] | permissive | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:2bb6d2c41848e33065bda3ace8e5bb9cde68ae11c4953056ed6f06f786f35245
size 10062
| [
"Nqk180998!"
] | Nqk180998! |
cab0faaf8cbfa90afa125c95d28f00e49cda4abc | 64cdb9e8fdcde8a71a16ce17cd822441d9533936 | /_swea/d4/1865_동철이의일분배.py | 8d88a9dbf686174790d59eec94820c2da8ca8d8c | [] | no_license | heecheol1508/algorithm-problem | fa42769f0f2f2300e4e463c5731e0246d7b7643c | 6849b355e15f8a538c9a071b0783d1789316d29d | refs/heads/main | 2023-07-20T23:46:07.037975 | 2021-08-31T12:47:33 | 2021-08-31T12:47:33 | 302,830,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,944 | py | #
# ################################ ver.1
# # import sys
# # sys.stdin = open("input.txt", "r")
#
#
# def solve(k):
# global ans
# if k == N :
# val = 1
# for i in range(N):
# val *= mat[i][perm[i]]
# if val > ans:
# ans = val
# else:
# for i in range(k, N):
# perm[k], perm[i] = perm[i], perm[k]
# solve(k + 1)
# perm[k], perm[i] = perm[i], perm[k]
#
#
# TC = int(input())
# for tc in range(1, TC + 1):
# N = int(input())
#
# mat = [0.0] * N
# for i in range(N):
# mat[i] = list(map(lambda x: int(x)/100, input().split()))
#
# ans = 0
# perm = [x for x in range(N)]
# solve(0)
# print("#%d %.6f" % (tc, ans * 100))
#
#
# ################################ ver.2
# # import sys
# # import time
# # sys.stdin = open("input.txt", "r")
# #
# # st = time.time()
#
# def solve(k):
# global ans
# global cnt
# cnt += 1
# if k == N :
# val = 1
# for i in range(N):
# val *= mat[i][perm[i]]
# if val > ans:
# ans = val
# else:
# for i in range(k, N):
# perm[k], perm[i] = perm[i], perm[k]
# val = 1
# for j in range(k + 1):
# val *= mat[j][perm[j]]
# if val > ans:
# solve(k + 1)
# perm[k], perm[i] = perm[i], perm[k]
#
#
#
# for tc in range(1, int(input()) + 1):
# N = int(input())
#
# mat = [0.0] * N
# for i in range(N):
# mat[i] = list(map(lambda x: int(x) / 100, input().split()))
#
# cnt = 0
# ans = 0
# perm = [x for x in range(N)]
# solve(0)
# print("#%d %.6f" % (tc, ans * 100))
#
# # print("#%d %.6f" % (tc, ans * 100), cnt)
# #
# # print(time.time() - st)
#
# ################################ ver.3
import sys
# import time
# Redirect stdin so input() reads the contest test cases from a local file.
sys.stdin = open("input.txt", "r")
#
# st = time.time()
def grid_ans():
    """Seed the global lower bound ``ans`` with a greedy row-by-row assignment.

    For each row, pick the highest-valued column not used yet.  The picks form
    a valid (column-unique) assignment, so their product is an achievable
    lower bound for the branch-and-bound in ``solve``.

    Reads globals ``N`` and ``mat``; writes global ``ans``.
    """
    global ans
    val = 1
    used = [False] * N
    for i in range(N):
        # Index of the best unused column for row i (-1 while none chosen).
        # Bug fixed: the original compared the *index* (``tmaxi``) against a
        # cell *value* (``tmaxi < mat[i][j]``), which could re-use a column
        # and seed ``ans`` with an unachievable bound, wrongly pruning the
        # true optimum in ``solve``.
        best = -1
        for j in range(N):
            if not used[j] and (best == -1 or mat[i][best] < mat[i][j]):
                best = j
        used[best] = True
        val *= mat[i][best]
    ans = val
def solve(k, val):
    """Branch-and-bound over permutations.

    ``perm[k:]`` holds the columns still unassigned; ``val`` is the product of
    ``mat[i][perm[i]]`` for rows 0..k-1.  Entries come from ``int(x)/100`` so
    each factor is presumably <= 1.0 (TODO confirm inputs <= 100), which makes
    the ``>`` pruning sound: extending a branch can only shrink the product.

    Reads globals ``N``, ``mat``, ``perm``; updates globals ``ans`` (best
    product found) and ``cnt`` (number of visited nodes).
    """
    global ans
    global cnt
    cnt += 1
    if k == N:
        if val > ans:
            ans = val
    else:
        for i in range(k, N):
            perm[k], perm[i] = perm[i], perm[k]
            # Recurse only if this branch can still beat the current best.
            if val * mat[k][perm[k]] > ans:
                solve(k + 1, val * mat[k][perm[k]])
            # Undo the swap so perm is intact for the next candidate.
            perm[k], perm[i] = perm[i], perm[k]
# Per-testcase driver: read the matrix of percentages, seed a greedy lower
# bound, then run the exact branch-and-bound search.
for tc in range(1, int(input()) + 1):
    N = int(input())
    mat = [0] * N
    for i in range(N):
        # Percentages are scaled to probabilities in [0, 1].
        mat[i] = list(map(lambda x: int(x)/100, input().split()))
    cnt = 0
    ans = 0
    perm = [x for x in range(N)]
    grid_ans()   # greedy seed makes the pruning in solve() effective
    solve(0, 1)
    # Report the best product back as a percentage.
    print("#%d %.6f" % (tc, ans * 100))
    # print("#%d %.6f" % (tc, ans * 100), cnt)
# print(time.time() - st)
| [
"heecheol1508@gmail.com"
] | heecheol1508@gmail.com |
7990115461cd14cba0e698790d6ce6b1d3ff43c5 | 8968a2696cea58d7b04cb59c2525e89cf6f88a0a | /train_mlbert_en_plus_sv.py | 170193242a33f4be050b6475c701fedfbd89f373 | [] | no_license | Vottivott/swedsquad | a739c7aa938a5db381e2bda9e047c8db93390a90 | 5ed279f166d26b45228501012fbb8817d1c5ae11 | refs/heads/master | 2020-09-26T21:02:36.342548 | 2020-02-09T01:39:38 | 2020-02-09T01:39:38 | 226,342,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | import run_squad
# Command-line arguments forwarded to run_squad.main; kept as one multi-line
# string and split on whitespace.
args = """--model_type bert
--model_name_or_path bert-base-multilingual-cased
--do_train
--train_file original_plus_confident_translated_train_no_impossible.json
--predict_file confident_translated_dev_no_impossible.json
--learning_rate 3e-5
--num_train_epochs 2
--max_seq_length 384
--doc_stride 128
--output_dir ml_bert_en_plus_sv
--per_gpu_eval_batch_size=3
--per_gpu_train_batch_size=3
--overwrite_cache
--save_steps=2000
""".split() # --eval_all_checkpoints
import os
import __main__ as main
# Log which script is running and with what arguments before training starts.
print("Script: " + os.path.basename(main.__file__))
print(args)
results = run_squad.main(args)
import json
import datetime
# Timestamp the results file so repeated runs never overwrite each other.
date = datetime.datetime.now().strftime("%I.%M.%S.%f %p on %B %d, %Y")
outname = "results " + os.path.basename(main.__file__)[:-3] + " " + date
with open(outname + ".json", "w") as out:
    json.dump(results, out)
print(args)
print("Script: " + os.path.basename(main.__file__))
"hannes.von.essen@gmail.com"
] | hannes.von.essen@gmail.com |
089414c504cc26930fa000d737ddcd9717f10394 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/express_route_service_provider_py3.py | 0741f9964bc0eeb80c778d26a8aa0b432f94461d | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,490 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class ExpressRouteServiceProvider(Resource):
"""A ExpressRouteResourceProvider object.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param peering_locations: Get a list of peering locations.
:type peering_locations: list[str]
:param bandwidths_offered: Gets bandwidths offered.
:type bandwidths_offered:
list[~azure.mgmt.network.v2017_10_01.models.ExpressRouteServiceProviderBandwidthsOffered]
:param provisioning_state: Gets the provisioning state of the resource.
:type provisioning_state: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'peering_locations': {'key': 'properties.peeringLocations', 'type': '[str]'},
'bandwidths_offered': {'key': 'properties.bandwidthsOffered', 'type': '[ExpressRouteServiceProviderBandwidthsOffered]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, peering_locations=None, bandwidths_offered=None, provisioning_state: str=None, **kwargs) -> None:
super(ExpressRouteServiceProvider, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.peering_locations = peering_locations
self.bandwidths_offered = bandwidths_offered
self.provisioning_state = provisioning_state
| [
"noreply@github.com"
] | xiafu-msft.noreply@github.com |
d19c95cc3477fbfb2cc7512698ec60452a67a34a | abc4a73e5f93ebf90be946b95ef215e32c823353 | /colour/models/rgb/ycocg.py | 47ab9224b921f2a4ab1488ab30e8875b5d8d1d84 | [
"BSD-3-Clause"
] | permissive | OmarWagih1/colour | 69f5108e83ec443551c5593c066bcd4e3596060f | bdc880a2783ff523dafb19f1233212dd03a639bd | refs/heads/develop | 2021-04-14T20:30:29.635916 | 2020-07-26T05:46:00 | 2020-07-26T05:46:00 | 249,263,927 | 0 | 0 | BSD-3-Clause | 2020-03-22T20:11:06 | 2020-03-22T20:11:06 | null | UTF-8 | Python | false | false | 2,601 | py | # -*- coding: utf-8 -*-
"""
YCoCg Colour Encoding
======================
Defines the *YCoCg* colour encoding related transformations:
- :func:`colour.RGB_to_YCoCg`
- :func:`colour.YCoCg_to_RGB`
References
----------
- :cite:`Malvar2003` : Malvar, H., & Sullivan, G. (2003). YCoCg-R: A Color
Space with RGB Reversibility and Low Dynamic Range.
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/06/\
Malvar_Sullivan_YCoCg-R_JVT-I014r3-2.pdf
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.utilities import dot_vector
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Development'
__all__ = [
'RGB_TO_YCOCG_MATRIX',
'YCOCG_TO_RGB_MATRIX',
'RGB_to_YCoCg',
'YCoCg_to_RGB',
]
RGB_TO_YCOCG_MATRIX = np.array([
[1 / 4, 1 / 2, 1 / 4],
[1 / 2, 0, -1 / 2],
[-1 / 4, 1 / 2, -1 / 4],
])
"""
*R'G'B'* colourspace to *YCoCg* colour encoding matrix.
RGB_TO_YCOCG_MATRIX : array_like, (3, 3)
"""
YCOCG_TO_RGB_MATRIX = np.array([
[1, 1, -1],
[1, 0, 1],
[1, -1, -1],
])
"""
*YCoCg* colour encoding to *R'G'B'* colourspace matrix.
YCOCG_TO_RGB_MATRIX : array_like, (3, 3)
"""
def RGB_to_YCoCg(RGB):
"""
Converts an array of *R'G'B'* values to the corresponding *YCoCg* colour
encoding values array.
Parameters
----------
RGB : array_like
Input *R'G'B'* array.
Returns
-------
ndarray
*YCoCg* colour encoding array.
References
----------
:cite:`Malvar2003`
Examples
--------
>>> RGB_to_YCoCg(np.array([1.0, 1.0, 1.0]))
array([ 1., 0., 0.])
>>> RGB_to_YCoCg(np.array([0.75, 0.5, 0.5]))
array([ 0.5625, 0.125 , -0.0625])
"""
return dot_vector(RGB_TO_YCOCG_MATRIX, RGB)
def YCoCg_to_RGB(YCoCg):
"""
Converts an array of *YCoCg* colour encoding values to the corresponding
*R'G'B'* values array.
Parameters
----------
YCoCg : array_like
*YCoCg* colour encoding array.
Returns
-------
ndarray
Output *R'G'B'* array.
References
----------
:cite:`Malvar2003`
Examples
--------
>>> YCoCg_to_RGB(np.array([1.0, 0.0, 0.0]))
array([ 1., 1., 1.])
>>> YCoCg_to_RGB(np.array([0.5625, 0.125, -0.0625]))
array([ 0.75, 0.5 , 0.5 ])
"""
return dot_vector(YCOCG_TO_RGB_MATRIX, YCoCg)
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
4c54d59e42900cfb57b6bc8ccdda606828c9490e | 177cf1fb99de1a09ac92507f336c49dbb4dd05ae | /scrape/spatula/pages.py | da2191049105f8c599a51f046e5f85ca8fdf23bc | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | brianhlin/people | 9574746c37d850ac3aa6b629be598293497cd67b | 1e62124c24feafe26437c344aa5a5bb1de00c740 | refs/heads/main | 2023-02-19T05:17:18.351344 | 2021-01-08T20:38:22 | 2021-01-08T20:38:22 | 328,417,651 | 0 | 0 | null | 2021-01-10T15:41:06 | 2021-01-10T15:41:05 | null | UTF-8 | Python | false | false | 3,186 | py | import lxml.html
from .sources import URL
class Page:
source = None
dependencies = {}
def _fetch_data(self, scraper):
# process dependencies first
for val, dep in self.dependencies.items():
dep._fetch_data(scraper)
setattr(self, val, dep.get_data())
if not self.source:
if hasattr(self, "get_source_from_input"):
self.source = self.get_source_from_input()
else:
raise Exception(
f"{self.__class__.__name__} has no source or get_source_from_input"
)
print(f"fetching {self.source} for {self.__class__.__name__}")
data = self.source.get_data(scraper)
self.set_raw_data(data)
def __init__(self, input_val=None):
"""
a Page can be instantiated with a url & options (TBD) needed to fetch it
"""
self.input = input_val
# TODO: special case, maybe __url__ or something?
if isinstance(input_val, dict) and "url" in input_val:
self.source = URL(input_val["url"])
if hasattr(input_val, "url"):
self.source = URL(input_val.url)
def set_raw_data(self, raw_data):
""" callback to handle raw data returned by grabbing the URL """
self.raw_data = raw_data
def get_data(self):
""" return data extracted from this page and this page alone """
raise NotImplementedError()
class HtmlPage(Page):
def set_raw_data(self, raw_data):
super().set_raw_data(raw_data)
self.root = lxml.html.fromstring(raw_data)
if hasattr(self.source, "url"):
self.root.make_links_absolute(self.source.url)
class ListPage(Page):
class SkipItem(Exception):
pass
def skip(self):
raise self.SkipItem()
# TODO
# class CSVListPage(ListPage):
# def get_data(self):
# print(self.raw_data)
# for item in items:
# try:
# item = self.process_item(item)
# except self.SkipItem:
# continue
# yield item
# def process_item(self, item):
# return item
class HtmlListPage(ListPage, HtmlPage):
"""
Simplification for HTML pages that get a list of items and process them.
When overriding the class, instead of providing get_data, one must only provide
a selector and a process_item function.
"""
selector = None
# common for a list page to only work on one URL, in which case it is more clear
# to set it as a property
def __init__(self, url=None):
"""
a Page can be instantiated with a url & options (TBD) needed to fetch it
"""
if url is not None:
self.url = url
def get_data(self):
if not self.selector:
raise NotImplementedError("must either provide selector or override scrape")
items = self.selector.match(self.root)
for item in items:
try:
item = self.process_item(item)
except self.SkipItem:
continue
yield item
def process_item(self, item):
return item
| [
"dev@jamesturk.net"
] | dev@jamesturk.net |
a75b54c62dc1b03dc4355d30db6af0896bd501f9 | 6de1fdf1de71e82e0a1af2ac4f6647444c986641 | /django/phishmon/phishmon/views.py | ef3a471dd64f50c3a34fc42f281f4edebf468be1 | [] | no_license | Epul-94/proyecto | 43fc3597800c0058176232487eace4830768b602 | 8955f3c2c477734568e399db2a37f66e177baa6c | refs/heads/master | 2020-03-19T05:25:01.837551 | 2018-06-03T18:15:22 | 2018-06-03T18:15:22 | 135,928,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | from django.http import HttpResponse
from django.shortcuts import render
from .models import Codigoestadohistorico, SitiosPhishing, Ipwhois, Email
def home(request):
    """Render the index page."""
    return render(request,"index.html")
def monito(request):
    """Render the monitor page with the oldest not-yet-reported phishing site."""
    # Fix: ``objects.all`` is a method, so ``objects.all.filter(...)`` raised
    # AttributeError — filter directly on the manager instead.
    sph = SitiosPhishing.objects.filter(estatus='no reportado').order_by('id_sitio').first()
    print(sph)  # Fix: ``print sph`` is Python 2 syntax (SyntaxError in Python 3).
    # ceh = Codigoestadohistorico.objects.order_by('')
    # return render(request,"monitor.html",{'Codigoestadohistorico':ceh, 'SitiosPhishing': sph, 'Ipwhois': ipwho, 'Email':email})
    # Fix: ``"monitor.html".{...}`` was invalid syntax; template name and
    # context are separate arguments to render().
    return render(request, "monitor.html", {'SitiosPhishing': sph})
def historico(request):
    """Render the 'historico' page."""
    return render(request,"historico.html")

def reportes(request):
    """Render the 'reportes' page."""
    return render(request,"reportes.html")

def verifica(request):
    """Render the 'verifica' page."""
    return render(request,"verifica.html")
def monito_list(request):
return render(request, ) | [
"root@debian"
] | root@debian |
0cccbf0a4dcf64cd3c945f7cc81b9807fb27b8cb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03700/s833680310.py | fa2a25e2949cdb3ab5391ac4d929463210e52564 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | n, a, b = map(int, input().split())
# One health value per line (n, a, b were read on the preceding line).
h = [int(input()) for _ in range(n)]
# Binary search on the answer m with the invariant: l is always infeasible,
# r is always feasible; the loop closes the gap to one.
l = 0
r = 10 ** 10
c = a - b  # extra damage on the single targeted element per operation
while l + 1 != r:
    m = (l + r) // 2
    # tmp = targeted operations required if m operations are performed in
    # total: every element takes b*m baseline damage, the remainder must be
    # covered by a-damage hits (c = a - b extra each, rounded up).
    tmp = 0
    for i in range(n):
        if h[i] > b * m:
            tmp += ((h[i] - b * m) // c + (1 if (h[i] - b * m) % c else 0))
    if tmp > m:
        l = m  # m operations do not suffice
    else:
        r = m  # feasible; try smaller
print(r) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
64daf39edee03462cd72b0132960043675cf120d | 18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9 | /prset2-2.py | b59271f810016dca44f4f63a3ee620b00d033379 | [] | no_license | mahakalai/mahak | 05f96d52880ed7b2e5eb70dd1dbf14fc533236e8 | 613be9df7743ef59b1f0e07b7df987d29bb23ec7 | refs/heads/master | 2020-04-15T05:01:58.541930 | 2019-07-15T16:28:32 | 2019-07-15T16:28:32 | 164,406,486 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | n,q=map(int,input().split())
l=[int(x) for x in input().split()]
l1=[]
for i in range(0,q):
l2=[int(x) for x in input().split()]
l1.append(l2)
for i in range(0,q):
m=l1[i][0]
n=l1[i][1]
l3=l[m-1:n]
print(sum(l3))
| [
"noreply@github.com"
] | mahakalai.noreply@github.com |
1cf0614c2d36b96c0fe09b5e2261b03e65a76372 | d037002f9d2b383ef84686bbb9843dac8ee4bed7 | /tutorials/DuelingDQN/arguments.py | 6adb435909f0337931676055ae5982b241dd9146 | [
"MIT"
] | permissive | ICSL-hanyang/Code_With_RL | 4edb23ca24c246bb8ec75fcf445d3c68d6c40b6d | 1378996e6bf6da0a96e9c59f1163a635c20b3c06 | refs/heads/main | 2023-08-15T18:37:57.689950 | 2021-10-18T07:31:59 | 2021-10-18T07:31:59 | 392,944,467 | 0 | 0 | null | 2021-08-05T07:20:57 | 2021-08-05T07:20:56 | null | UTF-8 | Python | false | false | 2,577 | py | # Copyright (c) 2021: Zhiyuan Nan (namjw@hanyang.ac.kr).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import argparse
import torch as T
device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
def get_args():
parser = argparse.ArgumentParser("Deep Q Network")
parser.add_argument("--device", default=device, help="GPU or CPU")
parser.add_argument("--seed", type=int, default=123, help="random seed")
parser.add_argument("--env-name", type=str, default="CartPole-v0", help="name of the scenario script")
parser.add_argument("--render", type=bool, default=False, help="")
parser.add_argument("--time-steps", type=int, default=3000000, help="number of time steps")
parser.add_argument("--episode", type=int, default=int(1e6), help="number of episode")
parser.add_argument("--critic-lr", type=float, default=1e-3, help="learning rate of critic")
parser.add_argument("--hidden-size", type=int, default=128, help="hidden layer units")
parser.add_argument("--update-rate", type=int, default=100, help="update rate")
parser.add_argument("--max_epsilon", type=float, default=1.0, help="max epsilon")
parser.add_argument("--min_epsilon", type=float, default=0.1, help="min epsilon")
parser.add_argument("--epsilon_decay", type=float, default=0.0005, help="epsilon decay")
parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
parser.add_argument("--tau", type=float, default=0.01, help="parameter for updating the target network")
parser.add_argument("--buffer-size", type=int, default=1000, help="number of transitions can be stored in buffer")
parser.add_argument("--batch-size", type=int, default=32, help="number of episodes to optimize at the same time")
parser.add_argument("--save-dir", type=str, default="./model", help="directory in which training state and model should be saved")
parser.add_argument("--save-rate", type=int, default=2000, help="save model once every time this many episodes are completed")
parser.add_argument("--model-dir", type=str, default="", help="directory in which training state and model are loaded")
parser.add_argument("--evaluate-episodes", type=int, default=10, help="number of episodes for evaluating")
parser.add_argument("--evaluate", type=bool, default=False, help="whether to evaluate the model")
parser.add_argument("--evaluate-rate", type=int, default=1000, help="how often to evaluate model")
return parser.parse_args()
| [
"nzy1414117007@gmail.com"
] | nzy1414117007@gmail.com |
86754c7633d599806713c66152b4c042dba9c6b3 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/454dbbb27b55d17c57de7b4ba986b1162a9bf813-<test_symlog>-bug.py | 4b4a1cace6659e962a753cb7c52ea165e8431b91 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | @image_comparison(baseline_images=['symlog'])
def test_symlog():
    # Image-comparison test: data spanning zero and six orders of magnitude,
    # plotted with a symlog y-axis (linear near zero, log elsewhere).
    x = np.array([0, 1, 2, 4, 6, 9, 12, 24])
    y = np.array([1000000, 500000, 100000, 100, 5, 0, 0, 0])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y)
    ax.set_yscale('symlog')
    # NOTE(review): this ASSIGNS the string 'linear' to the attribute
    # `set_xscale`, clobbering the bound method instead of calling
    # ax.set_xscale('linear') -- the x-scale is never actually set.
    # (This is the recorded bug of this dataset entry; left as-is.)
    ax.set_xscale = 'linear'
    ax.set_ylim((- 1), 10000000)
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
da1cf67d7273f6617e8896ca137a04c77be4e01a | 9ed964c0fb14494e9685f13570d42aa7aebefa6f | /selfdrive/controls/lib/lateral_mpc/libmpc_py.py | 3f92090935c4d1e26f9b2090e4c90b4eeb68c404 | [
"MIT"
] | permissive | pjlao307/chffrplus | 144d5e8da242d04692b9d949b8774ed809f568a9 | dae0dd09d88a87db81ce2fd48f52580eb7b73cc3 | refs/heads/release | 2021-07-15T06:16:33.734692 | 2017-10-19T03:35:26 | 2017-10-19T03:35:26 | 107,692,678 | 0 | 0 | null | 2017-10-20T15:09:08 | 2017-10-20T15:09:07 | null | UTF-8 | Python | false | false | 681 | py | import os
import subprocess
from cffi import FFI

# Directory holding this file; the shared library is built from and loaded
# out of the same directory.
mpc_dir = os.path.dirname(os.path.abspath(__file__))
libmpc_fn = os.path.join(mpc_dir, "libcommampc.so")
# (Re)build the MPC shared library at import time; raises CalledProcessError
# if the make step fails.
subprocess.check_output(["make", "-j4"], cwd=mpc_dir)
ffi = FFI()
# C interface of the MPC solver: a vehicle state struct, a 50-step solution
# log, and the init/run entry points.  This declaration string must match
# the C side exactly -- do not edit it casually.
ffi.cdef("""
typedef struct {
double x, y, psi, delta, t;
} state_t;
typedef struct {
double x[50];
double y[50];
double psi[50];
double delta[50];
} log_t;
void init();
void run_mpc(state_t * x0, log_t * solution,
double l_poly[4], double r_poly[4], double p_poly[4],
double l_prob, double r_prob, double p_prob, double curvature_factor, double v_ref, double lane_width);
""")
libmpc = ffi.dlopen(libmpc_fn)
| [
"user@comma.ai"
] | user@comma.ai |
7f666ea7c97ea37ab15a3f83444aeac9d6483d87 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/88c5c8b3ed1440435a8aeac8186d7ecfc83b97ab-<resize_image_with_pad>-bug.py | 60f58affe758c27a4a06caf6b205f7d10b4abe93 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,712 | py |
@tf_export('image.resize_image_with_pad')
def resize_image_with_pad(image, target_height, target_width, method=ResizeMethod.BILINEAR):
    "Resizes and pads an image to a target width and height.\n\n    Resizes an image to a target width and height by keeping\n    the aspect ratio the same without distortion. If the target\n    dimensions don't match the image dimensions, the image\n    is resized and then padded with zeroes to match requested\n    dimensions.\n\n    Args:\n    image: 4-D Tensor of shape `[batch, height, width, channels]` or\n    3-D Tensor of shape `[height, width, channels]`.\n    target_height: Target height.\n    target_width: Target width.\n    method: Method to use for resizing image. See `resize_images()`\n\n    Raises:\n    ValueError: if `target_height` or `target_width` are zero or negative.\n\n    Returns:\n    Resized and padded image.\n    If `images` was 4-D, a 4-D float Tensor of shape\n    `[batch, new_height, new_width, channels]`.\n    If `images` was 3-D, a 3-D float Tensor of shape\n    `[new_height, new_width, channels]`.\n    "
    with ops.name_scope(None, 'resize_image_with_pad', [image]):
        image = ops.convert_to_tensor(image, name='image')
        image_shape = image.get_shape()
        # Normalize to a 4-D batch; remember whether to squeeze back at the end.
        is_batch = True
        if (image_shape.ndims == 3):
            is_batch = False
            image = array_ops.expand_dims(image, 0)
        elif (image_shape.ndims is None):
            # Rank unknown at graph-build time: assume a 3-D image.
            is_batch = False
            image = array_ops.expand_dims(image, 0)
            image.set_shape(([None] * 4))
        elif (image_shape.ndims != 4):
            raise ValueError("'image' must have either 3 or 4 dimensions.")
        # Runtime checks: image is at least 3-D and target sizes are positive.
        assert_ops = _CheckAtLeast3DImage(image, require_static=False)
        assert_ops += _assert((target_width > 0), ValueError, 'target_width must be > 0.')
        assert_ops += _assert((target_height > 0), ValueError, 'target_height must be > 0.')
        image = control_flow_ops.with_dependencies(assert_ops, image)
        # max() that also works when either operand is a symbolic tensor.
        def max_(x, y):
            if (_is_tensor(x) or _is_tensor(y)):
                return math_ops.maximum(x, y)
            else:
                return max(x, y)
        (_, height, width, _) = _ImageDimensions(image, rank=4)
        f_height = math_ops.cast(height, dtype=dtypes.float64)
        f_width = math_ops.cast(width, dtype=dtypes.float64)
        f_target_height = math_ops.cast(target_height, dtype=dtypes.float64)
        f_target_width = math_ops.cast(target_width, dtype=dtypes.float64)
        # Shrink factor that fits the image inside the target while keeping
        # the aspect ratio (>= 1 means downscaling is needed on that axis).
        ratio = max_((f_width / f_target_width), (f_height / f_target_height))
        resized_height_float = (f_height / ratio)
        resized_width_float = (f_width / ratio)
        # NOTE(review): flooring here can under-size the resized image by up
        # to one pixel relative to the float dimensions -- confirm intended.
        resized_height = math_ops.cast(math_ops.floor(resized_height_float), dtype=dtypes.int32)
        resized_width = math_ops.cast(math_ops.floor(resized_width_float), dtype=dtypes.int32)
        # Centering offsets (floored, clamped at 0).
        padding_height = ((f_target_height - resized_height_float) / 2)
        padding_width = ((f_target_width - resized_width_float) / 2)
        f_padding_height = math_ops.floor(padding_height)
        f_padding_width = math_ops.floor(padding_width)
        p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32))
        p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32))
        # Resize, then zero-pad out to the exact target size.
        resized = resize_images(image, [resized_height, resized_width], method)
        padded = pad_to_bounding_box(resized, p_height, p_width, target_height, target_width)
        if (padded.get_shape().ndims is None):
            raise ValueError('padded contains no shape.')
        _ImageDimensions(padded, rank=4)
        # Drop the synthetic batch dimension if the input was 3-D.
        if (not is_batch):
            padded = array_ops.squeeze(padded, squeeze_dims=[0])
        return padded
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
bc981ca8ee42a2e8cd3057d7c0311152534a8dc6 | eb2df6020f5759feee3d6d78c5f8c78999454a09 | /scheduled_jobs/niagara4/run_read_meter_points.py | 2030f6978f6ebbf395d95c11893cb82798b3433b | [] | no_license | mywork-dragon/dave-energy | 7a08f855d245c2d90a9c13aa85fc3b9f28ae9294 | 4b3430be6ef6957389ab05be3a17a0245f5d6662 | refs/heads/master | 2023-07-28T02:55:26.791724 | 2021-09-06T11:44:30 | 2021-09-06T11:44:30 | 365,872,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py |
import traceback
from config import logger
from scheduled_jobs.niagara4.run import Run
from scheduled_jobs.niagara4.n4_events_handler import N4EventHandler
from common import slack
class RunReadMeterPoints(Run):
    """Scheduled Niagara4 job that copies values for a building's meter points."""

    def get_n4_points_sql(self):
        # SQL selecting every METER / METER_EXPORT tagged point attached to
        # HVAC or Meter assets of one building; the building id is bound by
        # the caller through the %s placeholder.
        return '''select pt.id as point_id, pt.path as point_path, pt.name as point_name, ast.name as asset_name,
at.name as asset_type_name, pt.tag as tag_name
from asset ast
INNER JOIN point pt ON ast.id = pt.asset_id
INNER JOIN asset_type at ON ast.asset_type_id = at.id
where ast.building_id = %s
and at.name in ('HVAC', 'Meter')
and pt.tag in ('METER', 'METER_EXPORT')
'''
if __name__ == '__main__':
    # Script entry point: run one copy pass over the Niagara4 point values.
    run_every = RunReadMeterPoints()
    run_every.copy_n4_values()
"dragonblueyounger@gmail.com"
] | dragonblueyounger@gmail.com |
4a8bb0e72be3d9bd3f3ae6317741f6c2b5cce534 | 971c5ae1d87cdfbb97723485c3d76c17395b82b0 | /x86-semantics/semantics_using_uifs/z3EquivFormulas/x86-movlhps_xmm_xmm.py | 86a53322a07dadd89753423c03078be47b7b8212 | [
"NCSA"
] | permissive | mewbak/binary-decompilation | 7d0bf64d6cd01bfa5f5fc912d74a85ce81124959 | f58da4c53cd823edc4bbbad6b647dbcefd7e64f8 | refs/heads/master | 2020-04-16T06:08:14.983946 | 2019-01-06T17:21:50 | 2019-01-06T17:21:50 | 165,334,058 | 1 | 0 | NOASSERTION | 2019-01-12T01:42:16 | 2019-01-12T01:42:16 | null | UTF-8 | Python | false | false | 4,029 | py | from z3 import *
import proverUtils
# Declarations
# x86 status flags, modeled as 1-bit vectors.
CF = BitVec('CF', 1)
PF = BitVec('PF', 1)
AF = BitVec('AF', 1)
ZF = BitVec('ZF', 1)
SF = BitVec('SF', 1)
OF = BitVec('OF', 1)
# 64-bit general-purpose registers.
RAX = BitVec('RAX', 64)
RCX = BitVec('RCX', 64)
RDX = BitVec('RDX', 64)
ZERO1 = BitVecVal(0, 1)
ONE1 = BitVecVal(1, 1)
# Boolean views of the flags ("flag is set").
cf = (CF == ONE1)
pf = (PF == ONE1)
af = (AF == ONE1)
zf = (ZF == ONE1)
sf = (SF == ONE1)
of = (OF == ONE1)
undef = BitVecVal(0, 1)
# NOTE(review): this Int->Float32 version of cvt_int32_to_single is shadowed
# by the BitVec-sorted redefinition further down and is never the one in use.
cvt_int32_to_single = Function('cvt_int32_to_single', IntSort(), Float32())
# Uninterpreted binary function declaration
# Floating-point ops are left uninterpreted: equivalence is proven modulo
# their exact semantics.
add_double = Function('add_double', BitVecSort(64), BitVecSort(64), BitVecSort(64))
add_single = Function('add_single', BitVecSort(32), BitVecSort(32), BitVecSort(32))
sub_double = Function('sub_double', BitVecSort(64), BitVecSort(64), BitVecSort(64))
sub_single = Function('sub_single', BitVecSort(32), BitVecSort(32), BitVecSort(32))
mul_double = Function('mul_double', BitVecSort(64), BitVecSort(64), BitVecSort(64))
mul_single = Function('mul_single', BitVecSort(32), BitVecSort(32), BitVecSort(32))
div_double = Function('div_double', BitVecSort(64), BitVecSort(64), BitVecSort(64))
div_single = Function('div_single', BitVecSort(32), BitVecSort(32), BitVecSort(32))
maxcmp_double = Function('maxcmp_double', BitVecSort(64), BitVecSort(64), BitVecSort(1))
maxcmp_single = Function('maxcmp_single', BitVecSort(32), BitVecSort(32), BitVecSort(1))
mincmp_double = Function('mincmp_double', BitVecSort(64), BitVecSort(64), BitVecSort(1))
mincmp_single = Function('mincmp_single', BitVecSort(32), BitVecSort(32), BitVecSort(1))
# Uninterpreted unary function declaration
approx_reciprocal_double = Function('approx_reciprocal_double', BitVecSort(64), BitVecSort(64))
approx_reciprocal_single = Function('approx_reciprocal_single', BitVecSort(32), BitVecSort(32))
sqrt_double = Function('sqrt_double', BitVecSort(64), BitVecSort(64))
sqrt_single = Function('sqrt_single', BitVecSort(32), BitVecSort(32))
# NOTE(review): the solver-side names below carry a doubled "_double" suffix
# ('approx_reciprocal_sqrt_double_double' / '..._double_single') -- presumably
# a typo in the generator; harmless as long as both sides agree.
approx_reciprocal_sqrt_double = Function('approx_reciprocal_sqrt_double_double', BitVecSort(64), BitVecSort(64))
approx_reciprocal_sqrt_single = Function('approx_reciprocal_sqrt_double_single', BitVecSort(32), BitVecSort(32))
cvt_single_to_double = Function('cvt_single_to_double', BitVecSort(32), BitVecSort(64))
cvt_single_to_int32 = Function('cvt_single_to_int32', BitVecSort(32), BitVecSort(32))
cvt_single_to_int64 = Function('cvt_single_to_int64', BitVecSort(32), BitVecSort(64))
cvt_int32_to_single = Function('cvt_int32_to_single', BitVecSort(32), BitVecSort(32))
cvt_int32_to_double = Function('cvt_int32_to_double', BitVecSort(32), BitVecSort(64))
# Uninterpreted ternary function declaration
vfmadd132_double = Function('vfmadd132_double', BitVecSort(64), BitVecSort(64), BitVecSort(64), BitVecSort(64))
vfmadd132_single = Function('vfmadd132_single', BitVecSort(32), BitVecSort(32), BitVecSort(32), BitVecSort(32))
vfmsub132_double = Function('vfmsub132_double', BitVecSort(64), BitVecSort(64), BitVecSort(64), BitVecSort(64))
vfmsub132_single = Function('vfmsub132_single', BitVecSort(32), BitVecSort(32), BitVecSort(32), BitVecSort(32))
vfnmadd132_double = Function('vfnmadd132_double', BitVecSort(64), BitVecSort(64), BitVecSort(64), BitVecSort(64))
vfnmadd132_single = Function('vfnmadd132_single', BitVecSort(32), BitVecSort(32), BitVecSort(32), BitVecSort(32))
vfnmsub132_double = Function('vfnmsub132_double', BitVecSort(64), BitVecSort(64), BitVecSort(64), BitVecSort(64))
vfnmsub132_single = Function('vfnmsub132_single', BitVecSort(32), BitVecSort(32), BitVecSort(32), BitVecSort(32))
print('[6;30;44m' + 'Opcode:movlhps_xmm_xmm' + '[0m')
# Source and destination YMM registers as 256-bit vectors.
R1 = BitVec('R1', 256)
R2 = BitVec('R2', 256)
# First formalization of the MOVLHPS destination: R2[255:128] kept,
# bits [127:64] replaced by R1[63:0], low qword R2[63:0] kept
# (extract bounds expressed relative to .size()).
PK_R2 = (Concat(Extract( R2.size() - 0 - 1, R2.size() - 128, R2 ) , Concat(Extract( R1.size() - 192 - 1, R1.size() - 256, R1 ) , Extract( R2.size() - 192 - 1, R2.size() - 256, R2 ) )) )
# Second formalization of the same result, with absolute bit positions.
PS_R2 = (Concat((Extract (255, 128, ((R2)))), (Concat((Extract (63, 0, ((R1)))), (Extract (63, 0, ((R2))))))))
# Prove the two encodings agree for all R1, R2.
proverUtils.prove( PK_R2 == PS_R2 )
| [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
a3dd7063c42f87059ff0734aa6184d0fe693d10a | c02369e1ebeb99f1d59b767fe180a383759ce38e | /object_conversions/python/conversion_to_histo.py | d79173836980faf7c6c14a48a54432aeb4f41b8b | [] | no_license | jbsauvan/L1T-Utilities | 0ed51b88ab31963463277eae85c8b7fccc595f17 | 4600200f990f63e9d5f5b401b282d7adb37b2f4d | refs/heads/master | 2021-01-16T20:50:38.979533 | 2016-07-13T13:51:32 | 2016-07-13T13:51:32 | 62,325,564 | 0 | 0 | null | 2016-06-30T16:17:27 | 2016-06-30T16:17:27 | null | UTF-8 | Python | false | false | 792 | py | from rootpy.plotting import Hist2D
def function2th2(function, binsx, binsy, titlex='', titley=''):
    """Fill a 2D histogram by evaluating *function* on every bin centre.

    *function* must accept a sequence of ``[x, y]`` pairs (one per bin) and
    return one value per pair.  *binsx* / *binsy* are the positional binning
    arguments, concatenated and forwarded to ``Hist2D``.
    """
    histo = Hist2D(*(binsx + binsy))
    histo.SetXTitle(titlex)
    histo.SetYTitle(titley)
    # One [x, y] bin-centre pair per bin, x-major order.
    centres = [
        [histo.GetXaxis().GetBinCenter(ix), histo.GetYaxis().GetBinCenter(iy)]
        for ix in histo.bins_range(0)
        for iy in histo.bins_range(1)
    ]
    # Single vectorised call over all bin centres.
    outputs = function(centres)
    for out, (x, y) in zip(outputs, centres):
        ix = histo.GetXaxis().FindBin(x)
        iy = histo.GetYaxis().FindBin(y)
        histo[ix, iy].value = out
    return histo
| [
"jean-baptiste.sauvan@cern.ch"
] | jean-baptiste.sauvan@cern.ch |
fd07c7d1bdc76fcbee5db9d0e881bf4321f4f0ab | 33d95a2e5222f1ab9993480a8bdc354aa6dc672f | /game/migrations/0005_darkdieroll_fate_points.py | 3ee7161af0d937ac468c2220d2fb67e60fcce10f | [] | no_license | JackSnowdon/die-master | ab3eb7b1dbbe8b16031515c09b843aaba879aef5 | d0af9fb8d6851904d466165d190dc9ec3700f2fd | refs/heads/master | 2023-07-29T13:20:18.171612 | 2020-06-19T18:27:56 | 2020-06-19T18:27:56 | 265,283,325 | 0 | 0 | null | 2021-09-22T19:14:04 | 2020-05-19T15:16:26 | Python | UTF-8 | Python | false | false | 533 | py | # Generated by Django 3.0.6 on 2020-06-02 16:41
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the `fate_points` counter to the DarkDieRoll model."""

    dependencies = [
        ('game', '0004_auto_20200522_1754'),
    ]

    operations = [
        # Non-null counter, default 0, clamped to [0, 100] by validators.
        migrations.AddField(
            model_name='darkdieroll',
            name='fate_points',
            field=models.PositiveIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
        ),
    ]
| [
"jacksnowdondrums@gmail.com"
] | jacksnowdondrums@gmail.com |
4d8a740f2425a882dbbcd033a0b302aa265db11d | 3fe272eea1c91cc5719704265eab49534176ff0d | /scripts/field/lightning_tuto_13_0.py | 40c852ba5cc3187275d8a0d5513fed44b2a3ff09 | [
"MIT"
] | permissive | Bratah123/v203.4 | e72be4843828def05592298df44b081515b7ca68 | 9cd3f31fb2ef251de2c5968c75aeebae9c66d37a | refs/heads/master | 2023-02-15T06:15:51.770849 | 2021-01-06T05:45:59 | 2021-01-06T05:45:59 | 316,366,462 | 1 | 0 | MIT | 2020-12-18T17:01:25 | 2020-11-27T00:50:26 | Java | UTF-8 | Python | false | false | 502 | py | # Created by MechAviv
# ID :: [910141060]
# Hidden Street : The Frenzy
sm.reservedEffect("Effect/Direction8.img/lightningTutorial2/Scene4")
sm.sendDelay(10000)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
# Unhandled User Effect [PlaySoundWithMuteBGM] Packet: 23 11 00 4D 69 6E 69 47 61 6D 65 2E 69 6D 67 2F 4F 70 65 6E
# Unhandled Message [47] Packet: 2F 01 00 00 00 B0 83 08 00 00 00 00 00 2E 02 00 00 00 00 00 80 05 BB 46 E6 17 02 00 00
sm.warp(910141030, 0)
| [
"pokesmurfuwu@gmail.com"
] | pokesmurfuwu@gmail.com |
66f5818bf49f9b2f75b321768a98f06eedf0e493 | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /components/viz/service/frame_sinks/DEPS | 93c434996fdedd3564cd4da8b83f221ddc3502d0 | [
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 555 | # Please consult components/viz/README.md about allowable dependencies.
# Directories this target is allowed to include from ("+" = allow, checked
# by checkdeps).
include_rules = [
  "+cc/base",
  "+cc/scheduler",
  "+components/viz/service/display",
  "+components/viz/service/display_embedder",
  "+components/viz/service/hit_test",
  "+components/viz/service/surfaces",
  "+gpu/ipc/common",
  "+mojo/public",
]

# Per-file exceptions to the rules above (key is a filename regex).
specific_include_rules = {
  ".*unittest\.cc": [
    "+third_party/khronos/GLES2",
  ],
  "external_begin_frame_source_android.cc": [
    "+components/viz/service/service_jni_headers/ExternalBeginFrameSourceAndroid_jni.h",
  ],
}
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org | |
5639d463d2f4c33b95fd48aa52bd71d7e45ddf75 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/eventhub/azure-mgmt-eventhub/generated_samples/schema_registry_delete.py | 5bd620cf6b1f8f12b53fc5ec98efd552c094664b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,639 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.eventhub import EventHubManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-eventhub
# USAGE
python schema_registry_delete.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Authenticate through AAD environment variables (AZURE_CLIENT_ID,
    # AZURE_TENANT_ID, AZURE_CLIENT_SECRET) and delete one schema group
    # from an Event Hubs namespace.
    client = EventHubManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="e8baea74-64ce-459b-bee3-5aa4c47b3ae3",
    )
    response = client.schema_registry.delete(
        resource_group_name="alitest",
        namespace_name="ali-ua-test-eh-system-1",
        schema_group_name="testSchemaGroup1",
    )
    print(response)


# x-ms-original-file: specification/eventhub/resource-manager/Microsoft.EventHub/stable/2021-11-01/examples/SchemaRegistry/SchemaRegistryDelete.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
35135a73f723dbed0d3c25b356fcb0f60b41890f | d039da1c0b99e2642d3c354de9faa6f427141ee3 | /problems/leetcode/LargestRectangleinHistogram.py | 8bcff46e2f705aa31b50d539325cc64efb60c878 | [
"MIT"
] | permissive | qicst23/pyshua | 5a3e317823d0620d2034adfe345eddd6a722c7ff | 4ae7bb8b626f233ebc2267024ba67dcfe49051ed | refs/heads/master | 2016-09-15T20:26:16.694738 | 2013-12-15T04:50:04 | 2013-12-15T04:50:04 | 15,198,867 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | from problems.leetcode.LeetcodeProblem import LeetcodeProblem
class LargestRectangleinHistogram(LeetcodeProblem):
    """Largest rectangle under a histogram (monotonic-stack solution)."""

    def solve(self, height):
        """Return the maximum rectangle area in O(n).

        Works on a local copy with a trailing 0 sentinel, so the caller's
        list is no longer mutated (the previous version appended 0 to the
        input in place).
        """
        bars = list(height)
        bars.append(0)  # sentinel: forces the stack to flush at the end
        n = len(bars)
        res = 0
        stack = []  # indices of bars with non-decreasing heights
        i = 0
        while i < n:
            if not stack or bars[i] >= bars[stack[-1]]:
                stack.append(i)
                i += 1
            else:
                # Pop one bar; it bounds a rectangle spanning from just
                # after the new stack top up to i - 1.
                h = bars[stack.pop()]
                w = i - (stack[-1] if stack else -1) - 1
                area = h * w
                if res < area:
                    res = area
        return res

    def verify(self, original_input, input, s1, s2):
        # Exact match between produced and expected answers.
        return s1 == s2

    def input(self):
        # One int array per test case, parsed from the problem input file.
        from Parser import parseIntArray
        return parseIntArray(open(self.inputPath))

    def output(self):
        # Expected outputs: a single int per test case.
        from Parser import parseOneInt
        for o in parseOneInt(open(self.outputPath)):
            yield o[0]


# Handle exported for the test harness.
problem = LargestRectangleinHistogram
| [
"baidingding7@gmail.com"
] | baidingding7@gmail.com |
f06390f4aa8e80e49b7ba4a92cfa1fc42113c66f | d9f4b0a788d39655e29993756aedf3f11bf9086c | /practice/solve.py | 68ce083b81ffb14c0b63464e482f038cccf4df83 | [] | no_license | OmarTahoun/competitive-programming | 07a9123f195c87945c6e56b1d0a3c6fba805d946 | 3b3a4348817fce6637d7bd675ce6f62a4fe69cc8 | refs/heads/master | 2021-08-07T02:45:20.752603 | 2020-04-30T09:50:40 | 2020-04-30T09:50:40 | 164,177,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | #!/usr/bin/python3
import json
import argparse
import shutil
import subprocess
def create_file(id, lang):
    # Prepare a solution file for problem #id and open it in Sublime Text.
    # `problems` is the module-level list loaded in the __main__ guard; each
    # entry's first element is used as the file stem.
    id = id.strip()
    lang = lang.strip()
    problem = problems[int(id.strip())]
    if(lang.lower() == "cpp"):
        file_name = problem[0] +"."+ lang.lower()
        path = r"../Code Forces/CPP/"+file_name
        default_path = r"../Code Forces/CPP/default.cpp"
        # C++ solutions start from the template file.
        shutil.copyfile(default_path, path)
        print(file_name)
        list_files = subprocess.run(["subl", path], stdout=subprocess.DEVNULL)
    else:
        # Other languages (Python) get no template; the editor creates the
        # file on first save.
        file_name = problem[0] +"."+ lang.lower()
        path = r"../Code Forces/PY/"+file_name
        print(file_name)
        list_files = subprocess.run(["subl", path], stdout=subprocess.DEVNULL)
def main():
    # Parse the required problem id and language flags, then hand off to
    # create_file().
    parser = argparse.ArgumentParser(description="testing")
    parser.add_argument('-id', dest="id", help = "the id of the problem to update", type = str, required=True)
    parser.add_argument('-l', dest="lang", help = "the Language the problem was solved with", type = str, required=True)
    args = parser.parse_args()
    create_file(args.id, args.lang)
if __name__ == '__main__':
    # Load the problem index once (module-level, read by create_file),
    # then dispatch.
    with open("problems.js", "r") as f:
        problems = json.load(f)
    main()
"omaraiz489@gmail.com"
] | omaraiz489@gmail.com |
c0b2e8faa338dca0df9b00fb60c53f05f3eda904 | 0fd974aa23da92b81d7a7a769d24563b49020023 | /venom/rpc/resolver.py | 1e6f479789501e06961c82e3d7a959c21179ca7e | [
"MIT"
] | permissive | lyschoening/venom | 188b21c14092bfb39eed5b4911c64425e4497818 | 167967e7a8078a7227dc50dbc40df38e5dd6f520 | refs/heads/master | 2020-04-18T07:27:03.160796 | 2018-01-30T10:31:26 | 2018-01-30T10:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | from abc import abstractmethod, ABCMeta
from typing import TypeVar, Generic, Type
# PEP 484 requires the TypeVar's string argument to equal the name of the
# variable it is assigned to; the original used '_T', which type checkers
# reject or warn about.
T = TypeVar('T')
class Resolver(Generic[T], metaclass=ABCMeta):
    """Abstract async resolver producing a value of type T from a request."""

    # Optional concrete Python type produced by this resolver.
    python: Type[T] = None

    async def __call__(self,
                       service: 'venom.rpc.service.Service',
                       request: 'venom.message.Message') -> T:
        # Calling the resolver delegates straight to resolve().
        return await self.resolve(service, request)

    @abstractmethod
    async def resolve(self,
                      service: 'venom.rpc.service.Service',
                      request: 'venom.message.Message') -> T:
        """Produce the resolved value; implemented by subclasses."""
        pass
| [
"lars@lyschoening.de"
] | lars@lyschoening.de |
e1bc9fe5632a807432c86b9ec2bf5a4116df296b | e7c2b9ef846f4159975b024ec2f9a535ce19de40 | /setup.py | 393ce848df845539658582c039428d375177ddd3 | [
"MIT"
] | permissive | kellychenma/db_tools | c7f8af420d472d0d412d4bf9d1ec74f99a67d3d1 | 23d971af485a90176cfbfd2b2ed2d1f7c5b28f9b | refs/heads/master | 2021-09-10T08:17:24.388278 | 2018-01-09T18:05:41 | 2018-01-09T18:05:41 | 116,858,439 | 0 | 0 | null | 2018-01-09T19:08:36 | 2018-01-09T19:08:36 | null | UTF-8 | Python | false | false | 2,292 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import inspect
from pathlib import Path
HOME_DIR = Path(inspect.getfile(inspect.currentframe())).parent
def filter_req_paths(paths, func):
    """Collect requirement lines from *paths* that satisfy *func*.

    *paths* must be a list of Path-like objects; lines keep their trailing
    newlines, bare blank lines are dropped, and duplicates across files are
    collapsed.  Raises ValueError when *paths* is not a list.
    """
    if not isinstance(paths, list):
        raise ValueError("Paths must be a list of paths.")
    collected = set()
    for req_path in paths:
        with req_path.open(mode='r') as handle:
            collected.update(line for line in handle if func(line))
    # '\n' entries are blank lines, not requirements.
    return list(collected - {'\n'})
def is_pipable(line):
    """Return True when *line* names a requirement pip can install.

    Lines carrying the "# not_pipable" marker and comment lines are
    excluded; everything else counts as installable.
    """
    if "# not_pipable" in line or line.startswith('#'):
        return False
    return True
# Long description is README + HISTORY, as is conventional for PyPI pages.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Install requirements come from both requirements files, filtered down to
# the pip-installable lines.
requirements = filter_req_paths(paths=[HOME_DIR / "requirements.txt",
                                       HOME_DIR / "requirements.pip.txt"], func=is_pipable)

setup_requirements = ["pytest-runner"]

# NOTE(review): doubled assignment is a harmless typo -- both names are the
# same, so this is equivalent to `test_requirements = ["pytest"]`.
test_requirements = test_requirements = ["pytest"]

setup(
    name='db_tools',
    version='0.0.2',
    description="A set of command line executable and script importable tools to aid the Snapper Lab in managing and combining RedCap, FreezerPro, and other databases.",
    long_description=readme + '\n\n' + history,
    author="Gus Dunn",
    author_email='w.gus.dunn@gmail.com',
    url='https://github.com/xguse/db_tools',
    packages=find_packages(include=['db_tools']),
    entry_points={
        'console_scripts': [
            'db_tools=db_tools.cli:main'
        ]
    },
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='db_tools',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
| [
"w.gus.dunn@gmail.com"
] | w.gus.dunn@gmail.com |
a3b2d408518bb11b90aaba063943fb0caa56731f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02239/s374795899.py | eb3b7249256f6b5886a4bce9720035bb327680f5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | #=============================================================================
# Breadth First Search
#=============================================================================
from collections import deque

# Input: first line is the vertex count N; each of the next N lines lists a
# vertex's adjacency, with the first two tokens skipped by the [2:] slice.
N = int(input())
Graph = [[] for _ in range(N + 1)]
dist = [-1 for _ in range(N + 1)]
for i in range(1, N + 1):
    Graph[i] = list(map(int, input().split()))[2:]

# BFS from vertex 1.  collections.deque gives O(1) popleft/append and,
# unlike queue.Queue, carries no thread-locking overhead in this
# single-threaded script; output is unchanged.
que = deque()
que.append(1)
dist[1] = 0
while que:
    v = que.popleft()
    for nv in Graph[v]:
        if dist[nv] != -1:
            continue
        dist[nv] = dist[v] + 1
        que.append(nv)

# Print "<vertex> <distance>" for every vertex; unreachable stays -1.
for i in range(1, N + 1):
    print(i, dist[i], sep=" ")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0edc58fbf257055980a8afbc37b213fd0c436e90 | a8d55214bc9ff1f113f52dec0b26bbf6692e239b | /06/6.3/property_test3.py | 12160271c4b30e13f794a83a0a39da718249f8e4 | [] | no_license | wmm0165/crazy_python | e61ea186cd9120500b3235aed9afc62be5dc49bc | 1266cc8ae1d3c3b91fe17b298de0686d1d93bb24 | refs/heads/master | 2022-03-24T03:31:57.205022 | 2019-12-23T10:13:22 | 2019-12-23T10:13:22 | 193,018,528 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | # -*- coding: utf-8 -*-
# @Time : 2019/8/19 14:14
# @Author : wangmengmeng
# Use @property to turn accessor methods into managed attributes.
class Cell:
    """Toy cell whose liveness is exposed through properties."""

    @property
    def state(self):
        """Current state; always the string 'alive' or 'dead'."""
        return self._state

    @state.setter
    def state(self, value):
        # Any value containing 'alive' (case-insensitive) marks the cell
        # alive; everything else marks it dead.
        self._state = 'alive' if 'alive' in value.lower() else 'dead'

    @property
    def is_dead(self):
        """Read-only flag (getter only) derived from the current state."""
        return self._state.lower() != 'alive'
c = Cell()
# Set the state property (goes through the setter).
c.state = 'Alive'
# Read the state property.
print(c.state)
# Read the read-only is_dead property.
print(c.is_dead)
| [
"wmm_0165@163.com"
] | wmm_0165@163.com |
7172d06518cd011353d2ff89b60b6c63ebdf4531 | 14ca66a826cec8172f880c388375b3b55483cf37 | /rplugin/python3/deoplete/source/deoppet.py | c5b08d030cb9d9cd4798763bab3b3703fadf8441 | [
"MIT"
] | permissive | the-lazy-learner/deoppet.nvim | 744abd06adb789c9b21035fdf552ac16bcca85df | 16c6f272c694fd12fc288fc98d4b474e26aa1485 | refs/heads/master | 2021-07-03T22:43:04.506152 | 2017-09-24T08:55:16 | 2017-09-24T08:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | #=============================================================================
# FILE: deoppet.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
#=============================================================================
from .base import Base
from deoppet.parser import Parser
from deoppet.util import globruntime, debug
class Source(Base):
    """Deoplete completion source backed by deoppet snippet triggers."""

    def __init__(self, vim):
        Base.__init__(self, vim)
        # Source identity, candidate marker, and ranking within deoplete.
        self.name = 'deoppet'
        self.mark = '[dp]'
        self.rank = 200

    def gather_candidates(self, context):
        # Snippets are published per-buffer in b:deoppet_snippets; when the
        # variable is absent there is nothing to complete.
        bvars = self.vim.current.buffer.vars
        if 'deoppet_snippets' not in bvars:
            return []
        # One completion candidate per snippet trigger word.
        return [{'word': x['trigger']} for x in
                bvars['deoppet_snippets'].values()]
| [
"Shougo.Matsu@gmail.com"
] | Shougo.Matsu@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.