blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8b218abc4a3878b1c37aa9b451d5db94469a9754 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_vials.py | e2e2135144acb3eec3b153e8a65f424466bd7147 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._vial import _VIAL
# class header
class _VIALS(_VIAL, ):
    """Wordbase entry for the plural noun "VIALS" (singular base form: "vial")."""

    def __init__(self,):
        _VIAL.__init__(self)
        self.name = "VIALS"       # surface form, upper-cased
        self.specie = 'nouns'     # part-of-speech bucket in the wordbase
        self.basic = "vial"       # lemma
        self.jsondata = {}        # no extra lexical data for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
668598c1677135991798fc681fc9b79dbb890462 | f4b7207e407d4b8d693cb1f549228f0e9dfe15f2 | /wizard/__init__.py | f5dd943f161b34c98e06101b0fee4d8e53968bd8 | [] | no_license | cgsoftware/BomForTemplate- | c74943790da9c49e44ac58ba3f15f9785f363483 | acbfcf0269c1c239c34a5505e01dca0653a046b9 | refs/heads/master | 2016-09-08T01:22:12.331360 | 2011-07-26T14:25:25 | 2011-07-26T14:25:25 | 2,107,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 Domsense SRL (<http://www.domsense.com>).
# All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import GeneraDistinta
| [
"g.dalo@cgsoftware.it"
] | g.dalo@cgsoftware.it |
6063f7361ab755af9ef9d192f4341075d6c01b0e | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Cheese Boys/cheeseboys/character/navpoint.py | 2303769f6de56879ddc36d3ff5acaca3a6e1bd2d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e6854cdd531b75169a9912aa95b5fb937517d9124a0c484cac9b640d801e35fa
size 4235
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
112d51dc7aeee6b51999754cea1d6531ecd117dc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_personifies.py | cec8300a13cfbafaf3a5631fe6f9b682c22c0460 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
from xai.brain.wordbase.verbs._personify import _PERSONIFY
# class header
class _PERSONIFIES(_PERSONIFY, ):
    """Wordbase entry for the inflected verb "PERSONIFIES" (base form: "personify")."""

    def __init__(self,):
        _PERSONIFY.__init__(self)
        self.name = "PERSONIFIES"   # surface form, upper-cased
        self.specie = 'verbs'       # part-of-speech bucket in the wordbase
        self.basic = "personify"    # lemma
        self.jsondata = {}          # no extra lexical data for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4f2aa479c10a74a807ed92d08466b41ffdcac02d | cb20ef5b4048457a2e6dca4a4cb45c53c9843744 | /scripts/migration/2017022_migraterouterosvlan.py | 6191cdfe265c19448e444e923ea100168d288c7c | [] | no_license | rudecs/openvcloud | 5001b77e8d943427c1bed563f3dcc6b9467936e2 | 12ccce2a54034f5bf5842e000c2cc3d7e22836d8 | refs/heads/master | 2020-03-24T00:00:10.422677 | 2018-11-22T13:41:17 | 2018-11-22T13:41:17 | 142,267,808 | 2 | 1 | null | 2018-07-25T08:02:37 | 2018-07-25T08:02:36 | null | UTF-8 | Python | false | false | 435 | py | from JumpScale import j
# One-off migration: copy each external network's VLAN tag onto the
# virtual firewall record of the cloudspace that uses that network.
vcl = j.clients.osis.getNamespace('vfw')
ccl = j.clients.osis.getNamespace('cloudbroker')
# NOTE(review): results are consumed from index 1 onward — the first element of
# search() appears to be a count/metadata entry, not a record; confirm against
# the osis client before changing.
for vfw in vcl.virtualfirewall.search({})[1:]:
    # match the firewall to its cloudspace via grid id + network id
    space = next(iter(ccl.cloudspace.search({'gid': vfw['gid'], 'networkId': vfw['id']})[1:]), None)
    if space:
        externalnetwork = ccl.externalnetwork.get(space['externalnetworkId'])
        vfw['vlan'] = externalnetwork.vlan
        vcl.virtualfirewall.set(vfw)
| [
"deboeck.jo@gmail.com"
] | deboeck.jo@gmail.com |
4d44108df374a32aec622b77ee7de26ed9b32be8 | 0809ea2739d901b095d896e01baa9672f3138825 | /jobproject_4/jobApp/forms.py | c5d11f10832fc3d8d286690d106532d7052c0a19 | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django import forms
from jobApp.models import Hyderabad_jobs,Mumbai_jobs,Pune_jobs,Bangalore_jobs
class hydjobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Hyderabad_jobs model."""
    class Meta:
        model=Hyderabad_jobs
        fields='__all__'
class punejobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Pune_jobs model."""
    class Meta:
        model=Pune_jobs
        fields='__all__'
class mumbaijobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Mumbai_jobs model."""
    class Meta:
        model=Mumbai_jobs
        fields='__all__'
class bangalorejobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Bangalore_jobs model."""
    class Meta:
        model=Bangalore_jobs
        fields='__all__'
| [
"djangopython1988@gmail.com"
] | djangopython1988@gmail.com |
375e89d3f8a2b3ccbf4dd393faeedd95a83ff1de | 2bc0ecba05876be2a674bdc215ed7bf7332956aa | /Challenges_2020_public/misc/Disconfigured/challenge/src/cogs/notes_cog.py | ae73712d66d0430e36af60d67846c105e562b1f3 | [
"MIT"
] | permissive | Silentsoul04/ctfs | 04c29c28347f3336d8feb945da20eba83326699e | fa3b187be2c8a1d20f414c2b37277d3961a366b0 | refs/heads/master | 2023-02-18T10:20:46.508273 | 2021-01-23T18:30:57 | 2021-01-23T18:30:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,749 | py | from __future__ import annotations
from datetime import datetime
from os import environ
from discord import ChannelType, Embed, Message
from discord.ext import commands, menus
from models.note import Note
from util import db
from util.logger import get_logger
logger = get_logger(__name__)
MAX_ALLOWED_NOTES = int(environ["MAX_ALLOWED_NOTES"])
class Notes(commands.Cog):
    """Functionality relating to adding and retrieving notes.

    Notes are created through a DM round-trip: the ``note`` command marks the
    author as "waiting" in the db, and the author's next private message is
    captured by ``on_message`` and stored as the note text.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message: Message):
        """Runs every time a message is sent.

        If the author has a pending note creation (started via ``note``),
        their next DM is validated and stored as the note body.

        Args:
            message (discord.Message): The message that triggered the invokation
        """
        if message.author.bot:
            return

        logger.debug(
            "%s#%s: %s", message.author.name, message.author.discriminator, message.clean_content)

        # Only private messages can complete a pending note.
        if message.channel.type != ChannelType.private:
            return

        waiting_user = db.get_creating_note_user(message.author)
        if not waiting_user:
            return

        note = message.clean_content
        if not note:
            await message.channel.send("Your note must contain text.")
            return
        if len(note) >= 100:
            # message now matches the actual limit (the old text said 200)
            await message.channel.send("Notes must be less than 100 characters in length")
            return

        if waiting_user.note_type == "guild_user":
            # Note attached to a guild; db enforces the per-user cap.
            cleared_notes = db.add_guild_note(note, waiting_user)
            if cleared_notes:
                await message.channel.send(
                    f"Saved your note for {waiting_user.guild_name} and cleared "
                    f"the rest. You are only allowed up to {MAX_ALLOWED_NOTES} notes.")
            else:
                await message.channel.send(
                    f"Saved your note for {waiting_user.guild_name}!")
        elif waiting_user.note_type == "dm_user" and message.id != waiting_user.command_message_id:
            # Personal (DM) note; skip the message that issued the command itself.
            cleared_notes = db.add_self_note(note, waiting_user)
            if cleared_notes:
                await message.channel.send(
                    "Saved your personal note and cleared the rest. You are "
                    f"only allowed up to {MAX_ALLOWED_NOTES} notes.")
            else:
                await message.channel.send("Saved your personal note!")

    @commands.command()
    async def note(self, ctx: commands.Context):
        """Start the creation of a new note. Be careful what you note - server admins can see all 👀

        Args:
            ctx (commands.Context): The invoking context
        """
        dm = ctx.author.dm_channel or await ctx.author.create_dm()
        if ctx.channel.type == ChannelType.private:
            db.add_creating_self_note_user(ctx.author, ctx.message.id)
            await dm.send("Please enter your personal note to save.\n"
                          "The format of a note is ```<title> | <contents>```")
        else:
            db.add_creating_note_user(ctx.author, ctx.guild)
            await dm.send(f"Please enter your note to save in {ctx.guild.name}.\n"
                          "The format of a note is ```<title> | <contents>```")

    @commands.command()
    async def notes(self, ctx: commands.Context):
        """Retrieve previously saved notes

        Args:
            ctx (commands.Context): The invoking context
        """
        dm = ctx.author.dm_channel or await ctx.author.create_dm()
        # Cancel any half-finished note creation before listing.
        db.delete_from_waiting(user_id=ctx.author.id)
        if ctx.channel.type == ChannelType.private:
            notes = db.get_self_notes(ctx.author)
            if not notes:
                await dm.send("You have no DM notes saved!")
                return
        else:
            notes = db.get_member_notes(ctx.author, ctx.guild)
            if not notes:
                await dm.send(f"You have no notes saved in {ctx.guild.name}!")
                return
        pages = menus.MenuPages(source=NotesSource(notes),
                                clear_reactions_after=True, timeout=15)
        await pages.start(ctx, channel=dm)

    @commands.command()
    async def clear(self, ctx: commands.Context):
        """Clear a users notes

        Args:
            ctx (commands.Context): The invoking context
        """
        dm = ctx.author.dm_channel or await ctx.author.create_dm()
        if ctx.channel.type == ChannelType.private:
            db.clear_dm_notes(ctx.author.id)
            await dm.send("Cleared your DM notes!")
        else:
            db.clear_guild_notes(ctx.author.id, ctx.guild.id)
            await dm.send(f"Cleared your notes for {ctx.guild.name}!")
class NotesSource(menus.ListPageSource):
    """Page source feeding stored notes into a paginated embed menu (10 per page)."""

    def __init__(self, notes: [Note]):
        super().__init__(notes, per_page=10)

    async def format_page(self, menu, notes: [Note]) -> Embed:
        """Build the embed for the current page.

        Args:
            menu: The menu driving the pagination (supplies the current page).
            notes ([Note]): The notes to render on this page.

        Returns:
            [discord.Embed]: The paginating embed that will be sent to the user
        """
        start = menu.current_page * self.per_page
        fields = [
            {
                "name": f"{index + 1}. {note.title}",
                # fall back to a placeholder for empty bodies; always end with a period
                "value": (note.content if note.content else "no content") + ".",
            }
            for index, note in enumerate(notes, start=start)
        ]
        return Embed.from_dict({
            "title": "Your Notes",
            "type": "rich",
            "fields": fields,
            "color": 0x89c6f6,
        })
8df9f667b2eeb9681ccbf493100549ddcaf96614 | 34691663465d37f3c24c004c27646ba070ef5198 | /tomopy_ui/config.py | 4837ee85aaeaa05d5d57573985fb07f5df696d6c | [] | no_license | cpchuang/tomopy_ui | ff8df8809286ff1a8dbf968e3b46b12c66da7d40 | a2e64d87b9b8269b90ba56be94f422474b905474 | refs/heads/main | 2023-07-15T19:48:18.801354 | 2021-08-19T02:14:01 | 2021-08-19T02:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,587 | py | import os
import sys
from ij import IJ
from os.path import expanduser
home = expanduser("~")
CONFIG_FILE_NAME = os.path.join(home, "tomopy_ui.txt")
class DatasetParameters:
    """Holds acquisition metadata for the currently selected dataset."""

    def __init__(self, fields):
        # GUI field container; kept for symmetry with RecoParameters.
        self.fields = fields
        self.set()

    def set(self):
        """Reset every dataset attribute to its default value."""
        defaults = {
            "fname": "",
            "energy": "",
            "propagation_distance": "",
            "pixel_size": "",
            "height": "2048",
            "width": "0",
            "scanType": "Standard",
            "center": "0",
            "originalRoiX": "0",
        }
        for attribute, value in defaults.items():
            setattr(self, attribute, value)
class RecoParameters:
    """Reconstruction parameters.

    Bridges three representations of the same values: the GUI widgets
    (``self.fields``), a plain-text parameter file (``self.pfname``, one
    ``keyword value`` pair per line) and the attributes used when submitting
    reconstruction jobs.
    """

    # parameter-file keyword -> attribute name (all stored as strings).
    # "Filter" is handled separately because it is stored as an int.
    _FILE_KEYS = {
        "FileName": "fname",
        "Algorithm": "algorithm",
        "RemoveStripeMethod": "stripe_method",
        "Center": "center",
        "Slice": "slice",
        "NsinoPerChunk": "nsino_x_chunk",
        "SearchWidth": "center_search_width",
        "Energy": "energy",
        "PropagationDistance": "propagation_distance",
        "PixelSize": "pixel_size",
        "Alpha": "alpha",
        "Queue": "queue",
        "Nnodes": "nnodes",
    }

    # filter chooser index -> tomopy filter name.
    # (index 3 was previously misspelled "hammimg"; fixed to "hamming")
    _FILTER_OPTIONS = ["none", "shepp", "hann", "hamming", "ramlak",
                       "parzen", "cosine", "butterworth"]

    def __init__(self, fields):
        self.fields = fields
        self.set()

    def set(self):
        """Reset every reconstruction attribute to its default value."""
        self.pfname = CONFIG_FILE_NAME
        self.fname = ""
        self.algorithm = 0
        self.filter_index = 0
        self.stripe_method = 0
        self.center = 0
        self.slice = 0
        self.nsino_x_chunk = 16
        self.center_search_width = 5
        self.energy = 0
        self.propagation_distance = 60
        self.pixel_size = 1
        self.alpha = 0.2
        self.queue = 'local'
        self.nnodes = 4

    def readParametersFromFile(self):
        """Load parameters from the text file ``self.pfname``.

        Unknown keywords and blank lines are ignored; only "Filter" is
        converted to int, everything else is kept as a string (as before).
        """
        print("Read parameters from ", self.pfname)
        # 'with' guarantees the handle is closed even if a line is malformed
        with open(self.pfname, "r") as FILE:
            for line in FILE:
                linelist = line.split()
                if not linelist:
                    continue
                key = linelist[0]
                if key == "Filter":
                    self.filter_index = int(linelist[1])
                elif key in self._FILE_KEYS:
                    setattr(self, self._FILE_KEYS[key], linelist[1])

    def readParametersFromGUI(self, originalRoiX):
        """Pull the current values from the GUI widgets.

        Args:
            originalRoiX: unused; kept so existing callers keep working.
        """
        self.fname = self.fields.selectedDatasetField.getText()
        self.algorithm = self.fields.algorithmChooser.getSelectedIndex()
        self.energy = self.fields.energyField.getText()
        self.propagation_distance = self.fields.propagation_distanceField.getText()
        self.pixel_size = self.fields.pixel_sizeField.getText()
        self.alpha = self.fields.alphaField.getText()
        self.filter_index = self.fields.filterChooser.getSelectedIndex()
        self.filterUsed = self.fields.filterList[self.filter_index]
        self.filterOption = self._FILTER_OPTIONS[self.filter_index]
        self.center = self.fields.centerField.getText()
        self.stripe_method = self.fields.stripe_methodChooser.getSelectedIndex()
        self.slice = self.fields.sliceField.getText()
        self.center_search_width = self.fields.centerSearchField.getText()
        self.nsino_x_chunk = self.fields.nsino_x_chunkField.getText()
        if self.fields.localButton.isSelected():
            self.queue = "local"
            print("local cluster is selected")
        elif self.fields.lcrcButton.isSelected():
            self.queue = "LCRC"
            print("LCRC cluster is selected")
        elif self.fields.alcfButton.isSelected():
            self.queue = "ALCF"
            print("ALCF cluster is selected")
        else:
            print("This queue option is not implemented yet")
            sys.exit()
        self.nnodes = self.fields.nnodeChooser.getSelectedIndex() + 1
        # clamp node count to the selected cluster's maximum (ALCF: 8, others: 4)
        max_nodes = 8 if self.queue == "ALCF" else 4
        if self.nnodes > max_nodes:
            self.nnodes = max_nodes
            self.fields.nnodeChooser.setSelectedIndex(max_nodes - 1)

    def writeParametersToFile(self, section='recon'):
        """Persist the reconstruction parameters to ``self.pfname``.

        Only the 'recon' section is written; other sections are a no-op.
        (The old code opened the file in "w+" mode for every section, which
        accidentally truncated it and leaked the handle for 'dataset'.)
        IO errors are silently ignored, as before.
        """
        print("Write to local file")
        if section != 'recon':
            return
        try:
            with open(self.pfname, "w+") as FILE:
                FILE.write("FileName " + self.fname + '\n')
                FILE.write("Algorithm " + str(self.algorithm) + "\n")
                FILE.write("Filter " + str(self.filter_index) + "\n")
                FILE.write("RemoveStripeMethod " + str(self.stripe_method) + "\n")
                FILE.write("Center " + str(self.center) + "\n")
                FILE.write("Slice " + str(self.slice) + "\n")
                FILE.write("NsinoPerChunk " + str(self.nsino_x_chunk) + "\n")
                FILE.write("SearchWidth " + str(self.center_search_width) + "\n")
                FILE.write("Energy " + str(self.energy) + "\n")
                FILE.write("PropagationDistance " + str(self.propagation_distance) + "\n")
                FILE.write("PixelSize " + str(self.pixel_size) + "\n")
                FILE.write("Alpha " + str(self.alpha) + "\n")
                FILE.write("Queue " + str(self.queue) + "\n")
                FILE.write("Nnodes " + str(self.nnodes) + "\n")
                FILE.write("\n")
        except IOError:
            pass

    def writeParametersToGUI(self):
        """Push the current attribute values back into the GUI widgets."""
        self.fields.selectedDatasetField.setText(self.fname)
        self.fields.algorithmChooser.setSelectedIndex(int(self.algorithm))
        self.fields.energyField.setText(self.energy)
        self.fields.propagation_distanceField.setText(self.propagation_distance)
        self.fields.pixel_sizeField.setText(str(self.pixel_size))
        self.fields.alphaField.setText(self.alpha)
        self.fields.filterChooser.setSelectedIndex(self.filter_index)
        self.fields.centerField.setText(str(self.center))
        self.fields.stripe_methodChooser.setSelectedIndex(int(self.stripe_method))
        self.fields.sliceField.setText(str(self.slice))
        self.fields.centerSearchField.setText(self.center_search_width)
        self.fields.nsino_x_chunkField.setText(str(self.nsino_x_chunk))
"decarlof@gmail.com"
] | decarlof@gmail.com |
cf0b9cdd5d82ab982ce3bd9f1ff077f2de5eb38d | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/keyvault-preview/azext_keyvault_preview/vendored_sdks/azure_mgmt_keyvault/_version.py | 93ed747b94265f5417e23205ffd9859524ab0665 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 345 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "9.3.0"
| [
"noreply@github.com"
] | Azure.noreply@github.com |
365bda0ab035c045fe5cb3cca87d84322833509a | 6f21068b31084e81f38db304a51a2609d8af37cd | /2_Scientific_Libraries/plottypes.py | 35d0f603d222150bf508e57c4626974bd928f729 | [] | no_license | vickyf/eurocontrol_datascience | 374b889cac7b8d377caa78079fb57098e73bba0a | 0a7c09002e3b5f22ad563b05a6b4afe4cb6791d7 | refs/heads/master | 2020-03-19T06:03:14.864839 | 2018-06-04T07:24:25 | 2018-06-04T07:24:25 | 135,986,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Subplot 1: sine and cosine line plot
x = np.arange(0,2*np.pi,0.1)
y_cos,y_sin = np.cos(x),np.sin(x)
plt.subplot(2,2,1)
plt.plot(x,y_sin,color = "blue")
plt.plot(x,y_cos,color = "red", linewidth = 3, linestyle = '--')
plt.title('Simple plot')
plt.xticks(())  # hide tick marks on both axes
plt.yticks(())
# Subplot 2: scatter plot of 2-D standard-normal samples
n = 1024
x = np.random.normal(0,1,n)
y = np.random.normal(0,1,n)
plt.subplot(2,2,2)
plt.scatter(x,y, s = 0.5, color = 'brown')
plt.title('Scatter plot')
plt.xlim(-3,3)
plt.ylim(-3,3)
plt.xticks(())
plt.yticks(())
# Subplot 3: pie chart with a greyscale color ramp
n=20
z=np.random.uniform(0, 1, n)
plt.subplot(2,2,3)
plt.pie(z, colors = ['%f' % (i/float(n)) for i in range(n)])
plt.title('Pie plot')
plt.axis('equal')  # keep the pie circular
# Subplot 4: bar chart with linearly decaying random heights
n = 12
x = np.arange(n)
y = (1 - x / float(n)) * np.random.uniform(0.5, 1.0, n)
plt.subplot(2,2,4)
plt.bar(x,y, facecolor='purple')
plt.title('Bar plot')
plt.xticks(())
plt.yticks(())
plt.show() | [
"vicky.froyen@infofarm.be"
] | vicky.froyen@infofarm.be |
41d0a17ac3c3cc7c90992fc154448f084c0fd0cf | 06ee12fb2efa2c67ef1b711450df75af73ef45cd | /day15/08-飞机大战-抽取基类.py | 5c09a0f870bf57a7091073eb1b030bd7ec486959 | [] | no_license | itkasumy/PythonGrammer | c5ed00db3097b8a7dedd49cff79b817e3488d6d9 | 33f17c20ee6533beae1cf422ba5c1376b3765e20 | refs/heads/master | 2020-04-05T15:11:17.337134 | 2018-11-13T05:37:13 | 2018-11-13T05:37:13 | 156,957,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | import pygame
from pygame.locals import *
import time
import random
class Base(object):
    """Common base for every on-screen sprite (planes and bullets)."""

    def __init__(self, screen, x, y, imgPath):
        """Store the position, load the sprite image and remember the surface."""
        self.screen = screen            # surface the sprite is drawn onto
        self.x = x                      # current top-left x coordinate
        self.y = y                      # current top-left y coordinate
        self.imagePath = imgPath        # path of the sprite image on disk
        self.image = pygame.image.load(self.imagePath)

    def display(self):
        """Blit the sprite image at its current position."""
        self.screen.blit(self.image, (self.x, self.y))
class BasePlane(Base):
    """A plane (player or enemy) that can move sideways and owns its bullets."""

    def __init__(self, screen, x, y, imgPath, moveStep, rightLimit):
        super(BasePlane, self).__init__(screen, x, y, imgPath)
        self.moveStep = moveStep        # pixels moved per step
        self.bullets = []               # live bullets fired by this plane
        self.rightLimit = rightLimit    # largest allowed x coordinate

    def display(self):
        """Draw the plane, then draw/advance its bullets, dropping finished ones."""
        super(BasePlane, self).display()
        finished = []
        for shot in self.bullets:
            shot.display()
            shot.move()
            if shot.judge():
                finished.append(shot)
        for shot in finished:
            self.bullets.remove(shot)

    def moveLeft(self):
        """Shift left, clamped at the window's left edge."""
        self.x = max(0, self.x - self.moveStep)

    def moveRight(self):
        """Shift right, clamped at rightLimit."""
        self.x = min(self.rightLimit, self.x + self.moveStep)
class BaseBullet(Base):
    """A bullet travelling straight up; subclasses override speed/direction."""

    def __init__(self, screen, x, y, imgPath):
        super(BaseBullet, self).__init__(screen, x, y, imgPath)

    def move(self):
        """Advance 5 pixels towards the top of the screen."""
        self.y -= 5

    def __del__(self):
        print('子弹销毁了...')

    def judge(self):
        """Return True once the bullet has left the screen and can be removed."""
        return self.y <= 0
class HeroPlane(BasePlane):
    """The player-controlled plane, spawned near the bottom of the window."""

    def __init__(self, screen):
        super(HeroPlane, self).__init__(screen, 190, 520, './feiji/hero.gif', 15, 380)

    def shoot(self):
        """Fire one bullet from the plane's current position."""
        self.bullets.append(Bullet(self.x, self.y, self.screen))
class EnemyPlane(BasePlane):
    """An enemy plane that patrols left-right along the top and fires randomly."""

    def __init__(self, screen):
        super(EnemyPlane, self).__init__(screen, 0, 0, './feiji/enemy-1.gif', 3, 430)
        self.oritation = True  # True -> currently moving right, False -> left

    def move(self):
        """Patrol: bounce between the window edges (x in [0, 430])."""
        if self.x <= 0:
            self.oritation = True
        if self.x >= 430:
            self.oritation = False
        if self.oritation:
            self.moveRight()
        else:
            self.moveLeft()

    def shoot(self):
        """Fire with roughly 5% probability per frame."""
        if random.randint(1, 100) < 6:
            self.bullets.append(EnemyBullet(self.x, self.y, self.screen))
class Bullet(BaseBullet):
    """A hero bullet, spawned just above the plane's nose."""

    def __init__(self, planeX, planeY, screen):
        super(Bullet, self).__init__(screen, planeX + 40, planeY - 22, './feiji/bullet.png')

    def move(self):
        """Travel upwards 5 pixels per frame."""
        self.y -= 5

    def judge(self):
        """Off-screen once above the top edge."""
        return self.y <= 0
class EnemyBullet(BaseBullet):
    """An enemy bullet: spawns below the enemy plane and travels downwards."""

    def __init__(self, planeX, planeY, screen):
        super(EnemyBullet, self).__init__(screen, planeX + 21, planeY + 40, './feiji/bullet1.png')

    def move(self):
        """Travel downwards 10 pixels per frame."""
        self.y += 10

    def judge(self):
        """Off-screen once below the 700-pixel-high window."""
        return self.y >= 700
def main():
    """Main game loop: draw the background and planes, then handle input."""
    screen = pygame.display.set_mode((480, 700), 0, 32)
    background = pygame.image.load('./feiji/background.png')
    hero = HeroPlane(screen)
    enemy = EnemyPlane(screen)

    while True:
        # draw everything for this frame
        screen.blit(background, (0, 0))
        hero.display()
        enemy.display()
        enemy.move()
        enemy.shoot()

        # handle window / keyboard events
        for event in pygame.event.get():
            if event.type == QUIT:
                print('exit')
                exit()
            elif event.type == KEYDOWN:
                if event.key in (K_a, K_LEFT):
                    print('left')
                    hero.moveLeft()
                elif event.key in (K_d, K_RIGHT):
                    print('right')
                    hero.moveRight()
                elif event.key == K_SPACE:
                    print('space')
                    hero.shoot()

        # present the frame and throttle the loop
        pygame.display.update()
        time.sleep(1 / 100)


if __name__ == '__main__':
    print('程序开始')
    main()
    print('程序结束')
| [
"18500682038@163.com"
] | 18500682038@163.com |
355024ce6827f5d9554258b98a1491c97c09aa07 | 33c5ba033aaed849328a4d12a07d69603e3bf499 | /rer/groupware/multilanguage/extender.py | ebd4b7027e5f26470dab5904970fd637baa44348 | [] | no_license | PloneGov-IT/rer.groupware.multilanguage | aa543585b7838246c1f5febd19d4e6dc99133074 | 6ad3df7ee20be7d0e67845ffef00a6196f39e35f | refs/heads/master | 2021-01-21T17:46:05.658255 | 2014-11-24T09:49:38 | 2014-11-24T09:49:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | # -*- coding: utf-8 -*-
from archetypes.schemaextender.field import ExtensionField
from archetypes.schemaextender.interfaces import IBrowserLayerAwareExtender, ISchemaExtender
from Products.Archetypes.atapi import StringField
from Products.Archetypes.Widget import LanguageWidget
from Products.ATContentTypes.interface.interfaces import IATContentType
from Products.Archetypes import PloneMessageFactory as _
from rer.groupware.multilanguage.interfaces import IRERGroupwareMultilanguageLayer
from rer.groupware.room.interfaces import IGroupRoom
from zope.component import adapts
from zope.interface import implements
class GroupwareStringField(ExtensionField, StringField):
    """Archetypes string field that schema extenders inject into content types."""
class GroupwareLanguageExtender(object):
    """
    Re-define language field and use a custom default method for all Content types
    """
    adapts(IATContentType)  # applies to every Archetypes content type
    implements(ISchemaExtender, IBrowserLayerAwareExtender)
    # only active when the rer.groupware.multilanguage browser layer is installed
    layer = IRERGroupwareMultilanguageLayer
    # Replacement 'language' field: lives in the "categorization" schemata and
    # takes its default from the 'gpwDefaultLanguage' method on the context.
    fields = [GroupwareStringField(
        'language',
        accessor="Language",
        schemata="categorization",
        default_method='gpwDefaultLanguage',
        vocabulary_factory='plone.app.vocabularies.SupportedContentLanguages',
        widget=LanguageWidget(
            label=_(u'label_language', default=u'Language'),
            format="select",
        ),
    ),
    ]

    def __init__(self, context):
        self.context = context

    def getFields(self):
        # schema-extender contract: return the extra fields for this context
        return self.fields
class GroupwareRoomLanguageExtender(object):
    """
    For groupware rooms, move language field in default schemata
    """
    adapts(IGroupRoom)  # applies only to groupware room objects
    implements(ISchemaExtender, IBrowserLayerAwareExtender)
    # only active when the rer.groupware.multilanguage browser layer is installed
    layer = IRERGroupwareMultilanguageLayer
    # Same 'language' field as GroupwareLanguageExtender, but placed in the
    # "default" schemata so it shows on the room's main edit form.
    fields = [GroupwareStringField(
        'language',
        accessor="Language",
        schemata="default",
        default_method='gpwDefaultLanguage',
        vocabulary_factory='plone.app.vocabularies.SupportedContentLanguages',
        widget=LanguageWidget(
            label=_(u'label_language', default=u'Language'),
            format="select",
        ),
    ),
    ]

    def __init__(self, context):
        self.context = context

    def getFields(self):
        # schema-extender contract: return the extra fields for this context
        return self.fields
| [
"andrea.cecchi85@gmail.com"
] | andrea.cecchi85@gmail.com |
c309b24c5233096607053ef27b246b64c8bf58cc | 1ceb35da7b1106a4da4e8a3a5620d23a326a68e4 | /corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/010_motion_correction_zstack_caiman_multichannel.py | fae38c75308d66e6f6d96c8dd03f31f6a74cf3e7 | [] | no_license | zhuangjun1981/corticalmapping | c3870a3f31ed064d77f209a08e71f44c375676a3 | 0ddd261b3993f5ce5608adfbd98a588afc56d20c | refs/heads/master | 2022-11-14T03:24:53.443659 | 2020-07-13T23:48:50 | 2020-07-13T23:48:50 | 84,975,797 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,145 | py | import sys
sys.path.extend([r"E:\data\github_packages\CaImAn"])
import caiman as cm
import numpy as np
import os
from caiman.motion_correction import MotionCorrect, tile_and_correct, motion_correction_piecewise
import tifffile as tf
import h5py
import warnings
from multiprocessing import Pool
base_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \
r"\180605-M391355-2p\zstack\zstack_zoom2"
reference_chn = 'green'
n_processes = 5
def correct_single_movie(folder_path):
    """Rigid motion-correct the single .tif movie found in *folder_path*.

    Writes into *folder_path*:
      * <movie>_corrected.tif           motion-corrected movie (int16)
      * corrected_mean_projection.tif   mean projection (float32)
      * corrected_max_projection.tif    max projection (float32)
      * correction_offsets.hdf5         per-frame (height, width) rigid shifts

    CaImAn's intermediate memory-mapped movie is deleted afterwards.

    Raises:
        LookupError: if the folder contains zero or more than one .tif file.
    """
    # ======================== motion-correction parameters ========================
    niter_rig = 5              # iterations for rigid motion correction
    max_shifts = (30, 30)      # maximum allowed rigid shift in pixels
    splits_rig = 56            # movie chunks processed across time (rigid)
    strides = (48, 48)         # new patch every x pixels (pw-rigid settings)
    overlaps = (24, 24)        # overlap between patches (patch = strides+overlaps)
    splits_els = 56            # movie chunks processed across time (elastic)
    upsample_factor_grid = 4   # upsampling to avoid smearing when merging patches
    max_deviation_rigid = 3    # max patch deviation from the rigid shifts
    shifts_opencv = True       # apply shifts the fast (slightly smoothing) way
    nonneg_movie = False       # do not offset the saved movie to nonnegative
    # ==============================================================================

    offset_mov = 0.

    tif_files = [f for f in os.listdir(folder_path) if f[-4:] == '.tif']
    if len(tif_files) == 0:
        raise LookupError('no tif file found in folder: {}'.format(folder_path))
    elif len(tif_files) > 1:
        raise LookupError('more than one tif files found in folder: {}'.format(folder_path))
    file_path = os.path.join(folder_path, tif_files[0])

    # create a motion correction object and run rigid correction
    mc = MotionCorrect(file_path, offset_mov,
                       dview=None, max_shifts=max_shifts, niter_rig=niter_rig,
                       splits_rig=splits_rig, strides=strides, overlaps=overlaps,
                       splits_els=splits_els, upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie)
    mc.motion_correct_rigid(save_movie=True)

    # load the corrected movie and save it plus its projections
    m_rig = cm.load(mc.fname_tot_rig)
    m_rig = m_rig.astype(np.int16)
    # use only the basename: joining the old absolute path with folder_path
    # worked by accident (os.path.join discards the first part)
    save_name = os.path.basename(os.path.splitext(file_path)[0]) + '_corrected.tif'
    tf.imsave(os.path.join(folder_path, save_name), m_rig)
    tf.imsave(os.path.join(folder_path, 'corrected_mean_projection.tif'),
              np.mean(m_rig, axis=0).astype(np.float32))
    tf.imsave(os.path.join(folder_path, 'corrected_max_projection.tif'),
              np.max(m_rig, axis=0).astype(np.float32))

    # persist the per-frame rigid shifts; 'with' guarantees the HDF5 handle is
    # closed (the old code leaked it)
    offsets = np.array([np.array(o) for o in mc.shifts_rig]).astype(np.float32)
    with h5py.File(os.path.join(folder_path, 'correction_offsets.hdf5')) as offset_f:
        offset_dset = offset_f.create_dataset(name='file_0000', data=offsets)
        offset_dset.attrs['format'] = 'height, width'
        offset_dset.attrs['path'] = file_path

    # remove CaImAn's intermediate memory-mapped movie
    os.remove(mc.fname_tot_rig[0])
if __name__ == '__main__':
    # correct the reference channel: each subfolder holds one plane's movie
    data_folder = os.path.join(base_folder, reference_chn)
    chunk_p = Pool(n_processes)
    folder_list = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))]
    folder_list.sort()
    print('\n'.join(folder_list))
    folder_list = [os.path.join(data_folder, f) for f in folder_list]
    # run motion correction for all plane folders in parallel
    chunk_p.map(correct_single_movie, folder_list)
"junz@alleninstitute.org"
] | junz@alleninstitute.org |
790e2066d1faca87986538cc4d7037df448d580e | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_VSCODE-extensions/vscode-jupyter/pythonFiles/vscode_datascience_helpers/daemon/__main__.py | 666f245b2a23462a33bfce132b6e6d3bdedd2a9d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 3,982 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import importlib
import json
import os
import logging
import logging.config
import sys
log = logging.getLogger(__name__)
LOG_FORMAT = (
"%(asctime)s UTC - %(levelname)s - (PID: %(process)d) - %(name)s - %(message)s"
)
queue_handler = None
def add_arguments(parser):
parser.description = "Daemon"
parser.add_argument(
"--daemon-module",
default="vscode_datascience_helpers.daemon.daemon_python",
help="Daemon Module",
)
log_group = parser.add_mutually_exclusive_group()
log_group.add_argument(
"--log-config", help="Path to a JSON file containing Python logging config."
)
log_group.add_argument(
"--log-file",
help="Redirect logs to the given file instead of writing to stderr."
"Has no effect if used with --log-config.",
)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Increase verbosity of log output, overrides log config file",
)
class TemporaryQueueHandler(logging.Handler):
"""Logger used to temporarily store everything into a queue.
Later the messages are pushed back to the RPC client as a notification.
Once the RPC channel is up, we'll stop queuing messages and sending id directly.
"""
def __init__(self):
logging.Handler.__init__(self)
self.queue = []
self.server = None
def set_server(self, server):
# Send everything that has beeen queued until now.
self.server = server
for msg in self.queue:
self.server._endpoint.notify("log", msg)
self.queue = []
def emit(self, record):
data = {
"level": record.levelname,
"msg": self.format(record),
"pid": os.getpid(),
}
# If we don't have the server, then queue it and send it later.
if self.server is None:
self.queue.append(data)
else:
self.server._endpoint.notify("log", data)
def _configure_logger(verbose=0, log_config=None, log_file=None):
root_logger = logging.root
global queue_handler
if log_config:
with open(log_config, "r") as f:
logging.config.dictConfig(json.load(f))
else:
formatter = logging.Formatter(LOG_FORMAT)
if log_file:
log_handler = logging.handlers.RotatingFileHandler(
log_file,
mode="a",
maxBytes=50 * 1024 * 1024,
backupCount=10,
encoding=None,
delay=0,
)
log_handler.setFormatter(formatter)
root_logger.addHandler(log_handler)
else:
queue_handler = TemporaryQueueHandler()
root_logger.addHandler(queue_handler)
if verbose == 0:
level = logging.WARNING
elif verbose == 1:
level = logging.INFO
elif verbose >= 2:
level = logging.DEBUG
root_logger.setLevel(level)
def main():
"""Starts the daemon.
The daemon_module allows authors of modules to provide a custom daemon implementation.
E.g. we have a base implementation for standard python functionality,
and a custom daemon implementation for DS work (related to jupyter).
"""
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
_configure_logger(args.verbose, args.log_config, args.log_file)
log.info("Starting daemon from %s.PythonDaemon", args.daemon_module)
try:
daemon_module = importlib.import_module(args.daemon_module)
daemon_cls = daemon_module.PythonDaemon
daemon_cls.start_daemon(queue_handler)
except Exception:
import traceback
log.error(traceback.format_exc())
raise Exception("Failed to start daemon")
if __name__ == "__main__":
main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
497196af0825121b296b2753bc579672864906c4 | 0258ce084f66f5c4080b686f7fd388ef8094ac75 | /Flask Programs/SQLite/crud.py | 83aaabb26dbec997c4d7e6a2081a619863c3a509 | [] | no_license | Jaydeep-07/Flask | 524be9056c9e5cc6e7733d5780f8ce1e110cc9b4 | 172b5fd30785d3876aeb5509eb685887f1692d52 | refs/heads/master | 2020-12-26T11:15:05.092803 | 2020-05-09T10:19:13 | 2020-05-09T10:19:13 | 237,491,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | from flask import *
import sqlite3
app = Flask(__name__,template_folder='Template')
@app.route("/")
def index():
return render_template("index.html");
@app.route("/add")
def add():
return render_template("add.html")
@app.route("/savedetails",methods = ["POST","GET"])
def saveDetails():
msg = "msg"
if request.method == "POST":
try:
name = request.form["name"]
email = request.form["email"]
address = request.form["address"]
with sqlite3.connect("employee.db") as con:
cur = con.cursor()
cur.execute("INSERT into Employees (name, email, address) values (?,?,?)",(name,email,address))
con.commit()
msg = "Employee successfully Added"
except:
con.rollback()
msg = "We can not add the employee to the list"
finally:
return render_template("success.html",msg = msg)
con.close()
@app.route("/view")
def view():
con = sqlite3.connect("employee.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from Employees")
rows = cur.fetchall()
return render_template("view.html",rows = rows)
@app.route("/delete")
def delete():
return render_template("delete.html")
@app.route("/deleterecord",methods = ["POST"])
def deleterecord():
id = request.form["id"]
with sqlite3.connect("employee.db") as con:
try:
cur = con.cursor()
cur.execute("delete from Employees where id = ?",id)
msg = "record successfully deleted"
except:
msg = "can't be deleted"
finally:
return render_template("delete_record.html",msg = msg)
if __name__ == "__main__":
app.run(debug = True) | [
"jaydeepvpatil225@gmail.com"
] | jaydeepvpatil225@gmail.com |
4a20713389c308d697f51ef7fcbc1917e11778a8 | dae2da1b80124ba6923cb1674033208af01e031f | /problems/202101/R-boj-7453-합이 0인 네 정수.py | eedffde840ca2557e26440da7144a901cb79aa2b | [] | no_license | MaxKim-J/Algo | ad09fa19da3e764ba7af18d015bbaa186643f7fb | 34771c45361db4aade4c364179e13708c5004b3a | refs/heads/master | 2023-01-08T00:49:11.479545 | 2022-12-26T15:45:35 | 2022-12-26T15:48:52 | 237,583,281 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | # 부분수열 합이랑 똑같은 아이디어
#! 자료형을 임의로 나눠 시간을 줄이는 간단하고도 명쾌한 방법!!!!!!!!
#! n^4에는 당연히 못풀고 O(2n^2) 쯤으로 시간을 줄이는 방법이다. 이렇게 백트랙킹도 가능한듯
# 입력이 최대 4 * 4000이니 sys를 사용하자
import sys
N = int(sys.stdin.readline())
answer = 0
A = B = C = D = []
for _ in range(N):
a1, b1, c1, d1 = map(int, input().split(" "))
A.append(a1)
B.append(b1)
C.append(c1)
D.append(d1)
AB = dict()
# 전에 풀었던 문제처럼, 합을 완성할때 값을 도출시키면 시간이 더 준다
# 곱경우 구하기
for i in range(N):
for j in range(N):
temp = A[i] + B[j]
if temp in AB:
AB[temp] += 1
else:
AB[temp] = 1
for i in range(N):
for j in range(N):
temp = -(C[i] + D[j])
if temp in AB:
answer += AB[temp]
print(answer)
| [
"hwaseen@gmail.com"
] | hwaseen@gmail.com |
6dd1608accf61ff244bcb11ab0befc4eceb2c646 | 9d7d69178c6f1f1db6ed6767e0af32bfe836549c | /new_workspace/Gumtree_Workspace/Magnet/Yick/P9363/100 Alignment/20210130/Overnight/2021_time_55_5K_0Oe_bottom_40min.py | 2f815255bbf7912196d0d54ccc32594d35ba372e | [] | no_license | Gumtree/Quokka_scripts | 217958288b59adbdaf00a9a13ece42f169003889 | c9687d963552023d7408a8530005a99aabea1697 | refs/heads/master | 2023-08-30T20:47:32.142903 | 2023-08-18T03:38:09 | 2023-08-18T03:38:09 | 8,191,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,432 | py | histmem preset 60
histmem mode time
#Time scan
#-----------------------------------------------------------------
#System reset (15 minutes)
hset /sample/tc1/control/tolerance1 1
drive ma1_setpoint 0
drive tc1_driveable 90
wait 10
drive ma1_setpoint 0
wait 10
hset /sample/tc1/control/tolerance1 0.1
drive tc1_driveable 55.8
#-----------------------------------------------------------------
drive ma1_setpoint 0
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 10 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 20 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 30 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 40 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 50 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 60 minutes | [
"quokka@DAV5-QUOKKA.nbi.ansto.gov.au"
] | quokka@DAV5-QUOKKA.nbi.ansto.gov.au |
728444c91e5b1c215f157d91c477de72e296153f | 7cf119239091001cbe687f73018dc6a58b5b1333 | /datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_CJXW_ZYCJ/ZX_CJXW_ZYCJ_CJW_HGSY.py | fa6658d4b23ae9065f2f10549ae13c7b765d6153 | [
"Apache-2.0"
] | permissive | ILKKAI/dataETL | 0f5b80c3482994f735f092a1e01fa1009bac4109 | 32f7ec3aaaf32b5074536a615cb9cd5c28bd499c | refs/heads/master | 2022-04-04T19:27:05.747852 | 2020-02-28T11:17:48 | 2020-02-28T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
return data
if __name__ == '__main__':
main_mongo = MongoClient(entity_code="ZX_CJXW_ZYCJ_CJW_HGSY", mongo_collection="ZX_CJXW_ZYCJ")
data_list = main_mongo.main()
for data in data_list:
re_data = data_shuffle(data)
print(re_data)
| [
"499413642@qq.com"
] | 499413642@qq.com |
f2c6e6187a2ca1c6010314be23e1c55da99dce8a | d8f1c299d1b1c3619272bb2f81197e170c88887a | /postgresqleu/confreg/migrations/0008_volunteers.py | 8861f16b53e93b766bb5df47fc697c623fa35b10 | [] | no_license | mhagander/pgeu-website | 1d324898a8ba47af80488d1f803fd8684157bc80 | c88cd42b39a84d16a056f2b668fd1949c893c4e0 | refs/heads/master | 2021-01-24T06:06:19.545244 | 2018-12-04T19:53:33 | 2018-12-04T19:55:40 | 619,420 | 0 | 2 | null | 2019-11-02T08:00:17 | 2010-04-20T11:51:40 | Python | UTF-8 | Python | false | false | 2,592 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
import django.contrib.postgres.fields.ranges
class Migration(migrations.Migration):
dependencies = [
('confreg', '0007_new_specialtype'),
]
operations = [
migrations.CreateModel(
name='VolunteerAssignment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('vol_confirmed', models.BooleanField(default=False, verbose_name=b'Confirmed by volunteer')),
('org_confirmed', models.BooleanField(default=False, verbose_name=b'Confirmed by organizers')),
],
),
migrations.CreateModel(
name='VolunteerSlot',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timerange', django.contrib.postgres.fields.ranges.DateTimeRangeField()),
('title', models.CharField(max_length=50)),
('min_staff', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('max_staff', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
],
),
migrations.AddField(
model_name='conference',
name='volunteers',
field=models.ManyToManyField(help_text=b'Users who volunteer', related_name='volunteers_set', to='confreg.ConferenceRegistration', blank=True),
),
migrations.AddField(
model_name='conferenceregistration',
name='regtoken',
field=models.TextField(unique=True, null=True, blank=True),
),
migrations.AddField(
model_name='volunteerslot',
name='conference',
field=models.ForeignKey(to='confreg.Conference', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='volunteerassignment',
name='reg',
field=models.ForeignKey(to='confreg.ConferenceRegistration', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='volunteerassignment',
name='slot',
field=models.ForeignKey(to='confreg.VolunteerSlot', on_delete=models.CASCADE),
),
migrations.RunSQL(
"CREATE INDEX confreg_volunteerslot_timerange_idx ON confreg_volunteerslot USING gist(timerange)",
),
]
| [
"magnus@hagander.net"
] | magnus@hagander.net |
edae2068ba8fc33d1442dfb5bfa8df67c369737f | 18a2e479f4edef528fa7803723822f9f5974e5f8 | /17_adding_fips_to_counties.py | 706e66e8deef1a90e70c521945b4d6217b762bcd | [] | no_license | wpower12/RedditCountyBias | ee25cb870b807466ed53225471e9ac6f5eec1cd0 | 59f0b6642f20547ac129b47496ef3ca0ac135a39 | refs/heads/master | 2023-04-04T22:24:24.258295 | 2021-04-15T17:50:18 | 2021-04-15T17:50:18 | 329,438,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | import pandas as pd
import praw
from psaw import PushshiftAPI
import time
import datetime as dt
import pymysql as sql
import progressbar
import requests
import math
COUNTY_FN = "./data/counties_fips.csv"
conn = sql.connect(host='localhost',
user='bill',
password='password',
database='reddit_data')
df = pd.read_csv(COUNTY_FN)
UPDATE_COUNTY_SQL = """UPDATE county SET fips='{}' WHERE county_id={};"""
for row in df.iterrows():
state = row[1]['state']
county = row[1]['county_name']
county_id = row[1]['county_id']
fips_val = row[1]['fips']
if not math.isnan(fips_val):
# need to make sure this gets 0' padded at the front.
fips_val = "{:0>5}".format(int(row[1]['fips']))
else:
fips_val = -1
with conn.cursor() as cursor:
cursor.execute(UPDATE_COUNTY_SQL.format(fips_val, county_id))
conn.commit() | [
"willpowe@gmail.com"
] | willpowe@gmail.com |
34569ddf4dde722008a3ee453027d40c2911e838 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/portal/azext_portal/vendored_sdks/portal/aio/operations_async/_operation_operations_async.py | aebe6bdf31d0b930019b5627d296786e99d0c031 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 4,017 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationOperations:
"""OperationOperations async operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~portal.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> "models.ResourceProviderOperationList":
"""The Microsoft Portal operations API.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceProviderOperationList or the result of cls(response)
:rtype: ~portal.models.ResourceProviderOperationList
:raises: ~portal.models.ErrorResponseException:
"""
cls: ClsType["models.ResourceProviderOperationList"] = kwargs.pop('cls', None )
error_map = kwargs.pop('error_map', {})
api_version = "2019-01-01-preview"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
else:
url = next_link
# Construct parameters
query_parameters: Dict[str, Any] = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters: Dict[str, Any] = {}
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceProviderOperationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.ErrorResponseException.from_response(response, self._deserialize)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Portal/operations'}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
e7e1a396fe3a3d6b353cf863995d1b47d0be9c89 | cc86a7c9f27b45002c9d0d5388b6457b6470dc2c | /modern_business/settings.py | 0404b4aa16bb9461d3c290680556037305bad97c | [] | no_license | innotexak/DjangoCMS | 5bf18df6aa1212cf0c048c83e1faa203b93a7a36 | 6545f928fb9afb09a004b9c5935158e244d1086b | refs/heads/main | 2023-02-26T09:59:50.974034 | 2021-02-04T14:53:11 | 2021-02-04T14:53:11 | 331,692,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,452 | py | import os # isort:skip
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for modern_business project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f!*h4ap^i6o-pdm+qj9yqb@(56g_)epg6rl_5%-2x_45s^0f@f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ROOT_URLCONF = 'modern_business.urls'
WSGI_APPLICATION = 'modern_business.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'modern_business', 'static'),
)
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'modern_business', 'templates'),],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.csrf',
'django.template.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.template.context_processors.static',
'cms.context_processors.cms_settings'
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
],
},
},
]
MIDDLEWARE = [
'cms.middleware.utils.ApphookReloadMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
]
INSTALLED_APPS = [
'djangocms_admin_style',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_text_ckeditor',
'filer',
'easy_thumbnails',
'djangocms_bootstrap4',
'djangocms_bootstrap4.contrib.bootstrap4_alerts',
'djangocms_bootstrap4.contrib.bootstrap4_badge',
'djangocms_bootstrap4.contrib.bootstrap4_card',
'djangocms_bootstrap4.contrib.bootstrap4_carousel',
'djangocms_bootstrap4.contrib.bootstrap4_collapse',
'djangocms_bootstrap4.contrib.bootstrap4_content',
'djangocms_bootstrap4.contrib.bootstrap4_grid',
'djangocms_bootstrap4.contrib.bootstrap4_jumbotron',
'djangocms_bootstrap4.contrib.bootstrap4_link',
'djangocms_bootstrap4.contrib.bootstrap4_listgroup',
'djangocms_bootstrap4.contrib.bootstrap4_media',
'djangocms_bootstrap4.contrib.bootstrap4_picture',
'djangocms_bootstrap4.contrib.bootstrap4_tabs',
'djangocms_bootstrap4.contrib.bootstrap4_utilities',
'djangocms_file',
'djangocms_icon',
'djangocms_link',
'djangocms_picture',
'djangocms_style',
'djangocms_googlemap',
'djangocms_video',
'modern_business',
'polls_cms_integration',
'polls'
]
LANGUAGES = (
## Customize this
('en', gettext('en')),
)
CMS_LANGUAGES = {
## Customize this
1: [
{
'code': 'en',
'name': gettext('en'),
'redirect_on_fallback': True,
'public': True,
'hide_untranslated': False,
},
],
'default': {
'redirect_on_fallback': True,
'public': True,
'hide_untranslated': False,
},
}
CMS_TEMPLATES = (
## Customize this
('fullwidth.html', 'Fullwidth'),
('home.html', 'Home'),
)
X_FRAME_OPTIONS = 'SAMEORIGIN'
CMS_PERMISSION = True
CMS_PLACEHOLDER_CONF = {}
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.sqlite3',
'HOST': 'localhost',
'NAME': 'project.db',
'PASSWORD': '',
'PORT': '',
'USER': ''
}
}
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
| [
"akuhinnocent2016@gmail.com"
] | akuhinnocent2016@gmail.com |
56288e8ec12376f3caaa2bf6670c4052cd21c2ce | b483c598fa375e9af02348960f210b9f482bd655 | /pythonbrasil/exercicios/repeticao/ER resp 39.py | 3530297090dcd38af2039607fba79850fa812d48 | [
"MIT"
] | permissive | brunofonsousa/python | 6f766d08bf193180ea9a4903cb93ffd167db588d | 8f2f26c77015c0baaa76174e004406b4115272c7 | refs/heads/master | 2022-09-30T14:58:01.080749 | 2020-06-08T09:55:35 | 2020-06-08T09:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | '''
Faça um programa que leia dez conjuntos de dois valores, o primeiro representando
o número do aluno e o segundo representando a sua altura em centímetros. Encontre
o aluno mais alto e o mais baixo. Mostre o número do aluno mais alto e o número do
aluno mais baixo, junto com suas alturas.
'''
quant_alunos = 10
cod_maisAlto = 0
cod_maisBaixo = 0
mais_alto = 0
mais_baixo = 5
for i in range(quant_alunos):
print('')
cod_aluno = int(input('Digite o número do aluno: '))
altura = float(input('Altura: '))
if altura > mais_alto:
cod_maisAlto = cod_aluno
mais_alto = altura
if altura < mais_baixo:
cod_maisBaixo = cod_aluno
mais_baixo = altura
print('')
print('ALUNOS: ')
print('O aluno mais alto foi o nº %i com a altura de %0.2f'%(cod_maisAlto,mais_alto))
print('O aluno mais baixo foi o nº %i com a altura de %0.2f'%(cod_maisBaixo,mais_baixo))
| [
"brunofonsousa@gmail.com"
] | brunofonsousa@gmail.com |
fef1cd77d6d0cf5499dee830c7afdd90b66cf0ef | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/PyBox/pybox2d-android/examples/test_OneSidedPlatform.py | 9ba5bf332a3936891263a8e35d4532d9f54a5d87 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:64a4150f4e7993b64261e93365583e48ee17c68fcd142d236d7ba99977f09b72
size 2957
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
c8fc07a03a64e1410fc4b6bf2d4fec67f8a9c97d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3919.py | 1c304f9ed5704b5147c1697d18850afa52e338ef | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | Input = open( "C:\Users\josip kotarac\Desktop\A-small-attempt1.in", "r" )
Output = open( "C:\Users\josip kotarac\Desktop\output.txt", "w" )
array = []
chosenRow=0
maxRows = 4
possibleCards=[]
foundCard=0
cardFoundNumber=0
caseNum =0
setDone = False
for line in Input.readlines():
line = line.split()
array.append( line )
for index in range(len(array)):
if (len(array[index])==1):
chosenRow=int(array[index][0])
indexInRow = 0
foundCard = 0
cardFoundNumber = 0
elif (len(array[index])==4):
if (indexInRow==chosenRow-1 and not possibleCards):
possibleCards=array[index]
elif (indexInRow==chosenRow-1 and possibleCards):
caseNum+=1
for card in array[index]:
for oldCard in possibleCards:
if (card == oldCard):
cardFoundNumber+=1
foundCard = card
if (cardFoundNumber>1):
Output.write( "Case #"+ str(caseNum) +": Bad magician!\n")
elif (cardFoundNumber==1):
Output.write( "Case #"+ str(caseNum) +": "+foundCard+"\n")
elif (cardFoundNumber==0):
Output.write( "Case #"+ str(caseNum) +": Volunteer cheated!\n")
possibleCards=[]
indexInRow+=1
Output.close()
Input.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
2e2e18df159c6ad3d657c307d0904d91462f0f63 | deaf14c242dc5e534e5ce7691e206f9b5305fdb1 | /blog/models.py | 02a89abaf5581be70f4424179254aa806ba755d8 | [] | no_license | Ulorewien/synergee | ac51462cb270efc4d0ef9bf22b1a946c46b6cc1d | 22b285393ff28e8cab8b228c12087bde22280fe7 | refs/heads/main | 2023-08-12T19:15:25.171103 | 2021-10-04T15:47:32 | 2021-10-04T15:47:32 | 332,399,492 | 0 | 1 | null | 2021-01-24T08:21:59 | 2021-01-24T08:21:58 | null | UTF-8 | Python | false | false | 559 | py | from django.db import models
from group.models import Member,Interest
class Post(models.Model):
"""
Attributes of Post Relational Model
"""
author = models.ForeignKey(Member,on_delete=models.CASCADE)
category = models.ForeignKey(Interest,on_delete=models.CASCADE)
title = models.CharField(max_length=128)
content = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-created_at',)
def __str__(self):
return f"Posted By {self.author.first_name}"
| [
"shubhpathak07@gmail.com"
] | shubhpathak07@gmail.com |
95b84bdb31fb8d15a6125e393c6a3c410d477038 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_stationers.py | 8ecfa5c4d17eaa1e85497e432cc4e12608092d92 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _STATIONERS():
def __init__(self,):
self.name = "STATIONERS"
self.definitions = stationer
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['stationer']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
dd03466d913bf653258193e0979efba7236aa71e | b28b8dc227d9e6b015eeb19db9cb97ae067a3301 | /DivideTwoIntegers.py | f01458a232870b5efe7608a4457339f49dccd822 | [] | no_license | nbrahman/LeetCode | d5dd267e1b64d6d5ac6c7a312f286faa043a3444 | 3f94952eba038ca07ecd57f5dc51889daf7b663a | refs/heads/master | 2021-01-22T10:13:13.769444 | 2017-02-14T22:00:29 | 2017-02-14T22:00:29 | 81,993,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | '''
Divide two integers without using multiplication, division and mod operator.
If it is overflow, return MAX_INT.
'''
class Solution(object):
intRemainder = 0
intDivisor = 0
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
intQuotient = 1
if (divisor==dividend):
intRemainder = 0
return 1
elif (dividend < divisor):
intRemainder = dividend
return 0
divisor = divisor << 1
print ('divisor',divisor)
intQuotient = intQuotient << 1
print ('intQuotient',intQuotient)
while (divisor <= dividend):
divisor = divisor << 1
print ('divisor',divisor)
intQuotient = intQuotient << 1
print ('intQuotient',intQuotient)
intQuotient = intQuotient + self.divide(dividend - divisor, self.intDivisor)
intQuotient = intQuotient >> 1
rtype = intQuotient
print (rtype)
return rtype
if __name__ == '__main__':
num1 = input ("enter the numerator: ")
num2 = input ("enter the denominator: ")
result = Solution().divide(int(num1), int(num2))
print (result)
| [
"nikhil.brahmankar@gmail.com"
] | nikhil.brahmankar@gmail.com |
36be193c3b32fb7370569d601d089401790b81ab | 8a936bd3e28c9ec116244df37d3ba5aedd48c9cc | /dashboard/internal/pages/register.py | 93615c354eb570ed066b045a84732334169e0466 | [
"Apache-2.0"
] | permissive | AssassinDev422/PHP_Minera | a66bd23610cbcfd43545e5b6a689c2c1b1248814 | f507dbcc4b4609990f14995754d54f42dcaaa618 | refs/heads/master | 2020-03-15T13:16:30.708491 | 2018-05-04T16:20:05 | 2018-05-04T16:20:05 | 132,162,749 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import os
import sys
import json
import requests
import smtplib
def main():
sid = sys.argv[1]
email = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
phone_number = sys.argv[5]
hashpower = sys.argv[6]
reg_user = {'referrals':0,'btcavailible':0,'username':username,'email':email,'phone':phone_number,'password':password,'btchashpower':hashpower,'btcaddress':'.','ltcaddress':'.','dashaddress':'.','accounttype':'customer'}
r = requests.post('http://api.msunicloud.com:2404/users/', data = reg_user, cookies={'sid':sid})
t = r.json()
_id = t['id']
if __name__ == '__main__':
main()
| [
"saas.exp7@gmail.com"
] | saas.exp7@gmail.com |
8383a2063f9f853bc4844b765c99f5e3d10d5d33 | 6e5b8ef12d56a11aa0b68fd5a3700a4be0541acd | /.history/nets/yolov5_20210819163229.py | 028344d8484e367faf34fcceef742e37f85afac1 | [] | no_license | Arcofcosmos/MyYolov5_Pytorch | d8fc7f8398249aeb996fa4b07c3ecdc6fedd2308 | 07bcb7e3b1dd32ec25171d4aa860d462f5a01078 | refs/heads/main | 2023-07-15T16:07:52.624808 | 2021-08-21T07:13:42 | 2021-08-21T07:13:42 | 398,490,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,494 | py | '''
Author: TuZhou
Version: 1.0
Date: 2021-08-18 15:24:52
LastEditTime: 2021-08-19 16:32:29
LastEditors: TuZhou
Description:
FilePath: \my_yolov5\nets\yolov5.py
'''
import torch
import torch.nn as nn
from pathlib import Path
import yaml
import math
from CSPDarknet53 import preprocess_backbone, CSP2, CBL
#------------------------------#
# 计算输出通道数
#------------------------------#
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
#------------------------------#
# 处理yaml文件
# 读取出backbone模块的信息
#------------------------------#
def process_yaml(yaml_path = './nets/yolov5s.yaml'):
yaml_file = Path(yaml_path)
with open(yaml_file, 'r') as f:
yaml_dict = yaml.safe_load(f)
#提取出网络宽度与深度
gd = yaml_dict['depth_multiple']
gw = yaml_dict['width_multiple']
nc = yaml_dict['nc']
backbone_dict = yaml_dict['head']
filters = []
blocks = []
for n in backbone_dict:
if n[2] == 'C3':
blocks.append(n[1])
if not n[2] == 'Concat':
filters.append(n[3])
blocks = blocks[:]
for i, _ in enumerate(blocks):
blocks[i] = max(round(blocks[i] * gd), 1) if blocks[i] > 1 else blocks[i]
for i, _ in enumerate(filters):
if not isinstance(filters[i][0], str):
filters[i][0] = make_divisible(filters[i][0] * gw, 8)
return nc, blocks, filters
class YoloBody(nn.Module):
def __init__(self, cfg='yolov5s.yaml', image_channels=3, nc=None, anchors=None): # model, input channels, number of classes
super().__init__()
#---------------------------------------------------#
# 生成CSPdarknet53的主干模型
# 以输入为608x608的图片为例
# 获得三个有效特征层,他们的shape分别是:
# 76,76,128
# 38,38,256
# 19,19,512
#---------------------------------------------------#
self.backbone = preprocess_backbone(None)
self.in_channel = 512
self.nc, self.number_blocks, self.filters_info = process_yaml()
self.result_channels = 3 * (self.nc + 5)
#---------------------------------------------------#
# 第一个head特征融合处理
#---------------------------------------------------#
#filter:256x512x1x1,stride = 1
self.conv1 = CBL(self.in_channel, self.filters_info[0][0], self.filters_info[0][1], self.filters_info[0][2])
self.upsample1 = nn.Upsample(scale_factor=self.filters_info[1][1], mode=self.filters_info[1][2])
#self.cat拼接
#第一个csp2是在拼接后,所以输入通道维度翻倍了
self.block1 = CSP2(self.filters_info[0][0]*2, self.filters_info[2][0], self.number_blocks[0])
self.conv2 = CBL(self.filters_info[2][0], self.filters_info[3][0], self.filters_info[3][1], self.filters_info[3][2])
self.upsample2 = nn.Upsample(scale_factor=self.filters_info[4][1], mode=self.filters_info[4][2])
#self.cat拼接
self.block2 = CSP2(self.filters_info[3][0]*2, self.filters_info[5][0], self.number_blocks[1])
self.single_conv1 = nn.Conv2d(self.filters_info[5][0], self.result_channels, 1, 1)
#---------------------------------------------------#
# 第二个head特征融合处理
#---------------------------------------------------#
self.conv3 = CBL(self.filters_info[5][0], self.filters_info[6][0], self.filters_info[6][1], self.filters_info[6][2])
#self.cat拼接
self.block3 = CSP2(self.filters_info[6][0]*2, self.filters_info[7][0], self.filters_info[7][1], self.number_blocks[2])
self.single_conv2 = nn.Conv2d(self.filters_info[7][0], self.result_channels, 1, 1)
#---------------------------------------------------#
# 第三个head特征融合处理
#---------------------------------------------------#
self.conv4 = CBL(self.filters_info[7][0], self.filters_info[8][0], self.filters_info[8][1], self.filters_info[8][2])
#self.cat拼接
self.block4 = CSP2(self.filters_info[8][0]*2, self.filters_info[9][0], self.filters_info[9][1], self.number_blocks[3])
self.single_conv3 = nn.Conv2d(self.filters_info[9][0], self.result_channels, 1, 1)
def forward(self, x):
#backbone, 从2到0特征图尺寸依次从大到小
x2, x1, x0 = self.backbone(x)
#第一个76x76特征图融合处理
y0 = self.conv1(x0) #待融合
x0 = torch.cat([self.upsample1(y0), x1], 1)
y1 = self.conv2(self.block1(x0)) #待融合
out0 = self.block2(torch.cat([self.upsample2(y1), x2], 1))
#第二个38x38特征图融合处理
out1 = self.block3(torch.cat([self.conv3(out0), y1], 1))
#第三个19x19特征图融合处理
out2 = self.block4(torch.cat([self.conv4(out1), y0], 1))
#经过head部分装换结果输出通道维度
out0 = self.single_conv1(out0)
out1 = self.single_conv2(out1)
out2 = self.single_conv3(out2)
#输出特征图尺寸从小到大排列,由19x19到76x76
return out2, out1, out0
if __name__ == "__main__":
model = YoloBody()
x = torch.rand(1, 3, 608, 608)
#torch.unsqueeze(x, )
out1, out2, out3 = model(x)
print(out1)
#print(out1.shape)
| [
"tz2062750487@163.com"
] | tz2062750487@163.com |
dc74e72fedacff9e9cbca0a0d2f5af2dcede9eb3 | d1205c39ad5febd5f73526cb6eda5fd413e998a3 | /ambra_sdk/storage/request.py | e1ac6c7927dbe26c84ab730fc3b9daedbefa09a7 | [
"Apache-2.0"
] | permissive | ppolk-nocimed/sdk-python | 77364972c5559a43787f81373327b86204f9ec39 | 165a8049bbc38a201ef27f60a8ba1b980c1c9a64 | refs/heads/master | 2022-11-22T23:48:22.444967 | 2020-07-17T16:27:05 | 2020-07-17T16:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | from enum import Enum
from io import BufferedReader
from typing import TYPE_CHECKING, Any, Dict, Mapping, NamedTuple, Optional
from requests import Response
from ambra_sdk.exceptions.storage import AmbraResponseException
from ambra_sdk.storage.response import check_response
if TYPE_CHECKING:
from ambra_sdk.storage.storage import Storage # NOQA:WPS433
class StorageMethod(Enum):
"""Storage methods."""
get = 'GET'
post = 'POST'
delete = 'DELETE'
class PreparedRequest(NamedTuple):
"""Prepared request."""
# This some sort of private field.
# User should not have dicect access to this field
# But we can not use _name in NamedTuple attributes
storage_: 'Storage' # NOQA WPS1120
url: str
method: StorageMethod
# Mapping type is covariant is covariant type
errors_mapping: Optional[Mapping[int, AmbraResponseException]] = None
params: Optional[Dict[str, Any]] = None # NOQA:WPS110
files: Optional[Dict[str, BufferedReader]] = None
headers: Optional[Dict[str, str]] = None
data: Optional[Any] = None # NOQA:WPS110
stream: Optional[bool] = None
def execute(self) -> Response:
"""Execute prepared request.
If sid problems we try to get new sid
and retry request.
:return: response object
"""
response: Response = self.storage_.retry_with_new_sid(
self.execute_once,
)
return response # NOQA:WPS331
def execute_once(self) -> Response:
"""Execute prepared request.
:return: response object
:raises RuntimeError: Unknown request method
"""
request_kwargs: Dict[str, Any] = {}
if self.params is not None:
request_kwargs['params'] = self.params
if self.data is not None:
request_kwargs['data'] = self.data
if self.headers is not None:
request_kwargs['headers'] = self.headers
if self.files is not None:
request_kwargs['files'] = self.files
if self.stream is not None:
request_kwargs['stream'] = self.stream
if self.method == StorageMethod.get:
response = self.storage_.get(self.url, **request_kwargs)
elif self.method == StorageMethod.post:
response = self.storage_.post(self.url, **request_kwargs)
elif self.method == StorageMethod.delete:
response = self.storage_.delete(self.url, **request_kwargs)
else:
raise RuntimeError(
'Unknown storage request method: {method}'.format(
method=self.method,
),
)
return check_response(
response,
self.url,
errors_mapping=self.errors_mapping,
)
| [
"akapustin@ambrahealth.com"
] | akapustin@ambrahealth.com |
47768f668c9203a6d7ed852b25fa14292f1fc879 | 64b6015e35bd45df2f25ba04bf68a3dc6905e841 | /User_App/migrations/0001_initial.py | 67d0ba5a7c289a319c796789b71d03a7b6e840c0 | [] | no_license | masudurHimel/Django_Reboot | 10904c14f001c90ef971340fcccfb5007846fc59 | 2de54484397eecd14f2a7425636beb17fd5cc12d | refs/heads/master | 2022-07-02T22:47:04.133111 | 2020-05-06T20:46:29 | 2020-05-06T20:46:29 | 257,648,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | # Generated by Django 3.0.3 on 2020-05-05 19:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfileInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('portfolio', models.URLField(blank=True)),
('profile_pic', models.ImageField(blank=True, upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"masudurhimel@gmail.com"
] | masudurhimel@gmail.com |
3c2c3c043bbe9bcb82f9b7622ae402f18bd8c648 | cc5a3fa80d2ae90afc2626e4a82b9a927726dfa0 | /huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_sku_inventories_response.py | 124ef51c81c9c4847ae336042351acec75c390e9 | [
"Apache-2.0"
] | permissive | Logan118/huaweicloud-sdk-python-v3 | eca15e9b08bdccef7122e40735d444ddc958efa8 | bb230c03bd00225b9f5780a56adce596e9456420 | refs/heads/master | 2023-07-17T14:57:50.799564 | 2021-08-25T10:40:43 | 2021-08-25T10:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,420 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListSkuInventoriesResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'sku_inventories': 'list[SkuInventory]'
}
attribute_map = {
'sku_inventories': 'sku_inventories'
}
def __init__(self, sku_inventories=None):
"""ListSkuInventoriesResponse - a model defined in huaweicloud sdk"""
super(ListSkuInventoriesResponse, self).__init__()
self._sku_inventories = None
self.discriminator = None
if sku_inventories is not None:
self.sku_inventories = sku_inventories
@property
def sku_inventories(self):
"""Gets the sku_inventories of this ListSkuInventoriesResponse.
库存的查询结果详情,具体参见表2。
:return: The sku_inventories of this ListSkuInventoriesResponse.
:rtype: list[SkuInventory]
"""
return self._sku_inventories
@sku_inventories.setter
def sku_inventories(self, sku_inventories):
"""Sets the sku_inventories of this ListSkuInventoriesResponse.
库存的查询结果详情,具体参见表2。
:param sku_inventories: The sku_inventories of this ListSkuInventoriesResponse.
:type: list[SkuInventory]
"""
self._sku_inventories = sku_inventories
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListSkuInventoriesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
b73808b7ece173bea9a10ffa904af73e32d72221 | bf15a97a377bc49495a8c278cd247387a08361fd | /intersight/models/vnic_fc_adapter_policy_ref.py | e5f956c79f737fa320ee12693e463e1fa5431998 | [
"Apache-2.0"
] | permissive | movinalot/intersight-python | ffcb434e5fdf3f6e857dd967c794a64b2d2e05de | cdc3b082d75eac93b74029ab610e16d3008fdd8c | refs/heads/master | 2020-12-18T15:46:06.780834 | 2019-10-29T00:39:49 | 2019-10-29T00:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-961
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VnicFcAdapterPolicyRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str',
'selector': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType',
'selector': 'Selector'
}
def __init__(self, moid=None, object_type=None, selector=None):
"""
VnicFcAdapterPolicyRef - a model defined in Swagger
"""
self._moid = None
self._object_type = None
self._selector = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if selector is not None:
self.selector = selector
@property
def moid(self):
"""
Gets the moid of this VnicFcAdapterPolicyRef.
The Moid of the referenced REST resource.
:return: The moid of this VnicFcAdapterPolicyRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this VnicFcAdapterPolicyRef.
The Moid of the referenced REST resource.
:param moid: The moid of this VnicFcAdapterPolicyRef.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this VnicFcAdapterPolicyRef.
The Object Type of the referenced REST resource.
:return: The object_type of this VnicFcAdapterPolicyRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this VnicFcAdapterPolicyRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this VnicFcAdapterPolicyRef.
:type: str
"""
self._object_type = object_type
@property
def selector(self):
"""
Gets the selector of this VnicFcAdapterPolicyRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this VnicFcAdapterPolicyRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this VnicFcAdapterPolicyRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this VnicFcAdapterPolicyRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VnicFcAdapterPolicyRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"ategaw@cisco.com"
] | ategaw@cisco.com |
163b3454ba091eda53c8e7b350d2baac391e73ea | e8dd7dfabac2031d42608920de0aeadc1be8e998 | /thesis/Chapter4/Python/time_distance_plots.py | 0326f20bcb84ee5b7568f7808f713602ed90a9e4 | [
"MIT",
"CC-BY-4.0",
"BSD-2-Clause"
] | permissive | Cadair/Thesis | e7189dfdf74edf3ea2565ee0ec3bdc0777ad99b2 | 792ab1e8cf37af7b9ee52de3566faa928e580500 | refs/heads/master | 2022-05-03T18:29:43.366175 | 2017-01-06T13:37:54 | 2017-01-06T13:37:54 | 21,343,957 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,588 | py | # coding: utf-8
from __future__ import print_function
import os
from functools import partial
import numpy as np
from scipy.interpolate import interp1d
def get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac):
if exp_fac is not None:
data_dir = os.path.join(base_path, '%s/%s_%s_%s_%s/'%(driver, period, post_amp, tube_r, exp_fac))
else:
data_dir = os.path.join(base_path, '%s/%s_%s_%s/'%(driver, period, post_amp, tube_r))
return data_dir
def get_xy(base_path, driver, period, post_amp, tube_r, exp_fac):
data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
height_Mm = np.load(os.path.join(base_path, "heightMM.npy"))
all_times = np.load(os.path.join(data_dir, ("LineVar_%s_%s_%s_times.npy"%(driver, period, post_amp))))[:,0]
all_spoints = np.load(os.path.join(data_dir, "LineVar_%s_%s_%s_points.npy"%(driver,period,post_amp)))[:,::-1,:]
f = interp1d(np.linspace(0,128,128), height_Mm)
y = f(all_spoints[0,:,2])
return all_times, y, all_spoints
def get_data(base_path, driver, period, post_amp, tube_r, exp_fac):
data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
path_join = partial(os.path.join, data_dir)
all_svphi = np.load(path_join("LineVar_%s_%s_%s_vphi.npy"%(driver,period,post_amp))).T
all_svperp = np.load(path_join("LineVar_%s_%s_%s_vperp.npy"%(driver,period,post_amp))).T
all_svpar = np.load(path_join("LineVar_%s_%s_%s_vpar.npy"%(driver,period,post_amp))).T
beta_line = np.load(path_join("LineFlux_%s_%s_%s_beta.npy"%(driver,period,post_amp))).T
if post_amp in ['A02k', 'A10']:
data = [all_svpar*1e3, all_svperp*1e3, all_svphi*1e3]
else:
data = [all_svpar, all_svperp, all_svphi]
return data, beta_line
def get_speeds(base_path, driver, period, post_amp, tube_r, exp_fac):
data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
path_join = partial(os.path.join, data_dir)
cs_line = np.load(path_join("LineFlux_%s_%s_%s_cs.npy"%(driver,period,post_amp))).T
va_line = np.load(path_join("LineFlux_%s_%s_%s_va.npy"%(driver,period,post_amp))).T
return cs_line, va_line
def get_flux(base_path, driver, period, post_amp, tube_r, exp_fac):
data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
path_join = partial(os.path.join, data_dir)
if exp_fac:
identifier = "%s_%s_%s_%s_%s"%(driver, period, post_amp, tube_r, exp_fac)
else:
identifier = "%s_%s_%s_%s"%(driver, period, post_amp, tube_r)
Fpar_line = np.load(path_join("LineVar_{}_Fwpar.npy".format(identifier))).T
Fperp_line = np.load(path_join("LineVar_{}_Fwperp.npy".format(identifier))).T
Fphi_line = np.load(path_join("LineVar_{}_Fwphi.npy".format(identifier))).T
Ftotal = np.sqrt(Fpar_line**2 + Fperp_line**2 + Fphi_line**2)
Fpar_percent = (Fpar_line / Ftotal)
Fperp_percent = (Fperp_line / Ftotal)
Fphi_percent = (Fphi_line / Ftotal)
#Filter out the noisy flux values before the wave starts propagating.
filter_ftotal = (np.abs(Ftotal) <= 1e-5)
Fpar_percent[filter_ftotal.nonzero()] = np.nan
Fperp_percent[filter_ftotal.nonzero()] = np.nan
Fphi_percent[filter_ftotal.nonzero()] = np.nan
ParAvg = np.mean(Fpar_percent[np.isfinite(Fpar_percent)])
PerpAvg = np.mean(Fperp_percent[np.isfinite(Fperp_percent)])
PhiAvg = np.mean(Fphi_percent[np.isfinite(Fphi_percent)])
beta_line = np.load(path_join("LineFlux_%s_%s_%s_beta.npy"%(driver,period,post_amp))).T
return [Fpar_percent, Fperp_percent, Fphi_percent], beta_line, [ParAvg, PerpAvg, PhiAvg]
def overplot_speeds(axes, y, va_line, cs_line):
delta_x = np.zeros(y.shape)
delta_x[1:] = y[1:] - y[:-1]
delta_t_va = delta_x*1e6 / va_line[:,0]
delta_t_cs = delta_x*1e6 / cs_line[:,0]
delta_t_vf = delta_x*1e6 / np.sqrt(cs_line[:,0]**2 + va_line[:,0]**2)
delta_t_vs = delta_x*1e6 / np.sqrt(cs_line[:,0]**-2 + va_line[:,0]**-2)**-1
ti = 60
t_va = np.cumsum(delta_t_va) + ti
t_cs = np.cumsum(delta_t_cs) + ti
t_vf = np.cumsum(delta_t_vf) + ti
t_vs = np.cumsum(delta_t_vs) + ti
for i in range(0,3):
axes[i].plot(t_va, y, label=r"$V_A$", linewidth=2, linestyle=':', color='k')#b
axes[i].plot(t_cs, y, label=r"$C_s$", linewidth=2, linestyle='--', color='k')#g
axes[i].plot(t_vf, y, label=r"$V_f$", linewidth=2, linestyle='-.', color='k')#r
axes[i].plot(t_vs, y, label=r"$V_s$", linewidth=2, linestyle='-', color='k')#c
| [
"stuart@cadair.com"
] | stuart@cadair.com |
4f8ac3edfd7e3d403c3648bd85382171104548c4 | 1325ecde27307dce9fe6edce88c0249d5c49ae60 | /day11/part2.py | 6fcbdfc51aa26a08e16ea53ce361bbca0406ec05 | [
"Apache-2.0"
] | permissive | jonasmue/adventofcode20 | 24208e7b9f2fb161d3fabb2bfd9b00f11b67840d | 437eb9ff82045ce825b68f1dcb3b79265723bba4 | refs/heads/main | 2023-02-04T17:43:40.082534 | 2020-12-26T08:51:54 | 2020-12-26T08:51:54 | 317,487,937 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from common import SeatingSystem
if __name__ == "__main__":
# O(s*n^2) time and O(n) space
# with s: number of steps until convergence, n: number of points in grid
# Time complexity could be optimized by saving and reusing neighbors of each point
seating_system = SeatingSystem(5, False)
seating_system.run()
print(seating_system.occupied)
| [
"jonas.mueller@compitencies.com"
] | jonas.mueller@compitencies.com |
d70f750865ab7e20438deebfe95e9082fb4bc964 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1106+083/sdB_pg_1106+083_lc.py | ecb0d839b701604589194e291b9d3c266426ea9b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[167.1825,8.030292], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1106+083/sdB_pg_1106+083_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
129d670a19e33414131c1441c3be5449062746fd | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd2009/py01/day05/stack2.py | d95af629d461f0dd46de18c3352ef00fc877d385 | [] | no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | stack = [] # 使用列表模拟栈
def push_it():
"用于压栈"
data = input("数据: ").strip()
if data: # 如果data非空
stack.append(data)
else:
print("未获取到数据。")
def pop_it():
"用于出栈"
if stack: # 如果stack非空
print("从栈中弹出: \033[31;1m%s\033[0m" % stack.pop())
else:
print("\033[31;1m栈已经是空的\033[0m")
def view_it():
"查询"
print('\033[32;1m%s\033[0m' % stack)
def show_menu():
"用于显示菜单,实现代码逻辑"
# 将函数存入字典
funcs = {'0': push_it, '1': pop_it, '2': view_it}
prompt = """(0) 压栈
(1) 出栈
(2) 查询
(3) 退出
请选择(0/1/2/3): """
while 1:
choice = input(prompt).strip() # 去除用户输入的两端空白字符
if choice not in ['0', '1', '2', '3']:
print("无效的输入,请重试。")
continue
if choice == '3':
print('Bye-bye')
break
funcs[choice]() # 在字典中取出函数并调用
if __name__ == '__main__':
show_menu()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
b5989f1e513e71181fa457b7defad96e57e6e4a9 | bf2d010229aece071359662f4fef44e48ba57951 | /fitness-combined | 60c7f5034e05a8c045965320b191f404c78aa9d4 | [] | no_license | Osrip/CriticalEvolution | b97398f74e2fc5b54c9ab92765b08ce3bf97257e | f77cae8acc626cb4c6d64d5a44fdf00310309c2e | refs/heads/master | 2021-06-24T03:44:03.283017 | 2021-04-03T13:09:42 | 2021-04-03T13:09:42 | 215,332,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,350 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.lines import Line2D
from matplotlib.patches import Circle
import pickle
from os import makedirs, path
import os
'''
loadfiles = ['beta_experiment/beta-0-1/sim-20180512-105719',
'beta_experiment/beta-1/sim-20180511-163319',
'beta_experiment/beta-10/sim-20180512-105824']
'''
loadfiles = ['sim-20191114-000009_server']
# os.chdir('D:\Masterarbeit_ausgelagert')
energy_model = True
numAgents = 150
autoLoad = True
saveFigBool = True
fixGen2000 = False
# loadfiles = ['beta_experiment/beta-0-1/sim-20180512-105719',
# 'beta_experiment/beta-0-1/sim-20180512-105725',
# 'beta_experiment/beta-1/sim-20180511-163319',
# 'beta_experiment/beta-1/sim-20180511-163335',
# 'beta_experiment/beta-1/sim-20180511-163347',
# 'beta_experiment/beta-1/sim-20180511-163357',
# 'beta_experiment/beta-10/sim-20180512-105824',
# 'beta_experiment/beta-10/sim-20180512-105819']
# IC = [0, 0, 1, 1, 1, 1, 2, 2]
new_order = [2, 0, 1]
labels = [r'$\beta_i = 0.1$', r'$\beta_i = 1$', r'$\_i = 10$']
iter_list = np.arange(0, 2000, 1)
cmap = plt.get_cmap('seismic')
norm = colors.Normalize(vmin=0, vmax=len(loadfiles)) # age/color mapping
# norm = [[194, 48, 32, 255],
# [146, 49, 182, 255],
# [44, 112, 147, 255]
# ]
# norm = np.divide(norm, 255)
a = 0.15 # alpha
def upper_tri_masking(A):
m = A.shape[0]
r = np.arange(m)
mask = r[:, None] < r
return A[mask]
def fitness(loadfile, iter_list, numAgents, autoLoad, saveFigBool):
folder = 'save/' + loadfile
folder2 = folder + '/figs/fitness/'
fname2 = folder2 + 'fitness-' + \
str(iter_list[0]) + '-' + str(iter_list[1] - iter_list[0]) + '-' + str(iter_list[-1]) + \
'.npz'
if path.isfile(fname2) and autoLoad:
txt = 'Loading: ' + fname2
print(txt)
data = np.load(fname2)
FOOD = data['FOOD']
else:
FOOD = np.zeros((len(iter_list), numAgents))
for ii, iter in enumerate(iter_list):
filename = 'save/' + loadfile + '/isings/gen[' + str(iter) + ']-isings.pickle'
startstr = 'Loading simulation:' + filename
print(startstr)
try:
isings = pickle.load(open(filename, 'rb'))
except Exception:
print("Error while loading %s. Skipped file" % filename)
#Leads to the previous datapoint being drawn twice!!
food = []
for i, I in enumerate(isings):
if energy_model:
food.append(I.energy)
else:
food.append(I.fitness)
# food = np.divide(food, 6)
FOOD[ii, :] = food
if not path.exists(folder2):
makedirs(folder2)
np.savez(fname2, FOOD=FOOD)
return FOOD
FOODS = []
for loadfile in loadfiles:
f = fitness(loadfile, iter_list, numAgents, autoLoad, saveFigBool)
# FIX THE DOUBLE COUNTING PROBLEM
if f.shape[0] > 2000 and fixGen2000:
print('Fixing Double Counting at Gen 2000')
f[2000, :] = f[2000, :] - f[1999, :]
FOODS.append(f)
# FIX THE DOUBLE COUNTING OF THE FITNESS
plt.rc('text', usetex=True)
font = {'family': 'serif', 'size': 28, 'serif': ['computer modern roman']}
plt.rc('font', **font)
plt.rc('legend', **{'fontsize': 20})
fig, ax = plt.subplots(1, 1, figsize=(19, 10))
fig.text(0.51, 0.035, r'$Generation$', ha='center', fontsize=20)
# fig.text(0.07, 0.5, r'$Avg. Food Consumed$', va='center', rotation='vertical', fontsize=20)
fig.text(0.07, 0.5, r'$Food Consumed$', va='center', rotation='vertical', fontsize=20)
title = 'Food consumed per organism'
fig.suptitle(title)
for i, FOOD in enumerate(FOODS):
# for i in range(0, numAgents):
# ax.scatter(iter_list, FOOD[:, i], color=[0, 0, 0], alpha=0.2, s=30)
c = cmap(norm(new_order[i]))
# c = norm[i]
# c = norm[IC[i]]
muF = np.mean(FOOD, axis=1)
ax.plot(iter_list, muF, color=c, label=labels[new_order[i]])
# for numOrg in range(FOOD.shape[1]):
# ax.scatter(iter_list, FOOD[:, numOrg],
# alpha=0.01, s=8, color=c, label=labels[new_order[i]])
# maxF = np.max(FOOD, axis=1)
# minF = np.min(FOOD, axis=1)
# ax.fill_between(iter_list, maxF, minF,
# color=np.divide(c, 2), alpha=a)
sigmaF = FOOD.std(axis=1)
ax.fill_between(iter_list, muF + sigmaF, muF - sigmaF,
color=c, alpha=a
)
custom_legend = [Line2D([0], [0], marker='o', color='w',
markerfacecolor=cmap(norm(1)), markersize=15),
Line2D([0], [0], marker='o', color='w',
markerfacecolor=cmap(norm(0)), markersize=15),
Line2D([0], [0], marker='o', color='w',
markerfacecolor=cmap(norm(2)), markersize=15),]
# custom_legend = [Circle((0, 0), 0.001,
# facecolor=cmap(norm(1))),
# Circle((0, 0), 1,
# facecolor=cmap(norm(0))),
# Circle((0, 0), 1,
# facecolor=cmap(norm(2)))]
ax.legend(custom_legend, [r'$\beta = 10$', r'$\beta = 1$', r'$\beta = 0.1$'], loc='upper left')
# plt.legend(loc=2)
# yticks = np.arange(0, 150, 20)
# ax.set_yticks(yticks)
# xticks = [0.1, 0.5, 1, 2, 4, 10, 50, 100, 200, 500, 1000, 2000]
# ax.set_xscale("log", nonposx='clip')
# ax.set_xticks(xticks)
# ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
folder = 'save/' + loadfile
savefolder = folder + '/figs/fitness_combined/'
savefilename = savefolder + 'fitness_gen_' + str(iter_list[0]) + '-' + str(iter_list[-1]) + '.png'
if not path.exists(savefolder):
makedirs(savefolder)
if saveFigBool:
plt.savefig(savefilename, bbox_inches='tight', dpi=150)
# plt.close()
savemsg = 'Saving ' + savefilename
print(savemsg)
# if saveFigBool:
# savefolder = folder + '/figs/fitness/'
# savefilename = savefolder + 'fitness_gen_' + str(iter_list[0]) + '-' + str(iter_list[-1]) + '.png'
# plt.savefig(bbox_inches = 'tight', dpi = 300)
plt.show()
| [
"jan.prosi@hotmail.com"
] | jan.prosi@hotmail.com | |
93323b86ef220a5a72384a8c2174cc533f7e5ac5 | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/project/settings_20210405152615.py | 26c367c5f973efc1f18418c5f5bb4e41da83fb2b | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,682 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nyg(3!o7_eqr1fk-hb(xfnvj3)ay^zvhiz6o_d029p9$0cr6h^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'student',
'index',
'authentication',
'exams',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'project',
'USER': 'postgres',
'PASSWORD': '12345',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / 'static'
]
STATIC_ROOT = BASE_DIR / 'assets'
MEDIA_URL = '/media/'
# Bug fix: this assignment previously reused the name STATIC_ROOT, which both
# clobbered the 'assets' STATIC_ROOT defined above and left MEDIA_ROOT
# undefined, so uploaded media had no directory of its own.
MEDIA_ROOT = BASE_DIR / 'media'
#alert messages are here
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
} | [
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
01f79e945aa8ede3d9d38abea3cc7b1fb8882ac5 | 0c8214d0d7827a42225b629b7ebcb5d2b57904b0 | /examples/fileio/E002_Packages/main.py | 19e09b50a6ebd79e9cb6f9d8ecd726852d7fd3a3 | [] | no_license | mertturkmenoglu/python-examples | 831b54314410762c73fe2b9e77aee76fe32e24da | 394072e1ca3e62b882d0d793394c135e9eb7a56e | refs/heads/master | 2020-05-04T15:42:03.816771 | 2020-01-06T19:37:05 | 2020-01-06T19:37:05 | 179,252,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | # Example 022: Package example
from Calculator.Math import add
import Calculator.Test.testFile
import Calculator.UI.button as button
# NOTE(review): importing Calculator.Test.testFile above also binds the
# top-level ``Calculator`` package name, which is why the bare
# ``Calculator.*`` attribute chains below resolve at runtime.
# Each print_info() presumably reports which package/module defines it.
Calculator.print_info()
add.print_info()
Calculator.Test.print_info()
Calculator.Test.testFile.print_info()
button.print_info()
Calculator.UI.print_info()
| [
"mertturkmenoglu99@gmail.com"
] | mertturkmenoglu99@gmail.com |
cded64f961102cd11ec1e54dc84e92ce55d24117 | 8cccdb1ca93d1b7ed690eb096522262523948a72 | /accounting/apps/books/templatetags/nav.py | 689c89c26b6fdc2af3ddbe4cf8790bda85d185e3 | [
"MIT"
] | permissive | newhub-spec/django-accounting | 6b886378219dbed1d4f2ee34e71be198a7b28eb5 | 12b01a944d368ce717b57957d26d7aa4ecd04285 | refs/heads/master | 2020-08-21T08:06:41.318805 | 2019-10-19T06:33:41 | 2019-10-19T06:33:41 | 216,116,659 | 0 | 0 | MIT | 2019-10-18T22:35:01 | 2019-10-18T22:35:01 | null | UTF-8 | Python | false | false | 438 | py | # encoding: utf-8
import re
from django import template
register = template.Library()
@register.simple_tag
def active(request, pattern, exact_match=False):
    """Return 'active' when request.path matches *pattern*, else ''.

    With exact_match=True the pattern is anchored at both ends (unless it
    already carries '^' / '$'), so the whole path has to match.
    """
    if exact_match:
        head = '' if pattern.startswith('^') else '^'
        tail = '' if pattern.endswith('$') else '$'
        pattern = head + pattern + tail
    if hasattr(request, 'path'):
        if re.search(pattern, request.path):
            return 'active'
    return ''
| [
"dulacpier@gmail.com"
] | dulacpier@gmail.com |
24274aec2247b51c41ce5a5a4b1cd459f9cc1319 | 34799a9e04b8e22c9d364d1f5dcaea05ea204fcb | /test-suite/tests/__init__.py | f1a56342ffdb3195695527f95065e03ae96c467d | [
"Apache-2.0"
] | permissive | ken-ebert/indy-agent | 90fa0da1f3cf0203ef0d88278c8f97753afb7ad9 | ecd8a2cc927d762acf4c4263cc6d3fc115188c7e | refs/heads/master | 2020-04-01T15:42:59.523802 | 2018-10-17T23:24:18 | 2018-10-17T23:31:38 | 153,348,553 | 0 | 0 | Apache-2.0 | 2018-10-16T20:17:39 | 2018-10-16T20:17:39 | null | UTF-8 | Python | false | false | 695 | py | """ Module containing Agent Test Suite Tests.
"""
import asyncio
import pytest
from typing import Callable
from transport import BaseTransport
async def expect_message(transport: BaseTransport, timeout: int):
    """Await the next message from *transport*, failing the test on timeout.

    Races transport.recv() against asyncio.sleep(timeout); whichever task
    finishes first wins.  On timeout the still-pending task(s) are cancelled
    and the test is failed via pytest.fail().
    """
    get_message_task = asyncio.ensure_future(transport.recv())
    sleep_task = asyncio.ensure_future(asyncio.sleep(timeout))
    finished, unfinished = await asyncio.wait(
        [
            get_message_task,
            sleep_task
        ],
        return_when=asyncio.FIRST_COMPLETED
    )
    if get_message_task in finished:
        return get_message_task.result()
    # Timed out: cancel whatever is still pending (the recv task) before failing.
    for task in unfinished:
        task.cancel()
    pytest.fail("No message received before timing out")
| [
"daniel.bluhm@sovrin.org"
] | daniel.bluhm@sovrin.org |
18d5543138466a911998784eecb91c9d030002f5 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /important_man/company/seem_company_up_thing.py | 78bc48efc26459683f86fab76e7a748d4698894e | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py |
#! /usr/bin/env python
def other_way(str_arg):
    """Print *str_arg* (via the helper below) followed by a fixed message."""
    think_able_day_about_long_woman(str_arg)
    print('bad_work_and_long_time')
def think_able_day_about_long_woman(str_arg):
    """Print the given string.  (Names look auto-generated; kept as-is.)"""
    print(str_arg)
if __name__ == '__main__':
    other_way('next_week')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
1c5c34211e4bc00f464ac84a0e9215cdd9aa535b | 3c2cc8910c4a333a44d2d7b22489ef8d5ddb6a13 | /src/zvt/factors/technical_factor.py | d0cc199697d42ffd729a0796da9dfaf84c4a99b8 | [
"MIT"
] | permissive | zvtvz/zvt | 6341dc765177b1e99727207f1608b730cbbb705a | 03aee869fd432bb933d59ba419401cfc11501392 | refs/heads/master | 2023-08-28T10:05:29.185590 | 2023-08-01T10:19:03 | 2023-08-01T10:19:03 | 179,451,497 | 2,782 | 922 | MIT | 2023-04-04T09:31:03 | 2019-04-04T08:06:57 | Python | UTF-8 | Python | false | false | 3,256 | py | from typing import List, Union, Type, Optional
import pandas as pd
from zvt.api.kdata import get_kdata_schema, default_adjust_type
from zvt.contract import IntervalLevel, TradableEntity, AdjustType
from zvt.contract.factor import Factor, Transformer, Accumulator, FactorMeta
from zvt.domain import Stock
class TechnicalFactor(Factor, metaclass=FactorMeta):
    """Factor computed from kdata (OHLCV) records of a tradable entity.

    Thin specialization of :class:`Factor` that selects the right kdata
    schema for the entity type / interval level / adjust type, and supplies
    the usual OHLCV + turnover columns by default.
    """
    def __init__(
        self,
        entity_schema: Type[TradableEntity] = Stock,
        provider: str = None,
        entity_provider: str = None,
        entity_ids: List[str] = None,
        exchanges: List[str] = None,
        codes: List[str] = None,
        start_timestamp: Union[str, pd.Timestamp] = None,
        end_timestamp: Union[str, pd.Timestamp] = None,
        columns: List = None,
        filters: List = None,
        order: object = None,
        limit: int = None,
        level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY,
        category_field: str = "entity_id",
        time_field: str = "timestamp",
        keep_window: int = None,
        keep_all_timestamp: bool = False,
        fill_method: str = "ffill",
        effective_number: int = None,
        transformer: Transformer = None,
        accumulator: Accumulator = None,
        need_persist: bool = False,
        only_compute_factor: bool = False,
        factor_name: str = None,
        clear_state: bool = False,
        only_load_factor: bool = False,
        adjust_type: Union[AdjustType, str] = None,
    ) -> None:
        """Resolve kdata-specific defaults, then delegate to Factor.__init__.

        Only ``columns``, ``adjust_type``, ``data_schema`` and ``factor_name``
        are handled here; every other parameter is forwarded unchanged.
        """
        # Default to the standard OHLCV + turnover column set.
        if columns is None:
            columns = [
                "id",
                "entity_id",
                "timestamp",
                "level",
                "open",
                "close",
                "high",
                "low",
                "volume",
                "turnover",
                "turnover_rate",
            ]
        # Stocks default to post-adjusted (hfq / houfuquan) prices.
        if not adjust_type:
            adjust_type = default_adjust_type(entity_type=entity_schema.__name__)
        self.adjust_type = adjust_type
        self.data_schema = get_kdata_schema(entity_schema.__name__, level=level, adjust_type=adjust_type)
        # Derive a name like "technicalfactor_1d" from the class and the level.
        if not factor_name:
            if type(level) == str:
                factor_name = f"{type(self).__name__.lower()}_{level}"
            else:
                factor_name = f"{type(self).__name__.lower()}_{level.value}"
        super().__init__(
            self.data_schema,
            entity_schema,
            provider,
            entity_provider,
            entity_ids,
            exchanges,
            codes,
            start_timestamp,
            end_timestamp,
            columns,
            filters,
            order,
            limit,
            level,
            category_field,
            time_field,
            keep_window,
            keep_all_timestamp,
            fill_method,
            effective_number,
            transformer,
            accumulator,
            need_persist,
            only_compute_factor,
            factor_name,
            clear_state,
            only_load_factor,
        )
    def drawer_sub_df_list(self) -> Optional[List[pd.DataFrame]]:
        """Extra sub-plot dataframes for the drawer: the volume series."""
        return [self.factor_df[["volume"]]]
# the __all__ is generated
__all__ = ["TechnicalFactor"]
| [
"5533061@qq.com"
] | 5533061@qq.com |
55b52b8988367a4f538993e31d1b68bc5744700d | 636fae2d4fa7108c3cc30d55c9feef6dfd334cd8 | /NestedSerializer/api/models.py | d8890ec995bbea37c679ff6e986db7c5e1f2e14e | [] | no_license | anupjungkarki/Django-RESTFramework | 4ee3f5a46a98854284b45545d751822e27a715e1 | dc774c234de09f7e26c7fd5b84ba9359ecc75861 | refs/heads/master | 2023-02-15T02:34:46.421764 | 2021-01-11T08:04:15 | 2021-01-11T08:04:15 | 327,276,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from django.db import models
# Create your models here.
class Singer(models.Model):
    """A singer/artist with a display name and gender."""
    name = models.CharField(max_length=100)  # display name
    gender = models.CharField(max_length=100)
    def __str__(self):
        # Admin/display representation.
        return self.name
class Track(models.Model):
    """A song track belonging to a Singer."""
    title = models.CharField(max_length=100)
    # related_name='song' exposes this track set as singer_instance.song
    singer = models.ForeignKey(Singer, on_delete=models.CASCADE, related_name='song')
    album = models.CharField(max_length=100)
    duration = models.IntegerField()  # NOTE(review): unit (seconds?) not stated — confirm
    def __str__(self):
        return self.title
| [
"anupkarki2012@gmail.com"
] | anupkarki2012@gmail.com |
c80f313a023ea0d6910870d6c5fbb40985a35628 | 00faae803cfa2e5c2f5e662f560eb61bd0074690 | /src/python2/request/item_attachment_request_builder.py | 998217a7f8acc20f524fc5a161c255abfc56cb1c | [
"MIT"
] | permissive | gojohnkevin/msgraph-sdk-python | 6dc00723489eddf013cff82d34d86d677b4d7ecf | 7714c11043e76e856876dd731c6c1df7b37cdbef | refs/heads/master | 2022-07-09T14:35:30.422531 | 2022-06-23T15:52:17 | 2022-06-23T15:52:17 | 85,160,734 | 0 | 1 | null | 2017-03-16T06:29:59 | 2017-03-16T06:29:59 | null | UTF-8 | Python | false | false | 3,157 | py | # -*- coding: utf-8 -*-
"""
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
"""
from __future__ import unicode_literals
from .item_attachment_request import ItemAttachmentRequest
from ..request_builder_base import RequestBuilderBase
from ..request import outlook_item_request_builder
class ItemAttachmentRequestBuilder(RequestBuilderBase):
    """Request builder for an ItemAttachment resource.

    NOTE: this module is generated (see file header) and will be overwritten;
    keep any edits documentation-only.
    """
    def __init__(self, request_url, client):
        """Initialize the ItemAttachmentRequestBuilder
        Args:
            request_url (str): The url to perform the ItemAttachmentRequest
                on
            client (:class:`GraphClient<microsoft.msgraph.request.graph_client.GraphClient>`):
                The client which will be used for the request
        """
        super(ItemAttachmentRequestBuilder, self).__init__(request_url, client)
    def request(self, expand=None, select=None, options=None):
        """Builds the ItemAttachmentRequest
        Args:
            expand (str): Default None, comma-separated list of relationships
                to expand in the response.
            select (str): Default None, comma-separated list of properties to
                include in the response.
            options (list of :class:`Option<microsoft.msgraph.options.Option>`):
                A list of options to pass into the request. Defaults to None.
        Returns:
            :class:`ItemAttachmentRequest<microsoft.msgraph.request.item_attachment_request.ItemAttachmentRequest>`:
                The ItemAttachmentRequest
        """
        req = ItemAttachmentRequest(self._request_url, self._client, options)
        req._set_query_options(expand=expand, select=select)
        return req
    def delete(self):
        """Deletes the specified ItemAttachment."""
        self.request().delete()
    def get(self):
        """Gets the specified ItemAttachment.
        Returns:
            :class:`ItemAttachment<microsoft.msgraph.model.item_attachment.ItemAttachment>`:
                The ItemAttachment.
        """
        return self.request().get()
    def update(self, item_attachment):
        """Updates the specified ItemAttachment.
        Args:
            item_attachment (:class:`ItemAttachment<microsoft.msgraph.model.item_attachment.ItemAttachment>`):
                The ItemAttachment to update.
        Returns:
            :class:`ItemAttachment<microsoft.msgraph.model.item_attachment.ItemAttachment>`:
                The updated ItemAttachment
        """
        return self.request().update(item_attachment)
    @property
    def item(self):
        """The item for the ItemAttachmentRequestBuilder
        Returns:
            :class:`OutlookItemRequestBuilder<microsoft.msgraph.request.outlook_item_request.OutlookItemRequestBuilder>`:
                A request builder created from the ItemAttachmentRequestBuilder
        """
        return outlook_item_request_builder.OutlookItemRequestBuilder(self.append_to_request_url("item"), self._client)
| [
"robert.anderson@microsoft.com"
] | robert.anderson@microsoft.com |
67ac08f194336d675fc9d64d633658f8fd86b045 | 8f6a9ff4c63fd24d145088077d5da1c3e4caaa3a | /code/fv_show.py | bec775f7e12c9d5a361048b6fb24b83528b2fb7c | [] | no_license | liaofuwei/pythoncoding | 6fd2afba0d27c4a4bbb4b2d321b3fa402a60d6fe | 966bd99459be933cf48287412a40e0c7a3d0b8e5 | refs/heads/master | 2021-07-15T10:34:57.701528 | 2017-10-10T05:27:13 | 2017-10-10T05:27:13 | 107,651,470 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import numpy as np
from matplotlib.pyplot import *
from pylab import *
# Future value of a lump sum compounding annually: FV = PV * (1 + r) ** t
pv=1000  # present value
r=0.08  # annual interest rate
t=linspace(0,10,10)  # 10 sample points over years 0..10 (from the star imports)
fv=pv*(1+r)**t
plot(t,fv)  # plot()/show() come from the matplotlib/pylab star imports above
show()
| [
"459193023@qq.com"
] | 459193023@qq.com |
a248fff8e70b831ad000f09123fad1ffa2eeeac6 | c1b8ff60ed4d8c70e703f71b7c96a649a75c0cec | /ostPython2/FTL_tester.py | ed22531b4ea6f2f193bd5c2a0186bb3d3106022b | [] | no_license | deepbsd/OST_Python | 836d4fae3d98661a60334f66af5ba3255a0cda5c | b32f83aa1b705a5ad384b73c618f04f7d2622753 | refs/heads/master | 2023-02-14T17:17:28.186060 | 2023-01-31T02:09:05 | 2023-01-31T02:09:05 | 49,534,454 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | #!/usr/local/bin/python3
#
# File Type Lister Tester (FTL_tester.py)
# (tests FileTypeLister.py module)
#
# by David S. Jackson
# for OST Python 2 on Jan 15, 2015
#
# Instructor Pat Barton
#
"""This program calls the unittest module and tests the accuracy
of the program called FileTypeLister.py. This module lists all
files in the designated directory (dirpath) by suffix and counts
the number of occurances for each file type according to suffix.
This unittest tests the accuracy of that program with some tests.
"""
import os
import glob
import random
import unittest
import tempfile
import FileTypeLister
suf = ['.py','.txt','.doc','.wp','.lts','.gpg','.jpg',\
'.gif','.html','.pl','.sh','.mp3','.mp4','.bin']
bname = ['one','two','three','four','five','six',\
'seven','eight','nine','ten','eleven','twelve']
class TestFTL(unittest.TestCase):
    """Tests the FileTypeLister.py program."""

    def setUp(self):
        """Create a temp dir full of files with all suffix/basename combos.

        The per-combination file count is randomized; the module-level
        globals ``file_count`` and ``dirname`` are set here so the test
        methods and tearDown() can see them.
        """
        global file_count
        file_count = random.randint(20, 50)
        global dirname
        dirname = tempfile.mkdtemp("tempdir")
        os.chdir(dirname)
        for filenum in range(0, file_count):
            for suf_idx in range(0, len(suf)):
                for bn_idx in range(0, len(bname)):
                    filename = bname[bn_idx] + str(filenum) + suf[suf_idx]
                    f = open(filename, 'w')
                    f.write("whatever whatever whatever\n")
                    f.close()

    def test_1(self):
        """Verify that total files listed in tempdir is correct
        and agrees with suf_dict
        """
        ftl_total = 0
        suf_dict = FileTypeLister.listFiles(dirname)
        for value in suf_dict.values():
            ftl_total = ftl_total + int(value)
        total_files = len(bname) * len(suf) * file_count
        self.assertEqual(ftl_total, total_files, "Doesn't list correct number of files.")

    def test_2(self):
        """Verify that suf_dict is accurate per suffix for files in tempdir."""
        suf_dict = FileTypeLister.listFiles(dirname)
        for suffix, value in suf_dict.items():
            self.assertEqual(suf_dict[suffix], len(bname) * file_count, "Totals wrong for {}".format(suf_dict[suffix]))

    def tearDown(self):
        """Remove the files and the temporary directory created by setUp()."""
        for fn in glob.glob(os.path.join(dirname, '*')):
            os.remove(fn)
        # Bug fix: the old code ran os.rmdir(dirname) while dirname was still
        # the current working directory; that fails on some platforms (e.g.
        # Windows) and the bare `except OSError: pass` silently leaked one
        # temp directory per test.  Step out of the directory first.
        os.chdir(tempfile.gettempdir())
        try:
            os.rmdir(dirname)
        except OSError:
            pass
if __name__ == "__main__":
unittest.main()
| [
"deepbsd@yahoo.com"
] | deepbsd@yahoo.com |
53735cbef4d7f81e2d0940a29b398aa16adfcbe7 | 11f51735176e90f522db8d8250fcac4e28d03367 | /python_basic/ds-10.py | 2ea8e0d6259f9855dacc0e5aa76b02dfacedd5b7 | [] | no_license | schw240/Fastcampus_secondtest | 6d253c9d8e360e0f7c8927c902747981f452bbb6 | d89dc6d7dd031266abc6c0b51e51761c940123a4 | refs/heads/main | 2023-03-21T04:21:21.515959 | 2021-03-09T12:46:07 | 2021-03-09T12:46:07 | 340,637,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | # 해쉬 테이블
# 키에 데이터를 저장하는 데이터 구조
# 파이썬은 딕셔너리가 해쉬로 구현되어있으므로 별도로 구현할 필요없음
# 간단하게 hash table 구현하기
hash_table = list([0 for i in range(10)])
#print(hash_table)
# 간단한 해쉬함수
# 가장 간단한 방식인 Division 방법(나누기를 통한 나머지 값을 사용하는 기법)
def hash_func(key):
return key % 5
# 해쉬 테이블에 저장해보기
# 데이터에 따라 필요시 key 생성 방법 정의
data1 = "Andy"
data2 = "Dave"
data3 = "Trump"
# ord(): 문자의 ASCII(아스키) 코드 리턴
print(ord(data1[0]), ord(data2[0]), ord(data3[0]))
print(ord(data1[0]), hash_func(ord(data1[0])))
# 해쉬 테이블에 값 저장 예
# data:value 와 같이 data와 value를 넣으면 해당 data에 대한 key를
# 찾아서 해당 key에 대응하는 해쉬 주소에 value를 저장하는 예
def storage_data(data, value):
key = ord(data[0])
hash_address = hash_func(key)
hash_table[hash_address] = value
# 실제 데이터를 저장하고 읽어보기
def get_data(data):
key = ord(data[0])
hash_address = hash_func(key)
return hash_table[hash_address]
| [
"schw240@gmail.com"
] | schw240@gmail.com |
112511cf83912aae4b88c376b5789ef936473501 | ae4d4087fd03511be038e4c2c8959a2f64f79198 | /doc/misc_plots/nonstationary_phase_plot.py | 2afaa337fe8f888d113810f179aa0a4eeb82ef33 | [] | no_license | jaidevd/pytftb | 88a6a829bd9bf83dc358604f9463189b75513542 | daa5a171ac0d53af0b81c1afd1267f8016bb8fc4 | refs/heads/master | 2021-07-16T18:50:44.302042 | 2021-06-28T06:26:03 | 2021-06-28T06:26:03 | 25,037,207 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from tftb.generators import fmlin, amgauss
import numpy as np
import matplotlib.pyplot as plt
# Linear-FM chirp modulated by a Gaussian envelope -> a non-stationary signal.
y_nonstat, _ = fmlin(2048) # Already analytic, no need of Hilbert transform
y_nonstat *= amgauss(2048)
# Trace the signal in the complex plane (real part vs. imaginary part).
plt.plot(np.real(y_nonstat), np.imag(y_nonstat))
plt.xlabel("Real part")
plt.ylabel("Imaginary part")
plt.show()
| [
"deshpande.jaidev@gmail.com"
] | deshpande.jaidev@gmail.com |
d2037a1236a4acc57abdf3ddd0792e134f3e7ba6 | 3ea1a45f61932ae0e8b504beb137be2a7c303d06 | /例子-0819-05.函数的返回值.py | 25edb9a4351c71de01443432766084d826f064f0 | [] | no_license | blackplusy/0810 | 8e650f7ce0e5d93b30e266bc7eef13491e588881 | e91290a1b0bea9966a85808935837f9af861f86f | refs/heads/master | 2022-12-14T11:12:27.585683 | 2020-09-05T08:29:49 | 2020-09-05T08:29:49 | 286,350,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | #coding=utf-8
#1.一个返回值
#定义sum函数,需要传入2个参数
def sum(a,b):
#业务逻辑,相加
jisuan=a+b
#返回计算结果
return jisuan
#通过变量接收函数操作后的结果,注意,一定要传入2个参数
a=sum(20,30)
print(a)
#2.多个返回值
def ret(a,b):
a*=10
b*=100
return a,b
num=ret(3,7)
print(num)
print(type(num))
num1,num2=ret(10,20)
print(num1,num2)
| [
"noreply@github.com"
] | blackplusy.noreply@github.com |
c7d15d2d77af850d7e49a03f2d46dbb68ed9f81a | fa8011b6942cac7b23d2dc781f7ae16d2cfab7f2 | /gravoicy/config/settings_local.py | 554622a9e704c6cbfa4d4071777e3fd73ca0964f | [] | no_license | indexofire/gravoicy | be99ae4995e81bf823841857742e60d689f20b1b | a516af5ec67e720ea2f1f695f28afadfb938cff4 | refs/heads/master | 2020-05-18T12:53:55.020633 | 2011-03-24T15:07:05 | 2011-03-24T15:07:05 | 948,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | # -*- coding: utf-8 -*-
import os
from settings import PROJECT_PATH
from config.settings_base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SECRET_KEY = '1F=(lta=1R9je3ze@g#fa^m#hJu^mv%@8+%fZ5p)*1$(*tvbh6'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '../../grav_data.db',
'OPTIONS': {
'timeout': 10,
}
}
}
FEINCMS_ADMIN_MEDIA = '/media/feincms/'
TIME_ZONE = 'Asia/Shanghai'
LANGUAGE_CODE = 'zh-cn'
INTERNAL_IPS = (
'127.0.0.1',
)
MIDDLEWARE_CLASSES += (
'pagination.middleware.PaginationMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
'forum.context_processors.page_size',
)
INSTALLED_APPS += (
'registration',
'base',
#'taggit',
#'voting',
#'blog',
'forum',
#'cms',
#'wiki',
'debug_toolbar',
#'redis_sessions',
'feincms',
'feincms.module.page',
'feincms.module.medialibrary',
'mptt',
'attachment',
#'simpleavatar',
'avatar',
#'userprofile',
'pagination',
'notification',
'content_ext.googlemap',
'account',
'registration',
'categories',
'editor',
)
FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS = True
#SESSION_ENGINE = 'utils.sessions.backends.redis'
FORUM_CTX_CONFIG = {
'FORUM_TITLE': 'HZCDCLabs Forum',
'FORUM_SUB_TITLE': '',
'FORUM_PAGE_SIZE': 50,
'TOPIC_PAGE_SIZE': 2,
}
SITE_NAME = 'HZCDC'
SITE_SUB_NAME = 'Labs'
MARKUP_CODE_HIGHTLIGHT = True
MARKITUP_JS_URL = '/media/markitup/sets/default/set.js'
| [
"indexofire@gmail.com"
] | indexofire@gmail.com |
5a9db80ab7d58772f4e96ff0ac9215bcd6c3eaf3 | fbe1718ba12e41d45524fa2966087d9f24ae18c3 | /pydal/parsers/sqlite.py | ea2331cc4617cd10dbe4fde350160cc02b923d21 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | michele-comitini/pydal | b5ab988c7ce1ea149128bc70d9f133700f7a0de3 | b6cd09a8bb1c3a27cdcd02297453f74d4380e79c | refs/heads/master | 2020-12-25T05:07:52.401623 | 2016-05-26T02:17:01 | 2016-05-26T02:17:01 | 30,265,122 | 0 | 0 | null | 2015-02-03T21:19:56 | 2015-02-03T21:19:55 | Python | UTF-8 | Python | false | false | 531 | py | from decimal import Decimal
from ..adapters.sqlite import SQLite
from .base import ListsParser, TimeParser, JSONParser
from . import parsers, for_type, before_parse
@parsers.register_for(SQLite)
class SQLiteParser(ListsParser, TimeParser, JSONParser):
    """Row-value parser for the SQLite adapter (registered via decorator)."""
    @before_parse('decimal')
    def decimal_extras(self, field_type):
        # field_type looks like "decimal(<precision>,<scale>)": slice off the
        # 8-char "decimal(" prefix and the ")" suffix, then keep the scale
        # (the part after the comma).
        return {'decimals': field_type[8:-1].split(',')[-1]}
    @for_type('decimal')
    def _decimal(self, value, decimals):
        # Render the value with the column's scale, then build an exact
        # Decimal from the resulting string.
        value = ('%.' + decimals + 'f') % value
        return Decimal(value)
| [
"giovanni.barillari@gmail.com"
] | giovanni.barillari@gmail.com |
e6c7be41238f437bec8d2fb51724a27d7b44d87d | 86834273400f125863bb0dd0e8ad22561c119ce1 | /samples/minecraft_clone.py | 18425e66eb4c3030ee769be4f2cf44a1315ac6fb | [
"MIT"
] | permissive | uditmaherwal/ursina | 9b50ae4ebf9008f086328eaeb7c79aadd3414174 | 20c6c396224f6a3b75a93a3142c0efde810fb480 | refs/heads/master | 2021-01-26T02:25:01.987909 | 2020-02-23T18:54:06 | 2020-02-23T18:54:06 | 243,273,206 | 1 | 0 | MIT | 2020-02-26T13:47:23 | 2020-02-26T13:47:22 | null | UTF-8 | Python | false | false | 834 | py | from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController
app = Ursina()
class Voxel(Button):
    """One clickable cube in the world grid."""
    def __init__(self, position=(0,0,0)):
        super().__init__(
            parent = scene,
            position = position,
            model = 'cube',
            origin_y = .5,
            texture = 'white_cube',
            # slight random brightness so adjacent cubes are distinguishable
            color = color.color(0, 0, random.uniform(.9, 1.0)),
            highlight_color = color.lime,
        )
    def input(self, key):
        """Left click places a cube on the clicked face; right click removes this one."""
        if self.hovered:
            if key == 'left mouse down':
                # mouse.normal is the clicked face's normal, so the new cube
                # appears adjacent to the face the player clicked.
                voxel = Voxel(position=self.position + mouse.normal)
            if key == 'right mouse down':
                destroy(self)
# Build the initial 8x8 ground plane of voxels, then start the game loop.
for z in range(8):
    for x in range(8):
        voxel = Voxel(position=(x,0,z))
player = FirstPersonController()
app.run()
| [
"pokepetter@gmail.com"
] | pokepetter@gmail.com |
6044707a842d423d814ff79bbe88165aa85c6a63 | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /0201-0300/0202/0202_Python_1.py | 824b7df04f6dc998ee5063168448cdd780ec660e | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | class Solution:
def isHappy(self, n: int) -> bool:
already = set()
while n != 1:
if n in already:
return False
already.add(n)
n = sum([int(x) * int(x) for x in str(n)])
return True
if __name__ == "__main__":
print(Solution().isHappy(19)) # True
| [
"1278729001@qq.com"
] | 1278729001@qq.com |
fe3a796bb288f2ea2a15f6a1831131ee1055515f | 641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2 | /third_party/WebKit/Source/devtools/scripts/jsdoc_validator/run_tests.py | c2625381bf84c0386fa8f16a076d312a87ec7283 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | massnetwork/mass-browser | 7de0dfc541cbac00ffa7308541394bac1e945b76 | 67526da9358734698c067b7775be491423884339 | refs/heads/master | 2022-12-07T09:01:31.027715 | 2017-01-19T14:29:18 | 2017-01-19T14:29:18 | 73,799,690 | 4 | 4 | BSD-3-Clause | 2022-11-26T11:53:23 | 2016-11-15T09:49:29 | null | UTF-8 | Python | false | false | 2,142 | py | #!/usr/bin/python
import hashlib
import operator
import os
import shutil
import stat
import subprocess
import sys
import tempfile
def rel_to_abs(rel_path):
return os.path.join(script_path, rel_path)
java_exec = 'java -Xms1024m -server -XX:+TieredCompilation'
tests_dir = 'tests'
jar_name = 'jsdoc_validator.jar'
script_path = os.path.dirname(os.path.abspath(__file__))
tests_path = rel_to_abs(tests_dir)
validator_jar_file = rel_to_abs(jar_name)
golden_file = os.path.join(tests_path, 'golden.dat')
test_files = [os.path.join(tests_path, f) for f in os.listdir(tests_path) if f.endswith('.js') and os.path.isfile(os.path.join(tests_path, f))]
validator_command = "%s -jar %s %s" % (java_exec, validator_jar_file, " ".join(sorted(test_files)))
def run_and_communicate(command, error_template):
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
(out, _) = proc.communicate()
if proc.returncode:
print >> sys.stderr, error_template % proc.returncode
sys.exit(proc.returncode)
return out
def help():
print 'usage: %s [option]' % os.path.basename(__file__)
print 'Options:'
print '--generate-golden: Re-generate golden file'
print '--dump: Dump the test results to stdout'
def main():
need_golden = False
need_dump = False
if len(sys.argv) > 1:
if sys.argv[1] == '--generate-golden':
need_golden = True
elif sys.argv[1] == '--dump':
need_dump = True
else:
help()
return
result = run_and_communicate(validator_command, "Error running validator: %d")
result = result.replace(script_path, "") # pylint: disable=E1103
if need_dump:
print result
return
if need_golden:
with open(golden_file, 'wt') as golden:
golden.write(result)
else:
with open(golden_file, 'rt') as golden:
golden_text = golden.read()
if golden_text == result:
print 'OK'
else:
print 'ERROR: Golden output mismatch'
if __name__ == '__main__':
main()
| [
"xElvis89x@gmail.com"
] | xElvis89x@gmail.com |
c006e1eef801f0d6aca2fd0af66ee80890b5f2fd | 9a9d6052f8cf91dd57be9a9b6564290b0fac9e52 | /Algorithm/JUNGOL/1. Language_Coder/반복제어문1/539_반복제어문1_자가진단4.py | 00e86ec0148550177bbba5b7f69cc431292e6dbc | [] | no_license | Gyeong-Yeon/TIL | 596ec6a093eec34a17dad68bcd91fa9dd08690e8 | eb1f43ee0525da93233b70716cd35caab8d82bda | refs/heads/master | 2023-03-31T19:56:30.979062 | 2021-03-28T13:09:27 | 2021-03-28T13:09:27 | 280,307,737 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | input_li = list(map(int,input().split()))
i = 0
sum = 0
cnt = 0
for i in range(len(input_li)):
sum += input_li[i]
cnt += 1
if input_li[i] >= 100:
break
avg = sum / cnt
print(sum)
print("%0.1f" % (avg)) | [
"lky4156@naver.com"
] | lky4156@naver.com |
4212810827fc696d2f652d89c4fb5a3ca5b8bcbf | dc222b7713453f4653da00fa8ce7a76d89c51e68 | /python_test/src/templates/crawler_2.py | 92f75f12a72f6a7c9bd5ddda0e469298768b28d1 | [] | no_license | aimeiyan/exercise | d935d48ddab90c55b8b9ac89e821abf117d5f609 | 617261af69db836a649bd4044f97bec7ab3e845d | refs/heads/master | 2020-05-20T11:36:34.698664 | 2014-02-18T11:27:44 | 2014-02-18T11:27:44 | 10,766,847 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | __author__ = 'nancy'
from urllib import urlopen
from bs4 import BeautifulSoup
from urlparse import urlparse, urljoin
def get_and_extract_links(url):
    """Fetch *url* and return every link target found on the page.

    Relative hrefs are resolved against *url* with urljoin; missing, empty
    and whitespace-only hrefs are skipped.  (Python 2 / BeautifulSoup.)
    """
    html = urlopen(url).read()
    soup = BeautifulSoup(html)
    hrefs = soup.find_all('a')
    urls = []
    for a in hrefs:
        href = a.get('href')
        if href:
            href = href.strip()
            if href:  # skip hrefs that were only whitespace
                u = urljoin(url, href)
                urls.append(u)
    return urls
def main():
seed = "http://www.baidu.com"
url = seed
to_be_downloaded = [seed]
downloaded = set()
while to_be_downloaded:
url = to_be_downloaded.pop()
print "download", url, "has", len(to_be_downloaded), "urls remaining", "downloaded", len(downloaded)
urls = get_and_extract_links(url)
downloaded.add(url)
for url in urls:
if seed in url and url[:4] == 'http' and url in downloaded:
to_be_downloaded.append(url)
if __name__ == '__main__':
main() | [
"aimeiyan@gmail.com"
] | aimeiyan@gmail.com |
9bf1c1a1ea823c79a6bf67c90744c7510431225d | 304033f60097c489cbc60aab639be45ccdbef1a5 | /algorithms/boj/brute_force/10819.py | c441a9bdf62e955e7f1408980bc52b89a3bffe5c | [] | no_license | pgw928/TIL | 3d0c47c07bd1f5c73826daf8579a2b0e3f93cb95 | 765906f1e6eecad4ad8ec9bf704041433d7eb304 | refs/heads/master | 2023-06-29T05:46:30.039815 | 2021-08-10T17:38:11 | 2021-08-10T17:38:11 | 288,923,095 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | import sys
from itertools import permutations
input = sys.stdin.readline
N = int(input())
A = list(map(int, input().split()))
M = 0
for perm in permutations(A):
tmp = [ abs(perm[i+1]-perm[i]) for i in range(len(perm)-1)]
M = max(sum(tmp), M)
print(M)
| [
"pku928@naver.com"
] | pku928@naver.com |
c5ee9bd2b4b5e9e1dff46626f83b49a5ac8d4516 | 95e9ec4b3b0d86063da53a0e62e138cf794cce3a | /webroot/py1902/py1902/settings.py | b7b4e02bf8a11be11851ad956f67c31f2b3fafe1 | [] | no_license | wjl626nice/1902 | c3d350d91925a01628c9402cbceb32ebf812e43c | 5a1a6dd59cdd903563389fa7c73a283e8657d731 | refs/heads/master | 2023-01-05T23:51:47.667675 | 2019-08-19T06:42:09 | 2019-08-19T06:42:09 | 180,686,044 | 4 | 1 | null | 2023-01-04T07:35:24 | 2019-04-11T00:46:43 | Python | UTF-8 | Python | false | false | 5,392 | py | """
Django settings for py1902 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*!b8$5*w^zcz$m@875#i(u2!21k9zz9=$j_s4zhfbw*2ypeh+f'
# SECURITY WARNING: don't run with debug turned on in production!
# 开发阶段 开启调试模式
DEBUG = True
# 设置允许请求的主机
ALLOWED_HOSTS = ['127.0.0.1', 'www.xuxin.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'manager.apps.ManagerConfig',
'Home.apps.HomeConfig',
'api.apps.ApiConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'CheckLoginMW.CheckLoginMW'
]
ROOT_URLCONF = 'py1902.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# 模板引擎初始化参数,在模板中可以直接使用。
'conf.global.auto_config'
],
},
},
]
WSGI_APPLICATION = 'py1902.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'p1_blog',
'HOST': '127.0.0.1',
'USER': 'root',
'PASSWORD': '123456',
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# 盐
SALT = 'qwsa12#'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# 媒体文件路径 别名
MEDIA_URL = '/uploads/'
# 指定媒体文件路径
# MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads'),
# 后台菜单
MENU = [
{"id": "menu-article", "title": "文章管理", "url": '#', "icon": '', 'child': [
{"title": "栏目管理", "url": '/admin/category/'},
{"title": "文章列表", "url": '/admin/article/'}
]
},
{"id": "menu-picture", "title": "随手拍管理", "url": '#', "icon": '', 'child': [
{"title": "图片管理", "url": '#'},
]
},
{"id": "menu-banner", "title": "轮播图管理", "url": '#', "icon": '', 'child': [
{"title": "轮播图", "url": '#'},
]
},
{"id": "menu-comments", "title": "评论管理", "url": '#', "icon": '', 'child': [
{"title": "评论列表", "url": '#'}
]
},
{"id": "menu-system", "title": "系统管理", "url": '#', "icon": '', 'child': [
{"title": "系统设置", "url": '#'},
{"title": "管理员管理", "url": '/admin/manager/'},
{"title": "友情链接管理", "url": '/admin/links/'},
{"title": "留言管理", "url": '#'},
{"title": "屏蔽词", "url": '#'},
{"title": "操作日志", "url": '#'},
]
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
# 极验id和key
GEETEST = {
'id': '4726b9849ea9f2493787a3fa247a9973',
'key': 'c264c310dbae53a1383770771408b473',
} | [
"18537160262@qq.com"
] | 18537160262@qq.com |
305dcded8a50a6e919ea59991bcdf5aeada117a3 | da1e8e6d9886cabe65887a5e2cfe3fe62c06a564 | /lab1/venv/bin/easy_install | ea69e4a6d164aa9262f47ff5cb644ba718f88864 | [] | no_license | serhiisad/Numerical_Analysis | d0f1a5ae652bf57354f472d3f239f23d4aae2a86 | acdc38d712011fdddc4bbb0aa6712d18745fdfc8 | refs/heads/master | 2020-04-16T04:49:08.128479 | 2019-01-11T17:29:16 | 2019-01-11T17:29:16 | 165,282,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | #!/home/serhiisad/PycharmProjects/LABS_Numerical_Analysis_Onai/lab1/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"serhiisad.kpi@gmail.com"
] | serhiisad.kpi@gmail.com | |
687250aac92bf01376a32104c14b0ea2953ebf56 | 410de43884e51d1edef0bb31035d6e78e3f0f3c0 | /wifipumpkin3/core/servers/proxy/pumpkin_proxy.py | ddf931d79053fac9d7daf8e3bd2b29f2052275af | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | tirkarthi/wifipumpkin3 | 7af13f4f5c7908e555660f4a29b71d245174daa7 | 809baef0c8116410a26f6b263a457f0a1d7f98b9 | refs/heads/master | 2022-12-07T05:25:18.270403 | 2020-08-02T02:04:11 | 2020-08-02T02:04:11 | 291,266,640 | 0 | 0 | Apache-2.0 | 2020-08-29T12:25:13 | 2020-08-29T12:25:12 | null | UTF-8 | Python | false | false | 5,494 | py | from wifipumpkin3.core.config.globalimport import *
from collections import OrderedDict
from functools import partial
from threading import Thread
import queue
from scapy.all import *
import logging, os
import wifipumpkin3.core.utility.constants as C
from wifipumpkin3.core.servers.proxy.proxymode import *
from wifipumpkin3.core.utility.collection import SettingsINI
from wifipumpkin3.core.common.uimodel import *
from wifipumpkin3.core.widgets.docks.dock import DockableWidget
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TCPProxyDock(DockableWidget):
id = "TCPProxy"
title = "TCPProxy"
def __init__(self, parent=0, title="", info={}):
super(TCPProxyDock, self).__init__(parent, title, info={})
self.setObjectName(self.title)
self.THeaders = OrderedDict([("Plugin", []), ("Logging", [])])
def writeModeData(self, data):
""" get data output and add on QtableWidgets """
self.THeaders["Plugin"].append(data.keys()[0])
self.THeaders["Logging"].append(data[data.keys()[0]])
Headers = []
print(data)
def stopProcess(self):
pass
class PumpKinProxy(ProxyMode):
Name = "PumpkinProxy 3"
Author = "Pumpkin-Dev"
ID = "pumpkinproxy"
Description = "Transparent proxies that you can use to intercept and manipulate HTTP traffic modifying requests and responses, that allow to inject javascripts into the targets visited."
Hidden = False
LogFile = C.LOG_PUMPKINPROXY
CONFIGINI_PATH = C.CONFIG_PP_INI
_cmd_array = []
ModSettings = True
RunningPort = 8080
ModType = "proxy"
TypePlugin = 1
def __init__(self, parent=None, **kwargs):
super(PumpKinProxy, self).__init__(parent)
self.setID(self.ID)
self.parent = parent
self.setTypePlugin(self.TypePlugin)
self.setRunningPort(self.conf.get("proxy_plugins", "pumpkinproxy_config_port"))
def Initialize(self):
self.add_default_rules(
"iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port {}".format(
self.conf.get("proxy_plugins", "pumpkinproxy_config_port")
)
)
self.runDefaultRules()
@property
def CMD_ARRAY(self):
port_ssltrip = self.conf.get("proxy_plugins", "pumpkinproxy_config_port")
self._cmd_array = ["-l", port_ssltrip]
return self._cmd_array
def boot(self):
self.reactor = ProcessThread({"sslstrip3": self.CMD_ARRAY})
self.reactor._ProcssOutput.connect(self.LogOutput)
self.reactor.setObjectName(self.ID)
@property
def getPlugins(self):
commands = self.config.get_all_childname("plugins")
list_commands = []
for command in commands:
list_commands.append(self.ID + "." + command)
# find all plugin from pumpkinproxy
for sub_plugin in self.config.get_all_childname("set_{}".format(command)):
list_commands.append("{}.{}.{}".format(self.ID, command, sub_plugin))
return list_commands
def LogOutput(self, data):
if self.conf.get("accesspoint", "status_ap", format=bool):
self.logger.info(data)
def parser_set_pumpkinproxy(self, status, plugin_name):
if len(plugin_name.split(".")) == 2:
try:
# plugin_name = pumpkinproxy.no-cache
name_plugin, key_plugin = (
plugin_name.split(".")[0],
plugin_name.split(".")[1],
)
if key_plugin in self.config.get_all_childname("plugins"):
self.config.set("plugins", key_plugin, status)
else:
print(
display_messages(
"unknown plugin: {}".format(key_plugin), error=True
)
)
except IndexError:
print(display_messages("unknown sintax command", error=True))
elif len(plugin_name.split(".")) == 3:
try:
# plugin_name = pumpkinproxy.beef.url_hook
name_plugin, key_plugin = (
plugin_name.split(".")[1],
plugin_name.split(".")[2],
)
if key_plugin in self.config.get_all_childname(
"set_{}".format(name_plugin)
):
self.config.set("set_{}".format(name_plugin), key_plugin, status)
else:
print(
display_messages(
"unknown plugin: {}".format(key_plugin), error=True
)
)
except IndexError:
print(display_messages("unknown sintax command", error=True))
| [
"mh4root@gmail.com"
] | mh4root@gmail.com |
fa56efff49eee1254f0c1da3bae2a1b0d65471aa | edbabcc3a43a46f83c656f82248f757387629c32 | /weibo_spider/douban.py | 4a8c7e13b96eb2cbc76cbd7f78d9e35f14fc4158 | [] | no_license | INJNainggolan/Pycharm_work | b84fd8b4cd5158161956c96da326e3f0eba6b2eb | 8314aa23ade681cbad9e7abb4d35de508d46482b | refs/heads/master | 2020-04-08T03:30:35.177056 | 2018-03-07T13:15:09 | 2018-03-07T13:15:09 | 124,235,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,547 | py | #coding:utf-8
__author__ = 'hang'
import warnings
warnings.filterwarnings("ignore")
import jieba #分词包
import numpy #numpy计算包
import codecs #codecs提供的open方法来指定打开的文件的语言编码,它会在读取的时候自动转换为内部unicode
import re
import pandas as pd
import matplotlib.pyplot as plt
from urllib import request
from bs4 import BeautifulSoup as bs
#%matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)
from wordcloud import WordCloud#词云包
#分析网页函数
def getNowPlayingMovie_list():
resp = request.urlopen('https://movie.douban.com/nowplaying/hangzhou/')
html_data = resp.read().decode('utf-8')
soup = bs(html_data, 'html.parser')
nowplaying_movie = soup.find_all('div', id='nowplaying')
nowplaying_movie_list = nowplaying_movie[0].find_all('li', class_='list-item')
nowplaying_list = []
for item in nowplaying_movie_list:
nowplaying_dict = {}
nowplaying_dict['id'] = item['data-subject']
for tag_img_item in item.find_all('img'):
nowplaying_dict['name'] = tag_img_item['alt']
nowplaying_list.append(nowplaying_dict)
return nowplaying_list
#爬取评论函数
def getCommentsById(movieId, pageNum):
eachCommentList = [];
if pageNum>0:
start = (pageNum-1) * 20
else:
return False
requrl = 'https://movie.douban.com/subject/' + movieId + '/comments' +'?' +'start=' + str(start) + '&limit=20'
print(requrl)
resp = request.urlopen(requrl)
html_data = resp.read().decode('utf-8')
soup = bs(html_data, 'html.parser')
comment_div_lits = soup.find_all('div', class_='comment')
for item in comment_div_lits:
if item.find_all('p')[0].string is not None:
eachCommentList.append(item.find_all('p')[0].string)
return eachCommentList
def main():
#循环获取第一个电影的前10页评论
commentList = []
NowPlayingMovie_list = getNowPlayingMovie_list()
for i in range(10):
num = i + 1
commentList_temp = getCommentsById(NowPlayingMovie_list[0]['id'], num)
commentList.append(commentList_temp)
#将列表中的数据转换为字符串
comments = ''
for k in range(len(commentList)):
comments = comments + (str(commentList[k])).strip()
#使用正则表达式去除标点符号
pattern = re.compile(r'[\u4e00-\u9fa5]+')
filterdata = re.findall(pattern, comments)
cleaned_comments = ''.join(filterdata)
#使用结巴分词进行中文分词
segment = jieba.lcut(cleaned_comments)
words_df=pd.DataFrame({'segment':segment})
#去掉停用词
stopwords=pd.read_csv("stopwords.txt",index_col=False,quoting=3,sep="\t",names=['stopword'], encoding='utf-8')#quoting=3全不引用
words_df=words_df[~words_df.segment.isin(stopwords.stopword)]
#统计词频
words_stat=words_df.groupby(by=['segment'])['segment'].agg({"计数":numpy.size})
words_stat=words_stat.reset_index().sort_values(by=["计数"],ascending=False)
#用词云进行显示
wordcloud=WordCloud(font_path="simhei.ttf",background_color="white",max_font_size=80)
word_frequence = {x[0]:x[1] for x in words_stat.head(1000).values}
word_frequence_list = []
for key in word_frequence:
temp = (key,word_frequence[key])
word_frequence_list.append(temp)
wordcloud=wordcloud.fit_words(word_frequence_list)
plt.imshow(wordcloud)
#主函数
if __name__ == '__main__':
main() | [
"Nainggolan@github.com"
] | Nainggolan@github.com |
8d4ab8697bc69c095b7c8d6b18ed034aff53b586 | 85b3c686db76bce624a262de20ffb0b882840fdd | /social_bookmarking/views.py | a8de490964067340914be4d7af222a0ca1033d5f | [
"MIT"
] | permissive | truongsinh/django-social-media | 6361413a80bac6e3827dd725cca95db1c82a2974 | 90f51d97409d87b598386f2ca82e6e3168478d32 | refs/heads/master | 2020-12-25T11:53:21.394434 | 2011-02-19T08:47:08 | 2011-02-19T08:47:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | from django.shortcuts import get_object_or_404, redirect
from django.http import Http404
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from social_bookmarking.models import BookmarkRelated, Bookmark
def bookmark_referer(request, slug, content_type, object_pk, url):
"""
Redirect to bookmark url if content_type exists
and object related exists too.
If there is no error, the related bookmark counter is incremented.
"""
bookmark = get_object_or_404(Bookmark, slug=slug)
app, model = content_type.split('.')
try:
ctype = ContentType.objects.get(app_label=app, model=model)
content_object = ctype.get_object_for_this_type(pk=object_pk)
except ObjectDoesNotExist:
raise Http404
related, is_created = BookmarkRelated.objects.get_or_create(content_type=ctype,
object_id=content_object.pk,
bookmark=bookmark)
related.visits += 1
related.save()
return redirect(url) | [
"florent.messa@gmail.com"
] | florent.messa@gmail.com |
f6797ec80344efdb57b3bf0cc450d5e008058142 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/guild/guild_join_request_delete_event/tests/test__parse_guild_id.py | d0a4cde74f5380fcf06b03c4fad8746b6745a2a9 | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 600 | py | import vampytest
from ..fields import parse_guild_id
def _iter_options():
guild_id = 202305160007
yield {}, 0
yield {'guild_id': None}, 0
yield {'guild_id': str(guild_id)}, guild_id
@vampytest._(vampytest.call_from(_iter_options()).returning_last())
def test__parse_guild_id(input_data):
"""
Tests whether ``parse_guild_id`` works as intended.
Parameters
----------
input_data : `dict<str, object>`
Data to try to parse the guild identifier from.
Returns
-------
output : `int`
"""
return parse_guild_id(input_data)
| [
"re.ism.tm@gmail.com"
] | re.ism.tm@gmail.com |
f99658226c6f0dbf526e4279f61ca16cfd909486 | 942ee5e8d54e8ebe9c5c841fbfdd1da652946944 | /1001-1500/1467.Probability of a Two Boxes Having The Same Number of Distinct Balls.py | 5e7f9b3fd1966c2ca99755119191d27b2d196478 | [] | no_license | kaiwensun/leetcode | 0129c174457f32887fbca078fb448adce46dd89d | 6b607f4aae3a4603e61f2e2b7480fdfba1d9b947 | refs/heads/master | 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 | Python | UTF-8 | Python | false | false | 1,431 | py | import functools, collections
class Solution:
def getProbability(self, balls: List[int]) -> float:
box_size = sum(balls) // 2
factorial = [1] * (max(balls) + 1 + 100)
for i in range(1, len(factorial)):
factorial[i] = factorial[i - 1] * i
def choose(total, select):
return factorial[total] // (factorial[select] * factorial[total - select])
def search(color, space_l, space_r, uniq_l, uniq_r):
cnt_uniq = cnt_total = 0
if color == len(balls):
assert(space_l == space_r == 0)
if uniq_l == uniq_r:
return 1, 1
else:
return 0, 1
for left in range(balls[color] + 1):
right = balls[color] - left
if left > space_l or right > space_r:
continue
new_uniq_l = uniq_l + (1 if left else 0)
new_uniq_r = uniq_r + (1 if right else 0)
rtn_uniq, rtn_total = search(color + 1, space_l - left, space_r - right, new_uniq_l, new_uniq_r)
weight = choose(space_l, left) * choose(space_r, right)
cnt_uniq += rtn_uniq * weight
cnt_total += rtn_total * weight
return cnt_uniq, cnt_total
stats = search(0, box_size, box_size, 0, 0)
return float(stats[0]) / stats[1]
| [
"noreply@github.com"
] | kaiwensun.noreply@github.com |
4258a6ba19ea4d7cf50cab6bbeb0bd1e93cb00b0 | 1860aa3e5c0ba832d6dd12bb9af43a9f7092378d | /modules/xlwt3-0.1.2/examples/col_width.py | 90d82eb9415a897615d41007b926e35096306e1a | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | agz1990/GitPython | d90de16451fab9222851af790b67bcccdf35ab75 | 951be21fbf8477bad7d62423b72c3bc87154357b | refs/heads/master | 2020-08-06T18:12:26.459541 | 2015-07-05T14:58:57 | 2015-07-05T14:58:57 | 12,617,111 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Kiseliov Roman
from xlwt3 import *
w = Workbook()
ws = w.add_sheet('Hey, Dude')
for i in range(6, 80):
fnt = Font()
fnt.height = i*20
style = XFStyle()
style.font = fnt
ws.write(1, i, 'Test')
ws.col(i).width = 0x0d00 + i
w.save('col_width.xls')
| [
"522360568@qq.com"
] | 522360568@qq.com |
723c1499308ef1947356732e42ee5c9030771c94 | 20c80f722c451b64d05cc027b66a81e1976c3253 | /commons/libs/pyblish_starter/plugins/integrate_asset.py | 4dd833f1be3f9c0d61e553108a7956a38094f954 | [] | no_license | flypotatojun/Barbarian | 2d3fcb6fcb1b4495b6d62fc5e32634abf4638312 | efe14dd24c65b4852997dad1290e503211bcc419 | refs/heads/master | 2021-07-18T01:43:14.443911 | 2017-10-24T03:37:43 | 2017-10-24T03:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,421 | py | import pyblish.api
class IntegrateStarterAsset(pyblish.api.InstancePlugin):
"""Move user data to shared location
This plug-in exposes your data to others by encapsulating it
into a new version.
Schema:
Data is written in the following format.
____________________
| |
| version |
| ________________ |
| | | |
| | representation | |
| |________________| |
| | | |
| | ... | |
| |________________| |
|____________________|
"""
label = "Starter Asset"
order = pyblish.api.IntegratorOrder
families = [
"starter.model",
"starter.rig",
"starter.animation"
]
def process(self, instance):
import os
import json
import errno
import shutil
from pyblish_starter import api
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
if not all(result["success"] for result in context.data["results"]):
raise Exception("Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, (
"Incomplete instance \"%s\": "
"Missing reference to staging area."
% instance
)
root = context.data["workspaceDir"]
instancedir = os.path.join(root, "shared", instance.data["name"])
try:
os.makedirs(instancedir)
except OSError as e:
if e.errno != errno.EEXIST: # Already exists
self.log.critical("An unexpected error occurred.")
raise
version = api.find_latest_version(os.listdir(instancedir)) + 1
versiondir = os.path.join(instancedir, api.format_version(version))
# Metadata
# _________
# | |.key = value
# | |
# | |
# | |
# | |
# |_________|
#
fname = os.path.join(stagingdir, ".metadata.json")
try:
with open(fname) as f:
metadata = json.load(f)
except IOError:
metadata = {
"schema": "pyblish-starter:version-1.0",
"version": version,
"path": versiondir,
"representations": list(),
# Collected by pyblish-base
"time": context.data["date"],
"author": context.data["user"],
# Collected by pyblish-maya
"source": os.path.join(
"{root}",
os.path.relpath(
context.data["currentFile"],
api.root()
)
),
}
for filename in instance.data.get("files", list()):
name, ext = os.path.splitext(filename)
metadata["representations"].append(
{
"schema": "pyblish-starter:representation-1.0",
"format": ext,
"path": "{dirname}/%s{format}" % name
}
)
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
with open(fname, "w") as f:
json.dump(metadata, f, indent=4)
# Metadata is written before being validated -
# this way, if validation fails, the data can be
# inspected by hand from within the user directory.
api.schema.validate(metadata, "version")
shutil.copytree(stagingdir, versiondir)
self.log.info("Successfully integrated \"%s\" to \"%s\"" % (
instance, versiondir))
| [
"lonegather@users.noreply.github.com"
] | lonegather@users.noreply.github.com |
56010a4e310d6d54195272006cb696d231a7d78a | 5289db68f1573549b287750beed02bc9c37340d3 | /tools/pytorch-quantization/tests/print_test.py | 1a4dec6d13ceb244e45e7154cc07c869329a528f | [
"ISC",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | feizhouxiaozhu/TensorRT | 5d5ac8468f67121a96d68a76e43988630c483040 | af8f24cefba42e367ec09fbb05c08a2946645258 | refs/heads/master | 2023-02-08T10:35:41.382352 | 2020-12-19T00:48:00 | 2020-12-19T02:28:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | #
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""test for str and repr
Make sure things can print and in a nice form. Put all the print tests together so that running this test file alone
can inspect all the print messages in the project
"""
import torch
from torch import nn
from pytorch_quantization import calib
from pytorch_quantization import tensor_quant
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
# pylint:disable=missing-docstring, no-self-use
class TestPrint():
def test_print_descriptor(self):
test_desc = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
print(test_desc)
def test_print_tensor_quantizer(self):
test_quantizer = TensorQuantizer()
print(test_quantizer)
def test_print_module(self):
class _TestModule(nn.Module):
def __init__(self):
super(_TestModule, self).__init__()
self.conv = nn.Conv2d(33, 65, 3)
self.quant_conv = quant_nn.Conv2d(33, 65, 3)
self.linear = nn.Linear(33, 65)
self.quant_linear = quant_nn.Linear(33, 65)
test_module = _TestModule()
print(test_module)
def test_print_calibrator(self):
print(calib.MaxCalibrator(7, 1, False))
hist_calibrator = calib.HistogramCalibrator(8, None, True)
hist_calibrator.collect(torch.rand(10))
print(hist_calibrator)
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
ea488ecdd6f29b28a881a7505d69af2f10c4951b | 93872b89471eccf1414306216aa8b97df0c38cc4 | /lib/modules/credentials/mimigatoz/mimitokens.py | 79c4038c9c497452d99be0791f9929141f5078b4 | [
"BSD-3-Clause"
] | permissive | brownbelt/Empire-mod-Hackplayers | bcb1a7cd6f608e9b83011e19e1527d8ec120f932 | 315184020542012fdcf89ba4c0de0e5c00954372 | refs/heads/master | 2021-06-20T02:55:15.991222 | 2017-08-01T09:25:58 | 2017-08-01T09:25:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,522 | py | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-MimiGatoz Tokens',
'Author': ['@JosephBialek', '@gentilkiwi'],
'Description': ("Runs PowerSploit's Invoke-MimiGatoz function "
"to list or enumerate tokens."),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'http://clymb3r.wordpress.com/',
'http://blog.gentilkiwi.com'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'list' : {
'Description' : 'Switch. List current tokens on the machine.',
'Required' : False,
'Value' : 'True'
},
'elevate' : {
'Description' : 'Switch. Elevate instead of listing tokens.',
'Required' : False,
'Value' : ''
},
'revert' : {
'Description' : 'Switch. Revert process token.',
'Required' : False,
'Value' : ''
},
'admin' : {
'Description' : 'Switch. List/elevate local admin tokens.',
'Required' : False,
'Value' : ''
},
'domainadmin' : {
'Description' : 'Switch. List/elevate domain admin tokens.',
'Required' : False,
'Value' : ''
},
'user' : {
'Description' : 'User name to list/elevate the token of.',
'Required' : False,
'Value' : ''
},
'id' : {
'Description' : 'Token ID to list/elevate the token of.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "data/module_source/credentials/Invoke-MimiGatoz.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
listTokens = self.options['list']['Value']
elevate = self.options['elevate']['Value']
revert = self.options['revert']['Value']
admin = self.options['admin']['Value']
domainadmin = self.options['domainadmin']['Value']
user = self.options['user']['Value']
processid = self.options['id']['Value']
script = moduleCode
script += "Invoke-MimiGatoz -Command "
if revert.lower() == "true":
script += "'\"token::revert"
else:
if listTokens.lower() == "true":
script += "'\"token::list"
elif elevate.lower() == "true":
script += "'\"token::elevate"
else:
print helpers.color("[!] list, elevate, or revert must be specified!")
return ""
if domainadmin.lower() == "true":
script += " /domainadmin"
elif admin.lower() == "true":
script += " /admin"
elif user.lower() != "":
script += " /user:" + str(user)
elif processid.lower() != "":
script += " /id:" + str(processid)
script += "\"';"
return script
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
0e382a491da509bd9913eff6eb831510b77bd9af | 446d9c9e98bac9bb7d6ba9d6f2639fd1ab0e68af | /pythonBook/chapter06/exercise6-23.py | 67d2bce0e2edc2ef0d9e9213eacb0216d837626a | [] | no_license | thiagofb84jp/python-exercises | 062d85f4f95332549acd42bf98de2b20afda5239 | 88ad7365a0f051021034ac6f0683b3df2de57cdb | refs/heads/main | 2023-07-19T21:15:08.689041 | 2021-08-17T10:59:09 | 2021-08-17T10:59:09 | 308,311,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """
6.23. Controle da utilização de salas de um cinema
"""
lugaresVagos = [10, 2, 1, 3, 0]
while True:
sala = int(input("Sala (0 para sair do programa) :"))
if sala == 0:
print("Encerrando o programa...")
break
if sala > len(lugaresVagos) or sala < 1:
print("Sala inválida!")
elif lugaresVagos[sala - 1] == 0:
print("Desculpe, sala lotada!")
else:
lugares = int(
input(f"Quantos lugares você deseja ({lugaresVagos[sala - 1]} \
vagos): "))
if lugares > lugaresVagos[sala - 1]:
print("Esse número de lugares não está disponível.")
elif lugares < 0:
print("Número inválido.")
else:
lugaresVagos[sala - 1] -= lugares
print(f"{lugares} lugares vendidos")
print("Utilização das salas")
for x, l in enumerate(lugaresVagos):
print(f"Sala {x + 1} - {l} lugar(es) vazio(s)")
| [
"thiagofb84jp@gmail.com"
] | thiagofb84jp@gmail.com |
ff76df49ad42e40b7da91b22b4e8c1c8a4a40fb2 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/cosmosdb/aaz/latest/cosmosdb/postgres/configuration/coordinator/__cmd_group.py | 27271cfb6425eebaca4fa2e123288c385ca59464 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 704 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command_group(
"cosmosdb postgres configuration coordinator",
is_preview=True,
)
class __CMDGroup(AAZCommandGroup):
"""Manage Azure Cosmos DB for PostgreSQL coordinator configurations.
"""
pass
__all__ = ["__CMDGroup"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
cb606700b2241eebb71ead8c64124867f9a3a585 | f0f285567e706c1d89a730e9807ca44690f745ad | /0x0A-python-inheritance/6-base_geometry.py | 819d3ab4cf7e2b8ba06e805d561fffa5145b6703 | [] | no_license | Faith-qa/alx-higher_level_programming-1 | a43fc57414c6c946407d0795df5f9794a061a890 | 8f66f9a09088b55d44f1754ca616e75d83ca76c4 | refs/heads/main | 2023-08-07T16:41:39.830429 | 2021-09-28T06:40:49 | 2021-09-28T06:40:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/python3
# 6-base_geometry.py
"""Defines a base geometry class BaseGeometry."""
class BaseGeometry:
"""Represent base geometry."""
def area(self):
"""Not implemented."""
raise Exception("area() is not implemented")
| [
"yosefsamuel22@gmail.com"
] | yosefsamuel22@gmail.com |
d053c0a0a25b58a278a960935bd26b497357d1c7 | e24f75482c0ae71fb0dfa6d49f3a82129569c2d9 | /changes/api/serializer/models/plan.py | 9f11af47bb2be272e1a5b9c4efd90040b5cc14a6 | [
"Apache-2.0"
] | permissive | OmarSkalli/changes | aaa6a0083c87b7d5876eb0557466be60dea45e34 | 5280b2cea13c314aeecc770853c14caaff6bbb93 | refs/heads/master | 2022-08-12T11:37:53.571741 | 2014-07-11T22:55:37 | 2014-07-11T22:55:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | import json
from changes.api.serializer import Serializer, register
from changes.models import ItemOption, Plan, Step
@register(Plan)
class PlanSerializer(Serializer):
    """Serialize a build Plan into its JSON-ready API representation."""

    def serialize(self, instance, attrs):
        """Return a dict for *instance*; *attrs* is unused for plans."""
        payload = {}
        payload['id'] = instance.id.hex
        payload['name'] = instance.label
        payload['steps'] = list(instance.steps)
        payload['dateCreated'] = instance.date_created
        payload['dateModified'] = instance.date_modified
        return payload
@register(Step)
class StepSerializer(Serializer):
    """Serialize a build Step, including its stored ItemOption values."""

    def get_attrs(self, item_list):
        """Prefetch the options of every step in one query.

        Returns a mapping of step -> {'options': {name: value}} so that
        ``serialize`` never hits the database per item.
        """
        rows = ItemOption.query.filter(
            ItemOption.item_id.in_(step.id for step in item_list),
        )
        grouped = {}
        for row in rows:
            bucket = grouped.get(row.item_id)
            if bucket is None:
                bucket = grouped[row.item_id] = {}
            bucket[row.name] = row.value
        return {
            step: {'options': grouped.get(step.id, {})}
            for step in item_list
        }

    def serialize(self, instance, attrs):
        """Return a JSON-ready dict for *instance* using prefetched *attrs*."""
        impl = instance.get_implementation()
        label = impl.get_label() if impl else ''
        return {
            'id': instance.id.hex,
            'implementation': instance.implementation,
            'order': instance.order,
            'name': label,
            'data': json.dumps(dict(instance.data or {})),
            'dateCreated': instance.date_created,
            'options': attrs['options'],
        }
| [
"cramer@dropbox.com"
] | cramer@dropbox.com |
abac750ec39d365e349d684cf03414f5725f8da0 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy2506.py | a7fde2d65a26c997e4016788e19ab6906db2feb1 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,134 | py | # qubit number=4
# total number=37
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings position-wise and return the result reversed.

    Both strings are indexed up to ``len(s)``, so ``t`` must be at least
    as long as ``s``.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    bits.reverse()
    return ''.join(bits)
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f for a boolean function ``f`` on ``n`` bits.

    The circuit has ``n`` control qubits plus one target qubit; for every
    n-bit string ``rep`` with ``f(rep) == "1"`` the target is flipped.
    '0' positions of ``rep`` are handled by X-conjugating the
    multi-controlled Toffoli. Cost is exponential: all 2**n inputs are
    enumerated.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X-conjugate the '0' positions so the mct fires exactly on rep.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit on ``n`` qubits around the oracle
    for ``f`` and measure every qubit into the classical register.

    The fixed gate list (tagged with ``# number=...``) appears to be
    auto-generated/mutated for differential testing (see the file header
    ``total number=37``) — do not reorder or "simplify" it, the exact
    sequence is the point of the benchmark.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=19
    prog.cz(input_qubit[0],input_qubit[3]) # number=20
    prog.h(input_qubit[3]) # number=21
    prog.cx(input_qubit[0],input_qubit[3]) # number=23
    prog.x(input_qubit[3]) # number=24
    prog.cx(input_qubit[0],input_qubit[3]) # number=25
    prog.cx(input_qubit[0],input_qubit[3]) # number=17
    prog.rx(-0.48380526865282825,input_qubit[3]) # number=26
    prog.h(input_qubit[1]) # number=2
    prog.y(input_qubit[3]) # number=18
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # The oracle acts on the first n-1 qubits as controls, last as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.cx(input_qubit[0],input_qubit[1]) # number=28
    prog.h(input_qubit[1]) # number=34
    prog.cz(input_qubit[0],input_qubit[1]) # number=35
    prog.h(input_qubit[1]) # number=36
    prog.x(input_qubit[1]) # number=32
    prog.cx(input_qubit[0],input_qubit[1]) # number=33
    prog.cx(input_qubit[0],input_qubit[1]) # number=30
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.x(input_qubit[2]) # number=22
    prog.y(input_qubit[2]) # number=11
    prog.x(input_qubit[0]) # number=13
    prog.x(input_qubit[0]) # number=14
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle definition: f(rep) = <a, rep> XOR b (inner product with the
    # hidden string "111", bias "0").
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on a noisy mock backend (FakeVigo) and collect measurement counts.
    backend = FakeVigo()
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump counts, the transpiled circuit depth and its drawing for
    # offline comparison against other benchmark variants.
    writefile = open("../data/startQiskit_noisy2506.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
6a192ce3a780e557a221c647cf70d5ce37cf901d | 78f3fe4a148c86ce9b80411a3433a49ccfdc02dd | /2017/06/trump-approval-20170601/graphic_config.py | ff3ca865581af3ab3dd6555df9e94d562b335301 | [] | no_license | nprapps/graphics-archive | 54cfc4d4d670aca4d71839d70f23a8bf645c692f | fe92cd061730496cb95c9df8fa624505c3b291f8 | refs/heads/master | 2023-03-04T11:35:36.413216 | 2023-02-26T23:26:48 | 2023-02-26T23:26:48 | 22,472,848 | 16 | 7 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1g0Lnpdt7bS8_NDHxYVFOQ54vuxPCkKqdIPdlWZG3O6Q'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| [
"ahurt@npr.org"
] | ahurt@npr.org |
ee784fcf452c66c9190b9ade1e753cfa0f306994 | ada3899b0d2332121087105ceeba0b138681ecf2 | /modules/signatures/CAPE.py | 2642b2b5625d195eed0e7ad9e39bd475e10a6e4e | [] | no_license | olivierh59500/CAPE | 51d6a4e8b022b660ad8f64860459186f1d308987 | 823f78d22f444ee6db93e7b02c9fa77e64186baa | refs/heads/master | 2021-01-12T01:12:26.614946 | 2016-12-31T23:35:23 | 2016-12-31T23:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,171 | py | from lib.cuckoo.common.abstracts import Signature
EXTRACTION_MIN_SIZE = 0x2000
class CAPE_PlugX(Signature):
name = "CAPE PlugX"
description = "CAPE detection: PlugX"
severity = 3
categories = ["chinese", "malware"]
families = ["plugx"]
authors = ["kev"]
minimum = "1.3"
evented = True
filter_apinames = set(["RtlDecompressBuffer", "memcpy"])
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.compressed_binary = False
self.config_copy = False
self.plugx = False
def on_call(self, call, process):
if call["api"] == "RtlDecompressBuffer":
buf = self.get_argument(call, "UncompressedBuffer")
if "XV" in buf:
self.compressed_binary = True
if "MZ" in buf:
self.compressed_binary = True
if call["api"] == "memcpy":
count = self.get_raw_argument(call, "count")
if (count == 0xae4) or \
(count == 0xbe4) or \
(count == 0x150c) or \
(count == 0x1510) or \
(count == 0x1516) or \
(count == 0x170c) or \
(count == 0x1b18) or \
(count == 0x1d18) or \
(count == 0x2540) or \
(count == 0x254c) or \
(count == 0x2d58) or \
(count == 0x36a4) or \
(count == 0x4ea4):
self.config_copy = True
def on_complete(self):
if self.config_copy == True and self.compressed_binary == True:
self.plugx = True
return True
class CAPE_PlugX_fuzzy(Signature):
name = "CAPE PlugX fuzzy"
description = "CAPE detection: PlugX (fuzzy match)"
severity = 3
categories = ["chinese", "malware"]
families = ["plugx"]
authors = ["kev"]
minimum = "1.3"
evented = True
filter_apinames = set(["RtlDecompressBuffer", "memcpy"])
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.compressed_binary = False
self.config_copy = False
self.plugx = False
def on_call(self, call, process):
if call["api"] == "RtlDecompressBuffer":
buf = self.get_argument(call, "UncompressedBuffer")
if "XV" in buf:
self.plugx = True
if "MZ" in buf:
self.compressed_binary = True
def on_complete(self):
if self.config_copy == True and self.compressed_binary == True:
self.plugx = True
if self.plugx == True:
return True
class CAPE_Compression(Signature):
name = "CAPE Compression"
description = "CAPE detection: Compression"
severity = 3
categories = ["malware"]
authors = ["kev"]
minimum = "1.3"
evented = True
filter_apinames = set(["RtlDecompressBuffer"])
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.compressed_binary = False
def on_call(self, call, process):
if call["api"] == "RtlDecompressBuffer":
buf = self.get_argument(call, "UncompressedBuffer")
if "MZ" in buf:
self.compressed_binary = True
def on_complete(self):
if self.compressed_binary == True:
return True
class CAPE_Derusbi(Signature):
name = "CAPE Derusbi"
description = "CAPE detection: Derusbi"
severity = 3
categories = ["chinese", "malware"]
families = ["derusbi"]
authors = ["kev"]
minimum = "1.3"
evented = True
filter_apinames = set(["srand", "memcpy"])
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.srand = False
self.config_copy = False
self.derusbi = False
def on_call(self, call, process):
if call["api"] == "srand":
self.srand = True
if call["api"] == "memcpy":
count = self.get_raw_argument(call, "count")
if (count == 0x50) or \
(count == 0x1A8) or \
(count == 0x2B4):
self.config_copy = True
def on_complete(self):
if self.config_copy == True and self.srand == True:
self.derusbi = True
#return True
return False
class CAPE_EvilGrab(Signature):
name = "CAPE EvilGrab"
description = "CAPE detection: EvilGrab"
severity = 3
categories = ["malware"]
authors = ["kev"]
minimum = "1.3"
evented = True
filter_apinames = set(["RegSetValueExA", "RegSetValueExW", "RegCreateKeyExA", "RegCreateKeyExW"])
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.reg_evilgrab_keyname = False
self.reg_binary = False
def on_call(self, call, process):
if call["api"] == "RegCreateKeyExA" or call["api"] == "RegCreateKeyExW":
buf = self.get_argument(call, "SubKey")
if buf == "Software\\rar":
self.reg_evilgrab_keyname = True
if call["api"] == "RegSetValueExA" or call["api"] == "RegSetValueExW":
length = self.get_raw_argument(call, "BufferLength")
if length > 0x10000 and self.reg_evilgrab_keyname == True:
self.reg_binary = True
def on_complete(self):
if self.reg_binary == True:
return True
else:
return False
class ExtractionRWX(Signature):
name = "extraction_rwx"
description = "CAPE detection: Extraction"
severity = 1
categories = ["allocation"]
authors = ["Context"]
minimum = "1.2"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
filter_apinames = set(["NtAllocateVirtualMemory","NtProtectVirtualMemory","VirtualProtectEx"])
# PAGE_EXECUTE_READWRITE = 0x00000040
def on_call(self, call, process):
if call["api"] == "NtAllocateVirtualMemory":
protection = self.get_argument(call, "Protection")
regionsize = int(self.get_raw_argument(call, "RegionSize"), 0)
handle = self.get_argument(call, "ProcessHandle")
if handle == "0xffffffff" and protection == "0x00000040" and regionsize >= EXTRACTION_MIN_SIZE:
return True
if call["api"] == "VirtualProtectEx":
protection = self.get_argument(call, "Protection")
size = int(self.get_raw_argument(call, "Size"), 0)
handle = self.get_argument(call, "ProcessHandle")
if handle == "0xffffffff" and protection == "0x00000040" and size >= EXTRACTION_MIN_SIZE:
return True
elif call["api"] == "NtProtectVirtualMemory":
protection = self.get_argument(call, "NewAccessProtection")
size = int(self.get_raw_argument(call, "NumberOfBytesProtected"), 0)
handle = self.get_argument(call, "ProcessHandle")
if handle == "0xffffffff" and protection == "0x00000040" and size >= EXTRACTION_MIN_SIZE:
return True
| [
"kevoreilly@gmail.com"
] | kevoreilly@gmail.com |
463b6bb70cca8e70a802d8985558490eaa73d7b1 | e845f7f61ff76b3c0b8f4d8fd98f6192e48d542a | /djangocg/contrib/gis/sitemaps/__init__.py | 6765153ec0be6d9dcc32bad86c6a4420403d07bf | [
"BSD-3-Clause"
] | permissive | timothyclemans/djangocg | fd150c028013cb5f53f5a3b4fdc960a07fdaaa78 | 52cf28e046523bceb5d436f8e6bf61e7d4ba6312 | refs/heads/master | 2021-01-18T13:20:13.636812 | 2012-08-31T23:38:14 | 2012-08-31T23:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | # Geo-enabled Sitemap classes.
from djangocg.contrib.gis.sitemaps.georss import GeoRSSSitemap
from djangocg.contrib.gis.sitemaps.kml import KMLSitemap, KMZSitemap
| [
"timothy.clemans@gmail.com"
] | timothy.clemans@gmail.com |
23f542b49fb43c3fa7f261c20d841f00b6768b43 | dce4a52986ddccea91fbf937bd89e0ae00b9d046 | /jni-build/jni/include/tensorflow/contrib/quantization/python/dequantize_op_test.py | b1d47cc4a2edcea49cb0798241f46968a19d166a | [
"MIT"
] | permissive | Lab603/PicEncyclopedias | 54a641b106b7bb2d2f71b2dacef1e5dbeaf773a6 | 6d39eeb66c63a6f0f7895befc588c9eb1dd105f9 | refs/heads/master | 2022-11-11T13:35:32.781340 | 2018-03-15T05:53:07 | 2018-03-15T05:53:07 | 103,941,664 | 6 | 3 | MIT | 2022-10-28T05:31:37 | 2017-09-18T13:20:47 | C++ | UTF-8 | Python | false | false | 3,108 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Dequantize Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# TODO(petewarden) - Remove this ugly hack to get around Python linking problems
# with Bazel.
# pylint: disable=g-bad-import-order
from tensorflow.contrib.quantization import load_quantized_ops_so
from tensorflow.contrib.quantization.kernels import load_quantized_kernels_so
class DequantizeOpTest(tf.test.TestCase):
def __init__(self, method_name="runTest"):
super(DequantizeOpTest, self).__init__(method_name)
load_quantized_ops_so.Load()
load_quantized_kernels_so.Load()
def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
with self.test_session():
input_op = tf.constant(inputs, shape=[len(inputs)], dtype=dtype)
dequantized = tf.contrib.quantization.dequantize(
input_op, min_range, max_range)
tf_ans = dequantized.eval()
# TODO(vrv): Add support for DT_QINT32 quantization if needed.
type_dict = {
tf.quint8: np.uint8,
tf.qint8: np.int8,
tf.quint16: np.uint16,
tf.qint16: np.int16
}
self.assertTrue(dtype in type_dict.keys())
v_max = np.iinfo(type_dict[dtype]).max
v_min = np.iinfo(type_dict[dtype]).min
self.assertTrue(min_range >= v_min)
self.assertTrue(max_range <= v_max)
type_range = v_max - v_min
if v_min < 0:
half_range = (type_range + 1) / 2
else:
half_range = 0.0
np_ans = ((inputs.astype(np.float32) + half_range) *
(max_range - min_range) / type_range) + min_range
self.assertAllClose(tf_ans, np_ans)
def testBasicQuint8(self):
self._testDequantizeOp(np.array([0, 128, 255]),
0.0, 6.0, tf.quint8)
self._testDequantizeOp(np.array([0, 128, 255]),
0.0, 123.456, tf.quint8)
self._testDequantizeOp(np.array([0, 4, 42, 108, 243]),
5.0, 200.2, tf.quint8)
def testBasicQint8(self):
self._testDequantizeOp(np.array([-128, 0, 127]),
-1.0, 2.0, tf.qint8)
self._testDequantizeOp(np.array([-2, 4, -17]),
-5.0, -3.0, tf.qint8)
self._testDequantizeOp(np.array([0, -4, 42, -108]),
5.0, 40.0, tf.qint8)
if __name__ == "__main__":
tf.test.main()
| [
"super_mr.z@hotmail.comm"
] | super_mr.z@hotmail.comm |
00c989258ab015ff4cdd1d241d09ff53a6d04033 | 3c4a345c530d8a9df163fad6c438e504e196dda0 | /Challenge6.py | 1addee14b3688487646a776c43d0baf02040f09d | [] | no_license | vebzical/kryptokamut | 9d096810e987bc730f5ed710468b2e2b83401bd0 | 5363185d09199608600c9d1bc7c71e51311a1d84 | refs/heads/master | 2020-05-09T15:16:12.247060 | 2019-04-15T09:47:39 | 2019-04-15T09:47:39 | 181,227,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | #/usr/bin/python
import base64
import freqAnalysis
def XOR(input, key):
    """Byte-wise XOR of *input* with *key*, truncated to the shorter one."""
    return bytes(left ^ right for left, right in zip(input, key))
def findKeyLen(chiphertext):
    """Estimate the repeating-XOR key length (tried sizes 2..40).

    For each candidate size, averages the keysize-normalized Hamming
    distance between successive ciphertext chunks and returns the size
    with the lowest average.
    """
    final_dist = []
    for keysize in range(2,41):
        distances = []
        chunks = [chiphertext[i:i+keysize] for i in range(0, len(chiphertext), keysize)]
        for x in range(len(chunks)):
            if x >= (len(chunks)-1):
                break;
            one = chunks[x]
            two = chunks[x+1]
            distances.append(calcHammingDistance(one,two)/keysize)
            # NOTE(review): removing the two chunks while indexing by
            # position makes each loop step consume a disjoint pair, but
            # list.remove deletes by *value* and may drop a different,
            # equal chunk — confirm this is the intended pairing.
            chunks.remove(one)
            chunks.remove(two)
        final_dist.append([sum(distances)/len(distances), keysize])
    # Lowest average normalized distance wins.
    sorted_by_second = sorted(final_dist, key=lambda tup: tup[0])
    return sorted_by_second[0][1]
def calcHammingDistance(str1, str2):
    """Count the differing bits between two byte strings.

    Pairs bytes with zip (so the comparison is truncated to the shorter
    input), XORs each pair and accumulates the popcount.
    """
    distance = 0
    for a, b in zip(str1, str2):
        distance += bin(a ^ b).count('1')
    return distance
def XORSingleChar(text, key):
    """XOR every byte of *text* against the single byte value *key*."""
    return bytes(byte ^ key for byte in text)
def XORSingleCharBruteforce(input):
    """Try all 256 single-byte XOR keys against *input*.

    Returns the best candidate as ``[plaintext_bytes, key_byte, score]``,
    ranked by freqAnalysis's English-frequency score (higher is better).
    """
    messages = []
    for y in range(256):
        result = b''
        result = XORSingleChar(input, y)
        # Score the str() repr of the bytes; freqAnalysis expects a string.
        messages.append([result, y, freqAnalysis.englishFreqMatchScore(str(result))])
    return sorted(messages, key=lambda x: x[2], reverse=True)[0]
def XORRepeatingKey(input, key):
    """XOR *input* against *key* repeated cyclically over its whole length."""
    out = bytearray()
    position = 0
    for byte in input:
        if position >= len(key):
            position = 0
        out.append(byte ^ key[position])
        position += 1
    return bytes(out)
# --- Break repeating-key XOR (Cryptopals set 1, challenge 6) ---
input = ""
# Read the base64-encoded ciphertext, stripping line breaks.
with open('6.txt', 'r') as f:
    for line in f:
        input += line.rstrip()
chiphertext = base64.b64decode(input)
keylen = findKeyLen(chiphertext)
key = b""
plaintexts = []
#Do the Transpose
# Column i holds every byte encrypted with key byte i, so each column
# can be solved as an independent single-byte XOR.
for i in range(keylen):
    block = b''
    for j in range(i, len(chiphertext), keylen):
        block += bytes([chiphertext[j]])
    key += bytes([XORSingleCharBruteforce(block)[1]])  # index 1 = key byte
result = XORRepeatingKey(chiphertext, key)
plaintexts.append([result,key,freqAnalysis.englishFreqMatchScore(str(result))])
# Print the best-scoring candidate(s); with one entry this prints it.
maxscore = max(plaintexts, key=lambda x:x[2])[2]
for i in sorted(plaintexts, key=lambda x: x[2], reverse=True):
    if i[2] == maxscore:
        print(i)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
04dab0209f29ce27605490d094bcc4294c4d3171 | 3cbb592ca471540cc704a547f71f9d6b84669976 | /python/infpy/gp/examples/simple_example.py | f3ecb0972d8eb65d6185c5d3a3f8129c66167827 | [
"BSD-2-Clause"
] | permissive | JohnReid/infpy | a9c896a556abed8ed3abd5e56ca86d31a7a731c0 | 1b825ba7a60f0a0489df5f41b273374aef628a60 | refs/heads/master | 2021-01-01T16:56:27.743588 | 2018-06-28T21:44:17 | 2018-06-28T21:44:17 | 13,087,025 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | #
# Copyright John Reid 2008
#
import numpy
import pylab
import infpy.gp
def save_fig(prefix):
"Save current figure in extended postscript and PNG formats."
pylab.savefig('%s.png' % prefix, format='PNG')
pylab.savefig('%s.eps' % prefix, format='EPS')
# Generate some noisy data from a modulated sin curve
x_min, x_max = 10.0, 100.0
X = infpy.gp.gp_1D_X_range(x_min, x_max) # input domain
Y = 10.0 * numpy.sin(X[:, 0]) / X[:, 0] # noise free output
Y = infpy.gp.gp_zero_mean(Y) # shift so mean=0.
e = 0.03 * numpy.random.normal(size=len(Y)) # noise
f = Y + e # noisy output
# plot the noisy data
pylab.figure()
pylab.plot(X[:, 0], Y, 'b-', label='Y')
pylab.plot(X[:, 0], f, 'rs', label='f')
pylab.legend()
save_fig('simple-example-data')
pylab.close()
def predict_values(K, file_tag, learn=False):
"Create a GP with kernel K and predict values. Optionally learn K's hyperparameters if learn==True."
gp = infpy.gp.GaussianProcess(X, f, K)
if learn:
infpy.gp.gp_learn_hyperparameters(gp)
pylab.figure()
infpy.gp.gp_1D_predict(gp, 90, x_min - 10., x_max + 10.)
save_fig(file_tag)
pylab.close()
# import short forms of GP kernel names
import infpy.gp.kernel_short_names as kernels
# create a kernel composed of a squared exponential kernel and a small noise term
K = kernels.SE() + kernels.Noise(.1)
predict_values(K, 'simple-example-se')
# Try a different kernel with a shorter characteristic length scale
K = kernels.SE([.1]) + kernels.Noise(.1)
predict_values(K, 'simple-example-se-shorter')
# Try another kernel with a lot more noise
K = kernels.SE([4.]) + kernels.Noise(1.)
predict_values(K, 'simple-example-more-noise')
# Try to learn kernel hyper-parameters
K = kernels.SE([4.0]) + kernels.Noise(.1)
predict_values(K, 'simple-example-learnt', learn=True)
| [
"johnbaronreid@netscape.net"
] | johnbaronreid@netscape.net |
26c2daefb72b9b841d51c34f8ad6f2f23053f412 | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/modules/cloud/cloudstack/cs_zone_facts.py | 92c815e95756f4fd8ec4bffffdc80dfdaf421e53 | [
"MIT"
] | permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 4,772 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_zone_facts
short_description: Gathering facts of zones from Apache CloudStack based clouds.
description:
- Gathering facts from the API of a zone.
- Sets Ansible facts accessable by the key C(cloudstack_zone) and since version 2.6 also returns results.
version_added: '2.1'
author: René Moser (@resmo)
options:
name:
description:
- Name of the zone.
type: str
required: true
aliases: [ zone ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Gather facts from a zone
cs_zone_facts:
name: ch-gva-1
register: zone
delegate_to: localhost
- name: Show the returned results of the registered variable
debug:
var: zone
- name: Show the facts by the ansible_facts key cloudstack_zone
debug:
var: cloudstack_zone
'''
RETURN = '''
---
id:
description: UUID of the zone.
returned: success
type: str
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the zone.
returned: success
type: str
sample: zone01
dns1:
description: First DNS for the zone.
returned: success
type: str
sample: 8.8.8.8
dns2:
description: Second DNS for the zone.
returned: success
type: str
sample: 8.8.4.4
internal_dns1:
description: First internal DNS for the zone.
returned: success
type: str
sample: 8.8.8.8
internal_dns2:
description: Second internal DNS for the zone.
returned: success
type: str
sample: 8.8.4.4
dns1_ipv6:
description: First IPv6 DNS for the zone.
returned: success
type: str
sample: "2001:4860:4860::8888"
dns2_ipv6:
description: Second IPv6 DNS for the zone.
returned: success
type: str
sample: "2001:4860:4860::8844"
allocation_state:
description: State of the zone.
returned: success
type: str
sample: Enabled
domain:
description: Domain the zone is related to.
returned: success
type: str
sample: ROOT
network_domain:
description: Network domain for the zone.
returned: success
type: str
sample: example.com
network_type:
description: Network type for the zone.
returned: success
type: str
sample: basic
local_storage_enabled:
description: Local storage offering enabled.
returned: success
type: bool
sample: false
securitygroups_enabled:
description: Security groups support is enabled.
returned: success
type: bool
sample: false
guest_cidr_address:
description: Guest CIDR address for the zone
returned: success
type: str
sample: 10.1.1.0/24
dhcp_provider:
description: DHCP provider for the zone
returned: success
type: str
sample: VirtualRouter
zone_token:
description: Zone token
returned: success
type: str
sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
tags:
description: List of resource tags associated with the zone.
returned: success
type: dict
sample: [ { "key": "foo", "value": "bar" } ]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
)
class AnsibleCloudStackZoneFacts(AnsibleCloudStack):
    """Fact gatherer for a single CloudStack zone (see module docs above)."""

    def __init__(self, module):
        super(AnsibleCloudStackZoneFacts, self).__init__(module)
        # Map CloudStack API response fields to the snake_case keys
        # documented in RETURN; fields not listed here fall back to the
        # AnsibleCloudStack base-class defaults.
        self.returns = {
            'dns1': 'dns1',
            'dns2': 'dns2',
            'internaldns1': 'internal_dns1',
            'internaldns2': 'internal_dns2',
            'ipv6dns1': 'dns1_ipv6',
            'ipv6dns2': 'dns2_ipv6',
            'domain': 'network_domain',
            'networktype': 'network_type',
            'securitygroupsenabled': 'securitygroups_enabled',
            'localstorageenabled': 'local_storage_enabled',
            'guestcidraddress': 'guest_cidr_address',
            'dhcpprovider': 'dhcp_provider',
            'allocationstate': 'allocation_state',
            'zonetoken': 'zone_token',
        }
    def get_zone(self):
        # Zone resolution is implemented in the base class; this thin
        # override only makes the call site explicit.
        return super(AnsibleCloudStackZoneFacts, self).get_zone()
def main():
    """Entry point: build the module arg spec, gather zone facts, exit."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        zone=dict(required=True, aliases=['name']),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    acs_zone_facts = AnsibleCloudStackZoneFacts(module=module)
    # Populates both the ansible_facts key `cloudstack_zone` and (per the
    # module DOCUMENTATION, since 2.6) the returned results.
    result = acs_zone_facts.get_result_and_facts(
        facts_name='cloudstack_zone',
        resource=acs_zone_facts.get_zone()
    )
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| [
"skydevapp@gmail.com"
] | skydevapp@gmail.com |
7707734bcaccebbdf25b7547c1dcf12bc305d2b3 | d8c899ca71e511ec0b60b79f11eeb08077ad7b7b | /stubs/pytest/mark.pyi | d1733cfb0524484bc786f3bdab20d218b6b2edca | [
"Apache-2.0"
] | permissive | Michaelll123/sensibility | ff9972baa979745e91f96a1e4953ebe42d7258b5 | 7436ac2fd4faf100b5e21c28b61a431adce06091 | refs/heads/master | 2023-08-30T05:57:05.750677 | 2021-11-14T09:45:57 | 2021-11-14T09:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | pyi | from typing import Callable
# Minimal local stub of pytest's `mark` namespace (type information only).
class skip:
    # Usable both as a direct decorator and as a decorator factory
    # (skip / skip(reason=...)).
    def __init__(self, test: Callable=None, reason: str=None) -> None: ...
    def __call__(self, test: Callable) -> Callable: ...
def skipif(condition: bool, reason: str=None) -> Callable[[Callable], Callable]: ...
# NOTE(review): the parameter below is named `Callable`, shadowing the
# typing import — presumably a typo for `test: Callable`; confirm before
# relying on keyword calls to xfail.
def xfail(Callable) -> Callable: ...
| [
"easantos@ualberta.ca"
] | easantos@ualberta.ca |
8888104655043e44fc68662cec6bd63cc8bbddee | fced25b055ec68ee522bd156c3be5902172beb55 | /Pages/logs/Actions-logs/onload.py | c209484f123b9b8aeafa573b8463db94067a17c0 | [] | no_license | EAC-Technology/eApp-Builder | 3cff5576139b78fc507406e31a4b5201eab436b7 | c3bbadde24330fb2dff4aa2c32cc6b11e044fbc9 | refs/heads/master | 2021-08-16T23:56:54.241674 | 2017-11-20T14:15:10 | 2017-11-20T14:15:10 | 111,399,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | """
"""
from ProSuiteLogsPage import ProSuiteLogsPage
# `self` is the page/component instance supplied by the eApp Builder
# runtime that executes this onload script; it is not defined in this
# file.
page = ProSuiteLogsPage(self)
# Map the logical widget names ProSuiteLogsPage expects onto this
# page's concrete vdom widgets.
page.vdom_objects = {
    "growl": self.growl,
    "logs.data": self.logs_cnt.hpt,
    "topbar.download.btn": self.dwnloadlogs_btn,
    "topbar.autoupdate.btn": self.autoupdate.checkbtn,
    "dialogs.download": self.dwnload_logs_dialog,
    "dialogs.download.close": self.dwnload_logs_dialog.close_btn,
    "popup.loglevel": self.loglevelsform,
    "popup.loglevel.form": self.loglevelsform,
    "popup.loglevel.form.list": self.loglevelsform.lv,
    "popup.loglevel.form.hide": self.loglevelsform.hidebtn,
    "popup.loglevel.form.reset": self.loglevelsform.resetbtn,
    "popup.loglevel.form.submit": self.loglevelsform.submitbtn,
    "popup.loggers": self.loggersform,
    "popup.loggers.form": self.loggersform,
    "popup.loggers.form.list": self.loggersform.lg,
    "popup.loggers.form.reset": self.loggersform.resetbtn,
    "popup.loggers.form.hide": self.loggersform.hidebtn,
    "popup.loggers.form.submit": self.loggersform.submitbtn,
    }
page.run('onload') | [
"alain.abraham@web-atrio.com"
] | alain.abraham@web-atrio.com |
6d92d61ee82ad1d16aafdda4e576c854454eb802 | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/abc/abc301-400/abc302/b.py | 289515c914038268c91d8ae04c92359c07516a7e | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 960 | py | h, w = map(int, input().split())
# Grid search: find the word "snuke" written in a straight line (any of
# 8 directions) in the h x w character grid and print the 1-indexed
# coordinates of its five letters. h and w are read from input above.
s = [list(input()) for _ in range(h)]
target = list("snuke")
# The 8 direction vectors: horizontal, vertical and both diagonals.
dxy = [[1, 0], [-1, 0], [0, 1], [0, -1], [1, 1], [-1, 1], [1, -1], [-1, -1]]
for i in range(h):
    for j in range(w):
        if target[0] == s[i][j]:
            for dx, dy in dxy:
                flag = True
                ans = [f"{i + 1} {j + 1}"]
                for k in range(1, 5):
                    # Abort this direction if the k-th letter would fall
                    # outside the grid.
                    if i + k * dy < 0 or i + k * dy >= h:
                        flag = False
                        break
                    if j + k * dx < 0 or j + k * dx >= w:
                        flag = False
                        break
                    if target[k] == s[i + k * dy][j + k * dx]:
                        ans.append(f"{i + k * dy + 1} {j + k * dx + 1}")
                    else:
                        flag = False
                        break
                if flag:
                    # First full match wins; the problem statement
                    # presumably guarantees uniqueness.
                    print(*ans, sep="\n")
                    exit()
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
518b4196e8bf056b4b5e8da9a147b688b1451695 | 6634436cf4f0e4d674cf497e57e5750f9ac415aa | /phylopandas/treeio/write.py | cbb1ec02fff13e127e16464606f9e2e1c78e1529 | [
"BSD-3-Clause"
] | permissive | ScottCarrara/phylopandas | dc946d552ad48b6314a60e845356318b33fbe860 | f163c4a2b9369eb32f6c8f3793f711f6fe4e6130 | refs/heads/master | 2020-05-19T19:44:45.752770 | 2018-11-12T06:55:47 | 2018-11-12T06:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,846 | py | import pandas
import dendropy
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
taxon_col : str (default='sequence')
Sequence column name in DataFrame.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (default='id')
ID column name in DataFrame
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def _pandas_df_to_dendropy_tree(
    df,
    taxon_col='uid',
    taxon_annotations=[],
    node_col='uid',
    node_annotations=[],
    branch_lengths=True,
    ):
    """Turn a phylopandas dataframe into a dendropy tree.

    Parameters
    ----------
    df : DataFrame
        DataFrame containing tree data ('type', 'id', 'parent' and,
        when branch_lengths is True, 'length' columns).
    taxon_col : str (optional)
        Column in dataframe to label the taxon. If None, the index will be used.
    taxon_annotations : str
        List of columns to annotation in the tree taxon.
    node_col : str (optional)
        Column in dataframe to label the nodes. If None, the index will be used.
    node_annotations : str
        List of columns to annotation in the node taxon.
    branch_lengths : bool
        If True, includes branch lengths.

    Returns
    -------
    dendropy.Tree
        Tree whose topology follows the dataframe's 'parent'/'id' columns
        (the row whose 'parent' is None becomes the root).

    Raises
    ------
    Exception
        If taxon_col or node_col is not a string.
    """
    if isinstance(taxon_col, str) is False:
        raise Exception("taxon_col must be a string.")
    if isinstance(node_col, str) is False:
        # Fixed: previously reported "taxon_col" for a bad node_col.
        raise Exception("node_col must be a string.")
    # Construct a list of nodes from dataframe.
    taxon_namespace = dendropy.TaxonNamespace()
    nodes = {}
    for idx in df.index:
        # Get node data.
        data = df.loc[idx]
        # Get taxon for node (only leaf rows carry a taxon).
        taxon = None
        if data['type'] == 'leaf':
            taxon = dendropy.Taxon(label=data[taxon_col])
            # Add annotations data.
            for ann in taxon_annotations:
                taxon.annotations.add_new(ann, data[ann])
            taxon_namespace.add_taxon(taxon)
        # Get label for node.
        label = data[node_col]
        # Get edge length.
        edge_length = None
        if branch_lengths is True:
            edge_length = data['length']
        # Build a node
        n = dendropy.Node(
            taxon=taxon,
            label=label,
            edge_length=edge_length
        )
        # Add node annotations
        for ann in node_annotations:
            n.annotations.add_new(ann, data[ann])
        nodes[idx] = n
    # Build branching pattern for nodes.
    root = None
    for idx, node in nodes.items():
        # Get node data.
        data = df.loc[idx]
        # Get children nodes
        children_idx = df[df['parent'] == data['id']].index
        children_nodes = [nodes[i] for i in children_idx]
        # Set child nodes
        nodes[idx].set_child_nodes(children_nodes)
        # Check if this is root (exactly one row is expected to have a
        # None parent).
        if data['parent'] is None:
            root = nodes[idx]
    # Build tree.
    tree = dendropy.Tree(
        seed_node=root,
        taxon_namespace=taxon_namespace
    )
    return tree
def _write(
    df,
    filename=None,
    schema='newick',
    taxon_col='uid',
    taxon_annotations=[],
    node_col='uid',
    node_annotations=[],
    branch_lengths=True,
    **kwargs
):
    """Write a phylopandas tree DataFrame to various formats.

    Parameters
    ----------
    df : DataFrame
        DataFrame containing tree data.
    filename : str
        filepath to write out tree. If None, will return string.
    schema : str
        tree format to write out.
    taxon_col : str (optional)
        Column in dataframe to label the taxon. If None, the index will be used.
    taxon_annotations : list
        List of columns to annotate in the tree taxon.
    node_col : str (optional)
        Column in dataframe to label the nodes. If None, the index will be used.
    node_annotations : list
        List of columns to annotate in the node taxon.
    branch_lengths : bool
        If True, includes branch lengths.
    **kwargs :
        Extra keyword arguments forwarded to ``dendropy.Tree.write``.

    Returns
    -------
    str or None
        The serialized tree when no ``filename`` is given, otherwise None.
    """
    # NOTE: the mutable list defaults above are only iterated, never mutated,
    # so sharing them across calls is safe.
    tree = _pandas_df_to_dendropy_tree(
        df,
        taxon_col=taxon_col,
        taxon_annotations=taxon_annotations,
        node_col=node_col,
        node_annotations=node_annotations,
        branch_lengths=branch_lengths,
    )
    # BUG FIX: removed a stray debug ``print(schema)`` that polluted stdout
    # on every write.
    if filename is not None:
        tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
    else:
        return tree.as_string(schema=schema)
def _write_method(schema):
    """Build a DataFrame-accessor method that writes trees in *schema*."""
    def method(
            self,
            filename=None,
            schema=schema,
            taxon_col='uid',
            taxon_annotations=[],
            node_col='uid',
            node_annotations=[],
            branch_lengths=True,
            **kwargs):
        # Delegate to the generic writer with the accessor's backing data.
        return _write(
            self._data,
            filename=filename,
            schema=schema,
            taxon_col=taxon_col,
            taxon_annotations=taxon_annotations,
            node_col=node_col,
            node_annotations=node_annotations,
            branch_lengths=branch_lengths,
            **kwargs
        )
    # Attach schema-specific documentation to the generated method.
    method.__doc__ = _write_doc_template(schema)
    return method
def _write_function(schema):
    """Build a module-level function that writes trees in *schema*."""
    def func(
            data,
            filename=None,
            schema=schema,
            taxon_col='uid',
            taxon_annotations=[],
            node_col='uid',
            node_annotations=[],
            branch_lengths=True,
            **kwargs):
        # Delegate to the generic writer.
        return _write(
            data,
            filename=filename,
            schema=schema,
            taxon_col=taxon_col,
            taxon_annotations=taxon_annotations,
            node_col=node_col,
            node_annotations=node_annotations,
            branch_lengths=branch_lengths,
            **kwargs
        )
    # Attach schema-specific documentation to the generated function.
    func.__doc__ = _write_doc_template(schema)
    return func
def to_dendropy(
        data,
        taxon_col='uid',
        taxon_annotations=[],
        node_col='uid',
        node_annotations=[],
        branch_lengths=True):
    """Convert a phylopandas DataFrame into a ``dendropy`` tree object."""
    options = dict(
        taxon_col=taxon_col,
        taxon_annotations=taxon_annotations,
        node_col=node_col,
        node_annotations=node_annotations,
        branch_lengths=branch_lengths,
    )
    return _pandas_df_to_dendropy_tree(data, **options)
# Module-level writer functions, one per supported tree serialization schema.
to_newick = _write_function('newick')
to_nexml = _write_function('nexml')
to_nexus_tree = _write_function('nexus')
| [
"zachsailer@gmail.com"
] | zachsailer@gmail.com |
30a59c7f55545090058475dd238a15e2ff6ab9e4 | 4b1b00f977c27d71b6fe0fab64a9ca09a85a4c09 | /src/vis/visualization.py | 89649b4a0d43ebb6222831997b7667cc2a2da414 | [] | no_license | zhouyanasd/SNN_framework | 67e3d31ef9e709d0b8cb1d18618291f264b006f6 | ba3ab08ac350507120d1ab840701b27014db70e2 | refs/heads/master | 2021-09-29T02:38:10.144613 | 2017-10-24T02:13:10 | 2017-10-24T02:13:10 | 108,064,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | import src
import numpy as np
import matplotlib.pyplot as plt
class Visualization(object):
    """Matplotlib plotting helpers for spiking-network simulation runs."""

    def __init__(self, total_time):
        # Shared time axis used by the membrane-potential plots.
        self.t = np.arange(0, total_time)

    def show(self):
        """Display all pending matplotlib figures."""
        plt.show()

    def I(self, neuron):
        """Print and plot the input current of a single neuron."""
        current = neuron.I
        print(current)
        plt.figure()
        plt.plot(self.t[:], current)
        plt.show()

    def add_fired_fig(self, fig, Liquid):
        """Scatter spike times of every neuron in the first reservoir."""
        ax = fig.add_subplot(3, 1, 2)
        cells = Liquid.reservoir_list[0].neuron_list
        for idx in range(cells.size):
            # Each neuron gets its own horizontal band (0.5 apart).
            for spike_time in cells[idx].fired_sequence:
                ax.scatter(spike_time, 0.5 * idx, alpha=.5)

    def add_data_fig(self, fig, data):
        """Plot a raw data series against its own index axis."""
        axis = np.arange(0, data.size)
        plt.plot(axis, data)

    def add_test_result(self, fig, result, label):
        """Overlay predictions (red dots) on the target series (blue dashes)."""
        axis = np.arange(0, result.size)
        plt.scatter(axis, result, color="red")
        plt.plot(label, "b--", color="blue")

    def add_neu_mem(self, fig, neu):
        """Plot one neuron's membrane potential over the shared time axis."""
        plt.plot(self.t, neu.membrane_potential[:, 0])

    def add_neu_mem_n(self, Liquid, start, end):
        """Plot membrane potentials for reservoir neurons in [start, end)."""
        for offset in range(end - start):
            fig = plt.figure(figsize=(15, 4))
            cell = Liquid.reservoir_list[0].neuron_list[offset + start]
            self.add_neu_mem(fig, cell)
| [
"zhouyanasd@gmail.com"
] | zhouyanasd@gmail.com |
ea2d61ae0c1e7e0201572dfe99c9e2b60c194f4c | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/python_re2_test_file/regexlib_8042.py | 7f3731ea0b2889199ad4f7f644c3eb4d522ba376 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # 8042
# ReDoS benchmark: polynomial-backtracking candidate with 5 digit groups.
# Pattern: [0-9]*[-| ][0-9]*[-| ][0-9]*[-| ][0-9]*[-| ][0-9]*
# Attack string template: "" + "0" * 20000 + "!1 _SLQ_1"
import re2 as re
from time import perf_counter

PATTERN = """[0-9]*[-| ][0-9]*[-| ][0-9]*[-| ][0-9]*[-| ][0-9]*"""
MATCHER = re.compile(PATTERN)
for i in range(150000):
    # Attack string grows by 10000 zeros per iteration.
    attack = "" + "0" * i * 10000 + "!1 _SLQ_1"
    started = perf_counter()
    MATCHER.search(attack)
    elapsed = perf_counter() - started
    print(f"{i * 10000}: took {elapsed} seconds!")
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
0542df0613cdfe3e156d53aa7c489a965abbc72e | 3121b64e95d022b12585348070dff048ae879f68 | /render/normalised_ply.py | 39726f5e431102d53b395b35aa7b40c26fb8c952 | [
"MIT"
] | permissive | lt6253090/OcCo | 082d1e5371064c70f9185d9425ea4318539c1785 | 5936a6fe099fe0b5aa5da47e8ba828a72b3adcbf | refs/heads/master | 2022-11-26T14:19:17.070920 | 2020-08-03T13:52:21 | 2020-08-03T13:52:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # Copyright (c) 2020. Hanchen Wang, hw501@cam.ac.uk
import os, open3d, numpy as np
# List file recording each normalised model name (relative to root_dir).
File_ = open('ModelNet_flist_short.txt', 'w')

if __name__ == "__main__":
    root_dir = "../data/ModelNet_subset/"
    try:
        for root, dirs, files in os.walk(root_dir, topdown=False):
            for file in files:
                # BUG FIX: endswith() instead of a substring test — '.ply'
                # in the middle of a name would have produced a bogus
                # output path via the replace() below.
                if file.endswith('.ply'):
                    amesh = open3d.io.read_triangle_mesh(os.path.join(root, file))
                    out_file_name = os.path.join(root, file).replace('.ply', '_normalised.obj')
                    # Centre the mesh on the origin, then scale by the
                    # farthest-vertex radius.
                    center = amesh.get_center()
                    amesh.translate(-center)
                    maxR = (np.asarray(amesh.vertices)**2).sum(axis=1).max()**(1/2)
                    # we found divided by (2*maxR) has best rendered visualisation results
                    amesh.scale(1/(2*maxR))
                    open3d.io.write_triangle_mesh(out_file_name, amesh)
                    File_.writelines(out_file_name.replace('.obj', '').replace(root_dir, '') + '\n')
                    print(out_file_name)
    finally:
        # BUG FIX: the list file was never closed, so buffered lines could
        # be lost when the process exits.
        File_.close()
| [
"hc.wang96@gmail.com"
] | hc.wang96@gmail.com |
10eb4d49285661fe5c622e13b644168cca0601b2 | 56ca0c81e6f8f984737f57c43ad8d44a84f0e6cf | /src/ewaluacja2021/xlsy.py | 4dfb33df91b55d730ce55579e99c89234071e548 | [
"MIT"
] | permissive | iplweb/bpp | c40f64c78c0da9f21c1bd5cf35d56274a491f840 | a3d36a8d76733a479e6b580ba6ea57034574e14a | refs/heads/dev | 2023-08-09T22:10:49.509079 | 2023-07-25T04:55:54 | 2023-07-25T04:55:54 | 87,017,024 | 2 | 0 | NOASSERTION | 2023-03-04T04:02:36 | 2017-04-02T21:22:20 | Python | UTF-8 | Python | false | false | 7,162 | py | import os
from decimal import Decimal
import openpyxl
from django.db.models import Sum, Value
from ewaluacja2021.const import LATA_2017_2018, LATA_2019_2021
from ewaluacja2021.reports import get_data_for_report, write_data_to_report
from ewaluacja2021.util import autor2fn, output_table_to_xlsx
from bpp.models import Autor
class WyjsciowyXLSX:
    """Base writer for an evaluation XLSX report.

    Subclasses supply :meth:`metka` (the metadata header rows); this class
    drives workbook creation, the records table and saving the file.
    """

    def __init__(self, title, rekordy, dane, katalog_wyjsciowy):
        self.title = title
        self.rekordy = rekordy
        self.dane = dane
        self.katalog_wyjsciowy = katalog_wyjsciowy
        self.create_workbook()

    def create_workbook(self):
        # Fresh in-memory workbook; its active sheet is configured later.
        self.wb = openpyxl.Workbook()

    def initialize_worksheet(self):
        self.ws = self.wb.active
        if self.title:
            # Excel caps sheet names at 31 characters.
            self.ws.title = self.title[:31]

    def tabelka(self):
        """Render the records table into the worksheet."""
        write_data_to_report(self.ws, get_data_for_report(self.rekordy))

    def get_output_name(self):
        return f"{self.title}.xlsx"

    def zapisz(self):
        """Save the workbook into the output directory."""
        target = os.path.join(self.katalog_wyjsciowy, self.get_output_name())
        self.wb.save(target)

    def metka(self):
        # Header rows are report-type specific.
        raise NotImplementedError()

    def zrob(self):
        """Produce the full report: header, blank spacer row, table, save."""
        self.initialize_worksheet()
        self.metka()
        self.ws.append([])
        self.tabelka()
        self.zapisz()
class CalosciowyXLSX(WyjsciowyXLSX):
    """Whole-discipline ("3N") variant of the report header."""

    def metka(self):
        dane = self.dane
        # Static header rows derived from the precomputed report data.
        header_rows = [
            ["Parametry raportu 3N", "raport całościowy"],
            ["Stan na dzień/moment", dane["ostatnia_zmiana"]],
            ["Dyscyplina", dane["dyscyplina"]],
            ["Liczba N", dane["liczba_n"]],
            ["Liczba 0.8N", dane["liczba_0_8_n"]],
            ["Liczba 2.2N", dane["liczba_2_2_n"]],
            ["Liczba 3*N", Decimal("3.0") * dane["liczba_n"]],
            ["Suma slotów za lata 2017-2018", dane["sumy_slotow"][LATA_2017_2018]],
            ["Suma slotów za lata 2019-2021", dane["sumy_slotow"][LATA_2019_2021]],
        ]
        for row in header_rows:
            self.ws.append(row)
        # Totals over records actually reported for evaluation.
        sumy = self.rekordy.filter(do_ewaluacji=True).aggregate(
            suma_slot=Sum("slot"), suma_pkdaut=Sum("pkdaut")
        )
        self.ws.append(["Zebrana suma slotów za wszystkie prace", sumy["suma_slot"]])
        self.ws.append(["Zebrana suma PKDAut za wszystkie prace", sumy["suma_pkdaut"]])
class WypelnienieXLSX(CalosciowyXLSX):
    """Per-author "fill rate" variant of the whole-discipline report.

    Reuses the CalosciowyXLSX header and replaces the table with one row per
    author comparing reported slot/PKDaut sums against their maxima.
    """

    def get_data_for_report(self):
        # One row per distinct author appearing in the record set.
        id_autorow = self.rekordy.values_list("autor_id", flat=True).distinct()
        for autor in Autor.objects.filter(pk__in=id_autorow):
            # Per-author slot maxima; these dicts are keyed by the author's
            # pk as a *string*.
            maks_pkt_aut_calosc = self.dane["maks_pkt_aut_calosc"].get(str(autor.pk))
            maks_pkt_aut_monografie = self.dane["maks_pkt_aut_monografie"].get(
                str(autor.pk)
            )
            # Sums over the author's records reported for evaluation.
            sumy = self.rekordy.filter(do_ewaluacji=True, autor_id=autor.pk).aggregate(
                suma_slot=Sum("slot"),
                suma_pkdaut=Sum("pkdaut"),
            )
            # Same, restricted to monographs.
            # NOTE(review): monografia=Value("t") suggests a textual boolean
            # column — confirm against the model definition.
            sumy_monografie = self.rekordy.filter(
                do_ewaluacji=True, monografia=Value("t"), autor_id=autor.pk
            ).aggregate(
                suma_slot=Sum("slot"),
            )
            # Sums over *all* of the author's records, reported or not.
            sumy_wszystkie = self.rekordy.filter(autor_id=autor.pk).aggregate(
                suma_pkdaut=Sum("pkdaut"),
            )
            # NOTE(review): the divisions below raise if maks_pkt_aut_calosc
            # is None or zero for an author — confirm inputs guarantee it.
            yield [
                str(autor.id),
                autor.nazwisko + " " + autor.imiona,
                maks_pkt_aut_calosc,
                sumy["suma_slot"] or 0,
                (sumy["suma_slot"] or 0) / maks_pkt_aut_calosc,
                maks_pkt_aut_monografie,
                (sumy_monografie["suma_slot"] or 0),
                (sumy_monografie["suma_slot"] or 0) / maks_pkt_aut_calosc,
                sumy["suma_pkdaut"],
                sumy_wszystkie["suma_pkdaut"],
                (sumy["suma_pkdaut"] or 0) / (sumy_wszystkie["suma_pkdaut"] or 1),
            ]

    def write_data_to_report(self, ws: openpyxl.worksheet.worksheet.Worksheet, data):
        # Render the rows through the shared XLSX table helper; the first
        # column links to the author's page.
        output_table_to_xlsx(
            ws,
            "Przeszly",
            [
                # "ID elementu",
                "ID autora",
                "Nazwisko i imię",
                #
                "Maksymalna suma udziałów",
                "Sprawozdana suma udziałów",
                "Procent sprawozdanej sumy udziałów",
                #
                "Maksymalna suma udziałów - monografie",
                "Sprawozdana suma udziałów - monografie",
                "Procent sprawozdanej sumy udziałów - monografie",
                #
                "PKDaut prac sprawozdanych",
                "PKDaut wszystkich prac",
                "Procent PKDaut sprawozdanych",
            ],
            data,
            # NOTE(review): "{site_name}" looks like a placeholder the helper
            # is expected to substitute — verify output_table_to_xlsx does.
            first_column_url="https://{site_name}/bpp/autor/",
            column_widths={
                "A": 10,
                "B": 14,
                "C": 14,
                "D": 14,
                "E": 14,
                "F": 14,
                "G": 14,
                "H": 14,
                "I": 14,
                "J": 14,
                "K": 14,
                "L": 14,
            },
            autor_column_url=1,
        )

    def tabelka(self):
        # Override: stream the per-author rows into the worksheet.
        dane = self.get_data_for_report()
        self.write_data_to_report(self.ws, dane)
class AutorskiXLSX(WyjsciowyXLSX):
    """Single-author extract of the 3N report."""

    def __init__(self, autor, title, rekordy, dane, katalog_wyjsciowy):
        super().__init__(
            title=title,
            rekordy=rekordy,
            dane=dane,
            katalog_wyjsciowy=katalog_wyjsciowy,
        )
        self.autor = autor

    def metka(self):
        autor_key = str(self.autor.pk)
        self.ws.append(["Parametry raportu 3N", "wyciąg dla pojedynczego autora"])
        self.ws.append(["Stan na dzień/moment", self.dane["ostatnia_zmiana"]])
        self.ws.append(["Dyscyplina", self.dane["dyscyplina"]])
        self.ws.append([
            "Maks. suma slotów za wszytkie prace",
            self.dane["maks_pkt_aut_calosc"].get(autor_key),
        ])
        # Totals over all of the author's records reported for evaluation.
        wszystkie = self.rekordy.filter(do_ewaluacji=True).aggregate(
            suma_slot=Sum("slot"), suma_pkdaut=Sum("pkdaut")
        )
        self.ws.append(["Zebrana suma slotów za wszystkie prace", wszystkie["suma_slot"]])
        self.ws.append(["Zebrana suma PKDAut za wszystkie prace", wszystkie["suma_pkdaut"]])
        self.ws.append([
            "Maks. suma slotów za monografie",
            self.dane["maks_pkt_aut_monografie"].get(autor_key),
        ])
        # Totals restricted to monographs.
        monografie = self.rekordy.filter(do_ewaluacji=True, monografia=Value("t")).aggregate(
            suma_slot=Sum("slot"), suma_pkdaut=Sum("pkdaut")
        )
        self.ws.append(["Zebrana suma slotów za monografie", monografie["suma_slot"]])
        self.ws.append(["Zebrana suma PKDAut za monografie", monografie["suma_pkdaut"]])

    def get_output_name(self):
        # The file is named after the author rather than the report title.
        return autor2fn(self.autor) + ".xlsx"
| [
"michal.dtz@gmail.com"
] | michal.dtz@gmail.com |
e9b067b88c153881f73dfe6f385f65c0b6b2d567 | c34380b64145b4ce26df9b27c34139d08de27515 | /findSquare_1.py | 6691dfa3d4ddfc6a058b6d9f149dcc8a10a5ef7b | [] | no_license | codeandrew/python-algorithms | 531bc1574700cb7d822904f1e1ead9a596a85d29 | c71b0941f14825fcaa3fbb1429365ca1f28a3018 | refs/heads/master | 2023-04-28T23:56:01.283434 | 2023-04-05T03:06:22 | 2023-04-05T03:06:22 | 169,078,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import math
def is_square(n):
if n == 0 : return True
try :
root = math.sqrt(n)
return True if n % root == 0 else False
except :
return False
| [
"jeanandrewfuentes@gmail.com"
] | jeanandrewfuentes@gmail.com |
47f5d7568c984524b2c898478013521badca8f55 | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/network/v2016_09_01/models/frontend_ip_configuration.py | 29047c8dab7e49461129f986c35db4e775edb14a | [] | no_license | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 4,730 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class FrontendIPConfiguration(SubResource):
    """Frontend IP address of the load balancer.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar inbound_nat_rules: Read only. Inbound rules URIs that use this
     frontend IP.
    :ivar inbound_nat_pools: Read only. Inbound pools URIs that use this
     frontend IP.
    :ivar outbound_nat_rules: Read only. Outbound rules URIs that use this
     frontend IP.
    :ivar load_balancing_rules: Read only. Load balancing rules URIs that
     use this frontend IP.
    :param private_ip_address: The private IP address of the IP
     configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The Private IP allocation method.
     Possible values are: 'Static' and 'Dynamic'.
    :param subnet: The reference of the subnet resource.
    :param public_ip_address: The reference of the Public IP resource.
    :param provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    """

    # Fields the service populates; clients must not send them.
    _validation = {
        'inbound_nat_rules': {'readonly': True},
        'inbound_nat_pools': {'readonly': True},
        'outbound_nat_rules': {'readonly': True},
        'load_balancing_rules': {'readonly': True},
    }

    # Python attribute -> wire-format key/type mapping used by the serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[SubResource]'},
        'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[SubResource]'},
        'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[SubResource]'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, private_ip_address=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state=None, name=None, etag=None):
        super(FrontendIPConfiguration, self).__init__(id=id)
        # Server-populated collections start out empty on the client side.
        for server_only in ('inbound_nat_rules', 'inbound_nat_pools',
                            'outbound_nat_rules', 'load_balancing_rules'):
            setattr(self, server_only, None)
        self.private_ip_address = private_ip_address
        self.private_ip_allocation_method = private_ip_allocation_method
        self.subnet = subnet
        self.public_ip_address = public_ip_address
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
b30cf9c3fdd4d322be96bee80da321a1ad93e8f1 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Fisher/trend_MovingMedian/cycle_5/ar_/test_artificial_32_Fisher_MovingMedian_5__0.py | 9e5e9d5395d5905e2f44be0c8fe90fe178f72318 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 267 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
eb5df28853640bcb013ac32bdbc48d0220d3c72f | 56ba30f470ddf70d7705d847c0ab2f5f894739e7 | /_src/stage3/break_time.py | 854072eda0ac8a1499473b04f35fc3560197a767 | [] | no_license | chhikara0007/intro-to-programming | c989fd5892ed3fcb4c559e278a72a2d931e7c9e3 | 6a93f43c225b146c6874ee7821c25e1f61f821b0 | refs/heads/master | 2021-01-02T08:13:07.238853 | 2016-10-04T14:44:36 | 2016-10-04T14:44:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | import time
import time
import webbrowser
import random

# Three breaks in total, one every five seconds.
total_breaks = 3
web_list = ["http://www.google.com", "http://twitter.com",
            "http://www.youtube.com/watch?v=dQw4w9WgXcQ"]

print("This program started on " + time.ctime())
for _ in range(total_breaks):
    time.sleep(5)
    # Open a randomly chosen site in the default browser.
    webbrowser.open(random.choice(web_list))
"dadac123@gmail.com"
] | dadac123@gmail.com |
1679dfc05c5e136eb693e371474a8d6c679e4eb8 | 46732d613208ee4096fbbd3fd74f22146471d1ce | /wangyiyun_songs&lyrics/all_singer歌手情绪分析/陈粒/sentiments_test.py | 0eaf219349b004a76550efc38b24981a66396e3d | [] | no_license | cassieeric/python_crawler | 7cb02f612382801ae024e2cee70e0c2bcdba927c | 6d2b4db3d34183d729f6fd30555c6d6f04514260 | refs/heads/master | 2022-11-30T20:30:50.031960 | 2022-11-27T02:53:22 | 2022-11-27T02:53:22 | 118,204,154 | 322 | 283 | null | 2022-12-21T09:33:08 | 2018-01-20T03:17:14 | HTML | UTF-8 | Python | false | false | 867 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from snownlp import SnowNLP
# 积极/消极
# print(s.sentiments) # 0.9769551298267365 positive的概率
def get_word():
    """Read the lyric-keyword file and return its lines, one string each.

    The first line is skipped, matching the original reader (its first
    ``readline()`` result was never appended).  BUG FIX: the old sentinel
    loop also appended a spurious empty string at EOF, which was then fed
    to the sentiment analyzer; that artifact is no longer produced.  The
    redundant ``f.close()`` inside the ``with`` block was dropped too.
    """
    # NOTE(review): assumes the keyword file is UTF-8 -- confirm with the
    # script that produces it.
    with open("陈粒歌词关键字.txt", encoding="utf-8") as f:
        f.readline()  # skip the first line, as the original loop did
        return [line.strip('\r\n') for line in f]
def get_sentiment(word):
    """Print the SnowNLP positive-sentiment probability for *word*."""
    analyzer = SnowNLP(u'{}'.format(word))
    print(analyzer.sentiments)
if __name__ == '__main__':
    # Score every keyword extracted from the lyrics file.
    for word in get_word():
        get_sentiment(word)
| [
"noreply@github.com"
] | cassieeric.noreply@github.com |
d47e989a1e6cd0df97c8b0b1cff955d999fdb136 | abfff8ab3162f7003b51d3fdcc7897684d2d4e54 | /unicode.py | 1562ddfe3bc2f669b166f9b3b02d019d3f409711 | [] | no_license | RedKnite5/Junk | 972dc24c99fe30400ab35e77bb4b69abe9076190 | 93b5bb4b6138518724528770cf56ea1df10e95b4 | refs/heads/master | 2023-04-10T07:25:14.968070 | 2023-04-04T04:19:42 | 2023-04-04T04:19:42 | 143,909,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import io
# Dump printable ASCII followed by a wide swath of Unicode into a UTF-8
# file; code points the codec refuses to encode are simply skipped.
with io.open("unicode.txt", "w+", encoding="utf8") as out:
    for code in range(32, 127):
        out.write(chr(code))
    for code in range(161, 130_000):
        try:
            out.write(chr(code))
        except UnicodeEncodeError:
            pass
| [
"mr.awesome10000@gmail.com"
] | mr.awesome10000@gmail.com |
71511da2fd63661ffd2addee0f4b082d184b1312 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/cloudsign/apis/SaveMultiEvidenceRequest.py | bb03fd81acfbacb5edb45efe9f8045287987e3a4 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 2,364 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class SaveMultiEvidenceRequest(JDCloudRequest):
    """Multi-evidence-chain deposit request (POST /evidence:evidenceMultisave)."""

    def __init__(self, parameters, header=None, version="v1"):
        endpoint = '/evidence:evidenceMultisave'
        super(SaveMultiEvidenceRequest, self).__init__(endpoint, 'POST', header, version)
        self.parameters = parameters
class SaveMultiEvidenceParameters(object):
    """Parameter holder for the multi-evidence deposit request."""

    def __init__(self, businessId, file):
        """
        :param businessId: business serial number
        :param file: Base64 of the evidence-data JSON string
        """
        self.businessId = businessId
        self.file = file
        # Optional fields default to None until set through their setters.
        for optional in ('businessCode', 'lender', 'messageId',
                         'evidenceType', 'messageDate'):
            setattr(self, optional, None)

    def setBusinessCode(self, businessCode):
        """
        :param businessCode: (Optional) evidence-chain code
        """
        self.businessCode = businessCode

    def setLender(self, lender):
        """
        :param lender: (Optional) funder info (loans: ZY; bills: PJ_SHOUXIN -- credit, PJ_JIEKUAN -- loan)
        """
        self.lender = lender

    def setMessageId(self, messageId):
        """
        :param messageId: (Optional) request serial number
        """
        self.messageId = messageId

    def setEvidenceType(self, evidenceType):
        """
        :param evidenceType: (Optional) business type (JIEQIAN -- loan; PIAOJU -- bill)
        """
        self.evidenceType = evidenceType

    def setMessageDate(self, messageDate):
        """
        :param messageDate: (Optional) request time
        """
        self.messageDate = messageDate
| [
"jdcloud-api@jd.com"
] | jdcloud-api@jd.com |
763df4380182d72f1502e248f2df77a2e82f2563 | 69eb40f099dcc0ea326972ff63db1d4fd131641a | /test_upkern/test_fixtures/test_sources.py | 29cb9c77be4569f6d5831cd4e236626b66b79ad1 | [] | no_license | alunduil/upkern | 4bcc1485629fad8a0ab1c613f71b7ebc3ef9038e | 23d4a98077bc18a229425a3f53dedd89ef5356fd | refs/heads/master | 2021-01-21T00:18:03.646111 | 2014-01-15T03:06:53 | 2014-01-15T03:06:53 | 1,007,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,481 | py | # Copyright (C) 2014 by Alex Brandt <alunduil@alunduil.com>
#
# upkern is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
# Fixture data for the sources resolver tests.  Each entry pairs an input
# ``name`` with every property the resolver is expected to derive from it,
# plus the directory/configuration/package listings present on the
# simulated system.
SOURCES = {}

SOURCES['correct'] = []

# No name given: the resolver should pick the newest installed sources.
SOURCES['correct'].append(
    {
        'name': None,
        'directory_name': 'linux-3.12.6-gentoo',
        'package_name': '=sys-kernel/gentoo-sources-3.12.6',
        'binary_name': 'bzImage-3.12.6-gentoo',
        'configuration_name': 'config-3.12.6-gentoo',
        'system_map_name': 'System.map-3.12.6-gentoo',
        'kernel_index': 3012006000,
        'kernel_suffix': '-3.12.6-gentoo',
        'portage_configuration': { 'MAKEOPTS': '-j5' },
        'source_directories': [
            'linux-3.12.6-gentoo',
            'linux-3.12.5-gentoo',
            'linux-3.10.7-gentoo',
        ],
        'configuration_files': [
            'config-3.12.6-gentoo',
            'config-3.12.5-gentoo',
            'config-3.10.7-gentoo',
        ],
        'package_names': [
            'sys-kernel/gentoo-sources-3.12.6',
            'sys-kernel/gentoo-sources-3.12.5',
            'sys-kernel/gentoo-sources-3.10.7',
        ],
    })

# Fully-qualified package atom as the name.
SOURCES['correct'].append(
    {
        'name': 'sys-kernel/gentoo-sources-3.12.6',
        'directory_name': 'linux-3.12.6-gentoo',
        'package_name': '=sys-kernel/gentoo-sources-3.12.6',
        'binary_name': 'bzImage-3.12.6-gentoo',
        'configuration_name': 'config-3.12.6-gentoo',
        'system_map_name': 'System.map-3.12.6-gentoo',
        'kernel_index': 3012006000,
        'kernel_suffix': '-3.12.6-gentoo',
        'portage_configuration': { 'MAKEOPTS': '-j5' },
        'source_directories': [
            'linux-3.12.6-gentoo',
            'linux-3.12.5-gentoo',
            'linux-3.10.7-gentoo',
        ],
        'configuration_files': [
            'config-3.12.6-gentoo',
            'config-3.12.5-gentoo',
            'config-3.10.7-gentoo',
        ],
        'package_names': [
            'sys-kernel/gentoo-sources-3.12.6',
            'sys-kernel/gentoo-sources-3.12.5',
            'sys-kernel/gentoo-sources-3.10.7',
        ],
    })

# Unqualified name with a revision suffix (-r1) and an older version.
SOURCES['correct'].append(
    {
        'name': 'gentoo-sources-3.9.11-r1',
        'directory_name': 'linux-3.9.11-gentoo-r1',
        'package_name': '=sys-kernel/gentoo-sources-3.9.11-r1',
        'binary_name': 'bzImage-3.9.11-gentoo-r1',
        'configuration_name': 'config-3.9.11-gentoo-r1',
        'system_map_name': 'System.map-3.9.11-gentoo-r1',
        'kernel_index': 3009011001,
        'kernel_suffix': '-3.9.11-gentoo-r1',
        'portage_configuration': { 'MAKEOPTS': '-j5' },
        'source_directories': [
            'linux-3.12.6-gentoo',
            'linux-3.12.5-gentoo',
            'linux-3.10.7-gentoo',
            'linux-3.9.11-gentoo-r1',
        ],
        'configuration_files': [
            'config-3.12.6-gentoo',
            'config-3.12.5-gentoo',
            'config-3.10.7-gentoo',
            'config-3.9.11-gentoo-r1',
        ],
        'package_names': [
            'sys-kernel/gentoo-sources-3.12.6',
            'sys-kernel/gentoo-sources-3.12.5',
            'sys-kernel/gentoo-sources-3.10.7',
            'sys-kernel/gentoo-sources-3.9.11-r1',
        ],
    })

# A different sources flavour (hardened) mixed in with gentoo-sources.
SOURCES['correct'].append(
    {
        'name': 'hardened-sources-3.11.7-r1',
        'directory_name': 'linux-3.11.7-hardened-r1',
        'package_name': '=sys-kernel/hardened-sources-3.11.7-r1',
        'binary_name': 'bzImage-3.11.7-hardened-r1',
        'configuration_name': 'config-3.11.7-hardened-r1',
        'system_map_name': 'System.map-3.11.7-hardened-r1',
        'kernel_index': 3011007001,
        'kernel_suffix': '-3.11.7-hardened-r1',
        'portage_configuration': { 'MAKEOPTS': '-j5' },
        'source_directories': [
            'linux-3.12.6-gentoo',
            'linux-3.12.5-gentoo',
            'linux-3.11.7-hardened-r1',
            'linux-3.10.7-gentoo',
        ],
        'configuration_files': [
            'config-3.12.6-gentoo',
            'config-3.12.5-gentoo',
            'config-3.11.7-hardened-r1',
            'config-3.10.7-gentoo',
        ],
        'package_names': [
            'sys-kernel/gentoo-sources-3.12.6',
            'sys-kernel/gentoo-sources-3.12.5',
            'sys-kernel/hardened-sources-3.11.7-r1',
            'sys-kernel/gentoo-sources-3.10.7',
        ],
    })

# Aggregate bucket covering every fixture above.
SOURCES['all'] = []
SOURCES['all'].extend(SOURCES['correct'])
| [
"alunduil@alunduil.com"
] | alunduil@alunduil.com |
3c55cf6714a04191758570aa165a7e286c861126 | 810ce1c1ac47743e253171ec7541c0e431d952c2 | /cosmic_py/tests/e2e/test_api.py | fc3d0125e3dc13ef34cb093cbd65112a7627ab15 | [] | no_license | hjlarry/practise-py | 91052c25dc7ab706c6234f6d657db76667a27124 | 871e06b9652d356f55e3888f1f7ea180ac2b1954 | refs/heads/master | 2022-09-11T17:47:48.557194 | 2022-08-10T02:07:24 | 2022-08-10T02:07:24 | 136,263,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | import pytest
import requests
import config
from . import api_client
from ..random_refs import random_sku, random_batchref, random_orderid
@pytest.mark.usefixtures("postgres_db")
@pytest.mark.usefixtures("restart_api")
def test_happy_path_returns_201_and_allocated_batch():
    """Allocate an order line end-to-end and read back the chosen batch.

    NOTE(review): the name says 201 but the assertion below expects 202
    (request accepted, processed later) -- consider renaming.
    """
    orderid = random_orderid()
    sku, othersku = random_sku(), random_sku("other")
    earlybatch = random_batchref(1)
    laterbatch = random_batchref(2)
    otherbatch = random_batchref(3)
    # Batches are posted newest-ETA first; allocation should still prefer
    # the batch with the earliest ETA.
    api_client.post_to_add_batch(laterbatch, sku, 100, "2011-01-02")
    api_client.post_to_add_batch(earlybatch, sku, 100, "2011-01-01")
    api_client.post_to_add_batch(otherbatch, othersku, 100, None)
    r = api_client.post_to_allocate(orderid, sku, qty=3)
    assert r.status_code == 202  # accepted for processing
    r = api_client.get_allocation(orderid)
    assert r.ok
    assert r.json() == [
        {"sku": sku, "batchref": earlybatch},
    ]
@pytest.mark.usefixtures("postgres_db")
@pytest.mark.usefixtures("restart_api")
def test_unhappy_path_returns_400_and_error_message():
    """Allocating an unknown SKU is rejected and leaves no allocation."""
    unknown_sku, orderid = random_sku(), random_orderid()
    # No batch for this SKU was ever posted, so allocation must fail.
    r = api_client.post_to_allocate(orderid, unknown_sku, qty=20, expect_success=False)
    assert r.status_code == 400
    assert r.json()["message"] == f"Invalid sku {unknown_sku}"
    # The order id must not resolve to any allocation afterwards.
    r = api_client.get_allocation(orderid)
    assert r.status_code == 404
| [
"hjlarry@163.com"
] | hjlarry@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.