| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26b7e297ba9b1adf0245c7865581ceb0bd4585a7
|
35b28312d4df205ec3758b6641d29f0509026f5a
|
/doom-train.py
|
4656fb546674a86c441977022e646cd2a31fa118
|
[] |
no_license
|
2hands10fingers/DoomMapGenTensorlfow
|
5760789ff3f7db23db4d641f34b003f589080f3b
|
2016fdb54fc207b8d63e6fb40edd5728776e789d
|
refs/heads/master
| 2023-07-09T00:34:17.742248
| 2020-12-31T02:57:37
| 2020-12-31T02:57:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,223
|
py
|
import glob
import time
import cv2
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow.keras import layers
from IPython import display
import pathlib
batch_size = 32
buffer_size = 6000
img_height = 128
img_width = 128
data_dir = 'map_images'
def create_dataset(img_folder):
img_data_array = []
class_name = []
for dir1 in os.listdir(img_folder):
for file in os.listdir(os.path.join(img_folder, dir1)):
image_path = os.path.join(img_folder, dir1, file)
image = cv2.imread(image_path)  # cv2.COLOR_BGR2RGB is a cvtColor code, not an imread flag
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (img_height, img_width), interpolation=cv2.INTER_AREA)
image = np.array(image, dtype='float32')
image = (image - 127.5) / 127.5  # normalize to [-1, 1] to match the generator's tanh output
img_data_array.append(image)
class_name.append(dir1)
return img_data_array, class_name
# extract the image array and class name
img_data, class_name = create_dataset(data_dir)
img_data_array = np.array(img_data)
#train_images = img_data_array.reshape(28, 28).astype('float32')
#train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
BUFFER_SIZE = 60000
BATCH_SIZE = 256
train_dataset = tf.data.Dataset.from_tensor_slices(img_data_array).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(8 * 8 * 256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((8, 8, 256)))
    assert model.output_shape == (None, 8, 8, 256)  # Note: None is the batch size
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 16, 16, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 32, 32, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 64, 64, 32)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # 3 RGB channels at 128x128 to match the resized training images (tanh output in [-1, 1])
    model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 128, 128, 3)
    return model
def make_discriminator_model():
model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            input_shape=[128, 128, 3]))  # match the 128x128 RGB training images
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator = make_generator_model()
discriminator = make_discriminator_model()
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
# We will reuse this seed over time (so it's easier
# to visualize progress in the animated GIF)
seed = tf.random.normal([num_examples_to_generate, noise_dim])
@tf.function
def train_step(images):
    noise = tf.random.normal([tf.shape(images)[0], noise_dim])  # use the actual batch size; the final batch may be smaller than BATCH_SIZE
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
for epoch in range(epochs):
start = time.time()
for image_batch in dataset:
train_step(image_batch)
# Produce images for the GIF as we go
display.clear_output(wait=True)
generate_and_save_images(generator,
epoch + 1,
seed)
# Save the model every 15 epochs
if (epoch + 1) % 15 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
# Generate after the final epoch
display.clear_output(wait=True)
generate_and_save_images(generator,
epochs,
seed)
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4, 4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i + 1)
        plt.imshow((predictions[i].numpy() * 127.5 + 127.5).astype('uint8'))
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
train(train_dataset, EPOCHS)
|
[
"gh0stmusc@gmail.com"
] |
gh0stmusc@gmail.com
|
d99053121d4bfa021eaadecc9fc9f08b41146c6c
|
f252f0a656b49e00fd11d32627f93c7e6d17d84c
|
/mytweets/mytweets/mytweets/wsgi.py
|
421d6c50cb974d97f95f30865e5a8ad9827973a1
|
[] |
no_license
|
nmanjos/SideProjects
|
0ab53e192af7ee078f77709f59ca7874234c6862
|
360eb815a80c0eaf6cb0133c6dc462d620a8c85f
|
refs/heads/master
| 2021-01-20T15:12:41.909324
| 2017-05-13T21:31:02
| 2017-05-13T21:31:02
| 90,731,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
"""
WSGI config for mytweets project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mytweets.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"nmanjos@gmail.com"
] |
nmanjos@gmail.com
|
c007af70fbf0c75d3bd5ca9e2afa6f199344e7ab
|
1e4eee5e5649cfad99a295fc20d0c2643324f9f0
|
/python/ch_07_parse_json.py
|
5ff92393b723bd7938fcf4595d8cfdd39f7b6238
|
[
"MIT"
] |
permissive
|
zeitguy/raspberrypi_cookbook_ed3
|
2ccfd6bfeab71378c3ecb78240b3aa3d78f9e46d
|
bccf35ea309f493c1a45c3b96dec45dc7cebb0d9
|
refs/heads/master
| 2020-06-14T04:19:52.506327
| 2019-06-28T15:46:42
| 2019-06-28T15:46:42
| 194,896,377
| 2
| 0
|
MIT
| 2019-07-02T16:10:37
| 2019-07-02T16:10:36
| null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
import json
s = '{"books" : [{"title" : "Programnming Arduino", "price" : 10.95}, {"title" : "Pi Cookbook", "price" : 19.95}]}''
j = json.loads(s)
print(j['books'][1]['title'])
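# A small usage extension beyond the book's listing (added for illustration):
# iterate over every entry in the parsed structure.
for book in j['books']:
    print(book['title'], book['price'])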
|
[
"evilgeniusauthor@gmail.com"
] |
evilgeniusauthor@gmail.com
|
e62a2b3a2e84d990ffa3c419908c3c812161cf7b
|
70cf2caf4e3f132772f2549e148a9206450e930d
|
/float.py
|
1833902139bcb0aa4d5e07f86a5b6d0dbfb61c98
|
[] |
no_license
|
y-lukianchenko/Online_G7-Lukianchenko
|
5d908a6b2ff7942238611e482a4d791b54ad709a
|
0ad71127d036de93ebfc085d5aa4b8bcf22e2901
|
refs/heads/master
| 2022-07-29T03:43:06.034663
| 2020-05-22T10:12:12
| 2020-05-22T10:12:12
| 266,067,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
num1 = float(15.55)
num2 = float(5.1)
res1 = num1 + num2
print(res1)
res2 = num1 - num2
print(res2)
res3 = num1 * num2
print(res3)
res4 = num1 / num2
print(res4)
|
[
"noreply@github.com"
] |
y-lukianchenko.noreply@github.com
|
65299b67215b65ad93d6eedceb4f45b2ff28fd2b
|
26a2b8d5f31adf0d0b39ffc4162462e2483864ac
|
/account/views.py
|
3330afa3843c525b1f673de7fc3f0a270ff313b4
|
[] |
no_license
|
tayogit/social-media
|
92d78740194ff6466bf4857995f809a1bc38691e
|
c833c4518cd8ca5023eb062b35781233c3ab9443
|
refs/heads/master
| 2020-04-03T10:18:10.315278
| 2018-10-29T10:00:48
| 2018-10-29T10:00:48
| 155,189,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,361
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
# forms for the application
from .forms import LoginForm,UserRegistrationForm,UserEditForm, ProfileEditForm
from django.contrib.auth.decorators import login_required
from .models import Profile
@login_required
def dashboard(request):
return render(request,
'account/dashboard.html',
{'section': 'dashboard'})
def user_login(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
user = authenticate(request,
username=cd['username'],
password=cd['password'])
if user is not None:
if user.is_active:
login(request, user)
return HttpResponse('Authenticated '\
'successfully')
else:
return HttpResponse('Disabled account')
else:
return HttpResponse('Invalid login')
else:
form = LoginForm()
return render(request, 'account/login.html', {'form': form})
def register(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data['password'])
# Save the User object
new_user.save()
# Create the user profile
Profile.objects.create(user=new_user)
return render(request,'account/register_done.html',
{'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request,'account/register.html',
{'user_form': user_form})
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,data=request.POST)
profile_form = ProfileEditForm(instance=request.user.profile,data=request.POST,files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, 'Profile updated '\
'successfully')
else:
messages.error(request, 'Error updating your profile')
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(instance=request.user.profile)
return render(request,'account/edit.html',{'user_form': user_form,'profile_form': profile_form})
@login_required
def user_list(request):
users = User.objects.filter(is_active=True)
return render(request,
'account/user/list.html',
{'section': 'people',
'users': users})
@login_required
def user_detail(request, username):
user = get_object_or_404(User,username=username,
is_active=True)
return render(request,
'account/user/detail.html',
{'section': 'people',
'user': user})
|
[
"tayonetu80@gmail.com"
] |
tayonetu80@gmail.com
|
8bce07ec0ffd7a73e3999b5625a7a45f625579be
|
995c75caf00e161c8dabe5e8baffc551d0f4c199
|
/deneme.py
|
288f186d8a1ba391c6bc49adf883d2456603b1dc
|
[] |
no_license
|
ebubekirtrkr/pythonWorkShop
|
cbfb757704ee5c7fe9c66f7a5e2bc0a8f160c75d
|
994d35b5dab4c2900a33b5f0f7d6dad3a8b3a32d
|
refs/heads/master
| 2020-03-31T19:20:07.938460
| 2018-11-04T11:36:52
| 2018-11-04T11:36:52
| 152,493,387
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
"""
You have 6 sticks of lengths 10, 20, 30, 40, 50, and 60 centimeters. Find the number of non-congruent
triangles that can be formed by using three of these sticks as sides
"""
# The length of any side of a triangle must be larger
# than the positive difference of the
# other two sides, but smaller than the sum of
# the other two sides.
"""
dizi1=[10,20,30,40,50,60]
dizi2=[10,20,30,40,50,60]
dizi3=[10,20,30,40,50,60]
a=0
b=1
c=2
for i in range(6):
x=dizi1[a]
y=dizi2[b]
z=dizi3[c]
if b==6:
a+=1
b=0
if c==6:
b+=1
c=0
print (x,y,z)
"""
|
[
"ebubekirtrkr@gmail.com"
] |
ebubekirtrkr@gmail.com
|
8d9ea83bcb06dc6778ae6293893aa91148d5be77
|
b793bec03050db49d205e913b33f67228716e7df
|
/music/views.py
|
72ca97ff88b047178300b400f8a768a99be2281b
|
[] |
no_license
|
tusharchopratech/django_practice_website
|
04fdc167398a9f2dff32b4f8fd79e8c64ce68a0d
|
2c8029e156d89c63bccdabbf26319af2b6838671
|
refs/heads/master
| 2021-01-24T10:17:21.111497
| 2016-09-22T09:51:02
| 2016-09-22T09:51:02
| 68,896,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
from .models import Album
def index(request):
all_albums = Album.objects.all()
context = {'all_albums': all_albums}
return render(request, 'music/index.html', context)
def detail(request, album_id):
return HttpResponse("<h2>Details for Album Id : " + str(album_id) + "</h2>")
|
[
"tusharchopratech@gmail.com"
] |
tusharchopratech@gmail.com
|
c276ac1e1368bb0c78fdf8bd019c946553a1afe7
|
8efbf0ae3d2eaf3b38e2e9686267d1795a55d9a6
|
/OldClasses/ALGO20/Exams/210128/SubsetSum-03/subsetSum0.py
|
c43489deb00e77078e8e4b693ca5985db4872c8d
|
[] |
no_license
|
giuper/teaching
|
e378a1708b64ef742a8879cab0d327783594df92
|
fe8e762e68c9b155106fe5eef54af36b5cbddad1
|
refs/heads/master
| 2021-12-05T15:33:51.099137
| 2021-11-28T10:05:17
| 2021-11-28T10:05:17
| 160,152,524
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
from back import BackTrack
class SubsetSum0(BackTrack):
def __init__(self,L,t):
super().__init__()
self.L=L
self.t=t
self.N=len(L)
self.sol=None
#required by BackTrack
def initState(self):
return [0,[None]*self.N]
#required by BackTrack
def nextAdmMove(self,state,lmove):
[i,S]=state
if i==self.N:
return None
if lmove==None:
nmove=0
if lmove==0:
nmove=1
if lmove==1:
nmove=None
return nmove
#required by BackTrack
# input: current state
# move
# output: new state obtained by making move
def makeMove(self,state,move):
[i,S]=state
S[i]=move
i=i+1
return [i,S]
#required by BackTrack
def setVisited(self,state):
pass
def isFinal(self,state):
[i,S]=state
sm=0
for j in range(i):
if S[j]==1:
sm=sm+self.L[j]
        if sm==self.t: # we are in a final state
            for j in range(i,self.N):
                S[j]=0 # zero out all the remaining moves
return True
else:
return False
def __str__(self):
for i in range(self.N):
if self.sol[1][i] is None or i>=self.sol[0]:
self.sol[1][i]=0
return "Input: "+str(self.L)+"\n"+"Soluzione: "+str(self.sol[1])+"\n"
|
[
"giuper@gmail.com"
] |
giuper@gmail.com
|
201f2a699968508b27aa6b449f252252fa81d03e
|
09090be1f75485e2a90760f2a5807e23d882519a
|
/blender_source/MH_Community/operators/loadpreset.py
|
61eaa1f943327e312959c2151d254ef32c1961b8
|
[] |
no_license
|
indigos33k3r/makehuman-plugin-for-blender
|
237d0efe667d14d3d0d6252b85358cc8474e33ff
|
bcb8d346a40c4368878a4aa17a5145ef2785189f
|
refs/heads/master
| 2021-01-16T13:04:49.410982
| 2020-02-18T20:24:39
| 2020-02-18T20:24:39
| 243,132,559
| 1
| 0
| null | 2020-02-26T00:29:44
| 2020-02-26T00:29:43
| null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import bpy
from ..mh_sync.presets import *
class MHC_OT_LoadPresetOperator(bpy.types.Operator):
"""Load an importer UI preset"""
bl_idname = "mh_community.load_preset"
bl_label = "Load preset"
bl_options = {'REGISTER'}
def execute(self, context):
what = context.scene.MhGeneralPreset
settings = None
if what == "DEFAULT":
settings = loadOrCreateDefaultSettings()
if what == "MAKETARGET":
settings = loadOrCreateMakeTargetSettings()
if what == "MAKECLOTHES":
settings = loadOrCreateMakeClothesSettings()
if settings is None:
self.report({'ERROR'}, "Could not find settings")
return {'FINISHED'}
applySettings(settings)
self.report({'INFO'}, "Presets " + what + " loaded")
return {'FINISHED'}
|
[
"joepal1976@hotmail.com"
] |
joepal1976@hotmail.com
|
d9dbc46f730cd9460b86bf76d94aea1a8a069325
|
09f8a3825c5109a6cec94ae34ea17d9ace66f381
|
/cohesity_management_sdk/models/label_attributes_info.py
|
08eba2372d66b27e54c38d866a4acc9e0ee70c61
|
[
"Apache-2.0"
] |
permissive
|
cohesity/management-sdk-python
|
103ee07b2f047da69d7b1edfae39d218295d1747
|
e4973dfeb836266904d0369ea845513c7acf261e
|
refs/heads/master
| 2023-08-04T06:30:37.551358
| 2023-07-19T12:02:12
| 2023-07-19T12:02:12
| 134,367,879
| 24
| 20
|
Apache-2.0
| 2023-08-31T04:37:28
| 2018-05-22T06:04:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
class LabelAttributesInfo(object):
"""Implementation of the 'LabelAttributesInfo' model.
TODO: type description here.
Attributes:
entity_id (long|int): Entity ID of the label entity in EH.
name (string): Name of the label entity.
uuid (string): UUID of the label entity.
"""
# Create a mapping from Model property names to API property names
_names = {
"entity_id":'entityId',
"name":'name',
"uuid":'uuid',
}
def __init__(self,
entity_id=None,
name=None,
uuid=None,
):
"""Constructor for the LabelAttributesInfo class"""
# Initialize members of the class
self.entity_id = entity_id
self.name = name
self.uuid = uuid
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
entity_id = dictionary.get('entityId')
name = dictionary.get('name')
uuid = dictionary.get('uuid')
# Return an object of this model
return cls(
entity_id,
name,
uuid
)
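# A small usage sketch (hypothetical values, for illustration only):
# info = LabelAttributesInfo.from_dictionary({'entityId': 42, 'name': 'env', 'uuid': '1a2b-3c4d'})
# print(info.entity_id, info.name, info.uuid)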
|
[
"naveena.maplelabs@cohesity.com"
] |
naveena.maplelabs@cohesity.com
|
acde65fdaeaec0990da4019fca898054a5aff3db
|
7f909a2101a90e7ef15455debfdfcae635ade30c
|
/cgi-bin/sendNotification.py
|
770d9426e4620a8cab857f0f5036104e66bb634f
|
[
"Beerware"
] |
permissive
|
vppillai/simpleWebPushServer
|
fbb59b83dc6126ae5d5b6f17037c6baa06e9fd3a
|
4ab48ac70f4c345ca19e57d7b985630f1c922637
|
refs/heads/main
| 2023-05-15T01:09:27.196136
| 2021-06-08T02:31:13
| 2021-06-08T02:31:13
| 370,800,351
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
#!/usr/bin/python
import sys, json
from pywebpush import webpush
publicVapidKey = 'BJsj8xtpUqT2WfcFSYvabvf7F-sdMRuzi4jSvFJkzBWwD9r3OdF8yDaxo6wY-9k4t9_cSdlUtbBTZKPaFqsVlkw'
privateVapidKey = 'rHzgZzYeDMSUMqXyxwX9v-GZf1zyxGlLacqUj2Z6GDc'
with open('../sub.info') as f:  # forward slash: the Windows-only '..\sub.info' breaks on the Unix host implied by the shebang
sub_info = json.load(f)
x=webpush(sub_info,
json.dumps({"title":"test notification title","body":"test notification body"}),
privateVapidKey,
vapid_claims={"sub": "mailto:vysakhpillai@gmail.com"})
print(x)
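# For reference, sub.info is expected to hold the push subscription object saved
# from the browser, shaped roughly like (illustrative values only):
# {"endpoint": "https://fcm.googleapis.com/fcm/send/...",
#  "keys": {"p256dh": "...", "auth": "..."}}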
|
[
"noreply@github.com"
] |
vppillai.noreply@github.com
|
8ad388a47337aaef4482167896b9124cda06d91b
|
24c9128a45173e24845f7f682adf50f793c1e723
|
/BP/etfocs/main.py
|
8c635d91b00dff7b56705a606555734ee6e8bc3d
|
[] |
no_license
|
muhamedparic/RIM1
|
76421c0bf352a0a8952779f2245ec967c87327af
|
f8082dc4f99514b1329841efc0a7122d7cc6415e
|
refs/heads/master
| 2021-09-04T03:56:16.695478
| 2018-01-15T15:40:38
| 2018-01-15T15:40:38
| 107,144,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,908
|
py
|
from flask import Flask, request, abort, render_template
import json
import dbfunctions as db
import utils
app = Flask(__name__)
@app.route('/api/login', methods=['POST'])
def api_login():
if 'username' not in request.form or 'password' not in request.form:
return json.dumps({'success': False, 'reason': 'Username or password not provided'})
username = request.form.get('username')
password = request.form.get('password')
token, success = db.login(username, password)
if success:
return json.dumps({'success': True, 'token': token})
else:
return json.dumps({'success': False, 'reason': 'Invalid username or password'})
@app.route('/api/token_valid', methods=['POST'])
def api_token_valid():
if 'token' not in request.form or not utils.valid_json(request.form.get('token')):
return 'false'
return_value = db.token_valid(request.form.get('token'))
return 'true' if return_value else 'false'
@app.route('/')
@app.route('/main')
def index():
return render_template('main.html')
@app.route('/home')
def home():
return render_template('homepage.html')
@app.route('/profile')
def profile():
return render_template('profile-dashboard.html')
@app.route('/admin_profile')
def admin_profile():
return render_template('admin-profile-dashboard.html')
@app.route('/edit_competition_fill')
def edit_competition_fill():
return render_template('edit-competition-fill.html')
@app.route('/edit_competition_code')
def edit_competition_code():
return render_template('edit-competition-code.html')
@app.route('/edit_competition_multiple_choice')
def edit_competition_multiple_choice():
return render_template('edit-competition-multiple-choice.html')
@app.route('/edit_participants')
def edit_participants():
return render_template('edit-participants.html')
@app.route('/competition_fill')
def competition_fill():
return render_template('competition-fill.html')
@app.route('/competition_multiple_choice')
def competition_multiple_choice():
return render_template('competition-multiple-choice.html')
@app.route('/competition_code')
def competition_code():
return render_template('competition-code.html')
@app.route('/api/add_competition', methods=['POST'])
def api_add_competition():
required_fields = ('token', 'type', 'name')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.add_competition(request.form.get('token'), request.form.get('type'),
request.form.get('name'))
@app.route('/api/competition_list', methods=['POST'])
def api_competition_list():
required_fields = ('token',)
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.get_competition_list(request.form.get('token'))
@app.route('/api/add_question', methods=['POST'])
def api_add_question():
required_fields = ('token', 'type', 'competition', 'question_data')
answer_data = None
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
    # non-code questions must also carry answer_data
    if request.form.get('type') != 'code' and 'answer_data' not in request.form:
        return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
if request.form.get('type') != 'code':
answer_data = request.form.get('answer_data')
return db.add_question(request.form.get('token'), request.form.get('type'),
request.form.get('competition'), request.form.get('question_data'),
answer_data)
@app.route('/api/remove_question', methods=['POST'])
def api_remove_question():
required_fields = ('token', 'competition', 'question')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.remove_question(request.form.get('token'), request.form.get('competition'),
request.form.get('question'))
@app.route('/api/submit_solution', methods=['POST'])
def api_submit_solution():
pass
@app.route('/api/submit_answers', methods=['POST'])
def api_submit_answer():
required_fields = ('token', 'competition', 'answers')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.submit_answers(request.form.get('token'), request.form.get('competition'),
request.form.get('answers'))
@app.route('/api/add_competitor', methods=['POST'])
def api_add_competitor():
required_fields = ('token', 'competition', 'user')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.add_competitor(request.form.get('token'), request.form.get('competition'),
request.form.get('user'))
@app.route('/api/competition_questions', methods=['POST'])
def api_competition_questions():
required_fields = ('token', 'competition')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.get_competition_questions(request.form.get('token'), request.form.get('competition'))
@app.route('/api/competition_results', methods=['POST'])
def api_competition_results():
required_fields = ('token', 'competition')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.get_competition_results(request.form.get('token'), request.form.get('competition'))
@app.route('/api/add_task_file', methods=['POST'])
def api_add_task_file():
required_fields = ('token', 'competition', 'name')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
file_data = request.files.get('filedata')
filename = request.form.get('name') + '.pdf'
return db.add_task_file(request.form.get('token'), request.form.get('competition'),
request.form.get('name'), file_data, filename)
@app.route('/api/download_task_file', methods=['POST'])
def api_download_task_file():
required_fields = ('token', 'competition', 'name')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.download_task_file(request.form.get('token'), request.form.get('competition'),
request.form.get('name'))
@app.route('/api/task_list', methods=['POST'])
def api_get_task_list():
required_fields = ('token', 'competition')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.get_task_list(request.form.get('token'), request.form.get('competition'))
@app.route('/api/search_users', methods=['POST'])
def api_search_users():
required_fields = ('token', 'username')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.search_users(request.form.get('token'), request.form.get('username'))
@app.route('/api/user_competitions', methods=['POST'])
def api_user_competitions():
required_fields = ('token', 'username')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.get_user_competitions(request.form.get('token'), request.form.get('username'))
@app.route('/api/competition_participants', methods=['POST'])
def api_competition_participants():
required_fields = ('token', 'competition')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.get_competition_participants(request.form.get('token'), request.form.get('competition'))
@app.route('/api/available_competition_list', methods=['POST'])
def api_available_competition_list():
required_fields = ('token',)
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.get_available_competition_list(request.form.get('token'))
@app.route('/api/apply_for_competition', methods=['POST'])
def api_apply_for_competition():
required_fields = ('token', 'competition')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.apply_for_competition(request.form.get('token'), request.form.get('competition'))
@app.route('/api/application_list_admin', methods=['POST'])
def api_application_list_admin():
required_fields = ('token', 'competition')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.application_list_admin(request.form.get('token'), request.form.get('competition'))
@app.route('/api/application_list_all', methods=['POST'])
def api_application_list_all():
required_fields = ('token',)
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.application_list_all(request.form.get('token'))
@app.route('/api/number_of_competitors', methods=['POST'])
def api_number_of_competitors():
required_fields = ('token',)
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.number_of_competitors(request.form.get('token'))
@app.route('/api/competition_points', methods=['POST'])
def api_competition_points():
required_fields = ('token', 'competition')
if not all(field in request.form for field in required_fields):
return json.dumps({'success': False, 'reason': 'Missing one or more fields'})
return db.competition_points(request.form.get('token'), request.form.get('competition'))
@app.route('/secret/gitpull', methods=['GET'])
def secret_gitpull():
utils.gitpull()
return ""
if __name__ == '__main__':
try:
app.run(host='192.168.0.31', port=8000, processes=1)
except OSError:
app.run(host='localhost', port=8000, processes=1)
|
[
"mparic1@etf.unsa.ba"
] |
mparic1@etf.unsa.ba
|
2ff5ccf995197934f4471170a1775de327804b28
|
00728272713e3ea0c984c75760c53728ff58d384
|
/src/hw_detector/hw_types/hw_rm17_gen.py
|
6eb30f311b6f14c5fb6aaaaa534db98af713bd02
|
[
"Apache-2.0"
] |
permissive
|
akraino-edge-stack/ta-hw-detector
|
31e85c1035956a9a144b98e503bffd79a74d574b
|
d3defb51c389a04a84d2796e227af333afd79636
|
refs/heads/master
| 2023-05-26T21:25:06.266231
| 2020-01-22T15:29:29
| 2020-01-22T15:32:23
| 199,550,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
# Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hw_detector.hw_types.hw_rm16 import HWRM16
class HWRM17GEN(HWRM16):
def __init__(self):
super(HWRM17GEN, self).__init__()
self.matches = {'Board Product' : 'AR-D51B1U.*'}
self.hwtype = 'RM17GEN'
|
[
"janne.suominen@nokia.com"
] |
janne.suominen@nokia.com
|
5241707837f5a928867651ba78b0bcc67260efe1
|
e405781f3d65111d9932ebe93ab9a4c72e0ca04a
|
/src/core/views/unsplash_proxy.py
|
d456dc981e0f69c473725ea459f09f94e5c66467
|
[] |
no_license
|
CapedHero/karmaspace-backend
|
f8efbbc732ce421810a020165204bf79ea8d367e
|
c6027a2101f1327bb026ee5e810e1ed5c03345a2
|
refs/heads/main
| 2023-09-05T05:00:12.906577
| 2021-10-28T07:15:05
| 2021-10-28T07:15:05
| 397,170,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
from django.conf import settings
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from src.core.networking import session
@api_view(http_method_names=["GET"])
@permission_classes([IsAuthenticated])
def unsplash_proxy_view(request: Request, api_path: str) -> Response:
relayed_querystring = request.query_params.urlencode()
url = f"https://api.unsplash.com/{api_path}?{relayed_querystring}"
headers = {"Authorization": f"Client-ID {settings.UNSPLASH_ACCESS_KEY}"}
unsplash_response = session.get(url, headers=headers)
dj_response = Response(data=unsplash_response.json(), status=unsplash_response.status_code)
if "X-Total" in unsplash_response.headers:
dj_response.headers["X-Total"] = unsplash_response.headers["X-Total"]
return dj_response
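# Hypothetical URL wiring for this view (an assumption, not part of this module):
# from django.urls import re_path
# urlpatterns = [re_path(r"^unsplash/(?P<api_path>.+)$", unsplash_proxy_view)]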
|
[
"mc.wrzesniewski@gmail.com"
] |
mc.wrzesniewski@gmail.com
|
3c7c0cfded18f110dc427d2e2c3c231323ce5560
|
d6cc30fe5b4c41e86ccb5dec2d7ff3fa6e196d78
|
/ints.py
|
14e8b75b45ba0952f3a6f5c41927ae77b1b14cff
|
[] |
no_license
|
PlumpMath/pylz
|
9dfcdb9200e0fd13db93f475a6eadac6ea0fd247
|
e2c16b78e6c82173deb944bc9658063c72b099d5
|
refs/heads/master
| 2021-01-20T09:54:36.450477
| 2012-03-12T04:31:50
| 2012-03-12T04:31:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
#!/usr/bin/env python3
'''Utilities to convert integers to bytes objects and back again.'''
#stdlib
from math import log, ceil
###############################################################################
def bitwidth(val):
return ceil(log(val + 1, 2))
assert bitwidth(0b0) == 0
assert bitwidth(0b1) == 1
assert bitwidth(0b10) == 2
assert bitwidth(0b11) == 2
assert bitwidth(0b101) == 3
assert bitwidth(0b1011) == 4
assert bitwidth(0b11010) == 5
assert bitwidth(0b110100101) == 9
assert bitwidth(0b1010110100) == 10
###############################################################################
def bytewidth(val):
return ceil(bitwidth(val) / 8)
assert bytewidth(0b0) == 0
assert bytewidth(0b1) == 1
assert bytewidth(0b10) == 1
assert bytewidth(0b11) == 1
assert bytewidth(0b101) == 1
assert bytewidth(0b10110111) == 1
assert bytewidth(0b101101010) == 2
assert bytewidth(0b1010111010000010) == 2
assert bytewidth(0b1010111010001000010) == 3
###############################################################################
def tobytes(val, length=None):
'''Convert an integer to a bytes of the minimum (or specified) length.'''
w = bytewidth(val)
if length is None:
pass
elif length < w:
raise ValueError('{} is less than width of {}, {}'.
format(length, val, w))
else:
w = length
return bytes(255 & (val >> 8 * i) for i in reversed(range(w)))
assert tobytes(0) == b''
assert tobytes(0, 2) == b'\x00\x00'
assert tobytes(97 ) == b'a'
assert tobytes(97, 1) == b'a'
assert tobytes(97, 2) == b'\x00a'
assert tobytes(97, 3) == b'\x00\x00a'
assert tobytes(495891539314) == b'super'
###############################################################################
def frombytes(byt):
'''Convert bytes to the corresponding integer.'''
n = 0
for i, b in enumerate(reversed(list(byt))):
n |= b << 8 * i
return n
assert frombytes(b'') == 0
assert frombytes(b'\x00') == 0
assert frombytes(b'\x01') == 1
assert frombytes(b'\x02') == 2
assert frombytes(b'\x02\x80') == 640
assert frombytes(b'\x00\x02\x80') == 640
assert frombytes(tobytes(987654321)) == 987654321
###############################################################################
|
[
"plredmond@gmail.com"
] |
plredmond@gmail.com
|
c149793e1bd2ab9f684c1302a51fa785c0bfc8a2
|
205ef62fa03bc82a66a1cf148f824741e2084f77
|
/2D_Coordinate/1.4.py
|
c6105d6ab685c234625ea03c9631252390188a9a
|
[] |
no_license
|
ShreyasHavaldar7/Linear_Algebra_Applications
|
88fade230f34944ded79785d9531685186349046
|
42d534b5da306b1f59005e7c8dc61141b7fc51ce
|
refs/heads/master
| 2020-05-09T15:19:21.965674
| 2019-04-13T21:26:12
| 2019-04-13T21:26:12
| 181,229,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 7 18:03:18 2019
@author: shreyas
"""
import numpy as np
def dir_vec(AB):
return np.matmul(AB,dvec)
def norm_vec(AB):
return np.matmul(omat,np.matmul(AB,dvec))
A=np.array([-2,-2])
B=np.array([1,3])
dvec = np.array([-1,1])
omat = np.array([[0,1],[-1,0]])
AB= np.vstack((A,B)).T
print(dir_vec(AB))
print(norm_vec(AB))
|
[
"noreply@github.com"
] |
ShreyasHavaldar7.noreply@github.com
|
a7a06ca27759d50d419a66800ccbee290e3182ed
|
b93a640a37db99f8fd61ac00df1fa5e01b27217d
|
/[TC 03] Regex/Hernández Castellanos César Uriel Práctica 3 Teoría Computacional/genIP.py
|
46f9e9391dfa42627d0bea183db12ac9d73f7249
|
[] |
no_license
|
llCUriel/TheoryOfComputation
|
94ad55efb21f06b881f14216d31f15c24fe2c685
|
2acbb18b26aadf5abe2aaebf84f0ccb32a6b75de
|
refs/heads/master
| 2021-03-01T08:50:34.252770
| 2020-03-08T07:41:31
| 2020-03-08T07:41:31
| 245,770,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
import random
def generarNumeroAleatorio():
return random.randint(0,255)
def generarTupla():
for i in range(0,4):
miNumero = generarNumeroAleatorio()
cadena = str(miNumero)
if miNumero<10:
cadena = "00"+str(miNumero)
if miNumero>=10 and miNumero <100:
cadena = "0"+str(miNumero)
return cadena
def generarDireccionIp():
cadena = ""
for i in range(0,4):
cadena = cadena +"."+ generarTupla()
return cadena[1:]
def probadorDeAplicacion():
for i in range(0,10):
        print(str(generarTupla())+"|"+generarDireccionIp())
probadorDeAplicacion()
|
[
"48939025+llCUriel@users.noreply.github.com"
] |
48939025+llCUriel@users.noreply.github.com
|
819c33bed65c47d08cf76fd2aaf4909e3961792a
|
edc87965dbc41921dc9fc07c729c1534d456f4f8
|
/0x15-api/3-dictionary_of_list_of_dictionaries.py
|
650d30e48668cf2212f3537625b263332b91252a
|
[] |
no_license
|
alexoreiro/holberton-system_engineering-devops
|
a4f0daf9357c351ca5705bf769ca3f40f7dd7307
|
9ee81dd53315c6199227e7465ca1fce788eb6b25
|
refs/heads/master
| 2023-04-25T16:15:20.879873
| 2021-05-18T20:00:43
| 2021-05-18T20:00:43
| 294,121,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
#!/usr/bin/python3
'''Reads todo list from api for id passed and turns into json file'''
import json
import requests
import sys
base_url = 'https://jsonplaceholder.typicode.com/'
def do_request():
'''Performs request'''
response = requests.get(base_url + 'users/')
if response.status_code != 200:
return print('Error: status_code:', response.status_code)
users = response.json()
response = requests.get(base_url + 'todos/')
if response.status_code != 200:
return print('Error: status_code:', response.status_code)
todos = response.json()
data = {}
for user in users:
user_todos = [todo for todo in todos
if todo.get('userId') == user.get('id')]
user_todos = [{'username': user.get('username'),
'task': todo.get('title'),
'completed': todo.get('completed')}
for todo in user_todos]
data[str(user.get('id'))] = user_todos
with open('todo_all_employees.json', 'w') as file:
json.dump(data, file)
if __name__ == '__main__':
do_request()
|
[
"alexis.oreiro@hotmail.com"
] |
alexis.oreiro@hotmail.com
|
ce813622af650fd867999e169900d0f40a737011
|
bec751c0ae359a7bcde2582cac470ed70d5c0377
|
/src/gitutil/test/test_gitSession.py
|
826709a8d33364162c33dfab775ea2f173bb1bef
|
[] |
no_license
|
bkushigian/hog
|
b4e6c80f2d4650759396cad3c05cd8619454a982
|
9618eabb417ea4ecacdb65435666821bb4f630fd
|
refs/heads/master
| 2021-08-28T18:21:39.020270
| 2017-12-12T23:07:13
| 2017-12-12T23:07:13
| 103,278,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
from unittest import TestCase
class TestGitSession(TestCase):
pass
|
[
"bkushigian@gmail.com"
] |
bkushigian@gmail.com
|
f3d00278113692101fdabdbcba8f936eab3ad483
|
262f5f4c3542e4c955d98a35040900a3ef27647d
|
/routers/producto_router.py
|
d2e73d0ba7e16492028ad170e47987f00016821b
|
[] |
no_license
|
maopinedaangel/back-chicco
|
cba5a31f390e6fc432244138e29c83cdb6d457d1
|
91026eae072f0c5d5dcd55df419db6a8ee2eb4dc
|
refs/heads/main
| 2023-06-05T13:30:15.791457
| 2021-07-01T15:22:29
| 2021-07-01T15:22:29
| 382,075,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
from typing import List
from fastapi import Depends, APIRouter, HTTPException
from sqlalchemy.orm import Session
from db.db_connection import get_db
from db.categoria_db import CategoriaDB
from db.producto_db import ProductoDB
from models.categoria_models import CategoriaIn, CategoriaUpdate
from models.producto_models import ProductoIn, ProductoUpdate
router = APIRouter()
@router.get("/productos")
async def get_productos(db: Session = Depends(get_db)):
productos = db.query(ProductoDB).all()
return productos
@router.get("/producto")
async def get_producto(prod: int, db: Session = Depends(get_db)):
mi_producto = db.query(ProductoDB).get(prod)
    if mi_producto is None:
raise HTTPException(status_code=404, detail="No se encontró el producto.")
return mi_producto
@router.get("/cat/productos")
async def filtrar_producto(cat: int, db: Session = Depends(get_db)):
filtro_productos = db.query(ProductoDB).filter(ProductoDB.id_categoria==cat).all()
    if not filtro_productos:  # .all() returns an empty list, never None
raise HTTPException(status_code=404, detail="No se encontraron resultados.")
return filtro_productos
@router.post("/producto/nuevo")
async def new_producto(producto_in: ProductoIn, db: Session = Depends(get_db)):
new_prod = ProductoDB(**producto_in.dict())
db.add(new_prod)
db.commit()
db.refresh(new_prod)
return {"mensaje": "Producto creado exitosamente."}
#return new_cat
@router.put("/producto/edit")
async def edit_producto(producto_in: ProductoUpdate, db: Session = Depends(get_db)):
producto_in_db = db.query(ProductoDB).get(producto_in.id)
producto_in_db.nombre = producto_in.nombre
producto_in_db.unidad = producto_in.unidad
producto_in_db.precio = producto_in.precio
producto_in_db.imagen = producto_in.imagen
producto_in_db.descripcion = producto_in.descripcion
producto_in_db.stock = producto_in.stock
producto_in_db.id_categoria = producto_in.id_categoria
db.commit()
db.refresh(producto_in_db)
return {"mensaje": "Producto actualizado exitosamente."}
#return categoria_in_db
@router.delete("/producto/delete")
async def borrar_producto(prod: int, db: Session = Depends(get_db)):
db.query(ProductoDB).filter(ProductoDB.id==prod).delete()
db.commit()
return {"mensaje": "Producto eliminado exitosamente."}
|
[
"ingmauriciopineda@gmail.com"
] |
ingmauriciopineda@gmail.com
|
b1c2624a39524531b8f7070514ca8c03d2acac6c
|
f559c2552526cf3e1bde20f0b3c8bd39563bc2ce
|
/manage.py
|
e7748fc0ae491061d0134bde0683652cca043d84
|
[] |
no_license
|
mchocianowski/Matt_Katie_Milestone_4
|
7bed30dc41525ee531af7100850085ff507219e6
|
e719c87028d6f7ee187f35ace0ef317bf0a958c6
|
refs/heads/master
| 2023-09-01T17:05:26.402189
| 2021-10-23T18:27:50
| 2021-10-23T18:27:50
| 378,400,610
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'matt_katie.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"ch.mateusz@hotmail.com"
] |
ch.mateusz@hotmail.com
|
76fe9b33f8e40be7a219e20eee5cdf56266de674
|
59e59cbc24650b557f41c041cbeb8dad10c4d2b1
|
/05 Tree/987. Vertical Order Traversal of a Binary Tree.py
|
97cd5176c2993952bd8af337395ff3f1bb4b0ed5
|
[] |
no_license
|
stungkit/Leetcode-Data-Structures-Algorithms
|
5345211f4ceb7dc7651360f0ca0a7f48f2434556
|
a3a341369a8acd86c29f8fba642f856d6ea2fd0a
|
refs/heads/master
| 2023-07-26T18:14:17.993112
| 2021-09-13T15:40:47
| 2021-09-13T15:40:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,109
|
py
|
# Given a binary tree, return the vertical order traversal of its nodes' values.
# For each node at position (X, Y), its left and right children respectively will be at positions (X-1, Y-1) and (X+1, Y-1).
# Running a vertical line from X = -infinity to X = +infinity, whenever the vertical line touches some nodes,
# we report the values of the nodes in order from top to bottom (decreasing Y coordinates).
# If two nodes have the same position, then the value of the node that is reported first is the value that is smaller.
# Return a list of non-empty reports in order of X coordinate. Every report will have a list of values of nodes.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Method 1: BFS with sorting all nodes
from collections import defaultdict, deque
from typing import List

class TreeNode:  # the definition from the comment above, needed by the type hints below
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

class Solution:
def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
# (0) edge case
if root is None:
return []
# (1) initialize
columnTable = defaultdict(list)
queue = deque([(root, 0, 0)])
# (2) BFS traversal
while queue:
node, row, column = queue.popleft()
if node:
columnTable[column].append((row, node.val))
if node.left:
queue.append((node.left, row+1, column-1))
if node.right:
queue.append((node.right, row+1, column+1))
# (3) extract the values from the columnTable
# (3) for each column, first sort by 'row', then by 'value', in ascending order
res = []
for col in sorted(columnTable.keys()):
res.append([val for row, val in sorted(columnTable[col])])
return res
# Time: O(N) for BFS traversal, O(NlogN) for sorting
# Space: O(N)
# Method 2: BFS, iterating the column range instead of sorting the column keys
class Solution:
def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
# (0) edge case
if root is None:
return []
# (1) initialize
columnTable = defaultdict(list)
min_column = max_column = 0
queue = deque([(root, 0, 0)])
# (2) BFS traversal
while queue:
node, row, column = queue.popleft()
if node is not None:
columnTable[column].append((row, node.val))
min_column = min(min_column, column)
max_column = max(max_column, column)
queue.append((node.left, row + 1, column - 1))
queue.append((node.right, row + 1, column + 1))
# (3) extract the values from the columnTable
# (3) for each column, first sort by 'row', then by 'value', in ascending order
res = []
for col in range(min_column, max_column + 1):
res.append([val for row, val in sorted(columnTable[col])])
return res
# Time: O(N) for BFS traversal, O(K*(N/K)*log(N/K)) = O(N*log(N/K)) for sorting in step 3
# Space: O(N)
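# A quick check (added for illustration) on the example tree [3,9,20,null,null,15,7]:
root = TreeNode(3)
root.left, root.right = TreeNode(9), TreeNode(20)
root.right.left, root.right.right = TreeNode(15), TreeNode(7)
print(Solution().verticalTraversal(root))  # [[9], [3, 15], [20], [7]]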
|
[
"noreply@github.com"
] |
stungkit.noreply@github.com
|
864af5a66b0bcfd3de035d8a7b115080389d3992
|
166d36ddd783dcbccb230f5bf8005c43aeebff17
|
/lyric_predictions.py
|
e434e1974261dddd9b86236c4c22c8550215355d
|
[] |
no_license
|
sarasteeves/songlyrics
|
ec20fa6104462bbdf00cd23c4754a5c5d4f9da3b
|
043b6c67c339a708467f3720e8f3b818164a1d66
|
refs/heads/master
| 2021-01-12T03:32:18.337201
| 2017-01-15T19:54:02
| 2017-01-15T19:54:02
| 78,226,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 06 18:31:47 2017
@author: Sara
"""
import pandas as pd
import language_model as lm
def generate_lyrics_char(dataset, artist, seed, n):
lyrics = dataset.loc[dataset['artist']==artist, 'text'] # get all text for entries with specified artist
model = lm.charModel(order=len(seed))
for song in lyrics:
song = song.lower()
model.train(song)
return model.generate(seed, n)
def generate_lyrics_word(dataset, artist, seed, n):
lyrics = dataset.loc[dataset['artist']==artist, 'text'] # get all text for entries with specified artist
model = lm.wordModel()
for song in lyrics:
song = song.lower()
model.train(song)
return model.generate(seed, n)
if __name__=='__main__':
# Load dataset
# using 55000+ Song Lyrics dataset from Kaggle (https://www.kaggle.com/mousehead/songlyrics)
data = pd.read_csv('songdata.csv')
    print('Column names:')
    print(data.columns.values)
    print(data.describe())
    print()
    print('Generating lyrics for Coldplay (by character)...')
    print(generate_lyrics_char(data, 'Coldplay', "i don'", 200))
    print()
    print()
    print('Generating lyrics for Coldplay (by word)...')
    print(generate_lyrics_word(data, 'Coldplay', "i don", 100))
    print()
    print()
    print('Generating lyrics for Robbie Williams (by character)...')
    print(generate_lyrics_char(data, 'Robbie Williams', "i don'", 200))
    print()
    print()
    print('Generating lyrics for Robbie Williams (by word)...')
    print(generate_lyrics_word(data, 'Robbie Williams', "i don", 100))
|
[
"sarasteeves@gmail.com"
] |
sarasteeves@gmail.com
|
e2bc3c89ebc437d6fbdd72376c9ef0e7d2dd468d
|
eea6ebf8af0e3e833eb95ed8d27a227feaaab79a
|
/practice6/14_3.py
|
c44301d5d766c1425326d023eb9c8e22fd150406
|
[] |
no_license
|
minzhou1003/intro-to-programming-using-python
|
88840b93dc6f47e79d9a3c711856265b30c248b9
|
784945b15131d1149fbb650cf6867895cdb15c86
|
refs/heads/master
| 2020-03-07T01:53:23.572164
| 2018-04-26T03:30:52
| 2018-04-26T03:30:52
| 127,195,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
# minzhou@bu.edu
import os.path
import sys
import collections
def main():
keyWords = {"and", "as", "assert", "break", "class",
"continue", "def", "del", "elif", "else",
"except", "False", "finally", "for", "from",
"global", "if", "import", "in", "is", "lambda",
"None", "nonlocal", "not", "or", "pass", "raise",
"return", "True", "try", "while", "with", "yield"}
filename = input("Enter a Python source code filename: ").strip()
if not os.path.isfile(filename): # Check if file exists
print("File", filename, "does not exist")
sys.exit()
infile = open(filename, "r") # Open files for input
text = infile.read().split() # Read and split words from the file
matched = collections.Counter(text)
for item in matched:
if item in keyWords:
print('Keyword "{}" occurs {} times'.format(item, matched[item]))
main()
|
[
"minzhou@bu.edu"
] |
minzhou@bu.edu
|
51aea0837af27265ca6570dcb07f16f5a0cd1a77
|
e59f257d5735cae8cf7bb46d52792aa7371c9dae
|
/.history/core/views_20200223201904.py
|
15927a86821df5a7628af8d466e5036af55b2e8d
|
[] |
no_license
|
okumujustine/django-eccomerce-website
|
95499049dd4e46513c25a0fe6e6b82cf69d2080b
|
00c1ca600af5faa89829702044cc9f329bbc8b66
|
refs/heads/master
| 2022-12-08T23:29:19.453109
| 2021-05-31T11:23:22
| 2021-05-31T11:23:22
| 242,557,196
| 1
| 2
| null | 2022-12-08T03:44:29
| 2020-02-23T17:08:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,036
|
py
|
from django.conf import settings
from django.contrib import messages
from django.shortcuts import render, get_object_or_404, redirect
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import DetailView,ListView, View
from django.utils import timezone
from .models import Item, OrderItem, Order, BillingAddress, Payment
from .forms import CheckoutForm
import stripe
# Create your views here.
stripe.api_key = settings.STRIPE_SECRET_KEY
class CheckoutView(View):
def get(self,*args,**kwargs):
form = CheckoutForm()
context = {
'form':form
}
return render(self.request, 'checkout-page.html', context)
def post(self, *args, **kwargs):
form = CheckoutForm(self.request.POST or None)
try:
order = Order.objects.get(user=self.request.user, ordered=False)
if form.is_valid():
street_address = form.cleaned_data.get('street_address')
apartment_address = form.cleaned_data.get('apartment_address')
country = form.cleaned_data.get('country')
zip = form.cleaned_data.get('zip')
# same_shipping_address = forms.cleaned_data.get('same_billing_address')
# save_info = forms.cleaned_data.get('save_info')
payment_option = form.cleaned_data.get('payment_option')
billing_address = BillingAddress(
user=self.request.user,
street_address = street_address,
apartment_address = apartment_address,
country = country,
zip = zip
)
billing_address.save()
order.billing_address = billing_address
                order.save()
return redirect('core:checkout')
messages.warning(self.request, "Failed Checkout")
return redirect('core:checkout')
except ObjectDoesNotExist:
messages.warning(self.request, "You do not have an active order")
return redirect('core:order-summary')
class PaymentView(View):
def get(self, *args, **kwargs):
return render(self.request, "payment-page.html")
def post(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
token = self.request.POST.get('stripeToken')
payment_intent = stripe.Charge.create(
amount=order.get_total() * 100,
currency='usd',
source = token
# description='Charge for jean',
)
order.ordered = True
payment = Payment()
def products(request):
context = {
'items':Item.objects.all()
}
return render(request, 'product-page.html', context)
class HomeView(ListView):
model = Item
paginate_by = 10
template_name = 'home-page.html'
class OrderSummaryView(LoginRequiredMixin, View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'object':order
}
return render(self.request, 'order-summary-page.html', context)
except ObjectDoesNotExist:
messages.error(self.request, "You do not have an active order")
return redirect("/")
class ItemDetailView(DetailView):
model = Item
template_name = 'product-page.html'
@login_required
def add_to_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(
item=item,
user=request.user,
ordered = False
)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
#check if order item is in order
if order.items.filter(item__slug=item.slug).exists():
order_item.quantity += 1
order_item.save()
messages.info(request, "This item quantity was updated in your cart")
return redirect("core:order-summary")
else:
order.items.add(order_item)
messages.info(request, "This item was added to your cart")
return redirect("core:order-summary")
else:
ordered_date = timezone.now()
order = Order.objects.create(user=request.user, ordered_date=ordered_date)
order.items.add(order_item)
messages.info(request, "This item was added to your cart")
return redirect("core:order-summary")
@login_required
def remove_single_item_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
#check if order item is in order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered = False
)[0]
if order_item.quantity > 1:
order_item.quantity -= 1
order_item.save()
else:
order.items.remove(order_item)
messages.info(request, "This item quantity was updated")
return redirect("core:order-summary")
else:
                # add a message saying the order does not contain this item
messages.info(request, "This item was not in your cart")
return redirect("core:order-summary",slug=slug)
else:
        # add a message saying the user does not have an active order
messages.info(request, "You do not have an active order")
return redirect("core:product",slug=slug)
return redirect("core:product",slug=slug)
@login_required
def remove_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
#check if order item is in order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered = False
)[0]
order.items.remove(order_item)
messages.info(request, "This item was removed to your cart")
return redirect("core:product",slug=slug)
else:
#add message saying the order doesnot contain any item
messages.info(request, "This item was not in your cart")
return redirect("core:product",slug=slug)
else:
#add message saying user doesnot have any item
messages.info(request, "You do not have an active order")
return redirect("core:product",slug=slug)
return redirect("core:product",slug=slug)
# ---- author: okumujustine01@gmail.com ----

# ==== file: /memly/tests/test_08_domains.py | repo: tamir-dingjan/memly @ refs/heads/master | license: MIT (permissive) | Python, UTF-8, 415 bytes ====
import os
import numpy as np
import memly
from memly import domains
def test_domains():
# Setup access to datafiles
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
traj = os.path.join(THIS_DIR, "data/2.xtc")
top = os.path.join(THIS_DIR, "data/2.pdb")
x = memly.Analysis(traj, top, load=True)
metric = domains.Domains(membrane=x.membrane)
return metric
metric = test_domains()
# ---- author: tamir.dingjan@gmail.com ----

# ==== file: /test/embedding/tet_mix.py | repo: payiz-asj/Macadam @ refs/heads/master | license: MIT (permissive) | Python, UTF-8, 10,629 bytes ====
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/15 21:20
# @author : Mo
# @function: test embedding of pre-train model of mix word and char
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(path_root)
# cpu/gpu selection and tf.keras
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["TF_KERAS"] = "1"
os.environ["MACADAM_LEVEL"] = "custom" # 自定义, 只要不是auto就好
from macadam.conf.path_config import path_tc_baidu_qa_2019
from macadam.base.embedding import *
import os
def _tet_mix_embedding():
######## tet embed-roberta
    # paths to the training/validation data
# path_train = os.path.join(path_tc_thucnews, "train.json")
# path_dev = os.path.join(path_tc_thucnews, "dev.json")
path_train = os.path.join(path_tc_baidu_qa_2019, "train.json")
path_dev = os.path.join(path_tc_baidu_qa_2019, "dev.json")
# params
    params_word = {
        "embed": {"path_embed": None,
                  },
        "sharing": {"length_max": 32,  # max sentence length; if unset, the max length covering the first 95% of the data is used, otherwise this value is enforced; a fixed 20-50 is recommended -- longer is slower for BERT and uses more memory, beware OOM
                    "embed_size": 64,  # char/word embedding dimension; 768 for bert, 300 for word, char can be smaller
                    "task": "TC",  # task type; TC is the plain form, SL additionally adds CLS/SEP. "SL" (sequence-labeling), "TC" (text-classification), "RE" (relation-extraction)
                    "token_type": "word",  # token granularity, the smallest unit: "char", "word", or "ngram"; note: in word2vec mode the training corpus must be pre-tokenized
                    "embed_type": "random",  # embedding type; also accepts "word2vec", "random", "bert", "albert", "roberta", "nezha", "xlnet", "electra", "gpt2"
                    },
        "data": {"train_data": path_train,  # training data
                 "val_data": path_dev  # validation data
                 },
    }
    params_char = {
        "embed": {"path_embed": None,
                  },
        "sharing": {"length_max": 32,  # max sentence length; if unset, the max length covering the first 95% of the data is used, otherwise this value is enforced; a fixed 20-50 is recommended -- longer is slower for BERT and uses more memory, beware OOM
                    "embed_size": 64,  # char/word embedding dimension; 768 for bert, 300 for word, char can be smaller
                    "task": "TC",  # task type; TC is the plain form, SL additionally adds CLS/SEP. "SL" (sequence-labeling), "TC" (text-classification), "RE" (relation-extraction)
                    "token_type": "char",  # token granularity, the smallest unit: "char", "word", or "ngram"; note: in word2vec mode the training corpus must be pre-tokenized
                    "embed_type": "random",  # embedding type; also accepts "word2vec", "random", "bert", "albert", "roberta", "nezha", "xlnet", "electra", "gpt2"
                    },
        "data": {"train_data": path_train,  # training data
                 "val_data": path_dev  # validation data
                 },
    }
params = [params_char, params_word]
word2vec_embed = MixEmbedding(params)
word2vec_embed.build_embedding()
return word2vec_embed
if __name__ == '__main__':
embed = _tet_mix_embedding()
res = embed.encode(text="北京在哪里来着", second_text=["macadam是碎石路"], use_seconds=True)
print(res)
while True:
print("请输入first_text:")
first_text = input()
print("请输入second_text:")
second_text = input()
res = embed.encode(text=first_text, second_text=second_text)
print(res)
mm = 0
text = [['[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['北', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['北京', '京', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['在', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['哪', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['哪里', '里', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['来', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]']]
second_texts = [[['[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['m', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['ma', 'a', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['mac', 'ac', 'c', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['aca', 'ca', 'a', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['acad', 'cad', 'ad', 'd', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['ada', 'da', 'a', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['am', 'm', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['是', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['碎', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', 
'[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]'], ['碎石', '石', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]', '[PAD-WC]']]]
# ---- author: 2714618994qq.cpm ----

# ==== file: /manage.py | repo: aierickasun/positbe @ refs/heads/master | license: none | Python, UTF-8, 539 bytes ====
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'positbe.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
# ---- author: curtiskma@gmail.com ----

# ==== file: /accouts/urls.py | repo: darshan2297/practicedjango @ refs/heads/master | license: none | Python, UTF-8, 798 bytes ====
from django.urls import path
from django.conf.urls import url
from . import views
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
app_name = 'accouts'
urlpatterns = [
path('signup',views.signup,name='signup'),
path('signin',views.signin,name='signin'),
path('signout',views.signout,name='signout'),
path('create',views.create_ac,name='create'),
path('user',views.user_detail,name='user_detail'),
path('authform',views.form_auth,name='authform'),
path('profile',views.profile,name='profile'),
path('authprofileform',views.authprofileform,name='authprofileform'),
] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
# ---- author: darshanvankawala2297@gmail.com ----

# ==== file: /src/01simple_stats.py | repo: mikekestemont/panorama @ refs/heads/master | license: none | Python, UTF-8, 1,281 bytes ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to plot the absolute number of word tokens in the
Speculum archive (aggregated per year), as tagged by the
Stanford Core NLP Suite.
"""
import glob
import os
from operator import itemgetter
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
year_counter = {}
for filename in sorted(glob.glob('../data/tagged/*.txt.conll')):
year = int(os.path.basename(filename).split('_')[0])
if year not in year_counter:
year_counter[year] = 0
with open(filename, 'r') as f:
y = os.path.basename(filename).split('_')[0]
for line in f.readlines():
line = line.strip()
if line:
year_counter[year] += 1
year_counts = sorted(year_counter.items(), key=itemgetter(0), reverse=True)
years, cnts = zip(*year_counts)
years = [int(y) for y in years]
nb_words = sum(cnts)
sb.plt.barh(years, cnts, color='lightslategray')
sb.plt.title('Speculum archive (' + str(min(years)) +
'-' + str(max(years)) + ')\n'
+ str(nb_words)+' tokens in total (yearly mean: ' +
str(int(np.mean(cnts))) + ')')
sb.plt.xlabel('Number of tokens')
sb.plt.ylabel('Year')
sb.plt.savefig('../figures/01nb_words.pdf')
# ---- author: mike.kestemont@gmail.com ----

# ==== file: /hello.py | repo: whyisjacob/python @ refs/heads/master | license: none | Python, UTF-8, 231 bytes ====
num1 = float( input("Enter the first number: "))
num2 = float( input("Enter the second number: "))
num3 = float( input("Enter the third number: "))
print("The max value is: ", max(num1,num2,num3))
input("Press any key to exit")
# ---- author: whyisjacob@gmail.com ----

# ==== file: /pytest/ex21.py | repo: hello-lily/learngit @ refs/heads/master | license: none | Python, UTF-8, 924 bytes ====
def add( a, b):
print("ADDING %d + %d" % (a,b) )
return a + b
def subtract ( a, b):
print("SUBTRACTING %d - %d" % (a , b))
return a - b
def multiply ( a, b):
print("MULTIPLYING %d * %d" % (a,b))
return a * b
def divide (a, b):
print("DIVIDING %d / %d" % (a,b))
return a / b
print(" Let's do some math with just functiions!")
age = add (30,5)
#35
height = subtract (78, 4)
#74
weight = multiply ( 90, 2)
#180
iq = divide (100 ,2 )
#50
#there are some format, i want to try!
# you like what ,use what!
print(" Age: %d \tHeight: %d \tWeight: %d \tIQ: %d" %
(age, height,weight,iq))
# A puzzle for the extra credit, type it in anyway.
print ("Here is a puzzle.")
what = add( age, subtract( height, multiply(weight, divide(iq,2))))
# 35 + (74 - (180 * 25)) = -4391
n = 35+74-180*25
print(" my result is : ", n)
print ("That becomes: ", what, "Can you do it by hand?")
if n == what:
print("Ture")
else:
print("False")
# ---- author: aliciali@139.com ----

# ==== file: /programming/python-curso_em_video/exercises/ex049.py | repo: carlosevmoura/courses-notes @ refs/heads/master | license: MIT (permissive) | Python, UTF-8, 234 bytes ====
numero = int(input('Enter an integer to see its multiplication table: '))
print('='*12)
for contador in range(1, 11):
    print('{} x {} = {}'.format(numero, contador, numero * contador))
print('='*12)
# ---- author: carlosevmoura@gmail.com ----

# ==== file: /Tianic_Kaggle/Titanic_kaggle/Tianic.py | repo: amazingcodeLYL/Pytorch_Advanced_combat @ refs/heads/master | license: none | Python, UTF-8, 8,531 bytes ====
import numpy as np
import random as rnd
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC,LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
train_df=pd.read_csv('train.csv')
test_df=pd.read_csv('test.csv')
print(train_df[train_df.Age.isnull()].iloc[:,:].values)
# print(y)
combine=[train_df,test_df]
# print(train_df.columns.values)
# print(train_df.info())
# print('_'*40)
# print(test_df.info())
# print(train_df.describe(include=['O'])) # summarize the categorical (object-dtype) columns
# a=train_df[['Pclass','Survived']].groupby(['Pclass'],as_index=False).mean().sort_values(by='Survived',ascending=False)
# b=train_df[['Sex','Survived']].groupby(['Sex'],as_index=False).mean().sort_values(by='Survived',ascending=False)
# b=train_df[['SibSp','Survived']].groupby(['SibSp'],as_index=False).mean().sort_values(by='Survived',ascending=False)
# print(b)
# g=sns.FacetGrid(train_df,col='Survived',row='Pclass',size=2.2,aspect=1.6)
# g.map(plt.hist,'Age',alpha=.5,bins=20)
# grid=sns.FacetGrid(train_df,row='Embarked',size=2.2,aspect=1.6)
# grid.map(sns.pointplot,'Pclass','Survived','Sex',palette='deep')
# grid=sns.FacetGrid(train_df,row='Embarked',col='Survived',size=2.2,aspect=1.6)
# grid.map(sns.barplot,'Sex','Fare',alpha=.5,ci=None)
# grid.add_legend()
# plt.show()
print("Before",train_df.shape,test_df.shape,combine[0].shape,combine[1].shape)
train_df=train_df.drop(['Ticket','Cabin'],axis=1) # axis=1 means drop columns
test_df=test_df.drop(['Ticket', 'Cabin'],axis=1)
combine=[train_df,test_df]
print("After",train_df.shape,test_df.shape,combine[0].shape,combine[1].shape)
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', \
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train_df.head()
train_df=train_df.drop(['Name','PassengerId'],axis=1)
test_df=test_df.drop(['Name'],axis=1)
combine=[train_df,test_df]
for dataset in combine:
dataset['Sex']=dataset['Sex'].map({'female':1,'male':0}).astype(int)
# print(train_df.head())
guess_ages=np.zeros((2,3))
for dataset in combine:
for i in range(0,2):
for j in range(0,3):
guess_df=dataset[(dataset['Sex']==i)&(dataset['Pclass']==j+1)]['Age'].dropna()
ages_guess=guess_df.median()
guess_ages[i,j]=int(ages_guess/0.5+0.5)*0.5
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
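# (The nested loops above impute missing ages with the median age of each
# (Sex, Pclass) cell, rounded to the nearest 0.5 via int(x/0.5 + 0.5)*0.5.)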
train_df['AgeBand']=pd.cut(train_df['Age'],5)
a=train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)
for dataset in combine:
dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
train_df=train_df.drop(['AgeBand'],axis=1)
combine=[train_df,test_df]
for dataset in combine:
dataset['FamilySize']=dataset['SibSp']+dataset['Parch']+1
train_df[['FamilySize','Survived']].groupby(['FamilySize'],as_index=False).mean().sort_values(by='Survived',ascending=False)
for dataset in combine:
dataset['IsAlone']=0
dataset.loc[dataset['FamilySize']==1,'IsAlone']=1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
train_df=train_df.drop(['Parch','SibSp','FamilySize'],axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
# print(train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10))
freq_port=train_df.Embarked.dropna().mode()[0]
for dataset in combine:
dataset['Embarked']=dataset['Embarked'].fillna(freq_port)
train_df[['Embarked','Survived']].groupby(['Embarked'],as_index=False).mean().sort_values(by='Survived', ascending=False)
for dataset in combine:
dataset['Embarked']=dataset['Embarked'].map({'S':0,'Q':1,'C':2}).astype(int)
# print(train_df.head())
test_df['Fare'].fillna(test_df['Fare'].dropna().median(),inplace=True)
train_df['FareBand']=pd.qcut(train_df['Fare'],4)
# cut chooses bins evenly spaced over the value range itself
# qcut chooses bins so that each holds an even share of the values
a=train_df[['FareBand','Survived']].groupby(['FareBand'],as_index=False).mean().sort_values(by='FareBand',ascending=True)
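# A quick illustration of the difference on toy data (the values here are made up):
#   pd.cut(pd.Series([1, 2, 3, 100]), 2)  -> equal-width bins:     (0.901, 50.5], (50.5, 100.0]
#   pd.qcut(pd.Series([1, 2, 3, 100]), 2) -> equal-frequency bins: (0.999, 2.5], (2.5, 100.0]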
for dataset in combine:
    dataset.loc[dataset['Fare']<=7.91,'Fare']=0
dataset.loc[(dataset['Fare']>7.91)&(dataset['Fare']<=14.454),'Fare']=1
dataset.loc[(dataset['Fare']>14.454)&(dataset['Fare']<=31),'Fare']=2
dataset.loc[dataset['Fare']>31,'Fare']=3
dataset['Fare']=dataset['Fare'].astype(int)
train_df=train_df.drop(['FareBand'],axis=1)
combine=[train_df,test_df]
# print(train_df.head(10))
# print(test_df.head(10))
X_train=train_df.drop(['Survived'],axis=1)
Y_train=train_df['Survived']
X_test=test_df.drop(['PassengerId'],axis=1).copy()
# print(X_train.shape,Y_train.shape,X_test.shape)
logreg=LogisticRegression()
logreg.fit(X_train,Y_train)
Y_pred=logreg.predict(X_test)
acc_log=round(logreg.score(X_train,Y_train)*100,2)
# print(acc_log)
coeff_df=pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns=['Feature']
coeff_df["Correlation"]=pd.Series(logreg.coef_[0])
a=coeff_df.sort_values(by='Correlation',ascending=False)
# print(a)
svc=SVC()
svc.fit(X_train,Y_train)
Y_pred=svc.predict(X_test)
acc_svc=round(svc.score(X_train,Y_train)*100,2)
# print(acc_svc)
knn=KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train,Y_train)
Y_pred=knn.predict(X_test)
acc_knn=round(knn.score(X_train,Y_train)*100,2)
# print(Y_pred)
# print(acc_knn)
gaussian=GaussianNB()
gaussian.fit(X_train,Y_train)
Y_pred=gaussian.predict(X_test)
acc_gaussian=round(gaussian.score(X_train,Y_train)*100,2)
# print(acc_gaussian)
perceptron=Perceptron()
perceptron.fit(X_train,Y_train)
Y_pred=perceptron.predict(X_test)
acc_perceptron=round(perceptron.score(X_train,Y_train)*100,2)
# print(acc_perceptron)
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)
# # print(acc_linear_svc)
sgd=SGDClassifier()
sgd.fit(X_train,Y_train)
Y_pred=sgd.predict(X_test)
acc_sgd=round(sgd.score(X_train,Y_train)*100,2)
# print(acc_sgd)
decision_tree=DecisionTreeClassifier()
decision_tree.fit(X_train,Y_train)
Y_pred=decision_tree.predict(X_test)
acc_decision_tree=round(decision_tree.score(X_train,Y_train)*100,2)
# print(acc_decision_tree)
random_forest=RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train,Y_train)
Y_pred=random_forest.predict(X_test)
acc_random_forest=round(random_forest.score(X_train,Y_train)*100,2)
# print(acc_random_forest)
model=pd.DataFrame({
'Model':['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Decent', 'Linear SVC',
'Decision Tree'],
'Score':[acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree]})
a=model.sort_values(by='Score',ascending=False)
# print(a)
submission=pd.DataFrame({
"PassengerId":test_df["PassengerId"],
"Survived":Y_pred
})
# submission.to_csv('submission.csv', index=False)
# ---- author: amazingcodeLYL.noreply@github.com ----

# ==== file: /API/repository/classes/political_leader_repository.py | repo: gautam0707/WebScrapper @ refs/heads/master | license: none | Python, UTF-8, 569 bytes ====
from pymongo import MongoClient
from repository.common import constants
class PoliticalLeaderRepository:
def __init__(self):
self.mongo_client = MongoClient(constants.MONGO_INSTANCE_URL, constants.MONGO_INSTANCE_PORT)
self.database = self.mongo_client["metadata_4p"]
self.collection = self.database["metadata_4p_political_leader"]
def get_political_leader_details(self, leader_id):
political_leader_details = self.collection.find_one({"LeaderId": leader_id}, projection={"_id": False})
return political_leader_details
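# Usage sketch (the leader id below is a made-up example; a MongoDB instance
# reachable via repository.common.constants is assumed):
#   repo = PoliticalLeaderRepository()
#   details = repo.get_political_leader_details("L001")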
# ---- author: bgautam.0707@gmail.com ----

# ==== file: /layers.py | repo: truebeliever17/handcrafted-torch @ refs/heads/master | license: MIT (permissive) | Python, UTF-8, 1,831 bytes ====
import numpy as np
class Param:
def __init__(self, data):
self.data = data
self.grad = np.zeros_like(data)
self.velocity = np.zeros_like(data)
class Linear:
def __init__(self, input_size, output_size):
self.W = Param(np.random.randn(input_size, output_size))
self.B = Param(np.random.randn(output_size))
self.params = [self.W, self.B]
self.X = None
def __call__(self, X):
self.X = X
return X @ self.W.data + self.B.data
def backward(self, out):
self.W.grad += (self.X.T @ out) / self.X.shape[0]
self.B.grad += np.sum(out, axis=0) / self.X.shape[0]
return out @ self.W.data.T
def parameters(self):
for p in self.params:
yield p
class Sigmoid:
def __init__(self):
self.res = None
def __call__(self, X):
self.res = 1 / (1 + np.exp(-X))
return self.res
def backward(self, out):
return (self.res * (1 - self.res)) * out
def parameters(self):
return None
class ReLU:
def __init__(self):
pass
def __call__(self, X):
self.X = X
return np.where(X > 0, X, 0)
def backward(self, out):
return np.where(self.X > 0, 1, 0) * out
def parameters(self):
return None
class Sequential:
def __init__(self, *args):
self.layers = args
def __call__(self, X):
for layer in self.layers:
X = layer(X)
return X
def backward(self, out):
for layer in reversed(self.layers):
out = layer.backward(out)
return out
def parameters(self):
for layer in self.layers:
params = layer.parameters()
if params is not None:
for param in params:
yield param
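# A minimal smoke test for the layers above (illustrative only; the layer sizes
# and data below are arbitrary choices, not part of the original module):
if __name__ == "__main__":
    np.random.seed(0)
    model = Sequential(Linear(4, 8), ReLU(), Linear(8, 2))
    X = np.random.randn(16, 4)
    out = model(X)                     # forward pass, shape (16, 2)
    model.backward(np.ones_like(out))  # backward pass accumulates each Param's .grad
    print(out.shape, len(list(model.parameters())))  # -> (16, 2) 4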
# ---- author: madiyar2000@gmail.com ----

# ==== file: /fitnesshub/urls.py | repo: Caiseyann/Fitness-hub @ refs/heads/main | license: none | Python, UTF-8, 800 bytes ====
"""fitnesshub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url,include
urlpatterns = [
url('admin/', admin.site.urls),
url(r'^gym/',include('gym.urls'))
]
# ---- author: andiwoedwin@gmail.com ----

# ==== file: /bin/easy_install-3.8 | repo: josephp27/GitCrypt @ refs/heads/master | license: none | Python, UTF-8, 258 bytes, ext "8" ====
#!/Users/jprof/Desktop/GitCrypt/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
# ---- author: jprof@allstate.com ----

# ==== file: /Factorizer/Factorizer/test/number_generator.py | repo: ViktorCollin/avalg12 @ refs/heads/master | license: none | Python, UTF-8, 102 bytes ====
#!/usr/bin/env python
import random
N = 100
for i in range(0, N):
    print(random.randint(1, 2**50))
# ---- author: carlantonlindstrom@gmail.com ----

# ==== file: /routeros_alive.py | repo: zhuzhes/ShenLian-1 @ refs/heads/master | license: none | Python, UTF-8, 2,477 bytes ====
import os
# Configure settings for project
# Need to run this before calling models from application!
os.environ.setdefault('DJANGO_SETTINGS_MODULE','learning_templates.settings')
import django
# Import settings
django.setup()
import napalm
from napalm_ros import ros
import requests
import socket
import routeros_api
from basic_app.models import Class_Model_Model1
def RouterOs_Query(cmd='/system/identity',router_ip='8.8.8.8',username='azhe',password='sdlnet'):
try:
connection = routeros_api.RouterOsApiPool(router_ip, username, password)
api = connection.get_api()
print("\n################ connecting router #################\n")
result = api.get_resource(cmd).get()
connection.disconnect()
print(result)
return result
    except Exception:
        print('\nsomething went wrong while talking to the router, please check')
try:
router_ip = '198.19.1.21'
router_port = 8728 # Use 8729 for api-ssl
router_user = 'azhe'
router_pass = 'sdlnet'
driver = napalm.get_network_driver('ros')
print('Connecting to', router_ip, "on port", router_port, "as", router_user)
device = driver(hostname=router_ip, username=router_user,
password=router_pass, optional_args={'port': router_port})
print('Opening ...')
device.open()
is_alive = device.is_alive()['is_alive']
print(is_alive)
device.close()
# cmd1 = '/system/license'
# cmd2 = '/system/identity'
# cmd3 = '/system/resource'
#
# systemlicense = RouterOs_Query(cmd1,router_ip,username,password)
# systemidentity = RouterOs_Query(cmd2,router_ip,username,password)
# systemresource = RouterOs_Query(cmd3,router_ip,username,password)
#
# router_name = systemidentity[0]['name']
# mgtIP = router_ip
# # licenselevel = systemlicense[0]['nlevel']
# version = systemresource[0]['version']
# cpu = systemresource[0]['cpu']
# cpu_frequency = systemresource[0]['cpu-frequency']
# architecture_name = systemresource[0]['architecture-name']
# board_name = systemresource[0]['board-name']
#
print("\n################ updating database #################\n")
obj = Class_Model_Model1.objects.get(router_name = 'HK-GOIP')
obj.alive = is_alive
obj.save()
print("\n################ complete! ######################\n")
except Exception:
    print('\nsomething went wrong while dealing with the router and updating the database, please check')
# ---- author: michael.2.zhu@bt.com ----

# ==== file: /mdstack-service_2.2/mdstack-service_2/mdstack-service_2.0/mdstack/dbtools/globalvariable.py | repo: linxuanmax/msql_to_es @ refs/heads/master | license: none | Python, UTF-8, 3,445 bytes ====
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__="yanzl"
__date__="2017-02-27"
'''
Database connection settings, in dict form.
Example: mysql1 is the (key) name of one connection configuration.
  hostname: IP address   port: port   dbname: name of the database to connect to   username: database user   userpass: password for that user
'''
dic_connection = {
'mysql1': {'hostname': '192.168.1.225', 'port': '3306', 'dbname': 'test_a', 'username': 'root', 'userpass': 'MTIzNDY1'},
'mysql2': {'hostname': '192.168.1.225', 'port': '3306', 'dbname': 'test_b', 'username': 'root', 'userpass': 'MTIzNDY1'},
'mysql3': {'hostname': '192.168.1.225', 'port': '3306', 'dbname': 'test_c', 'username': 'root', 'userpass': 'MTIzNDY1'}
}
'''
Task list, in list form.
  taskname: task name (naming scheme: mysql_first)   dbid: task id   reclimit: cap on records imported per run
  sql: SQL statement for extracting data   timefld: time-increment field   incrfld: primary-key field   tbname: table name
  conname: name of the connection configuration to use   tbstructure: table fields (primary key first, time-increment field last)
  inctype: how the data grows (1: primary-key increment, 2: time increment, 0: one-shot import)   mianincr: primary-key field type
'''
dic_tasklist = [
{'taskname': '1_1', 'dbid': 'mysql1', 'reclimit': '5', 'inctype': 1, 'timefld':'', 'incrfld':'ID', 'mianincr': 'number','msgfld':123, 'curpos':'', 'tbname':'info_aa',
'conname': 'mysql1', 'sql': 'select %s from %s where %s > %s order by %s limit %s ;', 'tbstructure':['ID', 'name']},
{'taskname': '1_2', 'dbid': 'mysql1', 'reclimit': '5', 'inctype': 1, 'timefld': '', 'incrfld': 'ID', 'mianincr': 'string', 'msgfld': 123, 'curpos': '', 'tbname': 'info_ba',
'conname': 'mysql1', 'sql': 'select %s from %s where %s > %s order by %s limit %s ;','tbstructure': ['ID', 'name']},
{'taskname': '1_3', 'dbid': 'mysql1', 'reclimit': '2', 'inctype': 1, 'timefld': '', 'incrfld': 'ID', 'mianincr': 'time', 'msgfld': 123, 'curpos': '', 'tbname': 'info_ca',
'conname': 'mysql1', 'sql': 'select %s from %s where %s > %s order by %s limit %s ;', 'tbstructure': ['ID', 'name']},
{'taskname': '2_1', 'dbid': 2, 'reclimit': '10', 'inctype': 2, 'timefld': 'curtime', 'incrfld': 'ID', 'mianincr': 'number','msgfld': 'curtime', 'tbname':'info_ab',
'conname': 'mysql2', 'sql': 'select %s from %s where %s > %s and %s > "'" %s "'" and %s <= "'" %s "'" order by %s limit %s ;', 'tbstructure': ['ID', 'curtime']},
{'taskname': '2_2', 'dbid': 3, 'reclimit': '10', 'inctype': 2, 'timefld': 'curtime', 'incrfld': 'ID', 'mianincr': 'string','msgfld': 'curtime', 'tbname':'info_bb',
'conname': 'mysql2', 'sql': 'select * from %s where %s > "%s" and %s <= "'" %s "'" and %s > "'" %s "'" order by %s limit %s ;', 'tbstructure': ['ID', 'curtime']},
{'taskname': '2_3', 'dbid': 4, 'reclimit': '10', 'inctype': 2, 'timefld': 'curtime', 'incrfld': 'ID','mianincr': 'time','msgfld': 'curtime','tbname':'info_cb',
'conname': 'mysql2', 'sql': 'select * from %s where %s > %s and %s <= "'" %s "'" and %s > "'" %s "'" order by %s limit %s', 'tbstructure': ['ID', 'curtime']},
{'taskname': '0_1', 'dbid': 5, 'reclimit': '10', 'inctype': 0, 'timefld': '', 'incrfld': 'ID', 'mianincr': 'number', 'msgfld': '' ,'curpos':'', 'tbname':'info_ac',
'conname': 'mysql3', 'sql': 'select %s from %s ;', 'tbstructure': ['ID', 'name']},
]
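# Illustrative helper (hypothetical, not used elsewhere in this module):
# resolve the connection settings dict for a task entry via its 'conname' key.
def get_connection_for_task(task):
    return dic_connection[task['conname']]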
# ---- author: root@debian ----

# ==== file: /ent/migrations/0129_actualtext_original_text.py | repo: wrekshan/sentimini @ refs/heads/master | license: none | Python, UTF-8, 595 bytes ====
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-07-06 15:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ent', '0128_auto_20170705_2008'),
]
operations = [
migrations.AddField(
model_name='actualtext',
name='original_text',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='original_text', to='ent.PossibleText'),
),
]
# ---- author: william.rekshan@gmail.com ----

# ==== file: /tutorial-env/bin/alembic | repo: alexiscodes/fpl @ refs/heads/master | license: none | Python, UTF-8, 260 bytes ====
#!/Users/alexis.becker/repos/fpl-dummy/tutorial-env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from alembic.config import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
# ---- author: alexis.becker@transferwise.com ----

# ==== file: /week2/1.py | repo: nguyntony/algorithms @ refs/heads/main | license: none | Python, UTF-8, 463 bytes ====
# 1. Given an array nums, write a function to move all zeroes to the end of it while maintaining the relative order of the non-zero elements.
nums = [9, 1, 0, 3, 0, 5]
nums1 = [0, 18, 6, 7, 9, 0, 2, 0, 3, 0, 2, 0]
def move_zeros(list_given):
    # Iterate over a snapshot: popping from the list while enumerating it
    # shifts later elements left, so the element after each removed zero
    # would be skipped (e.g. [0, 0, 1] would come out wrong).
    for num in list(list_given):
        if num == 0:
            list_given.remove(0)
            list_given.append(0)
move_zeros(nums)
print(nums)
move_zeros(nums1)
print(nums1)
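# An alternative single-pass version (hypothetical name, shown for comparison):
# collect the non-zero prefix once, then pad with zeros -- O(n) and stable.
def move_zeros_stable(list_given):
    nonzero = [n for n in list_given if n != 0]
    list_given[:] = nonzero + [0] * (len(list_given) - len(nonzero))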
# ---- author: nguyen.tony1015@gmail.com ----

# ==== file: /test/input/061.py | repo: EliRibble/pyfmt @ refs/heads/master | license: MIT (permissive) | Python, UTF-8, 222 bytes ====
|
def frobulate(bar=None, bif=None, fiz=None):
print(bar, bif, fiz)
frobulate(
bif="So we can suppress the horizontal layout",
bar="This needs to be a really long line",
fiz="And test the alpha ordering",
)
# ---- author: eliribble@google.com ----

# ==== file: /scripts/download_codes.py | repo: bsolomon1124/demoji @ refs/heads/master | license: Apache-2.0 (permissive) | Python, UTF-8, 2,877 bytes ====
#!/usr/bin/env python3
"""Download emoji data to package_data."""
import datetime
import json
import pathlib
import re
import time
import colorama
import requests
from demoji import URL
# We do *not* use importlib.resources here since we just want the source file,
# not where it (might) be installed
parent = pathlib.Path(__file__).parent.parent.resolve() / "demoji"
CACHEPATH = parent / "codes.json"
MODULEPATH = parent / "__init__.py"
def download_codes(dest=CACHEPATH):
    codes = dict(stream_unicodeorg_emojifile(URL))
    _write_codes(codes, dest)
def _write_codes(codes, dest):
    print(
        colorama.Fore.YELLOW
        + "Writing emoji data to %s ..." % dest
        + colorama.Style.RESET_ALL
    )
    with open(dest, "w") as f:
        json.dump(codes, f, separators=(",", ":"))
print(colorama.Fore.GREEN + "... OK" + colorama.Style.RESET_ALL)
def stream_unicodeorg_emojifile(url=URL):
for codes, desc in _raw_stream_unicodeorg_emojifile(url):
if ".." in codes:
for cp in parse_unicode_range(codes):
yield cp, desc
else:
yield parse_unicode_sequence(codes), desc
def parse_unicode_sequence(string):
return "".join((chr(int(i.zfill(8), 16)) for i in string.split()))
def parse_unicode_range(string):
start, _, end = string.partition("..")
start, end = map(lambda i: int(i.zfill(8), 16), (start, end))
return (chr(i) for i in range(start, end + 1))
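# Illustrative examples for the two parsers above (inputs chosen for this note):
#   parse_unicode_sequence("1F1FA 1F1F8")   -> "🇺🇸"  (a two-codepoint flag sequence)
#   list(parse_unicode_range("2648..264A")) -> ["♈", "♉", "♊"]  (inclusive range)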
def _raw_stream_unicodeorg_emojifile(url):
colorama.init()
print(
colorama.Fore.YELLOW
+ "Downloading emoji data from %s ..." % URL
+ colorama.Style.RESET_ALL
)
resp = requests.request("GET", url, stream=True)
print(
colorama.Fore.GREEN
+ "... OK"
+ colorama.Style.RESET_ALL
+ " (Got response in %0.2f seconds)" % resp.elapsed.total_seconds()
)
POUNDSIGN = "#"
POUNDSIGN_B = b"#"
SEMICOLON = ";"
SPACE = " "
for line in resp.iter_lines():
if not line or line.startswith(POUNDSIGN_B):
continue
line = line.decode("utf-8")
codes, desc = line.split(SEMICOLON, 1)
_, desc = desc.split(POUNDSIGN, 1)
desc = desc.split(SPACE, 3)[-1]
yield (codes.strip(), desc.strip())
def replace_lastdownloaded_timestamp():
with open(MODULEPATH) as f:
text = f.read()
now = datetime.datetime.fromtimestamp(
time.time(), tz=datetime.timezone.utc
)
ldt_re = re.compile(r"^_LDT = .*$", re.M)
with open(MODULEPATH, "w") as f:
f.write(ldt_re.sub("_LDT = %r # noqa: E501" % now, text))
print(
colorama.Fore.GREEN
+ "Replaced timestamp with %r in %s" % (now, MODULEPATH)
+ colorama.Style.RESET_ALL
)
if __name__ == "__main__":
download_codes()
replace_lastdownloaded_timestamp()
# ---- author: bsolomon@protonmail.com ----

# ==== file: /ORS/hospital/urls.py | repo: manishbansal0021/ORS-SIH2K18 @ refs/heads/master | license: none | Python, UTF-8, 337 bytes ====
from django.urls import path
from . import views
urlpatterns = [
path('login/' , views.login_view , name='login_page_mgnt'),
path('logout/',views.logout_view,name='logout_page_mgnt'),
    path('register/',views.register,name='hospital_registration'),
path('dashboard/',views.hospital_dashboard,name='hospital_dashboard'),
]
# ---- author: mayankt28@gmail.com ----

# ==== file: /metadamage/progressbar.py | repo: genomewalker/metadamage @ refs/heads/main | license: MIT (permissive) | Python, UTF-8, 1,665 bytes ====
# Third Party
from rich.console import Console
from rich.panel import Panel
from rich.progress import (
BarColumn,
Progress,
SpinnerColumn,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
)
#%%
class MyProgress(Progress):
def get_renderables(self):
for task in self.tasks:
if task.fields.get("progress_type") == "overall":
self.columns = progress_bar_overall
yield Panel(self.make_tasks_table([task]))
if task.fields.get("progress_type") == "shortname":
self.columns = progress_bar_shortname
yield self.make_tasks_table([task])
if task.fields.get("progress_type") == "status":
self.columns = progress_bar_status
yield self.make_tasks_table([task])
progress_bar_overall = (
"[bold green]{task.description}:",
SpinnerColumn(),
BarColumn(bar_width=None, complete_style="green"),
"[progress.percentage]{task.percentage:>3.0f}%",
"• Files: [progress.percentage]{task.completed} / {task.total}",
# "• Remaining:",
# TimeRemainingColumn(),
"• Time Elapsed:",
TimeElapsedColumn(),
)
progress_bar_shortname = (TextColumn(" " * 4 + "[blue]{task.fields[name]}"),)
progress_bar_status = (
TextColumn(" " * 8 + "{task.fields[status]}:"),
BarColumn(bar_width=20, complete_style="green"),
"[progress.percentage]{task.percentage:>3.0f}%",
"• Time Elapsed:",
TimeElapsedColumn(),
"• {task.fields[name]} [progress.percentage]{task.completed:>4} / {task.total:>4}",
)
#%%
console = Console()
progress = MyProgress(console=console)
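#%%
# Usage sketch (the task description below is made up): each task carries a
# "progress_type" field, which get_renderables uses to pick a column layout:
#
#   with progress:
#       overall = progress.add_task("Mapping", total=3, progress_type="overall")
#       progress.advance(overall)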
# ---- author: christianmichelsen@gmail.com ----

# ==== file: /scripts/scripts-py/subjects_info.py | repo: WesleyMarques/tcp-experiment @ refs/heads/master | license: MIT (permissive) | Python, UTF-8, 2,819 bytes ====
import json
import sys
from pprint import pprint
import calc_metrics
import os
import io
from numpy import median
try:
to_unicode = unicode
except NameError:
to_unicode = str
def get_tests_by_mutants(testsFile):
tests2Mut = {}
for testMut in testsFile:
testMut = testMut.replace("\n", "")
splitLine = testMut.split(" ")
testName = splitLine[0]
mutants = splitLine[1:]
if testName not in testsName:
continue
tests2Mut[testName] = mutants # testName identify mutants
return tests2Mut
def get_info_test_execution(testsFile):
testMat = []
testMatTime = []
testLen = 0
for line in testsFile:
line = line.replace("\n", "").strip().split(' ')
testName = line[0]
testMat.append(testName)
testLen += 1
testTimes = map(lambda valueTime: int(
valueTime.replace("m", "").replace("s", "")), line[1:])
testMatTime.append(median(testTimes))
return testMat, testMatTime, testLen
def get_coverage_matrix(coverageFile):
    coverageMatrix = []
for line in coverageFile:
coverageMatrix.append(list(line))
return coverageMatrix
def covered_number(coverageTest):
return coverageTest.count("1");
PATH = os.getcwd()
os.chdir(PATH)
projects = ["scribe-java", "jasmine-maven-plugin", "java-apns",
"jopt-simple", "la4j", "metrics-core", "vraptor", "assertj-core"]
covLevels = ["statement", "method", "branch"]
with open(PATH+"/result.data", 'w+') as outfile:
for project in projects:
with open(PATH + "/data/" + project + "/coverage/sorted_version.txt") as versionFile:
versions = [line.rstrip('\n') for line in versionFile]
for version in versions:
for coverage in covLevels:
with open(PATH + "/data/" + project + "/coverage/" + version + "/running_time.txt", "r") as testsFile:
testsName, testsTime, testLen = get_info_test_execution(testsFile)
with open(PATH + "/data/" + project + "/faults-groups/" + version + "/test-mut.data", "r") as testsFile:
tests2Mut = get_tests_by_mutants(testsFile)
with open(PATH + "/data/" + project + "/coverage/" + version + "/"+ coverage +"_matrix.txt", "r") as coverageFile:
coverageMatrix = get_coverage_matrix(coverageFile)
cont = 0
amount = []
for test in tests2Mut:
lineToWrite = "%s,%s,%s,%s,%s,%s\n" % (project, version, coverage, test, str(len(tests2Mut[test])), str(covered_number(coverageMatrix[cont])))
amount.append(len(tests2Mut[test]))
cont += 1
outfile.write(lineToWrite)
outfile.close()
# print median(amount)
# ---- author: wesley.nmtorres@gmail.com ----

# ==== file: /django/django_fullstack/the_wall/the_wall_app/urls.py | repo: asmaalsaada/python_stack @ refs/heads/master | license: none | Python, UTF-8, 354 bytes ====
from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('register',views.register),
path('login',views.login),
path('wall',views.success),
path('msgs',views.posts),
path('addcomment/<int:id>',views.comments),
path('del_msg/<int:id>',views.del_msg),
path('logout',views.logout),
]
# ---- author: asma.alsaada96@gmail.com ----

# ==== file: /save_to_baiduyun.py | repo: kampfer/fuli @ refs/heads/master | license: none | Python, UTF-8, 770 bytes ====
#coding:utf-8
import rarfile
import os
import re
def readRar(path, password=''):
rarName = os.path.splitext(os.path.split(path)[1])[0]
with rarfile.RarFile(path) as rf:
if len(password) > 0:
rf.setpassword(password)
return rf.read(rarName + '.txt').decode('gbk')
def findShareLinkAndPwd(path):
return re.findall(ur'百度云链接:(\S*)\s*密码:(\S{4})', readRar(path, 'bhdp'))
def saveFuliToBaiduPan(path):
url, pwd = findShareLinkAndPwd(path)[0]
print url, pwd
os.system('python iScript/pan.baidu.com.py s ' + url + ' / -s ' + pwd)
def findFuli(dir):
for rt, dirs, files in os.walk(dir):
for f in files:
print f
saveFuliToBaiduPan(os.path.join(rt,f))
findFuli('data/')
# ---- author: liaowei02@baidu.com ----

# ==== file: /polpak/cheby_u_poly_coef.py | repo: tnakaicode/jburkardt-python @ refs/heads/master | license: none | Python, UTF-8, 2,745 bytes ====
#! /usr/bin/env python
#
def cheby_u_poly_coef ( n ):
#*****************************************************************************80
#
## CHEBY_U_POLY_COEF evaluates coefficients of Chebyshev polynomials U(n,x).
#
# First terms:
#
# N/K 0 1 2 3 4 5 6 7 8 9 10
#
# 0 1
# 1 0 2
# 2 -1 0 4
# 3 0 -4 0 8
# 4 1 0 -12 0 16
# 5 0 6 0 -32 0 32
# 6 -1 0 24 0 -80 0 64
# 7 0 -8 0 80 0 -192 0 128
#
# Recursion:
#
# U(0)(X) = 1,
# U(1)(X) = 2 * X,
# U(N)(X) = 2 * X * U(N-1)(X) - U(N-2)(X)
#
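#  A worked instance of the recursion, matching row 3 of the table above:
#
#    U(3)(X) = 2 * X * U(2)(X) - U(1)(X)
#            = 2 * X * ( 4 * X^2 - 1 ) - 2 * X
#            = 8 * X^3 - 4 * X,  i.e. coefficients 0, -4, 0, 8.
#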
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 January 2015
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Milton Abramowitz and Irene Stegun,
# Handbook of Mathematical Functions,
# US Department of Commerce, 1964.
#
# Parameters:
#
# Input, integer N, the highest order polynomial to compute.
# Note that polynomials 0 through N will be computed.
#
# Output, real C[0:N,0:N], the coefficients of the Chebyshev T
# polynomials.
#
import numpy as np
c = np.zeros ( [ n + 1, n + 1 ] )
c[0,0] = 1.0
if ( 0 < n ):
c[1,1] = 2.0
for i in range ( 1, n ):
c[i+1,0] = - c[i-1,0]
for j in range ( 1, i ):
c[i+1,j] = 2.0 * c[i,j-1] - c[i-1,j]
c[i+1, i ] = 2.0 * c[i, i-1]
c[i+1, i+1] = 2.0 * c[i, i ]
return c
def cheby_u_poly_coef_test ( ):
#*****************************************************************************80
#
## CHEBY_U_POLY_COEF_TEST tests CHEBY_U_POLY_COEF.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 January 2015
#
# Author:
#
# John Burkardt
#
import platform
n = 5
print ( '' )
print ( 'CHEBY_U_POLY_COEF_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' CHEBY_U_POLY_COEF determines the Chebyshev U' )
print ( ' polynomial coefficients.' )
c = cheby_u_poly_coef ( n )
for i in range ( 0, n + 1 ):
print ( '' )
print ( ' U(%d)' % ( i ) )
print ( '' )
for j in range ( i, -1, -1 ):
if ( j == 0 ):
print ( ' %f' % ( c[i,j] ) )
elif ( j == 1 ):
print ( ' %f * x' % ( c[i,j] ) )
else:
print ( ' %f * x^%d' % ( c[i,j], j ) )
#
# Terminate.
#
print ( '' )
print ( 'CHEBY_U_POLY_COEF_TEST' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
cheby_u_poly_coef_test ( )
timestamp ( )
# ---- author: tnakaicode@gmail.com ----

# ==== file: /config.py | repo: cimaggio/DM_likelihood @ refs/heads/master | license: none | Python, UTF-8, 889 bytes ====
#!/home/software/anaconda/bin/python
nsamples = 2
SampleName = ["0306", "0307"]
GlobalDir = '/media/san2/astro/M15/'
ModelsDir = GlobalDir+'masses'
# reconstructed energy limits
energy_cut = 70
energy_upper_cut= 6500
energy_numbins = 17 # from CreateAeff.C
energy_absmin = 10
# Background binning
NumBckgEnBins = 40
EnCutHighBG = 1.15*energy_upper_cut
EnCutLowBG = 0.9 *energy_cut
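# With the defaults above: EnCutLowBG = 0.9 * 70 = 63 and
# EnCutHighBG = 1.15 * 6500 = 7475 (same reconstructed-energy units).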
# Initialization of array sizes
EvtMaxOn = 540000
EvtMaxOff = 302000
BinsIntegrand = 3
BinsIntegrandNorm = 10
SigmaLimits = 4
# Initialization of array types
ArrayDataType = 'double'
# Flux initialization
Mode = 'ann' # possible modes: ann or dec
ModelInterpolation = 'linear'
Jfactor = 1.4e25 # J-factor from (H.E.S.S. for M15)
tauDMm1 = 4.1e17 # age of Universe in seconds
refsv = 3.0e-26 # thermal relic cross-section for reference
|
[
"cmaggio@neas.uab.es"
] |
cmaggio@neas.uab.es
|
562b9161382d9c37bafe66536738c0b2ca71347d
|
05f92f9b631daaae19f0f43ac2e68ebe72be1958
|
/5.image_crop_by_pixcels.py
|
829697e24b3614ddb6f21136a9aa1a6bceab9144
|
[] |
no_license
|
heysushil/python_image_processing
|
29d67a58bb6d2672ea06f179f39c306167d4426e
|
8e31cefeee3dd3f26ec067acd397aa1c1d42eec7
|
refs/heads/master
| 2021-03-25T16:59:42.886729
| 2020-08-10T13:16:20
| 2020-08-10T13:16:20
| 247,634,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
import numpy as np
import scipy.misc
from scipy import ndimage
import matplotlib.pyplot as plt
face = scipy.misc.face(gray=True)
lx, ly = face.shape
# print(lx)
# print(ly)
# Cropping
# lxstart = lx // 4
# lxend = -lx // 4
# print('\n',lxstart)
# print('\n',lxend)
crop_face = face[lx//4:-lx//4, ly//4:-ly//4]
# print(crop_face);
# up <-> down flip
flip_ud_face = np.flipud(face)
# rotation
rotate_face = ndimage.rotate(face, 45)
rotate_face_noreshape = ndimage.rotate(face, 45, reshape=False)
plt.figure(figsize=(12.5, 2.5))
plt.subplot(151)
plt.imshow(face, cmap=plt.cm.gray)  # show the normal gray image
plt.axis('off')
plt.subplot(152)
plt.imshow(crop_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(153)
plt.imshow(flip_ud_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(154)
plt.imshow(rotate_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(155)
plt.imshow(rotate_face_noreshape, cmap=plt.cm.gray)
plt.axis('off')
plt.subplots_adjust(wspace=0.02, hspace=0.3, top=1, bottom=0.1, left=0,
right=1)
plt.show()
|
[
"sushil.chaudhary111@gmail.com"
] |
sushil.chaudhary111@gmail.com
|
f0c490d3b6a4dce2ab23f9f7b7bc6f06fb0aeee9
|
194987258df8b02c7992e476735461c4724ac866
|
/ScoreBoard.py
|
95e72d42e894a9db3f42132e836e7f5e5a20243c
|
[] |
no_license
|
flips30240/VoxelDash
|
6bbffcc591a9a59aa3f33964df71e178e486b642
|
2ce5861a7345b9c489ff3b711a0cebb317611198
|
refs/heads/master
| 2020-05-17T08:22:22.256317
| 2015-03-11T02:32:10
| 2015-03-11T02:32:10
| 17,657,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
from direct.gui.DirectGui import *
class ScoreBoard():
def __init__(self):
self.scoreList = []
self.scoreLocation = "./scoreboard/ScoreBoard.txt"
print("Score Board Initialized!")
self.parseScoreFile(self.scoreLocation)
base.accept("9", self.destroyBoard)
    def parseScoreFile(self, location):
        # Each line of the score file is expected to look like "name:score".
        space = open(location)
        self.lines = space.readlines()
        space.close()
        print(self.lines)
        for x in range(len(self.lines)):
            z = self.lines[x].strip()
            a = z.split(":")
            print(a)
            name = a[0]
            print(name)
            score = a[1]
            print(score)
            b = [name, score]
            print(b)
            self.scoreList.append(b)
        print(self.scoreList)
        self.createBoard()
def createBoard(self):
base.messenger.send("escape")
numItemsVisible = 4
itemHeight = 0.11
self.myScrolledList = DirectScrolledList(
decButton_pos= (0.35, 0, 0.53),
decButton_text = "UP",
decButton_text_scale = 0.04,
decButton_borderWidth = (0.005, 0.005),
incButton_pos= (0.35, 0, -0.02),
incButton_text = "DOWN",
incButton_text_scale = 0.04,
incButton_borderWidth = (0.005, 0.005),
frameSize = (0.0, 0.7, -0.05, 0.59),
frameColor = (1,0,0,0.5),
pos = (-1, 0, 0),
numItemsVisible = numItemsVisible,
forceHeight = itemHeight,
itemFrame_frameSize = (-0.2, 0.2, -0.37, 0.11),
itemFrame_pos = (0.35, 0, 0.4),
)
for x in range(len(self.scoreList)):
for y in range(len(self.scoreList[x])):
l = DirectLabel(text = self.scoreList[x][y], text_scale = 0.1)
self.myScrolledList.addItem(l)
def destroyBoard(self):
try:
self.myScrolledList.destroy()
        except Exception:
            print("destroy must not be how i get rid of it lol")
## need to position it correctly and add a listener in controlHandler so this doesn't use the pause screen; also need the scoreboard at top, and clean up ##
|
[
"flips92691@gmail.com"
] |
flips92691@gmail.com
|
90f8ff9df83c1f455a3ecbad9cc7f3f796f9a1b0
|
d0c521db0302002723b0fa03f55239e5b7d1a0b4
|
/networks/ConvLSTM.py
|
4ed72a0f2bdefea8c756b6aa294cf3c5b94a180e
|
[
"MIT"
] |
permissive
|
caslab-vt/DeepPaSTL
|
4e028fb42ec1867de44512a788098966d526af3c
|
a928a0fc1f1bbe5a27f7bc1e7d1e320c023d13c6
|
refs/heads/main
| 2023-08-02T06:40:22.318280
| 2021-10-05T10:44:37
| 2021-10-05T10:44:37
| 413,774,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# Define some constants
KERNEL_SIZE = 3
PADDING = KERNEL_SIZE // 2
class ConvLSTMCell(nn.Module):
def __init__(self, args, input_dim, hidden_dim, kernel_size, bias):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_dim: int
Number of channels of input tensor.
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
bias: bool
Whether or not to add the bias.
"""
super(ConvLSTMCell, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias
self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias
)
def forward(self, input_tensor, cur_state):
# print(input_tensor.size())
if cur_state is None:
# print(f'Shape of Inpuit Tensor: {input_tensor.shape}')
spatial_size = input_tensor.data.size()[2:]
batch_size = input_tensor.data.size()[0]
cur_state = self.init_hidden(batch_size, spatial_size)
h_cur, c_cur = cur_state
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)  # input gate
        f = torch.sigmoid(cc_f)  # forget gate
        o = torch.sigmoid(cc_o)  # output gate
        g = torch.tanh(cc_g)     # candidate cell state
        c_next = f * c_cur + i * g       # cell state update
        h_next = o * torch.tanh(c_next)  # hidden state
return h_next, c_next
def init_hidden(self, batch_size, image_size):
# print(image_size)
height, width = image_size
return (torch.randn(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
torch.randn(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
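# A minimal usage sketch (shapes are illustrative, not from the original
# repo): pass a (batch, channels, height, width) tensor, and cur_state=None
# lets the cell initialize its own hidden state; `args` is unused by the
# cell itself, so None is fine here.
if __name__ == "__main__":
    cell = ConvLSTMCell(args=None, input_dim=3, hidden_dim=8,
                        kernel_size=(3, 3), bias=True)
    x = torch.randn(2, 3, 16, 16)
    h, c = cell(x, None)
    print(h.shape, c.shape)  # both torch.Size([2, 8, 16, 16])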
|
[
"murtazar@vt.edu"
] |
murtazar@vt.edu
|
d0aa37477e91bf5b5fec9e081d58348b9bf3ece3
|
307dcc7cfa93b584b95de0c69a600f07a1daefed
|
/backend/am_books_15599/urls.py
|
115803a5380b59a5cc34b215c8af38b9d6594b78
|
[] |
no_license
|
crowdbotics-apps/am-books-15599
|
72708e097ab09b28dab7a07bc8bb4fc369b56a59
|
1fc34ba18781fd5234242d0eb6634040c7e3bda4
|
refs/heads/master
| 2023-02-04T12:25:01.589815
| 2020-04-08T15:29:52
| 2020-04-08T15:29:52
| 254,126,478
| 0
| 0
| null | 2023-01-24T01:59:07
| 2020-04-08T15:26:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
"""am_books_15599 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "AM books"
admin.site.site_title = "AM books Admin Portal"
admin.site.index_title = "AM books Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="AM books API",
default_version="v1",
description="API documentation for AM books App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
fceac6484c02034b637e4d82418f5fc378c81962
|
1a0b00a87533ea13e61562ce173f29a052cefeb7
|
/app/risk/__init__.py
|
119e9639c6ec14160f85d2adbc68368626641643
|
[] |
no_license
|
panuta/risk_model
|
1e8afc274a23c940c06c0c7253a4a3ef5772a011
|
408ed595b7dab5b24fcf4ab745ded2c87e762bb1
|
refs/heads/master
| 2021-05-03T17:50:30.746182
| 2018-02-12T17:14:40
| 2018-02-12T17:14:40
| 120,115,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
default_app_config = 'app.risk.apps.RiskConfig'
|
[
"panuta@gmail.com"
] |
panuta@gmail.com
|
877d19c38b03eb7511405c4630340bce1febcc09
|
b07cda4dac84e83130079c92cf5ecc5a2f8ff424
|
/QQspider/QQspider/items.py
|
63bedafc5d47143d803fc89faadc774d71e1ad40
|
[] |
no_license
|
likeketchup/scrapyframework
|
a004ee5ded09453c2b96811c2f3381167648a039
|
de485972143623fa7360aaf9d7066a0f8e532739
|
refs/heads/master
| 2021-09-06T04:00:22.915974
| 2018-02-02T08:26:58
| 2018-02-02T08:26:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class QqspiderItem(scrapy.Item):
url = scrapy.Field()
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
[
"noreply@github.com"
] |
likeketchup.noreply@github.com
|
fd2233a8d7ecca628b8cb515b0e01dfe5550abd5
|
00f65c34fbc19c6768999b43ca3b4f70e68b1490
|
/connectn/tournament.py
|
a9b7ff445bd3b9557f29353809d617d61daacbcd
|
[] |
no_license
|
owenmackwood/connn
|
0527805e9a1a4fb0b42a678bcf13d6c6a0e7a856
|
728d0d701341f520a6d964c56bd781f9dc6b311a
|
refs/heads/master
| 2023-01-29T03:53:07.822185
| 2020-12-04T19:49:45
| 2020-12-04T19:49:45
| 318,532,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,870
|
py
|
import traceback
import logging
import time
import numpy as np
import multiprocessing as mp
from typing import List, Union, Optional
import connectn.utils as cu
from connectn.users import import_agents, agents
from connectn.game import PlayerAction, BoardPiece, AgentFailed
from connectn.game import NO_PLAYER
class AgentResult:
def __init__(self, agent_name: str):
self.name: str = agent_name
self.moves: List[PlayerAction] = []
self.move_times: List[float] = []
self.state_size: List[int] = []
self.seeds: List[int] = []
self.stdout: List[str] = []
self.stderr: List[str] = []
self.outcome: str = "NONE"
class GameResult:
def __init__(self, agent_r1: AgentResult, agent_r2: AgentResult):
self.result_1: AgentResult = agent_r1
self.result_2: AgentResult = agent_r2
self.winner: BoardPiece = NO_PLAYER
self.time_sec: float = time.time()
self.time_str: str = time.ctime()
class GenMoveArgs:
def __init__(
self,
seed: Union[int, None],
board: np.ndarray,
player: BoardPiece,
state: Optional[cu.SavedState],
):
self.seed = seed
self.board = board
self.player = player
self.state = state
class GenMoveResult:
def __init__(self, stdout: str, stderr: str):
self.stdout = stdout
self.stderr = stderr
class GenMoveSuccess(GenMoveResult):
def __init__(
self,
stdout: str,
stderr: str,
move_time: float,
action: int,
state: Optional[cu.SavedState],
):
super().__init__(stdout, stderr)
self.move_time = move_time
self.action = action
self.state = state
class GenMoveFailure(GenMoveResult):
def __init__(self, stdout: str, stderr: str, error_msg: str):
super().__init__(stdout, stderr)
self.error_msg = error_msg
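# Queue protocol used below: the tournament process puts a GenMoveArgs onto a
# queue, the child process (generate_move_process, at the bottom of this file)
# pops it, runs the agent's generate_move with stdout/stderr captured, and
# puts back either a GenMoveSuccess or a GenMoveFailure.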
def run_tournament_process(
rq: mp.Queue, sq: mp.Queue, shutdown: mp.Event, play_all: bool = True
):
from itertools import product
from queue import Empty as EmptyQueue
from threading import Timer, Event
import connectn.results as cr
logger = logging.getLogger(__name__)
logger.info("Started run_tournament_process.")
if not cr.TOURNAMENT_PROCESS_DATA_DIR.exists():
cr.TOURNAMENT_PROCESS_DATA_DIR.mkdir()
repetitions = 1
run_all_after = (
60.0 * 60 * cu.RUN_ALL_EVERY
) # Run all-against-all every cu.RUN_ALL_EVERY hours
block_time = 1
agent_modules = import_agents({})
cr.initialize(agent_modules.keys())
if play_all:
logger.info(f"Just started, running all-against-all once.")
updated_agents = list(agents())
else:
logger.info("Skipping all-against-all.")
updated_agents = []
time_to_play_all = Event()
timer = Timer(run_all_after, lambda ev: ev.set(), args=(time_to_play_all,))
timer.start()
while not shutdown.is_set():
if updated_agents:
agent_modules = import_agents(agent_modules)
agent_modules.pop("agent_fail", None)
to_play = repetitions * [
list(g) # grid_map wants args as a list
for g in product(agent_modules.keys(), agent_modules.keys())
if g[0] != g[1] and (g[0] in updated_agents or g[1] in updated_agents)
]
updated_agents.clear()
if cu.ON_CLUSTER:
logger.info(f"About to play {len(to_play)} games on the cluster.")
run_tournament_cluster(to_play)
else:
logger.info(f"About to play {len(to_play)} games locally.")
run_tournament_local(to_play)
logger.info("Finished game-play round.")
played = set(n for p in to_play for n in p if cr.record_games_for_agent(n))
new_results = {}
for agent_name in played:
with open(f"{cr.agent_games_file_path(agent_name)}", "rb") as f:
new_results[agent_name] = f.read()
with open(f"{cr.RESULTS_FILE_PATH!s}", "rb") as f:
new_results[cu.TOURNAMENT_FILE] = f.read()
logger.info(
f"Sending {len(new_results)} modified result files to the server."
)
sq.put(new_results)
try:
# Check the message queue for updated agents
q_data = rq.get(block=True, timeout=block_time)
except EmptyQueue:
if time_to_play_all.is_set():
time_to_play_all.clear()
timer = Timer(
run_all_after, lambda ev: ev.set(), args=(time_to_play_all,)
)
timer.start()
updated_agents = list(agents())
logger.info("Timed to run all-against-all.")
else:
if isinstance(q_data, str) and q_data == "PLAY_ALL":
updated_agents = list(agents())
msg = "Received request to play all-against-all."
else:
updated_agents = cu.update_user_agent_code(q_data)
msg = f'Received {len(updated_agents)} updated agents for game-play: {" ".join(updated_agents)}'
logger.info(msg)
timer.cancel()
logger.info(f"Shutting down run_tournament_process gracefully.")
def run_tournament_cluster(to_play: List[List[str]]):
from gridmap import grid_map
import connectn.results as results
from connectn.utils import TEMP_DIR
logger = logging.getLogger(__name__)
if not TEMP_DIR.exists():
TEMP_DIR.mkdir(parents=True)
job_temp_dir = TEMP_DIR / time.strftime("%Y-%m-%d-%Hh%Mm%Ss")
job_temp_dir.mkdir()
logger.info(f"Submitting games to the queue: {to_play}")
n_games = len(to_play)
n_done = 1
for game_result in grid_map(
run_single_game,
to_play,
mem_free="2G",
name="conn4match",
num_slots=1,
temp_dir=f"{job_temp_dir!s}",
queue="cognition-all.q",
add_env={"CREATE_PLOTS": "FALSE", "USE_MEM_FREE": "TRUE"},
require_cluster=True,
):
logging.info(f"Received result {n_done} of {n_games}")
results.add_game(game_result)
logging.info(f"Wrote result {n_done} of {n_games} to disk.")
n_done += 1
logging.info(f"Finished all {n_games} games.")
def run_tournament_local(to_play: List[List[str]]):
import connectn.results as results
logger = logging.getLogger(__name__)
for g in to_play:
try:
game_result = run_single_game(*g)
results.add_game(game_result)
except Exception:
logger.exception("This should not happen, unless we are testing")
def run_single_game(
agent_1: str, agent_2: str, game_seed: Optional[int] = None
) -> GameResult:
"""
Likely has to be replaced by separate function runnable via the GridEngine
"""
from connectn import IS_DEBUGGING
from queue import Empty as EmptyQueue
from connectn.game import initialize_game_state, other_player
from connectn.game import valid_player_action, apply_player_action
from connectn.game import check_end_state, pretty_print_board
from connectn.game import PLAYER1, PLAYER2, GameStatus
logger = logging.getLogger(__name__)
logger.debug(f"Entered run_single_game for {agent_1} vs {agent_2}")
rs = np.random.RandomState(game_seed)
agent_modules = import_agents({})
agent_names = (agent_1, agent_2)
def get_name(_player: BoardPiece) -> str:
return agent_names[_player - 1]
states = {agent_name: None for agent_name in agent_names}
winner = player = NO_PLAYER
agent_name = agent_1
results = {PLAYER1: AgentResult(agent_1), PLAYER2: AgentResult(agent_2)}
gr = GameResult(results[PLAYER1], results[PLAYER2])
gen_move = {}
for player, agent_name in zip((PLAYER1, PLAYER2), agent_names):
try:
gen_move[agent_name]: cu.GenMove = getattr(
agent_modules[agent_name], "generate_move"
)
except AttributeError:
results[player].stderr.append(
"\nYou did not define generate_move at the package level"
)
gr.winner = other_player(player)
results[player].outcome = "FAIL"
results[gr.winner].outcome = "WIN"
return gr
except KeyError as e:
# If this occurs and it isn't for agent_fail, then something has gone terribly wrong.
# Presumably one of the agents is not defined in users.py
logger.exception("Something has gone terribly wrong")
raise e
game_state = initialize_game_state()
for player, agent_name in zip((PLAYER1, PLAYER2), agent_names):
try:
init = getattr(agent_modules[agent_name], "initialize")
init(game_state.copy(), player)
except (Exception, AttributeError):
pass
loser_result = "LOSS"
try:
logger.info(f"Playing game between {agent_1} and {agent_2}")
moves_q = mp.Manager().Queue()
end_state = GameStatus.STILL_PLAYING
playing = True
action = PlayerAction(0)
while playing:
for player, agent_name in zip((PLAYER1, PLAYER2), agent_names):
move_seed = rs.randint(2 ** 32)
results[player].seeds.append(move_seed)
gma = GenMoveArgs(
move_seed, game_state.copy(), player, states[agent_name]
)
moves_q.put(gma)
if IS_DEBUGGING:
generate_move_process(gen_move[agent_name], moves_q)
else:
ap = mp.Process(
target=generate_move_process,
args=(gen_move[agent_name], moves_q),
)
t0 = time.time()
ap.start()
ap.join(cu.MOVE_TIME_MAX)
move_time = time.time() - t0
if ap.is_alive():
ap.terminate()
loser_result = "TIMEOUT"
msg = f"Agent {agent_name} timed out after {cu.MOVE_TIME_MAX} seconds ({move_time:.1f}s)."
raise AgentFailed(msg)
try:
ret: Union[GenMoveSuccess, GenMoveFailure] = moves_q.get(
block=True, timeout=60.0
)
except EmptyQueue:
logger.exception("Timed out waiting to get move result from queue")
raise
results[player].stdout.append(ret.stdout)
results[player].stderr.append(ret.stderr)
if isinstance(ret, GenMoveFailure):
loser_result = "EXCEPTION"
error_msg = ret.error_msg
msg = f"Agent {agent_name} threw an exception:\n {error_msg}"
raise AgentFailed(msg)
assert isinstance(ret, GenMoveSuccess)
action = ret.action
state_size = cu.get_size(ret.state)
results[player].move_times.append(ret.move_time)
results[player].state_size.append(state_size)
if state_size > cu.STATE_MEMORY_MAX:
loser_result = "MAX_STATE_MEM"
msg = f"Agent {agent_name} used {cu.mib(state_size):.2f} MiB > {cu.mib(cu.STATE_MEMORY_MAX)} MiB"
raise AgentFailed(msg)
if not np.issubdtype(type(action), np.integer):
loser_result = "NONINT_ACTION"
msg = f"Agent {agent_name} returned an invalid type of action {type(action)}"
raise AgentFailed(msg)
action = PlayerAction(action)
results[player].moves.append(action)
if not valid_player_action(game_state, action):
loser_result = "INVALID_ACTION"
msg = f"Agent {agent_name} returned an invalid action {action}"
raise AgentFailed(msg)
apply_player_action(game_state, action, player)
end_state = check_end_state(game_state, player)
playing = end_state == GameStatus.STILL_PLAYING
states[agent_name] = ret.state
if not playing:
break
if end_state == GameStatus.IS_WIN:
winner = player
logger.info(
f"Game finished, {get_name(player)} beat {get_name(other_player(player))} by playing column {action}."
)
elif end_state == GameStatus.IS_DRAW:
winner = NO_PLAYER
logger.info("Game finished, no winner")
else:
logger.info("Something went wrong, game-play stopped before the end state.")
except AgentFailed as err:
logger.info(pretty_print_board(game_state))
logger.info(f"Agent failed: {agent_name}")
logger.info(err)
winner = other_player(player)
results[player].stderr.append(str(err))
# fig = plt.figure()
# fig.suptitle('Odds of win')
# for i, (agent, saved_state) in enumerate(states.items()):
# ax = fig.add_subplot(2, 1, i+1)
# for odds in saved_state.odds.values():
# ax.plot(odds, ('r', 'b')[i])
# ax.set_title(agent)
# ax.set_ylim(0, 1)
#
# fig = plt.figure()
# fig.suptitle('Odds of draw')
# for i, (agent, saved_state) in enumerate(states.items()):
# ax = fig.add_subplot(2, 1, i+1)
# for odds in saved_state.draw.values():
# ax.plot(odds, ('r', 'b')[i])
# ax.set_title(agent)
# ax.set_ylim(0, 1)
#
# fig = plt.figure()
# for i, (agent, saved_state) in enumerate(states.items()):
# ax = fig.add_subplot(2, 1, i+1)
# ax.plot(saved_state.nodes, label='Nodes')
# ax.plot(saved_state.visits, label='Visits')
# ax.set_title(agent)
# ax.legend()
#
# fig = plt.figure()
# fig.suptitle('Time')
# for i, (agent, saved_state) in enumerate(states.items()):
# ax = fig.add_subplot(2, 1, i+1)
# ax.plot(saved_state.time, label=f'{np.mean(saved_state.time):.2f}')
# ax.set_title(agent)
# ax.legend()
#
# plt.show()
# for i, (agent, saved_state) in enumerate(states.items()):
# print(
# f'TIME {agent} mu:{np.mean(saved_state.time):.2f},
# med:{np.median(saved_state.time):.2f}, max:{np.max(saved_state.time):.2f}'
# )
gr.winner = winner
if winner == NO_PLAYER:
results[PLAYER1].outcome = results[PLAYER2].outcome = "DRAW"
else:
results[PLAYER1 if winner == PLAYER1 else PLAYER2].outcome = "WIN"
results[PLAYER2 if winner == PLAYER1 else PLAYER1].outcome = loser_result
logger.debug(f"Finished run_single_game for {agent_1} vs {agent_2}")
return gr
def generate_move_process(generate_move: cu.GenMove, moves_q: mp.Queue):
from traceback import StackSummary
from random import seed as random_seed
import io
from time import time
import pickle
from contextlib import redirect_stderr, redirect_stdout
logger = logging.getLogger(__name__)
f_stderr, f_stdout = io.StringIO(), io.StringIO()
gma: GenMoveArgs = moves_q.get()
np.random.seed(gma.seed)
random_seed(gma.seed)
try:
with redirect_stdout(f_stdout), redirect_stderr(f_stderr):
t0 = time()
returned = generate_move(gma.board, gma.player, gma.state)
saved_state = None
if isinstance(returned, tuple):
action = returned[0]
if len(returned) > 1:
saved_state = returned[1]
else:
action = returned
move_time = time() - t0
stdout, stderr = f_stdout.getvalue(), f_stderr.getvalue()
result = GenMoveSuccess(stdout, stderr, move_time, action, saved_state)
except Exception as e:
logger.exception("An exception was thrown by the agent.")
error_msg = repr(e) + "\n"
extracted_list = traceback.extract_tb(e.__traceback__)
for item in StackSummary.from_list(extracted_list).format():
error_msg += str(item)
stdout, stderr = f_stdout.getvalue(), f_stderr.getvalue()
result = GenMoveFailure(stdout, stderr, error_msg)
    try:
        moves_q.put(result)
    except pickle.PickleError:
        logger.exception(
            "Internal error in trying to send the result, probably caused by saved_state"
        )
        # Retry with a result that omits the (likely unpicklable) saved state.
        # Guard on the result type: in the failure branch, move_time and
        # action were never assigned.
        if isinstance(result, GenMoveSuccess):
            moves_q.put(GenMoveSuccess(stdout, stderr, result.move_time, result.action, None))
        else:
            moves_q.put(GenMoveFailure(stdout, stderr, result.error_msg))
|
[
"owen.mackwood@tu-berlin.de"
] |
owen.mackwood@tu-berlin.de
|
e522cc2b29a6998ae1f628ab1d26796a56f114c3
|
0b53f5a217177aadd822e0e8114688248321b943
|
/tg_bot/modules/warns.py
|
36c5cac35d4ea2343c6ddac87118c3f5586747a5
|
[] |
no_license
|
tahirdeger/hayriBot
|
75b7cb4fab184fcfba2176697ae5b8416c7149af
|
7aa7b6cb002c77bc45faa763670f95258dd31e16
|
refs/heads/master
| 2023-06-01T10:24:51.469034
| 2021-06-29T23:22:35
| 2021-06-29T23:22:35
| 337,869,402
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,417
|
py
|
import html
import re
from typing import Optional, List
import telegram
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode, User, CallbackQuery
from telegram import Message, Chat, Update, Bot
from telegram.error import BadRequest
from telegram.ext import CommandHandler, run_async, DispatcherHandlerStop, MessageHandler, Filters, CallbackQueryHandler
from telegram.utils.helpers import mention_html
from tg_bot import dispatcher, BAN_STICKER
from tg_bot.modules.disable import DisableAbleCommandHandler
from tg_bot.modules.helper_funcs.chat_status import is_user_admin, bot_admin, user_admin_no_reply, user_admin, \
can_restrict
from tg_bot.modules.helper_funcs.extraction import extract_text, extract_user_and_text, extract_user
from tg_bot.modules.helper_funcs.filters import CustomFilters
from tg_bot.modules.helper_funcs.misc import split_message
from tg_bot.modules.helper_funcs.string_handling import split_quotes
from tg_bot.modules.log_channel import loggable
from tg_bot.modules.sql import warns_sql as sql
WARN_HANDLER_GROUP = 9
CURRENT_WARNING_FILTER_STRING = "<b>Current warning filters in this chat:</b>\n"
# Not async
def warn(user: User, chat: Chat, reason: str, message: Message, warner: User = None) -> str:
if is_user_admin(chat, user.id):
message.reply_text("Yöneticileri uyaramam ki!")
return ""
if warner:
warner_tag = mention_html(warner.id, warner.first_name)
else:
warner_tag = "Automated warn filter."
limit, soft_warn = sql.get_warn_setting(chat.id)
num_warns, reasons = sql.warn_user(user.id, chat.id, reason)
if num_warns >= limit:
sql.reset_warns(user.id, chat.id)
        if soft_warn:  # kick
            chat.unban_member(user.id)
            reply = "{} warnings, {} has been kicked!".format(limit, mention_html(user.id, user.first_name))
        else:  # ban
            chat.kick_member(user.id)
            reply = "{} warnings, {} has been banned!".format(limit, mention_html(user.id, user.first_name))
for warn_reason in reasons:
reply += "\n - {}".format(html.escape(warn_reason))
message.bot.send_sticker(chat.id, BAN_STICKER) # banhammer marie sticker
keyboard = []
log_reason = "<b>{}:</b>" \
"\n#WARN_BAN" \
"\n<b>Admin:</b> {}" \
"\n<b>User:</b> {} (<code>{}</code>)" \
"\n<b>Reason:</b> {}"\
"\n<b>Counts:</b> <code>{}/{}</code>".format(html.escape(chat.title),
warner_tag,
mention_html(user.id, user.first_name),
user.id, reason, num_warns, limit)
else:
keyboard = InlineKeyboardMarkup(
[[InlineKeyboardButton("Remove warn", callback_data="rm_warn({})".format(user.id))]])
reply = "{} has {}/{} warnings... watch out!".format(mention_html(user.id, user.first_name), num_warns,
limit)
if reason:
reply += "\nReason for last warn:\n{}".format(html.escape(reason))
log_reason = "<b>{}:</b>" \
"\n#WARN" \
"\n<b>Admin:</b> {}" \
"\n<b>User:</b> {} (<code>{}</code>)" \
"\n<b>Reason:</b> {}"\
"\n<b>Counts:</b> <code>{}/{}</code>".format(html.escape(chat.title),
warner_tag,
mention_html(user.id, user.first_name),
user.id, reason, num_warns, limit)
try:
message.reply_text(reply, reply_markup=keyboard, parse_mode=ParseMode.HTML)
except BadRequest as excp:
if excp.message == "Reply message not found":
# Do not reply
message.reply_text(reply, reply_markup=keyboard, parse_mode=ParseMode.HTML, quote=False)
else:
raise
return log_reason
@run_async
@user_admin_no_reply
@bot_admin
@loggable
def button(bot: Bot, update: Update) -> str:
query = update.callback_query # type: Optional[CallbackQuery]
user = update.effective_user # type: Optional[User]
match = re.match(r"rm_warn\((.+?)\)", query.data)
if match:
user_id = match.group(1)
chat = update.effective_chat # type: Optional[Chat]
res = sql.remove_warn(user_id, chat.id)
if res:
            update.effective_message.edit_text(
                "Warn removed by {}.".format(mention_html(user.id, user.first_name)),
                parse_mode=ParseMode.HTML)
user_member = chat.get_member(user_id)
return "<b>{}:</b>" \
"\n#UYARILAMAYAN" \
"\n<b>Admin:</b> {}" \
"\n<b>Kullanıcı:</b> {} (<code>{}</code>)".format(html.escape(chat.title),
mention_html(user.id, user.first_name),
mention_html(user_member.user.id, user_member.user.first_name),
user_member.user.id)
else:
            update.effective_message.edit_text(
                "User already has no warns.",
                parse_mode=ParseMode.HTML)
return ""
@run_async
@user_admin
@can_restrict
@loggable
def warn_user(bot: Bot, update: Update, args: List[str]) -> str:
message = update.effective_message # type: Optional[Message]
chat = update.effective_chat # type: Optional[Chat]
warner = update.effective_user # type: Optional[User]
user_id, reason = extract_user_and_text(message, args)
if user_id:
if message.reply_to_message and message.reply_to_message.from_user.id == user_id:
return warn(message.reply_to_message.from_user, chat, reason, message.reply_to_message, warner)
else:
return warn(chat.get_member(user_id).user, chat, reason, message, warner)
else:
message.reply_text("Hiçbir kullanıcı atanmadı!")
return ""
@run_async
@user_admin
@bot_admin
@loggable
def reset_warns(bot: Bot, update: Update, args: List[str]) -> str:
message = update.effective_message # type: Optional[Message]
chat = update.effective_chat # type: Optional[Chat]
user = update.effective_user # type: Optional[User]
user_id = extract_user(message, args)
if user_id:
sql.reset_warns(user_id, chat.id)
message.reply_text("Warnings have been reset!")
warned = chat.get_member(user_id).user
return "<b>{}:</b>" \
"\n#UYARITEMIZLE" \
"\n<b>Admin:</b> {}" \
"\n<b>Kullanıcı:</b> {} (<code>{}</code>)".format(html.escape(chat.title),
mention_html(user.id, user.first_name),
mention_html(warned.id, warned.first_name),
warned.id)
else:
message.reply_text("Hiçbir kullanıcı atanmadı!")
return ""
@run_async
def warns(bot: Bot, update: Update, args: List[str]):
message = update.effective_message # type: Optional[Message]
chat = update.effective_chat # type: Optional[Chat]
user_id = extract_user(message, args) or update.effective_user.id
result = sql.get_warns(user_id, chat.id)
if result and result[0] != 0:
num_warns, reasons = result
limit, soft_warn = sql.get_warn_setting(chat.id)
if reasons:
text = "Kullanıcının {}/{} uyarısı var, geçerli gerekçeler şunlar:".format(num_warns, limit)
for reason in reasons:
text += "\n - {}".format(reason)
msgs = split_message(text)
for msg in msgs:
update.effective_message.reply_text(msg)
else:
            update.effective_message.reply_text(
                "User has {}/{} warnings, but no reasons were given.".format(num_warns, limit))
else:
update.effective_message.reply_text("Bu kullanıcının herhangi bir uyarısı yok!")
# Dispatcher handler stop - do not async
@user_admin
def add_warn_filter(bot: Bot, update: Update):
chat = update.effective_chat # type: Optional[Chat]
msg = update.effective_message # type: Optional[Message]
args = msg.text.split(None, 1) # use python's maxsplit to separate Cmd, keyword, and reply_text
if len(args) < 2:
return
extracted = split_quotes(args[1])
if len(extracted) >= 2:
# set trigger -> lower, so as to avoid adding duplicate filters with different cases
keyword = extracted[0].lower()
content = extracted[1]
else:
return
# Note: perhaps handlers can be removed somehow using sql.get_chat_filters
for handler in dispatcher.handlers.get(WARN_HANDLER_GROUP, []):
if handler.filters == (keyword, chat.id):
dispatcher.remove_handler(handler, WARN_HANDLER_GROUP)
sql.add_warn_filter(chat.id, keyword, content)
update.effective_message.reply_text("Uyarı işleyicisi eklendi '{}'!".format(keyword))
raise DispatcherHandlerStop
@user_admin
def remove_warn_filter(bot: Bot, update: Update):
chat = update.effective_chat # type: Optional[Chat]
msg = update.effective_message # type: Optional[Message]
args = msg.text.split(None, 1) # use python's maxsplit to separate Cmd, keyword, and reply_text
if len(args) < 2:
return
extracted = split_quotes(args[1])
if len(extracted) < 1:
return
to_remove = extracted[0]
chat_filters = sql.get_chat_warn_triggers(chat.id)
if not chat_filters:
msg.reply_text("Aktif uyarı filtresi yok!")
return
for filt in chat_filters:
if filt == to_remove:
sql.remove_warn_filter(chat.id, to_remove)
msg.reply_text("Evet, bunun için insanları uyarmayı bırakacağım.")
raise DispatcherHandlerStop
msg.reply_text("Mevcut uyarı filtresi yok - /uyarilistesi komutu ile kontrol edin.")
@run_async
def list_warn_filters(bot: Bot, update: Update):
chat = update.effective_chat # type: Optional[Chat]
all_handlers = sql.get_chat_warn_triggers(chat.id)
if not all_handlers:
update.effective_message.reply_text("Aktif uyarı filtresi yok!")
return
filter_list = CURRENT_WARNING_FILTER_STRING
for keyword in all_handlers:
entry = " - {}\n".format(html.escape(keyword))
if len(entry) + len(filter_list) > telegram.MAX_MESSAGE_LENGTH:
update.effective_message.reply_text(filter_list, parse_mode=ParseMode.HTML)
filter_list = entry
else:
filter_list += entry
if not filter_list == CURRENT_WARNING_FILTER_STRING:
update.effective_message.reply_text(filter_list, parse_mode=ParseMode.HTML)
@run_async
@loggable
def reply_filter(bot: Bot, update: Update) -> str:
chat = update.effective_chat # type: Optional[Chat]
message = update.effective_message # type: Optional[Message]
chat_warn_filters = sql.get_chat_warn_triggers(chat.id)
to_match = extract_text(message)
if not to_match:
return ""
for keyword in chat_warn_filters:
pattern = r"( |^|[^\w])" + re.escape(keyword) + r"( |$|[^\w])"
if re.search(pattern, to_match, flags=re.IGNORECASE):
user = update.effective_user # type: Optional[User]
warn_filter = sql.get_warn_filter(chat.id, keyword)
return warn(user, chat, warn_filter.reply, message)
return ""
@run_async
@user_admin
@loggable
def set_warn_limit(bot: Bot, update: Update, args: List[str]) -> str:
chat = update.effective_chat # type: Optional[Chat]
user = update.effective_user # type: Optional[User]
msg = update.effective_message # type: Optional[Message]
if args:
if args[0].isdigit():
if int(args[0]) < 3:
msg.reply_text("Minimum limit 3!")
else:
sql.set_warn_limit(chat.id, int(args[0]))
msg.reply_text("Uyarı limiti şu şekilde güncellendi {}".format(args[0]))
return "<b>{}:</b>" \
"\n#UYARI_LIMITI" \
"\n<b>Admin:</b> {}" \
"\nUyarı limiti: <code>{}</code>".format(html.escape(chat.title),
mention_html(user.id, user.first_name), args[0])
else:
msg.reply_text("Bana bir sayı veya argüman ver!")
else:
limit, soft_warn = sql.get_warn_setting(chat.id)
msg.reply_text("Mevcut uyarı limiti {}".format(limit))
return ""
@run_async
@user_admin
def set_warn_strength(bot: Bot, update: Update, args: List[str]):
chat = update.effective_chat # type: Optional[Chat]
user = update.effective_user # type: Optional[User]
msg = update.effective_message # type: Optional[Message]
    if args:
        if args[0].lower() in ("ac", "evet"):  # "on" / "yes"
            sql.set_warn_strength(chat.id, False)
            msg.reply_text("Too many warns will now result in a ban!")
            return "<b>{}:</b>\n" \
                   "<b>Admin:</b> {}\n" \
                   "Has enabled strong warns. Users will be kicked and banned.".format(html.escape(chat.title),
                                                                                       mention_html(user.id, user.first_name))
        elif args[0].lower() in ("kapat", "hayir"):  # "off" / "no"
            sql.set_warn_strength(chat.id, True)
            msg.reply_text("Too many warns will now just result in a kick! Users will be able to join the group again afterwards.")
            return "<b>{}:</b>\n" \
                   "<b>Admin:</b> {}\n" \
                   "Has disabled strong warns. Users will only be kicked.".format(html.escape(chat.title),
                                                                                  mention_html(user.id,
                                                                                               user.first_name))
        else:
            msg.reply_text("I only understand evet/hayir/ac/kapat!")
    else:
        limit, soft_warn = sql.get_warn_setting(chat.id)
        if soft_warn:
            msg.reply_text("Warns are currently set to *kick* users when they exceed the limit.",
                           parse_mode=ParseMode.MARKDOWN)
        else:
            msg.reply_text("Warns are currently set to *kick and ban* users when they exceed the limit.",
                           parse_mode=ParseMode.MARKDOWN)
return ""
def __stats__():
return "{} toplam uyarılar, {} mesaj karşılında.\n" \
"{} uyarı filtresi, {} mesaj karşılında.".format(sql.num_warns(), sql.num_warn_chats(),
sql.num_warn_filters(), sql.num_warn_filter_chats())
def __import_data__(chat_id, data):
for user_id, count in data.get('warns', {}).items():
for x in range(int(count)):
sql.warn_user(user_id, chat_id)
def __migrate__(old_chat_id, new_chat_id):
sql.migrate_chat(old_chat_id, new_chat_id)
def __chat_settings__(chat_id, user_id):
num_warn_filters = sql.num_warn_chat_filters(chat_id)
limit, soft_warn = sql.get_warn_setting(chat_id)
return "Bu grup `{}` filtreye sahip. Alınan uyarı sayısı `{}` " \
"kullanıcı almadan önce *{}*.".format(num_warn_filters, limit, "kicked" if soft_warn else "banned")
__help__ = """
 - /uyarilar <user>: gets a user's warnings, and the reasons for them
 - /uyarilistesi: lists all current warning filters
*Admin only:*
 - /uyar <user>: warns a user. After 3 warns, the user will be banned from the group. Can also be used as a reply.
 - /uyarisifirla <user>: resets all of a user's warnings. Can also be used as a reply.
 - /uyariekle <keyword> <reply message>: sets a warning filter for a given keyword. To make the keyword a sentence, wrap it in quotes.
 - /uyaridurdur <keyword>: stops a warning filter.
 - /uyarilimit <num>: sets the warning limit.
 - /sertuyar <ac/evet/kapat/hayir>: if set on, exceeding the warn limit will result in a *ban the user cannot rejoin from*. Otherwise, the user is only kicked.
"""
__mod_name__ = "Uyarılar"
WARN_HANDLER = CommandHandler("uyar", warn_user, pass_args=True, filters=Filters.group)
RESET_WARN_HANDLER = CommandHandler(["uyarisifirla", "sifirla"], reset_warns, pass_args=True, filters=Filters.group)
CALLBACK_QUERY_HANDLER = CallbackQueryHandler(button, pattern=r"rm_warn")
MYWARNS_HANDLER = DisableAbleCommandHandler("uyarilar", warns, pass_args=True, filters=Filters.group)
ADD_WARN_HANDLER = CommandHandler("uyariekle", add_warn_filter, filters=Filters.group)
RM_WARN_HANDLER = CommandHandler(["uyaridurdur", "stopwarn"], remove_warn_filter, filters=Filters.group)
LIST_WARN_HANDLER = DisableAbleCommandHandler(["uyarilistesi", "liste"], list_warn_filters, filters=Filters.group, admin_ok=True)
WARN_FILTER_HANDLER = MessageHandler(CustomFilters.has_text & Filters.group, reply_filter)
WARN_LIMIT_HANDLER = CommandHandler("uyarilimit", set_warn_limit, pass_args=True, filters=Filters.group)
WARN_STRENGTH_HANDLER = CommandHandler("strongwarn", set_warn_strength, pass_args=True, filters=Filters.group)
dispatcher.add_handler(WARN_HANDLER)
dispatcher.add_handler(CALLBACK_QUERY_HANDLER)
dispatcher.add_handler(RESET_WARN_HANDLER)
dispatcher.add_handler(MYWARNS_HANDLER)
dispatcher.add_handler(ADD_WARN_HANDLER)
dispatcher.add_handler(RM_WARN_HANDLER)
dispatcher.add_handler(LIST_WARN_HANDLER)
dispatcher.add_handler(WARN_LIMIT_HANDLER)
dispatcher.add_handler(WARN_STRENGTH_HANDLER)
dispatcher.add_handler(WARN_FILTER_HANDLER, WARN_HANDLER_GROUP)
|
[
"tahir_deger@hotmail.com"
] |
tahir_deger@hotmail.com
|
4fcab845eba598866c444ff04ab34b5169844a2b
|
a65323eaf1edc748485d71fc59998866da82acb5
|
/twitterclone/settings.py
|
882dc9a90e7ae1982edde2ba5bb42fb3a57d40f0
|
[] |
no_license
|
zanvoy/twitterclone
|
50f76928ba5a0724773a0b9feff9b8e0eef19460
|
c02d4b1fdad0e3b8eef847a80fb50b7b3bbce779
|
refs/heads/master
| 2022-11-15T01:26:27.901158
| 2020-07-02T14:27:45
| 2020-07-02T14:27:45
| 268,690,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,337
|
py
|
"""
Django settings for twitterclone project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bmbhxdvi!-j-+&i!g(8wj=warl3zw6b4^nn!@=87u!r_-hx*ye'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tweet.apps.TweetConfig',
'twitteruser.apps.TwitteruserConfig',
'authentication.apps.AuthenticationConfig',
'notification.apps.NotificationConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'twitterclone.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'twitterclone.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login/'
AUTH_USER_MODEL = 'twitteruser.SomeUser'
|
[
"zanvoysan@gmail.com"
] |
zanvoysan@gmail.com
|
53b6bca242148c7828532cd22c0aa8ddbe3d4188
|
749b196ba38fc54899c51947a778229faa16db99
|
/model/attentionlayer.py
|
620a1c94c2cb7c0005659d5f74e785d7bf15f8f1
|
[
"Apache-2.0"
] |
permissive
|
binzzheng/RFDA-PyTorch
|
6e6cee7a9def04e43c0a094de320d0f431fc7e64
|
c50706d31dec25e9bd1699448c52452016a0d08b
|
refs/heads/main
| 2023-07-19T06:46:23.987838
| 2021-09-14T04:57:28
| 2021-09-14T04:57:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,294
|
py
|
import math
import torch
import torch.nn as nn
import os
import torchvision.transforms as transforms
from torch.nn import functional as F
from math import floor, ceil
from ops.dcn.deform_conv import ModulatedDeformConv
# `default_conv` is referenced below but never defined or imported in this
# file; an EDSR-style same-padding Conv2d helper is assumed here so the
# module is importable.
def default_conv(in_channels, out_channels, kernel_size, stride=1, padding=None, bias=True):
    if padding is None:
        padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     stride=stride, padding=padding, bias=bias)
class DSTA(nn.Module):
def __init__(self, n_feats, conv=default_conv):
super(DSTA, self).__init__()
f = n_feats // 4
self.conv1 = conv(n_feats, f, kernel_size=1)
self.conv_f = conv(f, f, kernel_size=1)
self.conv_max = conv(f, f, kernel_size=3, padding=1)
self.conv2 = conv(f, f, kernel_size=3, stride=2, padding=0)
self.conv3 = conv(f, f, kernel_size=3, padding=1)
self.conv3_ = conv(f, f, kernel_size=3, padding=1)
self.conv4 = conv(f, n_feats, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
# DCN is better
self.dcn = ModulatedDeformConv(f,f,3,padding=1,deformable_groups=f)
self.mask = conv(f,f*3*3*3,3,padding=1)
        # two masks, multi-level fusion
self.f = f
self.down_conv2 = nn.Sequential(
nn.Conv2d(f, f, 3, stride=2, padding=3//2),
nn.ReLU(inplace=True))
self.mask2 = conv(f,f*3*3*3,3,padding=1)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# feature channel downscale and upscale --> channel weight
self.conv_du = nn.Sequential(
nn.Conv2d(f, 2*f, 1, padding=0, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(2*f, n_feats, 1, padding=0, bias=True),
nn.Sigmoid()
)
def forward(self, x):
f = x.clone()
c1_ = (self.conv1(f))
c1 = self.conv2(c1_)
v_max = F.max_pool2d(c1, kernel_size=7, stride=3)
v_range = self.relu(self.conv_max(v_max))
c3 = self.relu(self.conv3(v_range))
c3 = self.conv3_(c3)
c3 = self.relu(c3)
dc3 = self.down_conv2(c3)
off_mask2 = self.mask2(dc3)
off_msk = self.mask(c3)
off_mask2 = F.interpolate(off_mask2, (off_msk.size(2), off_msk.size(3)), mode='bilinear', align_corners=False)
off_msk = off_msk + off_mask2
off = off_msk[:, :self.f*2*3*3, ...]
msk = torch.sigmoid(
off_msk[:, self.f*2*3*3:, ...]
)
c3 = self.dcn(v_max,off,msk)
c3 = F.relu(c3,inplace = True)
y = self.avg_pool(c3)
y = self.conv_du(y)
c3 = F.interpolate(c3, (x.size(2), x.size(3)), mode='bilinear', align_corners=False)
cf = self.conv_f(c1_)
c4 = self.conv4(c3+cf)
m = self.sigmoid(c4)
# print(x.size(),'vs',m.size(),'vs',y.size())
        # if you want to visualize them:
# map_m = transforms.ToPILImage()(m[0,0,...]).convert('L')
# map_m.save("./map_m1.png") #
# map_m = transforms.ToPILImage()(m[0,2,...]).convert('L')
# map_m.save("./map_m2.png") #
# map_m = transforms.ToPILImage()(m[0,4,...]).convert('L')
# map_m.save("./map_m3.png") #
# map_m = transforms.ToPILImage()(m[0,6,...]).convert('RGB')
# map_m.save("./map_m4.png") #
# map_m = transforms.ToPILImage()(m[0,8,...]).convert('RGB')
# map_m.save("./map_m5.png") #
# print("y=",y[0])
return x * m * y
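# A hypothetical smoke test (not from the original repo): n_feats should be
# divisible by 4, and the compiled ops.dcn extension typically requires CUDA.
if __name__ == "__main__":
    model = DSTA(n_feats=64).cuda()
    x = torch.randn(1, 64, 64, 64).cuda()
    print(model(x).shape)  # torch.Size([1, 64, 64, 64])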
|
[
"noreply@github.com"
] |
binzzheng.noreply@github.com
|
3e38b731fa9cf3386690394b6eec2cfede7ef69e
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/1246.py
|
467cde26036ec3b0eda727f56e9bc73feb98a2c8
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 121
|
py
|
ii = [('WadeJEB.py', 1), ('SoutRD2.py', 1), ('WheeJPT.py', 1), ('MereHHB3.py', 3), ('StorJCC.py', 1), ('MereHHB2.py', 1)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
5568ed398962bc337e8b618aefeb691942452d8e
|
7620893c7d253a4d8c6f5aef2cfda6c72b777d49
|
/src/Camera/ShowPicture.py
|
edf8d5620eb77c25c350ef4020f451c9c7ee1829
|
[] |
no_license
|
garridoH/cameraGUI
|
cacc549a9da0bcb6c3b9be04ef9783c653300118
|
cb6ac1d54dd8651da974ed058990c8212d145415
|
refs/heads/master
| 2021-01-17T22:38:53.398306
| 2012-06-12T20:41:44
| 2012-06-12T20:41:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
'''
Created on Jun 3, 2012
@author: redwards
'''
import Image
def show():
fileName = raw_input("What is the name of the image file? ")
showImage(fileName)
def showImage(fn):
    picture = Image.open(fn)
    picture.show()
    # width, height = picture.size  # note: size is an attribute, not a method
    # pix = picture.getPixels()
show()
|
[
"raedwards@gmail.com"
] |
raedwards@gmail.com
|
5389d5091e5331db72ab4abf9e86919aff376da4
|
ecdf5493acc0c7ca67988657d36c256d565d73be
|
/01_PythonBasics/02_Strings.py
|
ac4dccff9a07d082b4cfede964acc2dca06859ea
|
[] |
no_license
|
baubyte/pythonPractice
|
731795f58a6cdcdd0fa459d2d2a3063e921578d7
|
e5fa2e9b58b434bb46077245d6c89907bcfeb686
|
refs/heads/main
| 2023-05-07T01:08:35.518041
| 2021-05-14T15:57:11
| 2021-05-14T15:57:11
| 362,898,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
print(" /|")
print(" / |")
print(" / |")
print("/___|")
phrase = "Giraffe Academy"
print(phrase.upper())
print(phrase.upper().isupper())
print(len(phrase))
print(phrase[0])
print(phrase[3])
print(phrase.index("a"))
print(phrase.replace("Giraffe", "Elephant"))
|
[
"paredbaez.martin@gmail.com"
] |
paredbaez.martin@gmail.com
|
d14a5c62a35ff32c8988af4603655a76fe3bca8b
|
80a4ed1cee1cf971a830cd54f8de2433bc0bd9d4
|
/menog-rtt-cdn-measure/retryFailedMeasurements.py
|
e61f339e150c97ecd7c09def76ecdad21aaf352a
|
[] |
no_license
|
miriamturk/CDNs-Performance-in-MENOG-Region-
|
818cc483d81e273479651ee7bb02141d43536153
|
a6377da5ac748c551236ef45b8e0b6b5874e86f7
|
refs/heads/master
| 2021-07-14T01:04:37.839356
| 2021-03-14T13:33:49
| 2021-03-14T13:33:49
| 241,588,149
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,587
|
py
|
import glob
import os
import ntpath
import json
from datetime import datetime
from ripe.atlas.cousteau import (
Ping,
Traceroute,
AtlasSource,
AtlasCreateRequest
)
# for measurement set 1
ATLAS_API_KEY = ""
for filepath in glob.iglob('measurements/*.json'):
source_country_code = os.path.splitext(ntpath.basename(filepath))[0]
measurement_dict = dict()
with open(filepath) as f:
data = json.load(f)
for destination_network, measurements in data.items():
measurement_dict[destination_network] = []
for elt in measurements[:]:
if elt['is_success'] is False:
measurements.remove(elt)
ping = Ping(af=4, target=elt['host'],
description="From {} to {}".format(source_country_code, destination_network),
interval=10800, tags=["retry-test-code-esib"])
traceroute = Traceroute(
af=4,
target=elt['host'],
description="From {} to {}".format(source_country_code, destination_network),
protocol="ICMP",
interval=10800,
tags=["retry-test-code-esib"]
)
source = AtlasSource(type="country", value=source_country_code, requested=3)
atlas_request = AtlasCreateRequest(
start_time=datetime.utcnow(),
key=ATLAS_API_KEY,
measurements=[ping, traceroute],
sources=[source],
is_oneoff=False
)
(is_success, response) = atlas_request.create()
if is_success:
measurement_dict[destination_network].append(
{"host": elt['host'], "is_success": is_success,
"measurement_id": response['measurements']})
else:
measurement_dict[destination_network].append(
{"host": elt['host'], "is_success": is_success,
"reason": response})
else:
measurement_dict[destination_network].append(
{"host": elt['host'], "is_success": elt['is_success'],
"measurement_id": elt['measurement_id']})
filename = "measurements/{}.json".format(source_country_code)
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
json.dump(measurement_dict, f, indent=4, sort_keys=True)
|
[
"Khalafmichel98@gmail.com"
] |
Khalafmichel98@gmail.com
|
65e46dc505f7f63a30b130e9e03f7126129f8877
|
4079ddfe62e1df9cd57a067b9a9eaf79d6974c01
|
/.c9/metadata/environment/home/tests.py
|
91b2069cc617e1bd8666045d959e2508cc654237
|
[] |
no_license
|
Code-Institute-Submissions/Clare-Supporters-Club
|
e4f741bff1f4c082a06518befadbbfa1b77e7b6f
|
f36c8ee722531571c57b269126ca3a48360582e9
|
refs/heads/master
| 2021-03-19T23:53:29.479231
| 2020-03-13T20:06:22
| 2020-03-13T20:06:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
{"filter":false,"title":"tests.py","tooltip":"/home/tests.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":0,"column":0},"end":{"row":0,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1584012432505,"hash":"d8ba9a8a1de20610ca7f80f8c8c6c515ece66a30"}
|
[
"ubuntu@ip-172-31-80-226.ec2.internal"
] |
ubuntu@ip-172-31-80-226.ec2.internal
|
d3f82b062abac9c3587d7ebf6cc1dec7e0901f59
|
45c3017f11930d2ab94e1010ae55f4a7c4993fe7
|
/checkers/CheckersGame.py
|
0131484717a4dead33046b822888a3105c70bf26
|
[
"MIT"
] |
permissive
|
DomFC/alpha-zero-general
|
dd0fc41e35ad92c73f311a252bfbd21886009e37
|
865a09b397a7776d2b0e07022bc840c293843f22
|
refs/heads/master
| 2020-05-16T07:37:37.115512
| 2019-04-23T16:54:43
| 2019-04-23T16:54:43
| 182,882,814
| 0
| 0
|
MIT
| 2019-04-22T23:23:43
| 2019-04-22T23:23:43
| null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
import sys
from .Piece import Piece
from .Board import Board, _mirror_action
#sys.path.append('..')
from Game import Game
import numpy as np
import copy
W = 4
H = 8
class CheckersGame(Game):
def __init__(self):
pass
def getInitBoard(self):
return Board()
def getBoardSize(self):
return (W, H)
def getActionSize(self):
return W * H * 4 * 2
def getNextState(self, board, player, action):
if board.flipped_board:
action = _mirror_action(action)
next_board = copy.deepcopy(board)
next_turn = next_board.play_move(player, action)
return next_board, next_turn
def getValidMoves(self, board, player):
return board.get_valid_moves(player).flatten()
def getGameEnded(self, board, player):
return board.winner(player)
def getCanonicalForm(self, board, player):
# If the player is white, return the board unchanged, otherwise flip it.
if player == Piece.WHITE:
if board.flipped_board:
return board.flipped()
return board
else:
if board.flipped_board:
return board
return board.flipped()
def getSymmetries(self, board, pi):
return [(board, pi)]
def stringRepresentation(self, board):
return str(board.tostring()) + str(board.mid_capture) + str(board.flipped_board)
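# A rough usage sketch, assuming Piece.WHITE is a valid player id for the
# Board API used above:
if __name__ == "__main__":
    game = CheckersGame()
    board = game.getInitBoard()
    valids = game.getValidMoves(board, Piece.WHITE)
    action = int(np.argmax(valids))  # index of the first valid move
    board, next_player = game.getNextState(board, Piece.WHITE, action)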
|
[
"dominiquefauteuxchapleau@gmail.com"
] |
dominiquefauteuxchapleau@gmail.com
|
a751f10831f3bc96a6f7962f64f981bca129e116
|
c7328c278da9615794fd3faa9a57214c6e86ecfd
|
/tornadows/soap.py
|
be9f289303c26d93f64daf9bd31f5d09c495a740
|
[
"Apache-2.0"
] |
permissive
|
jiaxiaolei/tornado-webservices
|
a94c17cc6f3add419016478e72b42ef4454b414c
|
49728fd1e35ff24b1b814bacf76ad6736eb6bffd
|
refs/heads/master
| 2021-01-18T10:27:41.546325
| 2011-08-13T05:52:27
| 2011-08-13T05:52:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,118
|
py
|
#!/usr/bin/env python
#
# Copyright 2011 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Implementation of a envelope soap 1.1 """
import xml.dom.minidom
class SoapMessage:
""" Implementation of a envelope soap 1.1 with minidom API
import tornadows.soap
import xml.dom.minidom
soapenvelope = tornadows.soap.SoapMessage()
xmlDoc = xml.dom.minidom.parseString('<Doc>Hello, world!!!</Doc>')
soapenvelope.setBody(xmlDoc)
for s in soapenvelope.getBody():
print s.toxml()
"""
def __init__(self):
self._soap = xml.dom.minidom.Document()
self._envelope = self._soap.createElementNS('http://schemas.xmlsoap.org/soap/envelope/','soapenv:Envelope')
self._envelope.setAttribute('xmlns:soapenv','http://schemas.xmlsoap.org/soap/envelope/')
self._soap.appendChild(self._envelope)
self._header = self._soap.createElement('soapenv:Header')
self._body = self._soap.createElement('soapenv:Body')
self._envelope.appendChild(self._header)
self._envelope.appendChild(self._body)
def getSoap(self):
""" Return the soap envelope as xml.dom.minidom.Document
getSoap() return a xml.dom.minidom.Document object
"""
return self._soap
def getHeader(self):
""" Return the child elements of Header element
getHeader() return a list with xml.dom.minidom.Element objects
"""
return self._header.childNodes
def getBody(self):
""" Return the child elements of Body element
getBody() return a list with xml.dom.minidom.Element objects
"""
return self._body.childNodes
def setHeader(self, header):
""" Set the child content to Header element
setHeader(header), header is a xml.dom.minidom.Document object
"""
if isinstance(header,xml.dom.minidom.Document):
self._header.appendChild(header.documentElement)
elif isinstance(header,xml.dom.minidom.Element):
self._header.appendChild(header)
def setBody(self,body):
""" Set the child content to Body element
setBody(body), body is a xml.dom.minidom.Document object or
a xml.dom.minidom.Element
"""
if isinstance(body,xml.dom.minidom.Document):
self._body.appendChild(body.documentElement)
elif isinstance(body,xml.dom.minidom.Element):
self._body.appendChild(body)
def removeHeader(self):
""" Remove the last child elements from Header element """
lastElement = self._header.lastChild
if lastElement != None:
self._header.removeChild(lastElement)
def removeBody(self):
""" Remove last child elements from Body element """
lastElement = self._body.lastChild
if lastElement != None:
self._body.removeChild(lastElement)
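# Illustrative usage sketch (not part of the original module): exercises the
# Header element alongside the Body, mirroring the class docstring's example.
# The <Auth> payload below is a made-up fragment for demonstration only.
if __name__ == '__main__':
    demo_header = xml.dom.minidom.parseString('<Auth>token</Auth>')
    demo_body = xml.dom.minidom.parseString('<Doc>Hello, world!!!</Doc>')
    msg = SoapMessage()
    msg.setHeader(demo_header)
    msg.setBody(demo_body)
    print msg.getSoap().toprettyxml()  # Python 2 print, matching the module
    msg.removeHeader()                 # drops the last child of soapenv:Header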
|
[
"rancavil@innovaser.cl"
] |
rancavil@innovaser.cl
|
ac0ef3c3bac0577f9a02c066de46ace3b1ffb697
|
6a5ac2d0ed34d2fa453b80b26781b40471de28e0
|
/tests/routes.py
|
8c763f07d057c94d8985097750aeb5736019fd88
|
[
"BSD-3-Clause"
] |
permissive
|
Tijani-Dia/yrouter-websockets
|
7e07da5758fff50b320bf71e5ef08a6dc6279887
|
ea5ef8ed6a2143945c8f0736313197dbd6c77896
|
refs/heads/main
| 2023-09-06T06:43:43.327832
| 2021-11-22T18:32:01
| 2021-11-22T21:05:43
| 427,655,524
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from yrouter import route
from .handlers import hello_user, home
routes = (
route("/", home),
route("hello/<str:username>/", hello_user),
)
|
[
"atdia97@gmail.com"
] |
atdia97@gmail.com
|
6f52bf730155985a3ce1c5c0fc620626491dd40d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02860/s909177636.py
|
f053600ab017ccfb578e04bbb70efdaa080e8bca
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
n = int(input())//2
s = input()
if s[:n] == s[n:]:
print("Yes")
else:
print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
25293dbee788f6de6f10ab787800e61a54d9d26c
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/redshift_write_f/cluster-snapshot-schedule_modify.py
|
0de0745d12e875d130eed74239233eb4ef382995
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/redshift/modify-cluster-snapshot-schedule.html
if __name__ == '__main__':
"""
"""
write_parameter("redshift", "modify-cluster-snapshot-schedule")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
bef0c5697c0bc5aa9de09eccc6ad29a5496a6520
|
0000ae449d18f995596b1a0b1f1ade25db64efde
|
/board.py
|
9690e17862ca7cbe82dc4679557932906f397618
|
[] |
no_license
|
MahmoodRasooli/MineWseeper
|
22d2a4f9b51155295d9f7f95978ca3acf44674e8
|
44e6cfa3d42b44debf77cffaabb3c7a354d315f7
|
refs/heads/main
| 2023-01-23T16:15:47.358902
| 2020-12-05T10:56:54
| 2020-12-05T10:56:54
| 318,192,613
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
from piece import piece
from math import ceil
from random import randint
### Represents a board which contains the placeholders
class board:
def __init__(self, size: int, pieces: list = None) -> None:
super().__init__()
if size <= 2:
raise Exception("size in not valid")
self.size = size
if pieces is not None:
self.pieces = pieces
else:
self._generatePieces()
self._setBombs()
# self._determinePiecesNeighboursBombsCount()
def _getBombNumbers(self) -> int:
"""
Returns the number of the bombs on the board
"""
return int(ceil((self.size ** 2) * 15 / 100))
def _generatePieces(self) -> None:
"""
Initialize the pieces and their states on the board
"""
self.pieces = [[piece(y, x) for x in range(self.size)] for y in range(self.size)]
def _setBombs(self) -> None:
"""
Sets the bombs in the pieces and also updates the adjacent pieces' neiboursBombCount property
"""
for i in range(self._getBombNumbers()):
random_row = randint(0, self.size - 1)
random_col = randint(0, self.size - 1)
# Checks if the picked random piece is already bombed
while(self.pieces[random_row][random_col].haveBomb):
random_row = randint(0, self.size - 1)
random_col = randint(0, self.size - 1)
self.pieces[random_row][random_col].haveBomb = True
# Sets the neiboursBombCount of the adjacent pieces
for i in range(-1, 2, 1):
for j in range(-1, 2, 1):
if(i == 0 and j == 0):
continue
if(random_row + i < 0 or random_row + i >= self.size or
random_col + j < 0 or random_col + j >= self.size):
continue
self.pieces[random_row + i][random_col + j].neiboursBombCount += 1
def _determinePiecesNeighboursBombsCount(self) -> None:
"""
Fill the neiboursBombCount in each piece
"""
# for i in range(self.size):
# for j in range(self.size):
# self.pieces[i][j].
pass
def _getNeighbours(self, i_index: int, j_index: int) -> list:
"""
Returns the neighbours of the given piece
"""
neighbours = []
for i in range(-1, 2, 1):
for j in range(-1, 2, 1):
if(i == 0 and j == 0):
continue
if(i_index + i < 0 or i_index + i >= self.size or
j_index + j < 0 or j_index + j >= self.size):
continue
neighbours.append(self.pieces[i_index + i][j_index + j])
return neighbours
def clearPiece(self, i_index: int, j_index: int) -> bool:
"""
Clears the piece, meaning the player left-clicked on the piece
"""
        if(not(self.pieces[i_index][j_index].clear())):
            return False
        return True
def changeFlagState(self, i_index: int, j_index: int, flagState: bool) -> None:
"""
Changes the flag state of the given piece
"""
self.pieces[i_index][j_index].changeFlagState(flagState)
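# Illustrative usage sketch (not part of the original module); piece.clear()
# and the flag handling come from the sibling `piece` module imported above.
if __name__ == '__main__':
    b = board(5)                   # 5x5 board, ~15% of the cells are mined
    b.changeFlagState(0, 0, True)  # flag the top-left piece
    survived = b.clearPiece(1, 1)  # False if (1, 1) hides a bomb
    print(survived)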
|
[
"rasuli.ut@gmail.com"
] |
rasuli.ut@gmail.com
|
a3241fdc902e1082dbe26d60d653dcf4b84dd8ba
|
6964b2fee83b5efc07682933b75c5ef27b459d5b
|
/plotting_python/ohm_ds-ycut.py
|
11357b8e1abe9fd568f161c40cf4751b49080eef
|
[] |
no_license
|
dcfy/dusty-plasma
|
bb1a565f20492cd313d5c95c87a386dbfb14839b
|
678e08f20128d7a80cf1cd83fdab1f77a1dc0b0c
|
refs/heads/master
| 2022-03-07T20:03:40.274231
| 2022-02-22T19:00:13
| 2022-02-22T19:00:13
| 56,763,656
| 0
| 1
| null | 2016-07-08T03:14:35
| 2016-04-21T10:09:26
|
Lua
|
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import gk1
lightSpeed = 1.
mu0 = 1.
n0 = 1.
mi = 1.
qi=1.
me = 1/25.
qe=-1.
wpe0_wce0 = 3.
epsilon0 = 1/lightSpeed*lightSpeed/mu0
B0 = lightSpeed * np.sqrt(mu0 * n0 * me) / wpe0_wce0
v_A = B0/np.sqrt(mu0*n0*mi)
wci0=qi*B0/mi
wpi0 = np.sqrt(n0 * qi*qi / epsilon0 / mi)
di0 = lightSpeed/wpi0
de0 = di0*np.sqrt(me/mi)
Enorm = B0*v_A
print(Enorm)
iEnorm = 1./Enorm
tnorm = 1./wci0
xnorm = di0
#def plotOhmY(ts, ix=None, species=['e','p','o']):
# f = gk1.File('gem_q_%d.h5'%ts,
# num_fluids=3,fluid_names=['e','p','o'],num_moments=5,
#def plotOhmY(ts, ix=None, species=['e','p']):
def plotOhmY(ts, ix=None, iy=None, species=['e','p']):
f = gk1.File('gem-1010-ds_q_%d.h5'%ts,
num_fluids=2,fluid_names=['e','p'],num_moments=10,
me=1./25., qe=-1., mp=1., qp=1., mo=16., qo=1.)
nx,ny,ncomp = f['StructGridField'].shape
xlo,ylo = f.LowerBounds()
xup,yup = f.UpperBounds()
dx = (xup-xlo)/nx
dy = (yup-ylo)/ny
idx = 1./dx
idy = 1./dy
    if ix is None:
        ix = nx // 2
    #print 'ix=', ix
    if iy is None:
        iy = ny // 2
    #print 'iy=', iy
x = f.getCoordinates('x')/xnorm
#print 'x=', x, len(x)
y = f.getCoordinates('y')/xnorm
#print 'y=', y, len(y)
x_cut = x[ix]
#print 'x_cut=', x_cut
y_cut = y[iy]
#print 'y_cut=', y_cut
fig, ax = plt.subplots()
#
Ez = f.getField('Ez', iy=iy)*iEnorm
#print 'Ez=', Ez, len(Ez)
ax.plot(x, Ez, label='$ E_z $',color='k',lw=2,alpha=0.8)
#
Bx = f.getField('Bx', iy=iy)
By = f.getField('By', iy=iy)
#
for s in species:
# -VxB term, i.e., convection electric field
vx_s = f.getField('vx_'+s, iy=iy)*iEnorm
vy_s = f.getField('vy_'+s, iy=iy)*iEnorm
_v_sxB = -vx_s*By + vy_s*Bx
ax.plot(x, _v_sxB, label='$ -(\\mathbf{v}_%s \\times \\mathbf{B} )|_z$'%(s),lw=2,alpha=0.8)
# TODO: implement more terms
    title = '$t=%g, y=%g$'%(f.Time()/tnorm,y_cut)
lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), title = title, fontsize='x-large')
lgd.get_title().set_fontsize('x-large')
ax.set_xlabel('$x/d_{i0}$', fontsize='x-large')
ax.set_ylabel('$E/B_0v_{A0}$', fontsize='x-large')
ax.set_xlim([-20, 20])
ax.set_ylim([-0.4, 0.1])
fig.savefig('ohm_Ez_ycut_%02d_x%g.png'%(ts,y_cut), bbox_inches='tight')
#plt.show()
plt.close(fig)
f.close()
#for frame in [10]:
for frame in range(80):
#for frame in range(57,58):
print("frame %d"%frame)
plotOhmY(frame)
|
[
"dcf.ustc@gmail.com"
] |
dcf.ustc@gmail.com
|
0a69960936289dc206506de496375c721ea80577
|
0246b885a34ad294646589c0b011323b443990f5
|
/Condition/Condition.py
|
716db9efb39b615206f96069837992bdedf8a1e7
|
[] |
no_license
|
kaczmarekkacper/CN2
|
25b8271bb48d77675415fca7945d2d800b6196ee
|
fd65b73d355b9ef09446654796c841e4f34e1c97
|
refs/heads/master
| 2023-01-29T00:47:25.665790
| 2020-12-15T22:04:37
| 2020-12-15T22:04:37
| 321,799,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
class Condition:
def __init__(self):
self.attribute_number = None
self.attribute_value = None
def is_true(self, example):
return example.attributes[self.attribute_number] == self.attribute_value
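# Illustrative usage sketch (not part of the original module): any object
# exposing an `attributes` sequence can serve as an example row here.
if __name__ == '__main__':
    class _Example:  # minimal stand-in for a training example
        def __init__(self, attributes):
            self.attributes = attributes

    cond = Condition()
    cond.attribute_number = 1
    cond.attribute_value = 'sunny'
    print(cond.is_true(_Example(['hot', 'sunny', 'high'])))  # True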
|
[
"kazzper791@gmail.com"
] |
kazzper791@gmail.com
|
7a53c9a7ba84f35b98c74d5cb5f1c27c12bac341
|
8b3cdddedf4d5ba6f2218f0f61ff7dd2a85f9d5a
|
/model.py
|
4a69246f0e8db6cb680101232c58f50cd3e4bb54
|
[
"MIT"
] |
permissive
|
yingrui-yang/JPQ
|
986cee6f8fc68afc73aa056d48273db9540668ae
|
c375b980052c22f3cf59a7d5599ef62e40cc5ad4
|
refs/heads/main
| 2023-09-03T23:53:51.501897
| 2021-10-13T04:50:07
| 2021-10-13T04:50:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,566
|
py
|
import torch
from torch import nn
import transformers
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel
from transformers import RobertaModel
class EmbeddingMixin:
"""
Mixin for common functions in most embedding models. Each model should define its own bert-like backbone and forward.
We inherit from RobertaModel to use from_pretrained
"""
def __init__(self, model_argobj):
if model_argobj is None:
self.use_mean = False
else:
self.use_mean = model_argobj.use_mean
print("Using mean:", self.use_mean)
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding, nn.Conv1d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=0.02)
def masked_mean(self, t, mask):
s = torch.sum(t * mask.unsqueeze(-1).float(), axis=1)
d = mask.sum(axis=1, keepdim=True).float()
return s / d
def masked_mean_or_first(self, emb_all, mask):
# emb_all is a tuple from bert - sequence output, pooler
assert isinstance(emb_all, tuple)
if self.use_mean:
return self.masked_mean(emb_all[0], mask)
else:
return emb_all[0][:, 0]
def query_emb(self, input_ids, attention_mask):
raise NotImplementedError("Please Implement this method")
def body_emb(self, input_ids, attention_mask):
raise NotImplementedError("Please Implement this method")
class BaseModelDot(EmbeddingMixin):
def _text_encode(self, input_ids, attention_mask):
# TODO should raise NotImplementedError
# temporarily do this
return None
def query_emb(self, input_ids, attention_mask):
outputs1 = self._text_encode(input_ids=input_ids,
attention_mask=attention_mask)
full_emb = self.masked_mean_or_first(outputs1, attention_mask)
query1 = self.norm(self.embeddingHead(full_emb))
return query1
def body_emb(self, input_ids, attention_mask):
return self.query_emb(input_ids, attention_mask)
def forward(self, input_ids, attention_mask, is_query, *args):
assert len(args) == 0
if is_query:
return self.query_emb(input_ids, attention_mask)
else:
return self.body_emb(input_ids, attention_mask)
class RobertaDot(BaseModelDot, RobertaPreTrainedModel):
def __init__(self, config, model_argobj=None):
BaseModelDot.__init__(self, model_argobj)
RobertaPreTrainedModel.__init__(self, config)
        if int(transformers.__version__.split('.')[0]) >= 4:
config.return_dict = False
self.roberta = RobertaModel(config, add_pooling_layer=False)
if hasattr(config, "output_embedding_size"):
self.output_embedding_size = config.output_embedding_size
else:
self.output_embedding_size = config.hidden_size
print("output_embedding_size", self.output_embedding_size)
self.embeddingHead = nn.Linear(config.hidden_size, self.output_embedding_size)
self.norm = nn.LayerNorm(self.output_embedding_size)
self.apply(self._init_weights)
def _text_encode(self, input_ids, attention_mask):
outputs1 = self.roberta(input_ids=input_ids,
attention_mask=attention_mask)
return outputs1
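# Illustrative usage sketch (not part of the original module): because the
# class inherits from RobertaPreTrainedModel, it can be loaded with
# from_pretrained. The checkpoint name and tensor shapes are assumptions.
if __name__ == '__main__':
    model = RobertaDot.from_pretrained('roberta-base')
    input_ids = torch.randint(0, 100, (2, 16))   # batch of 2, sequence length 16
    attention_mask = torch.ones_like(input_ids)
    q = model(input_ids, attention_mask, is_query=True)
    print(q.shape)  # (2, output_embedding_size)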
|
[
"jingtaozhan@qq.com"
] |
jingtaozhan@qq.com
|
b94fa2caa15e19bd78666af1370320d7843a9ca5
|
104c2dd588e6de0e8cce58eb2eb96c6c0f55ff47
|
/testApp01/testSet/common/DRIVER.py
|
b36e9c8ab9ebc665149bf343f8d57e314b613f91
|
[] |
no_license
|
AceCcream/appium_python
|
aef56e97c8bc4402f65bcc3e4012d31d7aab0514
|
17b26fc393fddfc9ace33918b9f4219e303779d5
|
refs/heads/master
| 2021-01-21T09:34:11.716269
| 2015-11-17T08:49:23
| 2015-11-17T08:49:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
from selenium.common.exceptions import WebDriverException
import testApp01.readConfig as readConfig
from testApp01.testSet.common import init
import threading
from appium import webdriver
from urllib.error import URLError
readConfigLocal = readConfig.ReadConfig()
class MyDriver:
driver = None
mutex = threading.Lock()
myInit = init.Init()
platformName = readConfigLocal.getConfigValue("platformName")
platformVersion = myInit.get_android_version()
appPackage = readConfigLocal.getConfigValue("appPackage")
appActivity = readConfigLocal.getConfigValue("appActivity")
deviceName = myInit.get_deviceName()
baseUrl = readConfigLocal.getConfigValue("baseUrl")
desired_caps = {"platformName": platformName, "platformVersion": platformVersion, "appPackage": appPackage,
"appActivity": appActivity, "deviceName": deviceName}
    def __init__(self):
pass
@staticmethod
def get_driver():
try:
            if MyDriver.driver is None:
                MyDriver.mutex.acquire()
                try:
                    # double-checked locking: re-test after acquiring the lock
                    if MyDriver.driver is None:
                        try:
                            MyDriver.driver = webdriver.Remote(MyDriver.baseUrl, MyDriver.desired_caps)
                        except URLError:
                            MyDriver.driver = None
                finally:
                    # release even if webdriver.Remote raises something unexpected
                    MyDriver.mutex.release()
return MyDriver.driver
except WebDriverException:
raise
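# Illustrative usage sketch (not part of the original module): the accessor
# returns None when the Appium server cannot be reached.
if __name__ == '__main__':
    driver = MyDriver.get_driver()
    if driver is None:
        print("Appium server unreachable at " + MyDriver.baseUrl)
    else:
        print("session started: " + driver.session_id)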
|
[
"1904736199@qq.com"
] |
1904736199@qq.com
|
00e541cbdd775beff44f6df9b32c6f4f501713b2
|
3f79aea0b62379f9c2d5406b0f6080407c43ff76
|
/bleurt/score_files.py
|
12bab6bb06d534d457022821f2511c17da1b9d72
|
[
"Apache-2.0"
] |
permissive
|
techthiyanes/bleurt
|
25811163ecb71ca6b691c73ae8b512496d4a5751
|
c6f2375c7c178e1480840cf27cb9e2af851394f9
|
refs/heads/master
| 2023-08-21T14:38:20.368598
| 2021-10-14T21:12:50
| 2021-10-14T21:22:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,144
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""BLEURT scoring library."""
import itertools
from bleurt import score as score_lib
import pandas as pd
import tensorflow as tf
flags = tf.compat.v1.flags
logging = tf.compat.v1.logging
FLAGS = flags.FLAGS
flags.DEFINE_string(
"sentence_pairs_file", None,
"Path to a JSONL file that contains sentence pairs. Each JSON record must "
"contain the fields `reference` and `candidate`. Overrides `candidate_file`"
" and `reference_file` flags if specified.")
flags.DEFINE_string(
"candidate_file", None,
"Path to candidates text file, with one candidate sentence "
"per line.")
flags.DEFINE_string(
"reference_file", None,
"Path to reference text file, with one reference sentence "
"per line.")
flags.DEFINE_string(
"scores_file", None,
"[optional] Path where the scores will be written. Will use standard "
"output if unspecified.")
flags.DEFINE_string("bleurt_checkpoint", None,
"[optional] Path to BLEURT checkpoint.")
flags.DEFINE_integer("bleurt_batch_size", 16,
"Number of sentence pairs per batch.")
flags.DEFINE_integer(
"read_buffer_size", 100000,
"Number of lines to read at a time from the input files. "
"Increase or decrase to ajust memory consumption.")
flags.DEFINE_bool(
"batch_same_length", False,
"Enables dynamic batching to speed up inference."
" [experimental feature]")
def _json_generator(sentence_pairs_file):
"""Yields a generator for iterating from a single JSONL file."""
assert tf.io.gfile.exists(
sentence_pairs_file), "Sentence pairs file {} not found".format(
sentence_pairs_file)
with tf.io.gfile.GFile(sentence_pairs_file, "r") as pairs_file:
ratings_df = pd.read_json(pairs_file, lines=True)
for _, row in ratings_df.iterrows():
assert row.get("reference") is not None, (
"Reference sentence not found, are you sure the JSON record "
"contains a 'reference' field?")
assert row.get("candidate") is not None, (
"Candidate sentence not found, are you sure the JSON record "
"contains a 'candidate' field?")
yield row.get("reference"), row.get("candidate")
def _text_generator(reference_file, candidate_file):
"""Yields a generator for iterating from two text files."""
assert tf.io.gfile.exists(
reference_file), "Reference file {} not found".format(reference_file)
assert tf.io.gfile.exists(
candidate_file), "Candidate file {} not found".format(candidate_file)
with tf.io.gfile.GFile(reference_file, "r") as ref_file:
with tf.io.gfile.GFile(candidate_file, "r") as cand_file:
for ref_sentence, cand_sentence in itertools.zip_longest(
ref_file, cand_file, fillvalue=None):
assert ref_sentence is not None, (
"Reference sentence not found, are you sure that the files have "
"the same size?")
assert cand_sentence is not None, (
"Candidate sentence not found, are you sure that the files have "
"the same size?")
yield ref_sentence, cand_sentence
def score_files(generator, bleurt_checkpoint):
"""Computes BLEURT scores from a sentence pairs generator.
Requires that a JSONL file containing both candidate and reference
sentences or two individual candidate and reference text files be specified,
with the former overriding the latter if both flags are specified.
Args:
generator: A generator yielding reference and candidate sentences.
bleurt_checkpoint: BLEURT checkpoint used for scoring.
"""
ref_buffer = []
cand_buffer = []
scores_buffer = []
if not FLAGS.batch_same_length:
scorer = score_lib.BleurtScorer(bleurt_checkpoint)
else:
logging.warning(
"Enabling same length batching. BEWARE: this is an experimental "
"feature.")
scorer = score_lib.LengthBatchingBleurtScorer(bleurt_checkpoint)
def _consume_buffer():
scores = scorer.score(
references=ref_buffer,
candidates=cand_buffer,
batch_size=FLAGS.bleurt_batch_size)
del ref_buffer[:]
del cand_buffer[:]
scores_buffer.extend(scores)
logging.info("Computing BLEURT scores...")
for ref_sentence, cand_sentence in generator:
ref_buffer.append(ref_sentence)
cand_buffer.append(cand_sentence)
if len(ref_buffer) >= FLAGS.read_buffer_size:
_consume_buffer()
if ref_buffer:
_consume_buffer()
logging.info("BLEURT scores computed.")
if FLAGS.scores_file:
logging.info("Writing to disk.")
with tf.io.gfile.GFile(FLAGS.scores_file, "w+") as score_file:
for s in scores_buffer:
score_file.write("{}\n".format(str(s)))
else:
for s in scores_buffer:
print("{}".format(str(s)))
logging.info("Done.")
def check_flags_and_score():
"""Creates a file reader and runs model."""
assert FLAGS.sentence_pairs_file or (
FLAGS.reference_file and FLAGS.candidate_file
), ("Reference and candidate files not found, please specify a JSONL file or "
"two text files.")
if FLAGS.sentence_pairs_file:
sentence_pairs_generator = _json_generator(FLAGS.sentence_pairs_file)
else:
sentence_pairs_generator = _text_generator(FLAGS.reference_file,
FLAGS.candidate_file)
score_files(sentence_pairs_generator, FLAGS.bleurt_checkpoint)
def main(_):
logging.info("Running BLEURT scoring.")
check_flags_and_score()
if __name__ == "__main__":
tf.compat.v1.app.run()
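# Illustrative invocation (file names are assumptions for demonstration):
#
#   python -m bleurt.score_files \
#     -candidate_file=candidates.txt \
#     -reference_file=references.txt \
#     -bleurt_checkpoint=path/to/checkpoint \
#     -scores_file=scores.txt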
|
[
"tsellam@google.com"
] |
tsellam@google.com
|
b162f3d51f74445dbc282e6de7399918bd461f28
|
758a00b30b54782d04421580b0ea8c103fca95ff
|
/scrabble/old/config_scrabble.py
|
6d6ff1ca30c8cbc2674404a07e36ee81d1ee5946
|
[] |
no_license
|
LukasForst/RPH
|
45409b854edc654587c18d48b6613e9e3e32bbab
|
aeee4ad2de973400c86050c27bf3273c27c91aee
|
refs/heads/master
| 2021-01-18T17:27:58.218064
| 2017-04-05T19:53:37
| 2017-04-05T19:53:37
| 86,804,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,829
|
py
|
desk_value = [ # negative numbers multiply the value of the whole word, positive numbers multiply the letter values
[-3, +1, +1, +2, +1, +1, +1, -3, +1, +1, +1, +2, +1, +1, -3],
[+1, -2, +1, +1, +1, +3, +1, +1, +1, +3, +1, +1, +1, -2, +1],
[+1, +1, -2, +1, +1, +1, +2, +1, +2, +1, +1, +1, -2, +1, +1],
[+2, +1, +1, -2, +1, +1, +1, +2, +1, +1, +1, -2, +1, +1, +2],
[+1, +1, +1, +1, -2, +1, +1, +1, +1, +1, -2, +1, +1, +1, +1],
[+1, +3, +1, +1, +1, +3, +1, +1, +1, +3, +1, +1, +1, +3, +1],
[+1, +1, +2, +1, +1, +1, +2, +1, +2, +1, +1, +1, +2, +1, +1],
[-3, +1, +1, +2, +1, +1, +1, -2, +1, +1, +1, +2, +1, +1, -3],
[+1, +1, +2, +1, +1, +1, +2, +1, +2, +1, +1, +1, +2, +1, +1],
[+1, +3, +1, +1, +1, +3, +1, +1, +1, +3, +1, +1, +1, +3, +1],
[+1, +1, +1, +1, -2, +1, +1, +1, +1, +1, -2, +1, +1, +1, +1],
[+2, +1, +1, -2, +1, +1, +1, +2, +1, +1, +1, -2, +1, +1, +2],
[+1, +1, -2, +1, +1, +1, +2, +1, +2, +1, +1, +1, -2, +1, +1],
[+1, -2, +1, +1, +1, +3, +1, +1, +1, +3, +1, +1, +1, -2, +1],
[-3, +1, +1, +2, +1, +1, +1, -3, +1, +1, +1, +2, +1, +1, -3]]
letter_value = {"A": 1,
"B": 3,
"C": 3,
"D": 2,
"E": 1,
"F": 4,
"G": 2,
"H": 4,
"I": 1,
"J": 8,
"K": 5,
"L": 1,
"M": 3,
"N": 1,
"O": 1,
"P": 3,
"Q": 10,
"R": 1,
"S": 1,
"T": 1,
"U": 1,
"V": 4,
"W": 4,
"X": 8,
"Y": 4,
"Z": 10}
pocet_kamenu_ve_hre = {"A": 9,
"B": 2,
"C": 2,
"D": 4,
"E": 10,
"F": 2,
"G": 3,
"H": 2,
"I": 9,
"J": 1,
"K": 1,
"L": 4,
"M": 2,
"N": 6,
"O": 8,
"P": 2,
"Q": 1,
"R": 6,
"S": 4,
"T": 6,
"U": 4,
"V": 2,
"W": 2,
"X": 1,
"Y": 2,
"Z": 1 }
board = [
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"]]
|
[
"forstluk@fel.cvut.cz"
] |
forstluk@fel.cvut.cz
|
c1e73a6d40bddcf713520ac9b365fad4c1122a6f
|
e17654a99467643e2fd9acb47c9d8f23f11dadfe
|
/sample_buggy_scripts/GitHub Samples/8.py
|
09ceb906230a075adcfbb6b964f6feefde8b25cd
|
[] |
no_license
|
evelynmitchell/neuralint
|
d8c09e1beafafde66331699f4561fa4e67603c6e
|
034267846ae8a68d63cce0f87645da71dd8324f0
|
refs/heads/master
| 2023-08-27T19:33:16.322891
| 2021-11-05T15:39:18
| 2021-11-05T15:39:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,338
|
py
|
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers import Dense, Flatten, GaussianNoise, Convolution2D
from tensorflow.keras import regularizers
from keras.optimizers import SGD
from keras.constraints import maxnorm
import neuraLint
model = Sequential()
# model.add(GaussianNoise(0.1, input_shape=(32, 3, 3)))
model.add(GaussianNoise(0.1, input_shape=(3, 32, 32)))
model.add(Convolution2D(32, 3, 3, input_shape=(3, 32, 32), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu', kernel_regularizer=regularizers.l2(l2=1e-4)))
model.add(Dense(1024, activation='relu', kernel_regularizer=maxnorm(3)))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
# model.add(Dense(512, init='glorot_uniform'))
model.add(Dense(512, activation='relu', kernel_regularizer=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
# Compile model
lrate = 0.01
nb_epoch = 1000
decay = lrate / (nb_epoch)
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
print(neuraLint.check(model))
|
[
"noreply@github.com"
] |
evelynmitchell.noreply@github.com
|
76c90fd0618675a715f426c7c24027b929f70326
|
98daf5f732e937da16552e5230967cb0500e2203
|
/migrations/versions/f2205abe233b_deleting_surname.py
|
db817d43ea0fa9bcbad60779a3c7749cd739b34c
|
[] |
no_license
|
xKatinJn/MovieTone
|
cb62273cce0888dd9bc2c174735f182a363b4663
|
66da3f366349100af4a7e53e9fbdb86af02874d1
|
refs/heads/master
| 2022-11-10T06:31:28.465397
| 2020-06-13T10:33:07
| 2020-06-13T10:33:07
| 269,948,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
"""deleting surname
Revision ID: f2205abe233b
Revises: fd9bdfb375a5
Create Date: 2020-06-08 16:43:34.994122
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'f2205abe233b'
down_revision = 'fd9bdfb375a5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'surname')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('surname', mysql.VARCHAR(length=64), nullable=True))
# ### end Alembic commands ###
|
[
"antosha.katin@yandex.ru"
] |
antosha.katin@yandex.ru
|
b86a7385243568238e940a7668a58885c8e8645e
|
77c63dff1de66d10ea031a78d79abf2a157a7ba3
|
/sql.py
|
10e75591e335069b9665a4e79590c38fa86cf724
|
[] |
no_license
|
hoangduy121297/PythonSocketDemo
|
e512f6e0d242c41f6466fbe294ecd41b06b921ef
|
db63db671f8d54c655037efce81be4419ddb8028
|
refs/heads/master
| 2020-03-26T16:11:27.291046
| 2018-08-17T10:37:24
| 2018-08-17T10:37:24
| 145,087,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
import sqlite3 as lite
import sys
import os
conn= None
try:
    conn = lite.connect("MyDb.db")  # connect to (or create) the database file
    with conn:
        cur = conn.cursor()  # cursor used to step through records via cursor()
        cur.execute("SELECT SQLITE_VERSION()")  # run a query for the SQLite version
        data = cur.fetchone()  # fetch the first row of the result
        print("SQLite version " + str(data))  # print the version
        cur.execute("CREATE TABLE Nguoidung1 (Id INT PRIMARY KEY, Name STRING);")  # create the table
        cur.execute("INSERT INTO Nguoidung1 VALUES(1,'Hoang Manh Duy');")  # insert one row into the database
except lite.Error as e:
    print("Error: %s" % e.args[0])  # print the exception
    sys.exit(1)
finally:
    if conn:
        conn.close()  # close the connection
|
[
"noreply@github.com"
] |
hoangduy121297.noreply@github.com
|
443231adb8f6dd9df9b31f88e143b256e67063d6
|
b866c924c5c155a2bf264131151d18646159bf35
|
/Django/PythonWebsiteDevelopmentCourse/firstdjango/firstdjangoapp/views.py
|
d0765a04899867d3f3726ee8f92f7f46698a4abe
|
[] |
no_license
|
TylerPanda/Python
|
4ceb5420dbca2da09e6a210d3b4ff305b6491e60
|
f5e41834470262b2446395a46a1ff8f97c5b3bac
|
refs/heads/master
| 2021-01-01T20:08:05.336441
| 2018-03-31T01:20:51
| 2018-03-31T01:20:51
| 98,771,231
| 0
| 0
| null | 2018-04-05T01:41:58
| 2017-07-30T02:54:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,790
|
py
|
from django.shortcuts import render, redirect
from datetime import datetime
from django.http import HttpResponse
import random
from firstdjangoapp.models import student
from firstdjangoapp.form import PostForm
# Create your views here.
times = 0
def sayhello(request):
return HttpResponse("Hello Django!")
def hello2(request, username):
return HttpResponse("Hello " + username)
def hello3(request, username):
now = datetime.now()
return render(request, "hello3.html", locals())
def hello4(request, username):
now = datetime.now()
return render(request, "hello4.html", locals())
def dice(request):
no = random.randint(1, 6)
return render(request, "dice.html", {"no" : no})
def dice2(request):
no1 = random.randint(1, 6)
no2 = random.randint(1, 6)
no3 = random.randint(1, 6)
return render(request, "dice2.html", locals())
def dice3(request):
global times
times = times + 1
local_times = times
username = "David"
dice_no = {"no" : random.randint(1, 6)}
return render(request, "dice3.html", locals())
def show(request):
person1 = {"name":"Amy", "phone":"049-1234567", "age":"20"}
person2 = {"name":"Jack", "phone":"049-4455666", "age":"25"}
person3 = {"name":"Nacy", "phone":"049-9876543", "age":"17"}
persons = [person1, person2, person3]
return render(request, "show.html", locals())
def filter(request):
value = 4
list1 = [1, 2, 3]
    pw = '芝麻開門'  # "Open sesame" -- sample string for the template filters
html = "<h1>Hello</h1>"
value2 = False
return render(request, "filter.html", locals())
def listone(request):
try:
unit = student.objects.get(cName = "Tyler")
except:
errormessage = "(讀取錯誤!)"
return render(request, "listone.html", locals())
def listall(request):
students = student.objects.all().order_by('id')
return render(request, "listall.html", locals())
def index(request):
students = student.objects.all().order_by('id')
return render(request, "index.html", locals())
def post(request):
if request.method == "POST":
mess = request.POST['username']
else:
mess = "表單資料尚未送出!"
return render(request, "post.html", locals())
def post1(request):
if request.method == "POST":
cName = request.POST['cName']
cGender = request.POST['cGender']
cBirthday = request.POST['cBirthday']
cEmail = request.POST['cEmail']
cPhone = request.POST['cPhone']
cAddr = request.POST['cAddr']
unit = student.objects.create(cName = cName, cGender = cGender, cBirthday = cBirthday, cEmail = cEmail, cPhone = cPhone, cAddr = cAddr)
unit.save()
return redirect('/index/')
else:
        message = 'Please enter the data (no validation is performed)'
return render(request, "post1.html", locals())
def postform(request):
cName = 'David'
cGender = 'M'
cBirthday = '1995/12/23'
cEmail = '123@django.com'
cPhone = '12344321'
cAddr = 'Section 2, Number1'
postform = PostForm()
return render(request, "postform.html", locals())
def post2(request):
    if request.method == "POST":
        postform = PostForm(request.POST)
        if postform.is_valid():
            cName = postform.cleaned_data['cName']
            cGender = postform.cleaned_data['cGender']
            cBirthday = postform.cleaned_data['cBirthday']
            cEmail = postform.cleaned_data['cEmail']
            cPhone = postform.cleaned_data['cPhone']
            cAddr = postform.cleaned_data['cAddr']
            unit = student.objects.create(cName = cName, cGender = cGender, cBirthday = cBirthday, cEmail = cEmail, cPhone = cPhone, cAddr = cAddr)
            unit.save()
            message = 'Saved...'
            return redirect('/index/')
        else:
            message = 'Form validation failed!'
    else:
        message = 'Name, gender, and birthday are required!'
    postform = PostForm()
    return render(request, "post2.html", locals())
def edit(request, id = None, mode = None):
if mode == "edit":
unit = student.objects.get(id = id)
unit.cName = request.GET['cName']
unit.cGender = request.GET['cGender']
unit.cBirthday = request.GET['cBirthday']
unit.cPhone = request.GET['cPhone']
unit.cAddr = request.GET['cAddr']
unit.save()
        message = 'Updated...!'
return redirect('/index')
else:
try:
unit = student.objects.get(id = id)
strdate = str(unit.cBirthday)
strdate2 = strdate.replace("年", "-")
strdate2 = strdate.replace("月", "-")
strdates = strdate.replace("日", "-")
unit.cBirthday =strdate2
except:
message = "此id不存在!"
return render(request, "edit.html", locals())
|
[
"mingkaipanda@gmail.com"
] |
mingkaipanda@gmail.com
|
800794eafef17314dd186a36b34ecaaec25476b7
|
20e787a427d5777a942fc86e6e445401c09e3f4e
|
/LeetCode/18SurroundedRegions.py
|
ce9a17d15eac8639707a263bd3001665a650f7d9
|
[] |
no_license
|
blldd/CodeExercise
|
262760cebded080b71e40973c210386b1cbd6eac
|
b5b4851ba348c7fbcaa0480a36a8c689e8f8751c
|
refs/heads/master
| 2023-04-01T15:29:37.505616
| 2023-03-24T12:53:44
| 2023-03-24T12:53:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
# -*- coding:UTF-8 -*-
'''
Given a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's in that surrounded region.
For example,
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
'''
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
if len(board) <= 0:
return
m, n = len(board), len(board[0])
visited = [[False]*n for x in range(m)]
for i in range(m):
for j in range(n):
if board[i][j] == 'O' and not visited[i][j]:
surrounded, union_filed = self.bfs(board, i, j, m, n, visited)
if surrounded:
for x, y in union_filed:
board[x][y] = 'X'
    # flood fill: use BFS to find the connected region ("union field")
    # and decide whether it is surrounded; if so, return the enclosed cells
def bfs(self, board, x, y, m, n, visited):
directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]
union_filed = []
surrounded = True
        # mark as visited before pushing into the queue
visited[x][y] = True
for i, j in [(x + d_i, y + d_j) for d_i, d_j in directions]:
#
if i < 0 or i >= m or j < 0 or j >= n:
surrounded = False
continue
union_filed.append((x, y))
queue = [(x, y)]
while queue:
i_, j_ = queue.pop(0)
            # push the adjacent nodes into the queue
for i, j in [(i_ + d_i, j_ + d_j) for d_i, d_j in directions]:
#
if i < 0 or i >= m or j < 0 or j >= n:
surrounded = False
continue
if not visited[i][j] and board[i][j] == "O":
visited[i][j] = True
union_filed.append((i, j))
queue.append((i, j))
return surrounded, union_filed
if __name__ == '__main__':
board = [
["X", "X", "X", "O"],
["X", "O", "O", "X"],
["X", "O", "X", "O"],
["X", "X", "X", "X"]
]
Solution().solve(board)
print(board)
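    # Expected output: the inner region {(1,1), (1,2), (2,1)} is captured,
    # while the border-touching 'O's at (0,3) and (2,3) survive:
    # [['X', 'X', 'X', 'O'],
    #  ['X', 'X', 'X', 'X'],
    #  ['X', 'X', 'X', 'O'],
    #  ['X', 'X', 'X', 'X']]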
|
[
"18201788952@163.com"
] |
18201788952@163.com
|
bfab979ab11b9ac0e7a440320a5e7c1336eb8a08
|
85008646c695b4f31202bbc5b53f4c54c395ccf6
|
/setup.py
|
e70f9077339d1a92f3b1a58ca78873e3ddb79633
|
[
"MIT"
] |
permissive
|
praekeltfoundation/xenzen
|
8068300c5552d4fd3d76522e8788e788da88bc2c
|
5dc670302052bb4cd9a89351bda613cc436573a0
|
refs/heads/develop
| 2020-12-24T12:20:51.947990
| 2017-05-12T09:14:02
| 2017-05-12T09:14:02
| 13,208,753
| 3
| 5
| null | 2017-05-12T09:14:03
| 2013-09-30T07:26:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
import codecs
import os
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
# Stolen from txacme
def read(*parts):
with codecs.open(os.path.join(HERE, *parts), 'rb', 'utf-8') as f:
return f.read()
setup(
name='xenzen',
version='1.0.3.dev0',
license='MIT',
url='https://github.com/praekeltfoundation/xenzen',
description=(
'A Django UI for managing XenServer in the simplest possible way.'),
long_description=read('README.rst'),
author='Colin Alston',
maintainer='Praekelt.org SRE team',
maintainer_email='sre@praekelt.org',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
packages=find_packages(),
include_package_data=True,
install_requires=[
'celery >= 3.1, < 4',
'Django >= 1.8, < 1.9',
'django-crispy-forms',
'django-haystack',
'lxml',
'psycopg2',
'pyyaml',
'raven',
'redis',
'social-auth-app-django',
],
)
|
[
"jhewland@gmail.com"
] |
jhewland@gmail.com
|
4f742c00374f3e30bf1f1db2eb209e3ddf51c833
|
ba7eed4d2f31794540808db9d275ccb06006062e
|
/priority.py
|
fda3b1f633e3fcb692e47a5ff8104b13a8854e58
|
[] |
no_license
|
wswartworth/gomoku_AI
|
027f2aece90104285ed08741d7f008af59fda30a
|
962904edaab462d13195b4c3a4330e68f69dd266
|
refs/heads/master
| 2020-04-13T22:31:17.765017
| 2018-12-29T05:58:17
| 2018-12-29T05:58:17
| 163,481,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,636
|
py
|
import heapq
import itertools
'''
borrowed from Python documentation of heapq
'''
# max heap
class myPriorityQueue:
    REMOVED = '<removed-task>'                # placeholder for a removed task

    def __init__(self):
        # per-instance state (the original used shared class attributes,
        # which would leak entries between separate queue instances)
        self.pq = []                          # list of entries arranged in a heap
        self.entry_finder = {}                # mapping of tasks to entries
        self.counter = itertools.count()      # unique sequence count

    def add_task(self, task, priority=0):
        'Add a new task or update the priority of an existing task'
        if task in self.entry_finder:
            self.remove_task(task)
        count = next(self.counter)
        entry = [-priority, count, task]      # negated: heapq is a min-heap
        self.entry_finder[task] = entry
        heapq.heappush(self.pq, entry)

    def remove_task(self, task):
        'Mark an existing task as REMOVED. Raise KeyError if not found.'
        entry = self.entry_finder.pop(task)
        entry[-1] = self.REMOVED

    def pop_task(self):
        'Remove and return the highest-priority task. Raise KeyError if empty.'
        while self.pq:
            priority, count, task = heapq.heappop(self.pq)
            if task is not self.REMOVED:
                del self.entry_finder[task]
                return task
        raise KeyError('pop from an empty priority queue')
#increase the priority by n
def increase_priority(self, task, n):
entry_finder = self.entry_finder
if not task in entry_finder: return
priority, count, task = entry_finder[task]
self.add_task(task, n-priority) #funny because max heap
def sorted_list(self):
pq = self.pq
REMOVED = self.REMOVED
l = heapq.nsmallest(len(pq), pq)
ret = []
for entry in l:
add = entry[2]
if(add is not REMOVED): ret.append(add)
return ret
#return the list up to a given priority
def truncated_sorted_list(self, t):
pq = self.pq
REMOVED = self.REMOVED
l = heapq.nsmallest(len(pq), pq)
ret = []
for entry in l:
add = entry[2]
pri = entry[0]
if pri >= -t: return ret
if(add is not REMOVED): ret.append(add)
return ret
#pq = myPriorityQueue()
#pq.add_task("a", 6)
#pq.add_task("b", 4)
#pq.add_task("c", 2)
#pq.add_task("d", 1)
#pq.increase_priority("d", 2)
#print(pq.sorted_list())
#print(pq.sorted_list())
#print(pq.pop_task())
#print(pq.pop_task())
#print(pq.pop_task())
#print(pq.pop_task())
|
[
"WilliamSwartworth@Williams-MacBook-Pro.local"
] |
WilliamSwartworth@Williams-MacBook-Pro.local
|
f7325f730ac67ca71365865271c2961120433fc1
|
4da9d8c41969e7e3590472ab760d1f1fe552b8af
|
/sorting/selection-sort.py
|
055ae805b49eb96e51f2f110b5a7ce3542ae531d
|
[] |
no_license
|
noa-barsky/interview-prep
|
4c7cee3004d914822fe1b46387f6fdf5ef38f479
|
660df4f7f915506172e3bf71edb9972a9ed05bf5
|
refs/heads/master
| 2020-04-20T13:22:56.801526
| 2019-02-12T20:18:36
| 2019-02-12T20:18:36
| 168,867,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
def selection_sort(arr):
for i in range(len(arr)):
min_idx = i
for j in range(i+1, len(arr)):
if arr[min_idx] > arr[j]:
min_idx = j
arr[i], arr[min_idx] = arr[min_idx], arr[i]
return arr
print(selection_sort([14,46,43,27,57,41,45,21,70]))
|
[
"barskyn@mcmaster.ca"
] |
barskyn@mcmaster.ca
|
ee2a711d4d888ce3d65e5ac85cf516bbb3973dfb
|
b6bda0a1fcf88ca9a83336a17c33b8051fc1201c
|
/GWC 2017/shapes.py
|
0ad199974b9b757f0591a23d2c165889c0235146
|
[] |
no_license
|
roseORG/GirlsWhoCode2017
|
edf50d5ac9cb5a057a559252450ac34ca7b46f5f
|
a76dbeaf2aebc5242f812a9db2bac8c8eb213993
|
refs/heads/master
| 2021-01-01T16:59:08.824332
| 2017-07-21T17:11:55
| 2017-07-21T17:11:55
| 97,970,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from turtle import *
import math
t = Turtle()
# Set up your screen and starting position.
t.penup()
setup(500,300)
x_pos = 0
y_pos = 0
t.setposition(x_pos, y_pos)
t.pendown()
for counter in range(4):
t.forward(25)
t.right(90)
t.penup()
### Write your code below:
exitonclick()
|
[
"noreply@github.com"
] |
roseORG.noreply@github.com
|
ff2527d8c68ec597fc7a2caa438baa0381949798
|
3d1667e87a3ab0f880a13dcb17ef9e17f6e1431d
|
/minimal/business/models.py
|
7b7b49fd2692377b672d337f1466b7a2811a6224
|
[] |
no_license
|
TonyEight/minimal
|
d57d696e87c65037a644116179b4a07e8317bc7c
|
521ce501c709277bf93e9622c6510853d226179b
|
refs/heads/master
| 2016-08-03T09:50:58.858896
| 2014-04-22T16:23:20
| 2014-04-22T16:23:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
# Create your models here.
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
class Company(models.Model):
    name = models.CharField(_(u'name'), max_length=255, unique=True)
class Meta:
verbose_name = _(u'Company')
verbose_name_plural = _(u'Companies')
def __unicode__(self):
return u'%s' % self.name
class Contact(models.Model):
    first_name = models.CharField(_(u'first_name'), max_length=750)
    last_name = models.CharField(_(u'last_name'), max_length=750)
email = models.EmailField(_(u'email'), unique=True)
company = models.ForeignKey(
Company, verbose_name=_(u'company'), related_name=u'contacts')
class Meta:
verbose_name = _(u'Contact')
verbose_name_plural = _(u'Contacts')
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
def save(self, *args, **kwargs):
self.first_name = self.first_name.title()
self.last_name = self.last_name.upper()
super(Contact, self).save(*args, **kwargs)
class Contract(models.Model):
    name = models.CharField(_(u'name'), max_length=750)
client = models.ForeignKey(
Contact, verbose_name=_(u'client'), related_name=u'owned_contracts')
start = models.DateField(_(u'start'))
end = models.DateField(_(u'end'))
days = models.DecimalField(
_(u'number of days'), max_digits=6, decimal_places=2)
actor = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_(u'actor'),
related_name=u'contracts')
class Meta:
verbose_name = _('Contract')
verbose_name_plural = _('Contracts')
def __unicode__(self):
return u'%s' % self.name
def clean(self):
if self.days is not None:
if self.days < 0:
raise ValidationError(
                    _(u'Number of days must be strictly positive.'))
if self.start is not None and self.end is not None:
if self.start > self.end:
raise ValidationError(
_(u'Start date must be less or equal to end date.'))
else:
max_days = 1
if self.start != self.end:
delta = self.end - self.start
max_days = delta.days
if self.days > max_days:
raise ValidationError(
                        _(u'Number of days must be less than or equal to the '
                          u'maximum delta in days between the start and '
                          u'end dates.'))
|
[
"ludovic.legendart@green-conseil.com"
] |
ludovic.legendart@green-conseil.com
|
e23ae575e3a72bad6f5988347b97daa7bc4b4ccf
|
00c980a260abdb15fd8e048c956b4757af872271
|
/orders/urls.py
|
10ae414be6df60fc9d1c5a01080106d2ced39b33
|
[] |
no_license
|
PaolaMartinez/Project3
|
a0eebce3c06b23fd8f69b8d893da7485f0dd0740
|
d187e4e47d389c16559a6f2e3bcbdce91eceb73f
|
refs/heads/master
| 2021-06-24T09:51:15.854986
| 2019-12-11T22:35:10
| 2019-12-11T22:35:10
| 227,472,497
| 0
| 0
| null | 2021-06-10T22:23:21
| 2019-12-11T22:29:40
|
Python
|
UTF-8
|
Python
| false
| false
| 763
|
py
|
from django.urls import path
from django.contrib import admin
from . import views
urlpatterns = [
path("", views.index,name = "index"),
path("menu/", views.menu, name="menu"),
path("registration/", views.registration, name="registration"),
path("login/", views.login_view, name="login"),
path("logout/", views.logout_view, name="logout"),
path("confirm", views.AddtoCart, name="Cart"),
path("<int:product>", views.selectTopping, name="topping"),
path("carrito/", views.cart, name="ShowCarrito"),
path("confirmedOrder/", views.accept, name="Confirmado"),
path("orders/", views.viewOrders, name="Ordenes"),
path("status", views.checkStatus, name="Status")
#path("toppings/")views.logout_view, name="toppings")
]
|
[
"noreply@github.com"
] |
PaolaMartinez.noreply@github.com
|
cae2958d1b8ef88bbd6226d494cebe9b1718637b
|
1f56a7dd7c4fc27924b06c0dd5669743c112b6f4
|
/Inceptionists/Main/migrations/0001_initial.py
|
e2aeab09f5468d9ddba4513dae63ea4245ef27c9
|
[] |
no_license
|
sammittal0015/cmpsc431w-group-work
|
118a82d61748b5ec842282265b8a6d74af6a8077
|
6e42f705cc3a7b5f66a69544f265952397096ab5
|
refs/heads/master
| 2021-05-29T06:37:46.124175
| 2015-12-11T12:43:15
| 2015-12-11T12:43:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=128)),
],
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=128)),
('url', models.URLField()),
('views', models.IntegerField(default=0)),
('category', models.ForeignKey(to='Main.category')),
],
),
]
|
[
"fliu004@gmail.com"
] |
fliu004@gmail.com
|
00bd73838e45df36e1cc6bdc69a63324ed01ff3b
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/WebKit/Source/ThirdParty/ANGLE/scripts/process_angle_perf_results.py
|
23210d488dc23a1994eb55a7a9ce124b8d28163a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 31,029
|
py
|
#!/usr/bin/env vpython
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# process_angle_perf_results.py:
# Perf result merging and upload. Adapted from the Chromium script:
# https://chromium.googlesource.com/chromium/src/+/main/tools/perf/process_perf_results.py
from __future__ import print_function
import argparse
import collections
import datetime
import json
import logging
import multiprocessing
import os
import pathlib
import shutil
import subprocess
import sys
import tempfile
import time
import uuid
logging.basicConfig(
level=logging.INFO,
format='(%(levelname)s) %(asctime)s pid=%(process)d'
' %(module)s.%(funcName)s:%(lineno)d %(message)s')
PY_UTILS = str(pathlib.Path(__file__).resolve().parents[1] / 'src' / 'tests' / 'py_utils')
if PY_UTILS not in sys.path:
os.stat(PY_UTILS) and sys.path.insert(0, PY_UTILS)
import angle_metrics
import angle_path_util
angle_path_util.AddDepsDirToPath('tools/perf')
from core import path_util
path_util.AddTelemetryToPath()
from core import upload_results_to_perf_dashboard
from core import results_merger
path_util.AddAndroidPylibToPath()
try:
from pylib.utils import logdog_helper
except ImportError:
pass
path_util.AddTracingToPath()
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
RESULTS_URL = 'https://chromeperf.appspot.com'
JSON_CONTENT_TYPE = 'application/json'
MACHINE_GROUP = 'ANGLE'
BUILD_URL = 'https://ci.chromium.org/ui/p/angle/builders/ci/%s/%d'
GSUTIL_PY_PATH = str(
pathlib.Path(__file__).resolve().parents[1] / 'third_party' / 'depot_tools' / 'gsutil.py')
def _upload_perf_results(json_to_upload, name, configuration_name, build_properties,
output_json_file):
"""Upload the contents of result JSON(s) to the perf dashboard."""
args = [
'--buildername',
build_properties['buildername'],
'--buildnumber',
build_properties['buildnumber'],
'--name',
name,
'--configuration-name',
configuration_name,
'--results-file',
json_to_upload,
'--results-url',
RESULTS_URL,
'--output-json-file',
output_json_file,
'--perf-dashboard-machine-group',
MACHINE_GROUP,
'--got-angle-revision',
build_properties['got_angle_revision'],
'--send-as-histograms',
'--project',
'angle',
]
if build_properties.get('git_revision'):
args.append('--git-revision')
args.append(build_properties['git_revision'])
#TODO(crbug.com/1072729): log this in top level
logging.info('upload_results_to_perf_dashboard: %s.' % args)
return upload_results_to_perf_dashboard.main(args)
def _merge_json_output(output_json, jsons_to_merge, extra_links, test_cross_device=False):
"""Merges the contents of one or more results JSONs.
Args:
output_json: A path to a JSON file to which the merged results should be
written.
jsons_to_merge: A list of JSON files that should be merged.
extra_links: a (key, value) map in which keys are the human-readable strings
which describe the data, and value is logdog url that contain the data.
"""
begin_time = time.time()
merged_results = results_merger.merge_test_results(jsons_to_merge, test_cross_device)
# Only append the perf results links if present
if extra_links:
merged_results['links'] = extra_links
with open(output_json, 'w') as f:
json.dump(merged_results, f)
end_time = time.time()
print_duration('Merging json test results', begin_time, end_time)
return 0
def _handle_perf_json_test_results(benchmark_directory_map, test_results_list):
"""Checks the test_results.json under each folder:
1. mark the benchmark 'enabled' if tests results are found
2. add the json content to a list for non-ref.
"""
begin_time = time.time()
benchmark_enabled_map = {}
for benchmark_name, directories in benchmark_directory_map.items():
for directory in directories:
# Obtain the test name we are running
is_ref = '.reference' in benchmark_name
enabled = True
try:
with open(os.path.join(directory, 'test_results.json')) as json_data:
json_results = json.load(json_data)
if not json_results:
# Output is null meaning the test didn't produce any results.
# Want to output an error and continue loading the rest of the
# test results.
logging.warning('No results produced for %s, skipping upload' % directory)
continue
if json_results.get('version') == 3:
# Non-telemetry tests don't have written json results but
# if they are executing then they are enabled and will generate
# chartjson results.
if not bool(json_results.get('tests')):
enabled = False
if not is_ref:
# We don't need to upload reference build data to the
# flakiness dashboard since we don't monitor the ref build
test_results_list.append(json_results)
except IOError as e:
# TODO(crbug.com/936602): Figure out how to surface these errors. Should
# we have a non-zero exit code if we error out?
logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
continue
if not enabled:
# We don't upload disabled benchmarks or tests that are run
# as a smoke test
logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
continue
benchmark_enabled_map[benchmark_name] = True
end_time = time.time()
print_duration('Analyzing perf json test results', begin_time, end_time)
return benchmark_enabled_map
def _generate_unique_logdog_filename(name_prefix):
return name_prefix + '_' + str(uuid.uuid4())
def _handle_perf_logs(benchmark_directory_map, extra_links):
""" Upload benchmark logs to logdog and add a page entry for them. """
begin_time = time.time()
benchmark_logs_links = collections.defaultdict(list)
for benchmark_name, directories in benchmark_directory_map.items():
for directory in directories:
benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
if os.path.exists(benchmark_log_file):
with open(benchmark_log_file) as f:
uploaded_link = logdog_helper.text(
name=_generate_unique_logdog_filename(benchmark_name), data=f.read())
benchmark_logs_links[benchmark_name].append(uploaded_link)
logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
logdog_stream = logdog_helper.text(
logdog_file_name,
json.dumps(benchmark_logs_links, sort_keys=True, indent=4, separators=(',', ': ')),
content_type=JSON_CONTENT_TYPE)
extra_links['Benchmarks logs'] = logdog_stream
end_time = time.time()
print_duration('Generating perf log streams', begin_time, end_time)
def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
begin_time = time.time()
with open(benchmarks_shard_map_file) as f:
benchmarks_shard_data = f.read()
logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
logdog_stream = logdog_helper.text(
logdog_file_name, benchmarks_shard_data, content_type=JSON_CONTENT_TYPE)
extra_links['Benchmarks shard map'] = logdog_stream
end_time = time.time()
print_duration('Generating benchmark shard map stream', begin_time, end_time)
def _get_benchmark_name(directory):
return os.path.basename(directory).replace(" benchmark", "")
def _scan_output_dir(task_output_dir):
benchmark_directory_map = {}
benchmarks_shard_map_file = None
directory_list = [
f for f in os.listdir(task_output_dir)
if not os.path.isfile(os.path.join(task_output_dir, f))
]
benchmark_directory_list = []
for directory in directory_list:
for f in os.listdir(os.path.join(task_output_dir, directory)):
path = os.path.join(task_output_dir, directory, f)
if os.path.isdir(path):
benchmark_directory_list.append(path)
elif path.endswith('benchmarks_shard_map.json'):
benchmarks_shard_map_file = path
# Now create a map of benchmark name to the list of directories
# the lists were written to.
for directory in benchmark_directory_list:
benchmark_name = _get_benchmark_name(directory)
logging.debug('Found benchmark %s directory %s' % (benchmark_name, directory))
if benchmark_name in benchmark_directory_map.keys():
benchmark_directory_map[benchmark_name].append(directory)
else:
benchmark_directory_map[benchmark_name] = [directory]
return benchmark_directory_map, benchmarks_shard_map_file
def _upload_to_skia_perf(benchmark_directory_map, benchmark_enabled_map, build_properties_map):
metric_filenames = []
for benchmark_name, directories in benchmark_directory_map.items():
if not benchmark_enabled_map.get(benchmark_name, False):
continue
for directory in directories:
metric_filenames.append(os.path.join(directory, 'angle_metrics.json'))
assert metric_filenames
buildername = build_properties_map['buildername'] # e.g. win10-nvidia-gtx1660-perf
skia_data = {
'version': 1,
'git_hash': build_properties_map['got_angle_revision'],
'key': {
'buildername': buildername,
},
'results': angle_metrics.ConvertToSkiaPerf(metric_filenames),
}
skia_perf_dir = tempfile.mkdtemp('skia_perf')
try:
local_file = os.path.join(skia_perf_dir, '%s.%s.json' % (buildername, time.time()))
with open(local_file, 'w') as f:
json.dump(skia_data, f, indent=2)
gs_dir = 'gs://angle-perf-skia/angle_perftests/%s/' % (
datetime.datetime.now().strftime('%Y/%m/%d/%H'))
upload_cmd = ['vpython3', GSUTIL_PY_PATH, 'cp', local_file, gs_dir]
logging.info('Skia upload: %s', ' '.join(upload_cmd))
subprocess.check_call(upload_cmd)
finally:
shutil.rmtree(skia_perf_dir)
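# For reference, the file uploaded above has roughly this shape (values are
# illustrative, not real data):
#
#   {
#     "version": 1,
#     "git_hash": "<got_angle_revision>",
#     "key": {"buildername": "win10-nvidia-gtx1660-perf"},
#     "results": [...]   # angle_metrics entries converted to Skia Perf format
#   }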
def process_perf_results(output_json,
configuration_name,
build_properties,
task_output_dir,
smoke_test_mode,
output_results_dir,
lightweight=False,
skip_perf=False):
"""Process perf results.
Consists of merging the json-test-format output, uploading the perf test
output (histogram), and store the benchmark logs in logdog.
Each directory in the task_output_dir represents one benchmark
that was run. Within this directory, there is a subdirectory with the name
of the benchmark that was run. In that subdirectory, there is a
perftest-output.json file containing the performance results in histogram
format and an output.json file containing the json test results for the
benchmark.
Returns:
      (return_code, benchmark_upload_result_map):
      return_code is 0 if the whole operation is successful, non-zero otherwise.
      benchmark_upload_result_map: the dictionary that describes which benchmarks
        were successfully uploaded.
"""
handle_perf = not lightweight or not skip_perf
handle_non_perf = not lightweight or skip_perf
logging.info('lightweight mode: %r; handle_perf: %r; handle_non_perf: %r' %
(lightweight, handle_perf, handle_non_perf))
begin_time = time.time()
return_code = 0
benchmark_upload_result_map = {}
benchmark_directory_map, benchmarks_shard_map_file = _scan_output_dir(task_output_dir)
test_results_list = []
extra_links = {}
if handle_non_perf:
# First, upload benchmarks shard map to logdog and add a page
# entry for it in extra_links.
if benchmarks_shard_map_file:
_handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)
# Second, upload all the benchmark logs to logdog and add a page entry for
# those links in extra_links.
_handle_perf_logs(benchmark_directory_map, extra_links)
# Then try to obtain the list of json test results to merge
# and determine the status of each benchmark.
benchmark_enabled_map = _handle_perf_json_test_results(benchmark_directory_map,
test_results_list)
if not smoke_test_mode and handle_perf:
build_properties_map = json.loads(build_properties)
if not configuration_name:
# we are deprecating perf-id crbug.com/817823
configuration_name = build_properties_map['buildername']
try:
return_code, benchmark_upload_result_map = _handle_perf_results(
benchmark_enabled_map, benchmark_directory_map, configuration_name,
build_properties_map, extra_links, output_results_dir)
except Exception:
logging.exception('Error handling perf results jsons')
return_code = 1
try:
_upload_to_skia_perf(benchmark_directory_map, benchmark_enabled_map,
build_properties_map)
except Exception:
logging.exception('Error uploading to skia perf')
return_code = 1
if handle_non_perf:
        # Finally, merge all the test results JSON, add the extra links, and
        # write everything to the output location.
try:
_merge_json_output(output_json, test_results_list, extra_links)
except Exception:
logging.exception('Error handling test results jsons.')
end_time = time.time()
print_duration('Total process_perf_results', begin_time, end_time)
return return_code, benchmark_upload_result_map
def _merge_histogram_results(histogram_lists):
merged_results = []
for histogram_list in histogram_lists:
merged_results += histogram_list
return merged_results
def _load_histogram_set_from_dict(data):
histograms = histogram_set.HistogramSet()
histograms.ImportDicts(data)
return histograms
def _add_build_info(results, benchmark_name, build_properties):
histograms = _load_histogram_set_from_dict(results)
common_diagnostics = {
reserved_infos.MASTERS:
build_properties['builder_group'],
reserved_infos.BOTS:
build_properties['buildername'],
reserved_infos.POINT_ID:
build_properties['angle_commit_pos'],
reserved_infos.BENCHMARKS:
benchmark_name,
reserved_infos.ANGLE_REVISIONS:
build_properties['got_angle_revision'],
reserved_infos.BUILD_URLS:
BUILD_URL % (build_properties['buildername'], build_properties['buildnumber']),
}
for k, v in common_diagnostics.items():
histograms.AddSharedDiagnosticToAllHistograms(k.name, generic_set.GenericSet([v]))
return histograms.AsDicts()
def _merge_perf_results(benchmark_name, results_filename, directories, build_properties):
begin_time = time.time()
collected_results = []
for directory in directories:
filename = os.path.join(directory, 'perf_results.json')
try:
with open(filename) as pf:
collected_results.append(json.load(pf))
except IOError as e:
# TODO(crbug.com/936602): Figure out how to surface these errors. Should
# we have a non-zero exit code if we error out?
logging.error('Failed to obtain perf results from %s: %s', directory, e)
if not collected_results:
logging.error('Failed to obtain any perf results from %s.', benchmark_name)
return
    # Results from multiple shards are assumed to be histogram sets.
    # Non-telemetry benchmarks only ever run on one shard.
    assert isinstance(collected_results[0], list)
    merged_results = _merge_histogram_results(collected_results)
# Write additional histogram build info.
merged_results = _add_build_info(merged_results, benchmark_name, build_properties)
with open(results_filename, 'w') as rf:
json.dump(merged_results, rf)
end_time = time.time()
print_duration(('%s results merging' % (benchmark_name)), begin_time, end_time)
def _upload_individual(benchmark_name, directories, configuration_name, build_properties,
output_json_file):
tmpfile_dir = tempfile.mkdtemp()
try:
upload_begin_time = time.time()
        # There are potentially multiple directories with results; re-write and
        # merge them if necessary.
results_filename = None
if len(directories) > 1:
merge_perf_dir = os.path.join(os.path.abspath(tmpfile_dir), benchmark_name)
if not os.path.exists(merge_perf_dir):
os.makedirs(merge_perf_dir)
results_filename = os.path.join(merge_perf_dir, 'merged_perf_results.json')
_merge_perf_results(benchmark_name, results_filename, directories, build_properties)
else:
            # It was only written to one shard; use that shard's data.
results_filename = os.path.join(directories[0], 'perf_results.json')
results_size_in_mib = os.path.getsize(results_filename) / (2**20)
        logging.info('Uploading perf results from %s benchmark (size %s MiB)' %
                     (benchmark_name, results_size_in_mib))
with open(output_json_file, 'w') as oj:
upload_return_code = _upload_perf_results(results_filename, benchmark_name,
configuration_name, build_properties, oj)
upload_end_time = time.time()
print_duration(('%s upload time' % (benchmark_name)), upload_begin_time,
upload_end_time)
return (benchmark_name, upload_return_code == 0)
finally:
shutil.rmtree(tmpfile_dir)
def _upload_individual_benchmark(params):
try:
return _upload_individual(*params)
except Exception:
benchmark_name = params[0]
upload_succeed = False
logging.exception('Error uploading perf result of %s' % benchmark_name)
return benchmark_name, upload_succeed
def _GetCpuCount(log=True):
try:
cpu_count = multiprocessing.cpu_count()
if sys.platform == 'win32':
# TODO(crbug.com/1190269) - we can't use more than 56
# cores on Windows or Python3 may hang.
cpu_count = min(cpu_count, 56)
return cpu_count
except NotImplementedError:
if log:
            logging.warning('Failed to get a CPU count for this bot. See crbug.com/947035.')
# TODO(crbug.com/948281): This is currently set to 4 since the mac masters
# only have 4 cores. Once we move to all-linux, this can be increased or
# we can even delete this whole function and use multiprocessing.cpu_count()
# directly.
return 4
def _load_shard_id_from_test_results(directory):
shard_id = None
test_json_path = os.path.join(directory, 'test_results.json')
try:
with open(test_json_path) as f:
test_json = json.load(f)
all_results = test_json['tests']
for _, benchmark_results in all_results.items():
for _, measurement_result in benchmark_results.items():
shard_id = measurement_result['shard']
break
except IOError as e:
logging.error('Failed to open test_results.json from %s: %s', test_json_path, e)
except KeyError as e:
logging.error('Failed to locate results in test_results.json: %s', e)
return shard_id
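# Illustrative (hypothetical) test_results.json fragment that the parser above
# expects; every measurement in a shard's results carries the same shard id:
#   {"tests": {"some_benchmark": {"some_story": {"shard": 3}}}}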
def _find_device_id_by_shard_id(benchmarks_shard_map_file, shard_id):
    # Initialize so a KeyError below cannot leave device_id undefined.
    device_id = None
    try:
        with open(benchmarks_shard_map_file) as f:
            shard_map_json = json.load(f)
        device_id = shard_map_json['extra_infos']['bot #%s' % shard_id]
    except KeyError as e:
        logging.error('Failed to locate device name in shard map: %s', e)
    return device_id
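# Illustrative (hypothetical) benchmarks_shard_map.json fragment that the
# lookup above expects:
#   {"extra_infos": {"bot #3": "some-device-name"}}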
def _update_perf_json_with_summary_on_device_id(directory, device_id):
perf_json_path = os.path.join(directory, 'perf_results.json')
    try:
        with open(perf_json_path, 'r') as f:
            perf_json = json.load(f)
    except IOError as e:
        logging.error('Failed to open perf_results.json from %s: %s', perf_json_path, e)
        # Bail out early; perf_json is undefined past this point.
        return
summary_key_guid = str(uuid.uuid4())
summary_key_generic_set = {
'values': ['device_id'],
'guid': summary_key_guid,
'type': 'GenericSet'
}
perf_json.insert(0, summary_key_generic_set)
logging.info('Inserted summary key generic set for perf result in %s: %s', directory,
summary_key_generic_set)
stories_guids = set()
for entry in perf_json:
if 'diagnostics' in entry:
entry['diagnostics']['summaryKeys'] = summary_key_guid
stories_guids.add(entry['diagnostics']['stories'])
for entry in perf_json:
if 'guid' in entry and entry['guid'] in stories_guids:
entry['values'].append(device_id)
try:
with open(perf_json_path, 'w') as f:
json.dump(perf_json, f)
except IOError as e:
        logging.error('Failed to write perf_results.json to %s: %s', perf_json_path, e)
logging.info('Finished adding device id %s in perf result.', device_id)
def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map, configuration_name,
build_properties, extra_links, output_results_dir):
"""
Upload perf results to the perf dashboard.
This method also upload the perf results to logdog and augment it to
|extra_links|.
Returns:
(return_code, benchmark_upload_result_map)
return_code is 0 if this upload to perf dashboard successfully, 1
otherwise.
benchmark_upload_result_map is a dictionary describes which benchmark
was successfully uploaded.
"""
begin_time = time.time()
# Upload all eligible benchmarks to the perf dashboard
results_dict = {}
invocations = []
for benchmark_name, directories in benchmark_directory_map.items():
if not benchmark_enabled_map.get(benchmark_name, False):
continue
        # Create a file to hold this benchmark's upload output; it is later
        # streamed to logdog.
output_json_file = os.path.join(output_results_dir, (str(uuid.uuid4()) + benchmark_name))
results_dict[benchmark_name] = output_json_file
#TODO(crbug.com/1072729): pass final arguments instead of build properties
# and configuration_name
invocations.append(
(benchmark_name, directories, configuration_name, build_properties, output_json_file))
# Kick off the uploads in multiple processes
# crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves
# to 2 processes to avoid this error. Uncomment the following code once
# the problem is fixed on the dashboard side.
# pool = multiprocessing.Pool(_GetCpuCount())
pool = multiprocessing.Pool(2)
upload_result_timeout = False
try:
async_result = pool.map_async(_upload_individual_benchmark, invocations)
# TODO(crbug.com/947035): What timeout is reasonable?
results = async_result.get(timeout=4000)
except multiprocessing.TimeoutError:
upload_result_timeout = True
logging.error('Timeout uploading benchmarks to perf dashboard in parallel')
results = []
for benchmark_name in benchmark_directory_map:
results.append((benchmark_name, False))
finally:
pool.terminate()
# Keep a mapping of benchmarks to their upload results
benchmark_upload_result_map = {}
for r in results:
benchmark_upload_result_map[r[0]] = r[1]
logdog_dict = {}
upload_failures_counter = 0
logdog_stream = None
logdog_label = 'Results Dashboard'
for benchmark_name, output_file in results_dict.items():
upload_succeed = benchmark_upload_result_map[benchmark_name]
if not upload_succeed:
upload_failures_counter += 1
is_reference = '.reference' in benchmark_name
_write_perf_data_to_logfile(
benchmark_name,
output_file,
configuration_name,
build_properties,
logdog_dict,
is_reference,
upload_failure=not upload_succeed)
logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
logdog_stream = logdog_helper.text(
logdog_file_name,
json.dumps(dict(logdog_dict), sort_keys=True, indent=4, separators=(',', ': ')),
content_type=JSON_CONTENT_TYPE)
if upload_failures_counter > 0:
logdog_label += (' %s merge script perf data upload failures' % upload_failures_counter)
extra_links[logdog_label] = logdog_stream
end_time = time.time()
print_duration('Uploading results to perf dashboard', begin_time, end_time)
if upload_result_timeout or upload_failures_counter > 0:
return 1, benchmark_upload_result_map
return 0, benchmark_upload_result_map
def _write_perf_data_to_logfile(benchmark_name, output_file, configuration_name, build_properties,
logdog_dict, is_ref, upload_failure):
viewer_url = None
# logdog file to write perf results to
if os.path.exists(output_file):
results = None
with open(output_file) as f:
try:
results = json.load(f)
except ValueError:
logging.error('Error parsing perf results JSON for benchmark %s' % benchmark_name)
        if results:
            output_json_file = None
            try:
                json_fname = _generate_unique_logdog_filename(benchmark_name)
                output_json_file = logdog_helper.open_text(json_fname)
                json.dump(results, output_json_file, indent=4, separators=(',', ': '))
            except ValueError as e:
                logging.error('ValueError: "%s" while dumping output to logdog' % e)
            finally:
                # Guard against open_text itself failing, which previously
                # caused a NameError in this finally block.
                if output_json_file:
                    output_json_file.close()
                    viewer_url = output_json_file.get_viewer_url()
else:
logging.warning("Perf results JSON file doesn't exist for benchmark %s" % benchmark_name)
base_benchmark_name = benchmark_name.replace('.reference', '')
if base_benchmark_name not in logdog_dict:
logdog_dict[base_benchmark_name] = {}
# add links for the perf results and the dashboard url to
# the logs section of buildbot
if is_ref:
if viewer_url:
logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
if upload_failure:
logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
else:
# TODO(jmadill): Figure out if we can get a dashboard URL here. http://anglebug.com/6090
# logdog_dict[base_benchmark_name]['dashboard_url'] = (
# upload_results_to_perf_dashboard.GetDashboardUrl(benchmark_name, configuration_name,
# RESULTS_URL,
# build_properties['got_revision_cp'],
# _GetMachineGroup(build_properties)))
if viewer_url:
logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
if upload_failure:
logdog_dict[base_benchmark_name]['upload_failed'] = 'True'
def print_duration(step, start, end):
logging.info('Duration of %s: %d seconds' % (step, end - start))
def main():
""" See collect_task.collect_task for more on the merge script API. """
logging.info(sys.argv)
parser = argparse.ArgumentParser()
    # configuration-name (previously perf-id) is the name of the bot the tests
    # run on. For example, buildbot-test is the name of the android-go-perf bot.
    # configuration-name and results-url are set in the json file
    # tools/perf/core/chromium.perf.fyi.extras.json, which is going away.
parser.add_argument('--configuration-name', help=argparse.SUPPRESS)
parser.add_argument('--build-properties', help=argparse.SUPPRESS)
parser.add_argument('--summary-json', required=True, help=argparse.SUPPRESS)
parser.add_argument('--task-output-dir', required=True, help=argparse.SUPPRESS)
parser.add_argument('-o', '--output-json', required=True, help=argparse.SUPPRESS)
parser.add_argument(
'--skip-perf',
action='store_true',
help='In lightweight mode, using --skip-perf will skip the performance'
' data handling.')
parser.add_argument(
'--lightweight',
action='store_true',
help='Choose the lightweight mode in which the perf result handling'
' is performed on a separate VM.')
parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
parser.add_argument(
'--smoke-test-mode',
action='store_true',
help='This test should be run in smoke test mode'
' meaning it does not upload to the perf dashboard')
args = parser.parse_args()
with open(args.summary_json) as f:
shard_summary = json.load(f)
shard_failed = any(int(shard['exit_code']) != 0 for shard in shard_summary['shards'])
output_results_dir = tempfile.mkdtemp('outputresults')
try:
return_code, _ = process_perf_results(args.output_json, args.configuration_name,
args.build_properties, args.task_output_dir,
args.smoke_test_mode, output_results_dir,
args.lightweight, args.skip_perf)
except Exception:
logging.exception('process_perf_results raised an exception')
return_code = 1
finally:
shutil.rmtree(output_results_dir)
if return_code != 0 and shard_failed:
logging.warning('Perf processing failed but one or more shards failed earlier')
return_code = 0 # Enables the failed build info to be rendered normally
return return_code
if __name__ == '__main__':
sys.exit(main())
----
author: opensource@apple.com
file: /data_processor/imports.py (repo: malingreats/scoringApp, no license, Python, UTF-8, 5,223 bytes)
----
import csv
from companies import models
# opfile = '/home/greats/Documents/projects/dreatol/webapp/fintechapp/clean_applications.csv'
def get_data(csv_file):
opfile = csv_file
header = []
lbfile = open(opfile, "rt")
reader = csv.reader(lbfile)
rownum = 0
for row in reader:
if rownum == 0:
header.append(row)
rownum += 1
else:
the_row = models.Loan_History(LOAN_ID =row[0],NAME_CONTRACT_TYPE =row[1],CODE_GENDER =row[2],FLAG_OWN_CAR =row[3],FLAG_OWN_REALTY =row[4],NAME_TYPE_SUITE =row[5],
NAME_INCOME_TYPE =row[6],NAME_EDUCATION_TYPE =row[7],NAME_FAMILY_STATUS =row[8],NAME_HOUSING_TYPE =row[9],OCCUPATION_TYPE =row[10],WEEKDAY_APPR_PROCESS_START =row[11],ORGANIZATION_TYPE =row[12],FONDKAPREMONT_MODE =row[13],HOUSETYPE_MODE =row[14],WALLSMATERIAL_MODE =row[15],
EMERGENCYSTATE_MODE =row[16],TARGET =row[17],CNT_CHILDREN =row[18],AMT_INCOME_TOTAL =row[19],AMT_CREDIT =row[20],AMT_ANNUITY =row[21],AMT_GOODS_PRICE =row[22],REGION_POPULATION_RELATIVE =row[23],DAYS_BIRTH =row[24],DAYS_EMPLOYED =row[25],
DAYS_REGISTRATION =row[26],DAYS_ID_PUBLISH =row[27],OWN_CAR_AGE =row[28],FLAG_MOBIL =row[29],FLAG_EMP_PHONE =row[30],FLAG_WORK_PHONE =row[31],FLAG_CONT_MOBILE =row[32],FLAG_PHONE =row[33],FLAG_EMAIL =row[34],CNT_FAM_MEMBERS =row[35],
REGION_RATING_CLIENT =row[36],REGION_RATING_CLIENT_W_CITY =row[37],HOUR_APPR_PROCESS_START =row[38],REG_REGION_NOT_LIVE_REGION =row[39],REG_REGION_NOT_WORK_REGION =row[40],LIVE_REGION_NOT_WORK_REGION =row[41],REG_CITY_NOT_LIVE_CITY =row[42],REG_CITY_NOT_WORK_CITY =row[43],LIVE_CITY_NOT_WORK_CITY =row[44],EXT_SOURCE_1 =row[45],
EXT_SOURCE_2 =row[46],EXT_SOURCE_3 =row[47],APARTMENTS_AVG =row[48],BASEMENTAREA_AVG =row[49],YEARS_BEGINEXPLUATATION_AVG =row[50],YEARS_BUILD_AVG =row[51],COMMONAREA_AVG =row[52],ELEVATORS_AVG =row[53],ENTRANCES_AVG =row[54],FLOORSMAX_AVG =row[55],
FLOORSMIN_AVG =row[56],LANDAREA_AVG =row[57],LIVINGAPARTMENTS_AVG =row[58],LIVINGAREA_AVG =row[59],NONLIVINGAPARTMENTS_AVG =row[60],NONLIVINGAREA_AVG =row[61],APARTMENTS_MODE =row[62],BASEMENTAREA_MODE =row[63],YEARS_BEGINEXPLUATATION_MODE =row[64],YEARS_BUILD_MODE =row[65],
COMMONAREA_MODE =row[66],ELEVATORS_MODE =row[67],ENTRANCES_MODE =row[68],FLOORSMAX_MODE =row[69],FLOORSMIN_MODE =row[70],LANDAREA_MODE =row[71],LIVINGAPARTMENTS_MODE =row[72],LIVINGAREA_MODE =row[73],NONLIVINGAPARTMENTS_MODE =row[74],NONLIVINGAREA_MODE =row[75],
APARTMENTS_MEDI =row[76],BASEMENTAREA_MEDI =row[77],YEARS_BEGINEXPLUATATION_MEDI =row[78],YEARS_BUILD_MEDI =row[79],COMMONAREA_MEDI =row[80],ELEVATORS_MEDI =row[81],ENTRANCES_MEDI =row[82],FLOORSMAX_MEDI =row[83],FLOORSMIN_MEDI =row[84],LANDAREA_MEDI =row[85],
LIVINGAPARTMENTS_MEDI =row[86],LIVINGAREA_MEDI =row[87],NONLIVINGAPARTMENTS_MEDI =row[88],NONLIVINGAREA_MEDI =row[89],TOTALAREA_MODE =row[90],OBS_30_CNT_SOCIAL_CIRCLE =row[91],DEF_30_CNT_SOCIAL_CIRCLE =row[92],OBS_60_CNT_SOCIAL_CIRCLE =row[93],DEF_60_CNT_SOCIAL_CIRCLE =row[94],DAYS_LAST_PHONE_CHANGE =row[95],
FLAG_DOCUMENT_2 =row[96],FLAG_DOCUMENT_3 =row[97], FLAG_DOCUMENT_4 =row[98], FLAG_DOCUMENT_5 =row[99], FLAG_DOCUMENT_6 =row[100],FLAG_DOCUMENT_7 =row[101],FLAG_DOCUMENT_8 =row[102],FLAG_DOCUMENT_9 =row[103],FLAG_DOCUMENT_10 =row[104],FLAG_DOCUMENT_11 =row[105],
FLAG_DOCUMENT_12 =row[106],FLAG_DOCUMENT_13 =row[107],FLAG_DOCUMENT_14 =row[108],FLAG_DOCUMENT_15 =row[109],FLAG_DOCUMENT_16 =row[110],FLAG_DOCUMENT_17 =row[111],FLAG_DOCUMENT_18 =row[112],FLAG_DOCUMENT_19 =row[113],FLAG_DOCUMENT_20 =row[114],FLAG_DOCUMENT_21 =row[115],
AMT_REQ_CREDIT_BUREAU_HOUR =row[116],AMT_REQ_CREDIT_BUREAU_DAY =row[117],AMT_REQ_CREDIT_BUREAU_WEEK =row[118],AMT_REQ_CREDIT_BUREAU_MON =row[119],AMT_REQ_CREDIT_BUREAU_QRT =row[120],AMT_REQ_CREDIT_BUREAU_YEAR =row[121],OFFICER_ID =row[122]
)
            the_row.save()
    lbfile.close()
def get_data_income_data(csv_file):
opfile = csv_file
header = []
lbfile = open(opfile, "rt")
reader = csv.reader(lbfile)
rownum = 0
for row in reader:
if rownum == 0:
header.append(row)
rownum += 1
else:
the_row = models.IncomeData(LOAN_ID =row[0],age =row[1],workclass =row[2],fnlwgt =row[3],education =row[4],education_num =row[5],
marital_status =row[6],occupation =row[7],relationship =row[8],race =row[9],sex =row[10],capital_gain =row[11],capital_loss =row[12],hours_per_week =row[13],native_country =row[14],income =row[15]
)
            the_row.save()
    lbfile.close()
def get_data_applications_data(csv_file):
opfile = csv_file
header = []
lbfile = open(opfile, "rt")
reader = csv.reader(lbfile)
rownum = 0
for row in reader:
if rownum == 0:
header.append(row)
rownum += 1
else:
the_row = models.LoanApplication(LOAN_ID =row[0],age =row[1],ed =row[2], employ =row[3],address =row[4],income =row[5],
debtinc =row[6],creddebt =row[7],othdebt =row[8]
)
            the_row.save()
    lbfile.close()
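# The three loaders above differ only in the target model and the hard-coded
# column order. A more maintainable sketch, assuming the CSV header names
# exactly match the Django model field names (the helper name
# load_csv_into_model is illustrative, not part of this repo):
def load_csv_into_model(csv_path, model_class):
    """Create one model_class row per CSV record, mapping columns by header name."""
    with open(csv_path, newline='') as fh:
        for record in csv.DictReader(fh):
            # Assumes every CSV header is a valid field on model_class.
            model_class.objects.create(**record)
# Hypothetical usage: load_csv_into_model('clean_applications.csv', models.LoanApplication)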
----
author: mpasiinnocent@gmail.com
file: /bot/booking/__init__.py (repo: IdenTiclla/selenium-course-freecodecamp, no license, Python, UTF-8, 27 bytes)
----
print("i will print first")
----
author: iden.ticlla@gmail.com
file: /xml_2_csv.py (repo: Zhangchi95/Facial_expression_detector_SSD, no license, Python, UTF-8, 1,409 bytes)
----
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
# Two directories are needed: "train", which contains the .xml files of the
# training data, and "test", which contains the .xml files of the testing data.
def main():
    for directory in ['train', 'test']:
        image_path = os.path.join(os.getcwd(), 'images/{}'.format(directory))
        xml_df = xml_to_csv(image_path)
        # train_labels.csv and test_labels.csv will be created under the /data directory
        xml_df.to_csv('data/{}_labels.csv'.format(directory), index=None)
        print('Successfully converted xml to csv.')


if __name__ == '__main__':
    main()
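# xml_to_csv indexes <object> children positionally (member[0] = class name,
# member[4] = bndbox), which assumes labelImg-style Pascal VOC annotations.
# A minimal annotation it can parse (all values are illustrative):
#
#   <annotation>
#       <filename>happy_001.jpg</filename>
#       <size><width>640</width><height>480</height><depth>3</depth></size>
#       <object>
#           <name>happy</name>
#           <pose>Unspecified</pose>
#           <truncated>0</truncated>
#           <difficult>0</difficult>
#           <bndbox>
#               <xmin>120</xmin><ymin>80</ymin><xmax>260</xmax><ymax>240</ymax>
#           </bndbox>
#       </object>
#   </annotation>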
----
author: Zhangchi95.noreply@github.com
file: /solutions/ALS.py (repo: vardhamanmodi/BigDataCourse, no license, Python, UTF-8, 659 bytes)
----
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
# Load and parse the data
sc = SparkContext()
data = sc.textFile("/Data/ALS/*")
ratings = data.map(lambda l: l.split(',')).map(lambda l: Rating(int(l[0]), int(l[1]), float(l[2])))
# Build the recommendation model using Alternating Least Squares
rank = 10
numIterations = 20
model = ALS.train(ratings, rank, numIterations)
# Predict ratings for the (user, product) pairs in the training data
testdata = ratings.map(lambda p: (p[0], p[1]))
predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
predictions.saveAsTextFile('vardhaman.txt')
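# To actually evaluate the model rather than only dump predictions, the
# standard MLlib recipe joins the predictions back against the observed
# ratings and computes the mean squared error (variable names illustrative):
ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1]) ** 2).mean()
print("Mean Squared Error = " + str(MSE))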
----
author: vardhaman_modi@persistent.co.in
file: /application/migrations/0001_initial.py (repo: huhaiqng/weibbix, no license, Python, UTF-8, 5,782 bytes)
----
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('nickname', models.CharField(max_length=50, blank=True)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Env',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('env_sid', models.CharField(max_length=200)),
('env_name', models.CharField(max_length=150)),
('env_domain', models.CharField(max_length=150)),
('env_disc', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sid', models.CharField(max_length=200)),
('ip', models.CharField(max_length=200)),
('hostname', models.CharField(max_length=200)),
('os', models.CharField(max_length=200)),
('software', models.CharField(max_length=200)),
('app', models.CharField(max_length=200)),
('owner', models.CharField(max_length=200)),
('fenpei', models.CharField(max_length=200)),
('sta', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='OSUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sid', models.CharField(max_length=200)),
('ip', models.CharField(max_length=200)),
('username', models.CharField(max_length=200)),
('passwd', models.CharField(max_length=200)),
('notice', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Tom',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tom_sid', models.CharField(max_length=200)),
('tom_name', models.CharField(max_length=150)),
('tom_ma', models.CharField(max_length=150)),
('tom_disc', models.CharField(max_length=200)),
('tom_cnt', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='TomDir',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sid', models.CharField(max_length=200)),
('ip', models.CharField(max_length=200)),
('dir', models.CharField(max_length=200)),
('stat', models.CharField(max_length=200)),
],
),
]
----
author: 1321385590@qq.com
file: /collab_compet/model.py (repo: aime20ic/collab_compet, no license, Python, UTF-8, 2,903 bytes)
----
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
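# DDPG-style fan-in initialization: hidden-layer weights are drawn uniformly
# from [-1/sqrt(fan_in), 1/sqrt(fan_in)], where fan_in is the layer's input size.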
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=256, fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
# self.bn = nn.BatchNorm1d((1,state_size))
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
# state = self.bn(state.unsqueeze(0))
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return torch.tanh(self.fc3(x))
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=256, fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
# self.bn = nn.BatchNorm1d((1,state_size))
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
# state = self.bn(state.unsqueeze(0))
xs = F.leaky_relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.leaky_relu(self.fc2(x))
return self.fc3(x)
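# A quick smoke test of the two networks, assuming the 24-dimensional states
# and 2-dimensional actions of the Unity Tennis environment this repo targets
# (batch size and seed below are arbitrary):
if __name__ == '__main__':
    state_size, action_size, batch = 24, 2, 16
    actor = Actor(state_size, action_size, seed=0)
    critic = Critic(state_size, action_size, seed=0)
    states = torch.rand(batch, state_size)
    actions = actor(states)             # shape (16, 2); tanh keeps values in [-1, 1]
    q_values = critic(states, actions)  # shape (16, 1) Q-value estimates
    print(actions.shape, q_values.shape)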
----
author: aime20ic@gmail.com
file: /Classes/classApp/models.py (repo: levit123/Django-Class-App, no license, Python, UTF-8, 380 bytes)
----
from django.db import models
# Create your models here.
class djangoClass(models.Model):
    Title = models.CharField(max_length=100)
    # max_length is only valid on CharField; IntegerField and FloatField ignore it.
    CourseNumber = models.IntegerField()
    InstructorName = models.CharField(max_length=50)
    Duration = models.FloatField()
    djangoClasses = models.Manager()
def __str__(self):
return self.Title
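# Because the model's only manager is bound to the name djangoClasses, Django
# does not add the default `objects` manager, so all queries must go through
# djangoClass.djangoClasses. Hypothetical shell usage:
#   djangoClass.djangoClasses.create(Title='Intro to Django', CourseNumber=10100,
#                                    InstructorName='Ada Lovelace', Duration=1.5)
#   djangoClass.djangoClasses.filter(Duration__lt=2).count()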
----
author: levicaseblodgett@gmail.com
----