the-stack_0_1187
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceAssociationLinksOperations(object):
"""ServiceAssociationLinksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ServiceAssociationLinksListResult"
"""Gets a list of service association links for a subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceAssociationLinksListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ServiceAssociationLinksListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceAssociationLinksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ServiceAssociationLinks'} # type: ignore
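# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): this operation group is
# normally reached through NetworkManagementClient rather than instantiated
# directly. The subscription id and resource names below are placeholders, not
# values taken from this repository.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     links = client.service_association_links.list(
#         resource_group_name="example-rg",
#         virtual_network_name="example-vnet",
#         subnet_name="example-subnet",
#     )
#     print([link.name for link in links.value])
# ---------------------------------------------------------------------------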
the-stack_0_1188
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.activation import LeakyReLU
from utils import initialize_weights_he
# The MNIST datasets are hosted on yann.lecun.com, which has moved under CloudFlare protection
# Run this script to enable the datasets download
# Reference: https://github.com/pytorch/vision/issues/1938
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
from Networks import ResNetBlock
import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
import cv2
from zu_resnet import ResNetEncoder
# define the NN architecture
class ConvAutoencoder_NAV2(nn.Module):
def __init__(self, imgChannels=1, zDim=512,featureDim=12*10*10, fix_params=False):
super(ConvAutoencoder_NAV2, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = nn.Sequential(
nn.Conv2d(imgChannels, 32, 5, padding=2) ,
nn.BatchNorm2d(32),
nn.ReLU(),
ResNetBlock(32,64,3),
ResNetBlock(64,128,3),
ResNetBlock(128,256,3),
            ResNetBlock(256,128,3), # 128x5x5 = 3200 feature vector
).apply(initialize_weights_he)
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(128, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(64, imgChannels, 2, stride=2),
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
def forward(self, x):
x = self.encode(x)
# print(x.shape)
# x = x.reshape(64,5,5)
x = self.decode(x)
x = torch.sigmoid(x)
        return x
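# Worked example for the decoder comments above ("a kernel of 2 and a stride of 2 will
# increase the spatial dims by 2"), using the standard ConvTranspose2d size formula.
# The helper name is ours, and the 5x5 -> 80x80 figures are assumptions taken from the
# __main__ block further down, not guarantees of the class itself.
def _transpose_conv_out_size(h_in, kernel_size=2, stride=2, padding=0):
    """Spatial size after one ConvTranspose2d: (h_in - 1) * stride - 2 * padding + kernel_size."""
    return (h_in - 1) * stride - 2 * padding + kernel_size
# e.g. 5 -> 10 -> 20 -> 40 -> 80 across four such layers (each call doubles the size).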
# define the NN architecture
class ConvAutoencoder_NAV3(nn.Module):
def __init__(self, imgChannels=1, zDim=512,featureDim=12*10*10, fix_params=False):
super(ConvAutoencoder_NAV3, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = ResNetEncoder(12,blocks_sizes=[64,128,256,384],deepths=[2,2,2,2])
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(384, 512, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(512, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(64, imgChannels, 2, stride=2)
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
def forward(self, x):
x = self.encode(x)
# print(x.shape)
# x = x.reshape(64,5,5)
x = self.decode(x)
x = torch.sigmoid(x)
return x
# define the NN architecture
class ConvAutoencoder_NAV4(nn.Module):
def __init__(self, imgChannels=1, zDim=512,featureDim=12*10*10, fix_params=False):
super(ConvAutoencoder_NAV4, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = nn.Sequential(
ResNetBlock(imgChannels,64,3),
ResNetBlock(64,128,3),
ResNetBlock(128,256,3),
            ResNetBlock(256,128,3), # 128x5x5 = 3200 feature vector
).apply(initialize_weights_he)
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(128, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(64, imgChannels, 2, stride=2),
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
def forward(self, x):
x = self.encode(x)
# print(x.shape)
# x = x.reshape(64,5,5)
x = self.decode(x)
x = torch.sigmoid(x)
return x
# define the NN architecture
class ConvAutoencoder_NAV6(nn.Module):
def __init__(self, imgChannels=1, zDim=1024,featureDim=64*5*5, fix_params=False):
super(ConvAutoencoder_NAV6, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = nn.Sequential(
ResNetBlock(imgChannels,64,3),
ResNetBlock(64,128,3),
ResNetBlock(128,256,3),
            ResNetBlock(256,64,3), # 64x5x5 = 1600 feature vector,
nn.Flatten(),
nn.Linear(featureDim,zDim)
).apply(initialize_weights_he)
        self.FC_1 = nn.Linear(zDim, featureDim)
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(64, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
            nn.ConvTranspose2d(128, imgChannels, 2, stride=2),  # reconstruct with the same number of channels as the input
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
    def forward(self, x):
        x = self.encode(x)            # (N, zDim) latent vector
        x = self.FC_1(x)              # project back to the flattened feature size
        x = x.view(-1, 64, 5, 5)      # un-flatten for the ConvTranspose2d decoder (featureDim = 64*5*5)
        x = self.decode(x)
        x = torch.sigmoid(x)
        return x
if __name__ == '__main__':
GPU = True
device_idx = 0
if GPU:
device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
channels = 3
n_s_f = 4
inputshape = (80,80,channels)
cv2_resz = (80,80)
imshape = (channels,*cv2_resz)
show_shape = (*cv2_resz,channels)
model = ConvAutoencoder_NAV4(imgChannels=channels*n_s_f)
# model.load_state_dict(torch.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/final/Models/1/VAE_20"))
model.load_state_dict(torch.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/hoffentlich/VAE_80803_615"))
model.eval()
model.to(device)
train_images = []
test_images = []
moving_database = np.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/hoffentlich/VAE_dtb_12_8080_final_hoffentlich.npy")
# moving_database = np.load("/home/developer/VAE_dtb_12_128128_final.npy")
# moving_database = np.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/3/VAE_dtb_3_8080.npy")
print(moving_database.shape)
print(moving_database[0])
stacked_images = []
train_data = (moving_database[0:45000]/ 2**8).astype(np.float32)
test_data = (moving_database[45000:] / 2**8).astype(np.float32)
print(train_data.shape)
print(test_data.shape)
# Create training and test dataloaders
num_workers = 10
# how many samples per batch to load
batch_size = 32
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers,shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers,shuffle=True)
import matplotlib.pyplot as plt
infostring = "net: \n" + str(model) + " \n \n \n"
print(infostring)
filename = "/home/developer/Training_results/VA/"+"Infofile.txt"
text_file = open(filename, "w")
n = text_file.write(infostring)
text_file.close()
learning_rate = 0.01
# specify loss function
criterion = nn.MSELoss()
# specify loss function
# torch.optim.Adam
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# from torch.optim.lr_scheduler import ExponentialLR
from torch.optim.lr_scheduler import MultiStepLR
# scheduler1 = ExponentialLR(optimizer, gamma=0.90)
scheduler2 = MultiStepLR(optimizer, milestones=[30,50,70,90], gamma=0.25)
# number of epochs to train the model
n_epochs = 100
# for epoch in range(1, n_epochs+1):
# # monitor training loss
# train_loss = 0.0
# test_loss = 0.0
# ##################
# # train the model #
# ##################
# for data in train_loader:
# # _ stands in for labels, here
# # no need to flatten images
# images = data
# images = images.to(device)
# # clear the gradients of all optimized variables
# optimizer.zero_grad()
# # forward pass: compute predicted outputs by passing inputs to the model
# outputs = model(images).to(device)
# # output_decoder = decoder(images)
# # print(output_decoder)
# # print(output_decoder.shape)
# # calculate the loss
# loss = criterion(outputs, images)
# # backward pass: compute gradient of the loss with respect to model parameters
# loss.backward()
# # perform a single optimization step (parameter update)
# optimizer.step()
# # update running training loss
# train_loss += loss.item()*images.size(0)
# # print avg training statistics
# train_loss = train_loss/len(train_loader)
# print('Epoch: {} \tTraining Loss: {:.6f}'.format(
# epoch,
# train_loss
# ))
# for test_i_data in test_loader:
# # _ stands in for labels, here
# # no need to flatten images
# test_images = test_i_data
# test_images = test_images.to(device)
# # clear the gradients of all optimized variables
# with torch.no_grad():
# # forward pass: compute predicted outputs by passing inputs to the model
# outputs = model(test_images).to(device)
# loss = criterion(outputs, test_images)
# test_loss += loss.item()*test_images.size(0)
# print('Epoch: {} \tTesting Loss: {:.6f}'.format(
# epoch,
# test_loss
# ))
# torch.save(model.state_dict(), "/home/developer/Training_results/VA/VAE_RESNET18"+str(epoch))
# # scheduler1.step()
# scheduler2.step()
# obtain one batch of test images
dataiter = iter(test_loader)
while True:
        show_images = next(dataiter)  # DataLoader iterators expose __next__, not .next(), in current PyTorch
show_images = show_images.to(device)
# get sample outputs
output = model(show_images)
# prep images for display
show_images = show_images.detach().cpu().numpy()
        # output is resized into a batch of images
output = output.view(batch_size,n_s_f*channels,*cv2_resz)
# use detach when it's an output that requires_grad
output = output.detach().cpu().numpy()
print(output.shape)
print(show_images.shape)
# torch.save(model.state_dict(), "/home/developer/Training_results/VAE")
# plot the first ten input images and then reconstructed images
fig, axes = plt.subplots(nrows=2, ncols=4, sharex=True, sharey=True, figsize=(20,20))
axes[0][0].imshow(show_images[0][0:3].reshape(show_shape))
axes[0][0].get_xaxis().set_visible(False)
axes[0][0].get_yaxis().set_visible(False)
axes[0][1].imshow(show_images[0][3:6].reshape(show_shape))
axes[0][1].get_xaxis().set_visible(False)
axes[0][1].get_yaxis().set_visible(False)
axes[0][2].imshow(show_images[0][6:9].reshape(show_shape))
axes[0][2].get_xaxis().set_visible(False)
axes[0][2].get_yaxis().set_visible(False)
axes[0][3].imshow(show_images[0][9:12].reshape(show_shape))
axes[0][3].get_xaxis().set_visible(False)
axes[0][3].get_yaxis().set_visible(False)
axes[1][0].imshow(output[0][0:3].reshape(show_shape))
axes[1][0].get_xaxis().set_visible(False)
axes[1][0].get_yaxis().set_visible(False)
axes[1][1].imshow(output[0][3:6].reshape(show_shape))
axes[1][1].get_xaxis().set_visible(False)
axes[1][1].get_yaxis().set_visible(False)
axes[1][2].imshow(output[0][6:9].reshape(show_shape))
axes[1][2].get_xaxis().set_visible(False)
axes[1][2].get_yaxis().set_visible(False)
axes[1][3].imshow(output[0][9:12].reshape(show_shape))
axes[1][3].get_xaxis().set_visible(False)
axes[1][3].get_yaxis().set_visible(False)
# input images on top row, reconstructions on bottom
# for show_images, row in zip([show_images, output], axes):
# for img, ax in zip(show_images, row):
# ax.imshow(img[0:3].reshape(show_shape))
# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
        plt.show()
the-stack_0_1189
"""
Parsed Config File Produces Expected Behaviors - fixed parameters
"""
import inspect
import os
import deeplenstronomy.deeplenstronomy as dl
doc = """
\tRunning tests from test_expected_behaviors_fixed.py
\tThe tests included in this module demonstrate that the values of fixed parameters
\tin the main configuration file are accurately utilized in the simulation and
\tappear as expected in the simulation metadata. The functions are:
\t\t- test_dataset_section
\t\t\tTesting that NAME, OUTDIR, and SEED properties from the DATASET section of
\t\t\tthe main config file were properly interpreted and utilized as properties
\t\t\tof the generated dataset
\t\t- test_cosmology_section
\t\t\tTesting that the cosmological parameters from the COSMOLOGY section appear
\t\t\tas expected in the simulation metadata
\t\t- test_image_size
\t\t\tTesting that the IMAGE.numPix keyword produced simulated images with the
\t\t\texpected size.
\t\t- test_bands
\t\t\tTesting that the BANDS argument was interpreted properly and produced an
\t\t\tarray of simulated images with the expected number of bands
"""
print(doc)
# Below are all of the possible operation modes
kwargs_sets = {0: {}, # default arguments
1: {'save_to_disk': True},
2: {'save_to_disk': True, 'image_file_format': 'h5'},
3: {'save_to_disk': True, 'skip_image_generation': True},
4: {'store_in_memory': False},
5: {'store_sample': True},
6: {'skip_image_generation': True, 'survey': 'des'},
7: {'solve_lens_equation': True},
8: {'return_planes': True}
}
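# Hedged sketch of where `status.txt` (read just below) is assumed to come from: an outer
# driver that iterates over kwargs_sets, writes the current index, and runs pytest once per
# operation mode. The driver is not part of this module; names and paths are assumptions.
#
#     import subprocess
#     for mode in range(len(kwargs_sets)):
#         with open('status.txt', 'w') as f:
#             f.write(str(mode))
#         subprocess.run(['pytest', 'test_expected_behaviors_fixed.py'], check=True)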
f = open('status.txt', 'r')
current_test = int(f.read().strip())
f.close()
# Generate the dataset
kwargs_set = kwargs_sets[current_test]
config_filename = 'config.yaml'
dataset = dl.make_dataset(config_filename, **kwargs_set)
has_images = [hasattr(dataset, x + '_images') for x in dataset.configurations]
has_metadata = [hasattr(dataset, x + '_metadata')
for x in dataset.configurations]
has_planes = [hasattr(dataset, x + '_planes') for x in dataset.configurations]
images_exist = [os.path.exists(dataset.outdir +'/' + x + '_images.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
metadata_exist = [os.path.exists(dataset.outdir +'/' + x + '_metadata.csv')
for x in dataset.configurations]
planes_exist = [os.path.exists(dataset.outdir +'/' + x + '_planes.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
# Begin test functions
def test_dataset_section():
section = dataset.config_dict['DATASET']['PARAMETERS']
assert dataset.size == section['SIZE']
assert dataset.outdir == section['OUTDIR']
if 'SEED' in section.keys():
assert dataset.seed == section['SEED']
def test_cosmology_section():
if all(has_metadata):
section = dataset.config_dict['COSMOLOGY']['PARAMETERS']
for conf in dataset.configurations:
for band in dataset.bands:
for param, value in section.items():
md = eval(f'dataset.{conf}_metadata["{param}-{band}"]')
assert all(md.values == value)
def test_image_size():
if all(has_images):
for conf in dataset.configurations:
x = eval(f'dataset.{conf}_images').shape[-2]
y = eval(f'dataset.{conf}_images').shape[-1]
assert dataset.config_dict['IMAGE']['PARAMETERS']['numPix'] == x
assert dataset.config_dict['IMAGE']['PARAMETERS']['numPix'] == y
def test_bands():
config_bands = dataset.config_dict['SURVEY']['PARAMETERS']['BANDS'].split(',')
assert config_bands == dataset.bands
if all(has_images):
for conf in dataset.configurations:
b = eval(f'dataset.{conf}_images').shape[-3]
assert len(config_bands) == b
if all(has_metadata):
get_band = lambda col: col.split('-')[-1]
for conf in dataset.configurations:
md = eval(f'dataset.{conf}_metadata').columns
assert all([band in config_bands for band in [get_band(c) for c in md]])
the-stack_0_1190
from argparse import ArgumentParser
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
def main():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
if __name__ == '__main__':
main()
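# Example invocation (config/checkpoint paths are placeholders, not files shipped with this
# script):
#
#     python demo.py demo.jpg \
#         configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#         checkpoints/faster_rcnn_r50_fpn_1x_coco.pth \
#         --device cuda:0 --score-thr 0.3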
the-stack_0_1192
import datetime
import json
from itertools import chain
from io import BytesIO
from django.template.loader import get_template
from xlsxwriter.workbook import Workbook
from xhtml2pdf import pisa
import xlrd
import logging
from django.db import transaction
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Max
from django.http import HttpResponse, HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.template.loader import render_to_string
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from notification.views import AssistantshipClaim_notify,AssistantshipClaim_acad_notify,AssistantshipClaim_account_notify,AssistantshipClaim_faculty_notify
from applications.academic_information.models import (Calendar, Course, Student,Curriculum_Instructor, Curriculum,
Student_attendance)
from applications.central_mess.models import(Monthly_bill, Payments)
from applications.programme_curriculum.models import (CourseSlot, Course as Courses, Batch, Semester)
from applications.globals.models import (DepartmentInfo, Designation,
ExtraInfo, Faculty, HoldsDesignation)
from .models import (BranchChange, CoursesMtech, InitialRegistration, StudentRegistrationChecks,
MinimumCredits, Register, Thesis, FinalRegistration, ThesisTopicProcess,
Constants, FeePayments, TeachingCreditRegistration, SemesterMarks,
MarkSubmissionCheck, Dues,AssistantshipClaim, MTechGraduateSeminarReport,
PhDProgressExamination,CourseRequested, course_registration, MessDue, Assistantship_status)
from notification.views import academics_module_notif
from .forms import BranchChangeForm
demo_date = timezone.now()
# demo_date = demo_date - datetime.timedelta(days = 180)
# demo_date = demo_date + datetime.timedelta(days = 180)
# demo_date = demo_date + datetime.timedelta(days = 3)
# demo_date = demo_date - datetime.timedelta(days = 5)
student_status = None
hod_status = None
account_status = None
available_cse_seats = 100
available_ece_seats = 100
available_me_seats = 100
# assistantship_status = Assistantship_status.objects.all()
# for obj in assistantship_status:
# student_status = obj.student_status
# hod_status = obj.hod_status
# account_status = obj.account_status
@login_required(login_url='/accounts/login')
def academic_procedures_redirect(request):
return HttpResponseRedirect('/academic-procedures/main/')
@login_required(login_url='/accounts/login')
def main(request):
return HttpResponseRedirect('/academic-procedures/main/')
@login_required(login_url='/accounts/login')
def academic_procedures(request):
current_user = get_object_or_404(User, username=request.user.username)
#extra info details , user id used as main id
user_details = ExtraInfo.objects.select_related('user','department').get(user = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
if str(des.designation) == "student":
obj = Student.objects.select_related('id','id__user','id__department').get(id = user_details.id)
return HttpResponseRedirect('/academic-procedures/stu/')
# return HttpResponseRedirect('/logout/')
elif str(des.designation) == "Associate Professor" or str(des.designation) == "Professor" or str(des.designation) == "Assistant Professor" :
return HttpResponseRedirect('/academic-procedures/fac/')
# return HttpResponseRedirect('/logout/')
elif str(request.user) == "acadadmin" :
return HttpResponseRedirect('/aims/')
elif str(request.user) == "rizwan":
return HttpResponseRedirect('/academic-procedures/account/')
elif str(request.user) == "talib":
Messdue = MessDue.objects.all()
dues = Dues.objects.all()
return render(request,
'../templates/academic_procedures/messdueassistant.html' ,
{
'Mess_due' : Messdue,
'dues' : dues,
})
else:
return HttpResponse('person not found')
#
#
#
#
#
#
@login_required(login_url='/accounts/login')
def academic_procedures_faculty(request):
current_user = get_object_or_404(User, username=request.user.username)
#extra info details , user id used as main id
user_details = ExtraInfo.objects.select_related('user','department').get(user = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
fac_id = user_details
fac_name = user_details.user.first_name + " " + user_details.user.last_name
if str(des.designation) == "student":
return HttpResponseRedirect('/academic-procedures/main/')
elif str(request.user) == "acadadmin":
return HttpResponseRedirect('/academic-procedures/main/')
elif str(des.designation) == "Associate Professor" or str(des.designation) == "Professor" or str(des.designation) == "Assistant Professor":
object_faculty = Faculty.objects.select_related('id','id__user','id__department').get(id = user_details)
month = int(demo_date.month)
sem = []
if month>=7 and month<=12:
sem = [1,3,5,7]
else:
sem = [2,4,6,8]
student_flag = False
fac_flag = True
Faculty_department =user_details.department
# temp = Curriculum.objects.all().filter(course_code = "CS315L").first()
# Curriculum_Instructor.objects.create(curriculum_id = temp, instructor_id = user_details)
#thesis_supervision_request_list = ThesisTopicProcess.objects.all()
thesis_supervision_request_list = ThesisTopicProcess.objects.all().select_related().filter(supervisor_id = object_faculty)
approved_thesis_request_list = thesis_supervision_request_list.filter(approval_supervisor = True)
pending_thesis_request_list = thesis_supervision_request_list.filter(pending_supervisor = True)
faculty_list = get_faculty_list()
assistantship_request_list = AssistantshipClaim.objects.all()
hod_assistantship_request_list = assistantship_request_list.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval = False)
hod_approved_assistantship = assistantship_request_list.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(acad_approval = False)
ta_approved_assistantship_request_list = AssistantshipClaim.objects.all().filter(ta_supervisor_remark=True)
thesis_approved_assistantship_request_list = AssistantshipClaim.objects.all().filter(thesis_supervisor_remark=True)
approved_assistantship_request_list = ta_approved_assistantship_request_list | thesis_approved_assistantship_request_list
mtechseminar_request_list = MTechGraduateSeminarReport.objects.all().filter(Overall_grade = '')
phdprogress_request_list = PhDProgressExamination.objects.all().filter(Overall_grade = '')
courses_list = Curriculum_Instructor.objects.select_related('curriculum_id','instructor_id','curriculum_id__course_id','instructor_id__department','instructor_id__user').filter(instructor_id=user_details).filter(curriculum_id__sem__in = sem)
r = range(4)
return render(
request,
'../templates/academic_procedures/academicfac.html' ,
{
'student_flag' : student_flag,
'fac_flag' : fac_flag,
'hod_flag' : hod_status,
'thesis_supervision_request_list' : thesis_supervision_request_list,
'pending_thesis_request_list' : pending_thesis_request_list,
'approved_thesis_request_list' : approved_thesis_request_list,
'faculty_list' : faculty_list,
'courses_list' : courses_list,
'fac_id': fac_id,
'fac_name' : fac_name,
'department' : Faculty_department,
'assistantship_request_list' : assistantship_request_list,
'approved_assistantship_request_list' : approved_assistantship_request_list,
'hod_assistantship_request_list' : hod_assistantship_request_list,
'hod_approved_assistantship' : hod_approved_assistantship,
'mtechseminar_request_list' : mtechseminar_request_list,
'phdprogress_request_list' : phdprogress_request_list,
'r' : r,
})
else:
        return HttpResponse("user not found")
@login_required(login_url='/accounts/login')
def account(request):
assistant_account_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True)
assistant_pen_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(acad_approval = True).filter(account_approval = False)
assistant_account_length = len(assistant_account_list.filter(acad_approval = True).filter(account_approval = False))
return render(request,
'../templates/ais/account.html' ,
{
'assistant_account_length' : assistant_account_length,
'assistant_account_list' : assistant_account_list ,
'assistant_pen_list' : assistant_pen_list,
'account_flag' : account_status,
})
@login_required(login_url='/accounts/login')
def academic_procedures_student(request):
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.select_related('user','department').get(id = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
if str(des.designation) == "student":
obj = Student.objects.select_related('id','id__user','id__department').get(id = user_details.id)
if obj.programme.upper() == "PHD" :
student_flag = True
ug_flag = False
masters_flag = False
phd_flag = True
fac_flag = False
des_flag = False
elif obj.programme.upper() == "M.DES" :
student_flag = True
ug_flag = False
masters_flag = True
phd_flag = False
fac_flag = False
des_flag = True
elif obj.programme.upper() == "B.DES" :
student_flag = True
ug_flag = True
masters_flag = False
phd_flag = False
fac_flag = False
des_flag = True
elif obj.programme.upper() == "M.TECH" :
student_flag = True
ug_flag = False
masters_flag = True
phd_flag = False
fac_flag = False
des_flag = False
elif obj.programme.upper() == "B.TECH" :
student_flag = True
ug_flag = True
masters_flag = False
phd_flag = False
fac_flag = False
des_flag = False
else :
return HttpResponse("Student has no record")
# masters_flag=True
current_date = demo_date.date()
year = demo_date.year
registers = get_student_register(user_details.id)
user_sem = get_user_semester(request.user, ug_flag, masters_flag, phd_flag)
user_branch = get_user_branch(user_details)
batch = obj.batch_id
curr_id = batch.curriculum
curr_sem_id = Semester.objects.get(curriculum = curr_id, semester_no = obj.curr_semester_no)
try:
next_sem_id = Semester.objects.get(curriculum = curr_id, semester_no = obj.curr_semester_no+1)
except Exception as e:
next_sem_id = curr_sem_id
student_registration_check_pre = get_student_registrtion_check(obj,next_sem_id)
student_registration_check_final = get_student_registrtion_check(obj,next_sem_id)
cpi = get_cpi(user_details.id)
# branch change flag
branchchange_flag=True # True for testing, to be initialised as False
if user_sem==2:
branchchange_flag=True
pre_registration_date_flag = get_pre_registration_eligibility(current_date)
final_registration_date_flag = get_final_registration_eligibility(current_date)
add_or_drop_course_date_flag = get_add_or_drop_course_date_eligibility(current_date)
pre_registration_flag = False
final_registration_flag = False
if(student_registration_check_pre):
pre_registration_flag = student_registration_check_pre.pre_registration_flag
if(student_registration_check_final):
final_registration_flag = student_registration_check_final.final_registration_flag
acad_year = get_acad_year(user_sem, year)
currently_registered_courses = get_currently_registered_courses(user_details.id, user_sem)
next_sem_branch_course = get_sem_courses(next_sem_id, batch)
current_sem_branch_course = get_sem_courses(curr_sem_id, batch)
next_sem_registration_courses = get_sem_courses(next_sem_id, batch)
final_registration_choice, unavailable_courses_nextsem = get_final_registration_choices(next_sem_registration_courses,batch.year)
currently_registered_course = get_currently_registered_course(obj,obj.curr_semester_no)
current_credits = get_current_credits(currently_registered_course)
cur_cpi=0.0
details = {
'current_user': current_user,
'year': acad_year,
'user_sem': user_sem,
'user_branch' : str(user_branch),
'cpi' : cpi,
}
cur_cpi=details['cpi']
try:
pre_registered_course = InitialRegistration.objects.all().filter(student_id = user_details.id,semester_id = next_sem_id)
pre_registered_course_show = pre_registered_course
except Exception as e:
pre_registered_course = None
pre_registered_course_show = None
try:
final_registered_course = FinalRegistration.objects.all().filter(student_id = user_details.id,semester_id = next_sem_id)
add_courses_options = get_add_course_options(current_sem_branch_course, currently_registered_course, batch.year)
drop_courses_options = get_drop_course_options(currently_registered_course)
except Exception as e:
final_registered_course = None
drop_courses_options = None
add_courses_options = None
fee_payment_mode_list = dict(Constants.PaymentMode)
performance_list = []
result_announced = False
for i in currently_registered_courses:
try:
performance_obj = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = obj, curr_id = i).first()
except Exception as e:
performance_obj = None
performance_list.append(performance_obj)
for i in currently_registered_courses:
try:
result_announced_obj = MarkSubmissionCheck.objects.select_related().get(curr_id = i)
if result_announced_obj:
if result_announced_obj.announced == True:
result_announced = result_announced_obj.announced
else:
continue
except Exception as e:
continue
faculty_list = None
thesis_request_list = None
assistantship_list = None
pre_existing_thesis_flag = False
teaching_credit_registration_course = None
if masters_flag:
faculty_list = get_faculty_list()
thesis_request_list = ThesisTopicProcess.objects.all().filter(student_id = obj)
assistantship_list = AssistantshipClaim.objects.all().filter(student = obj)
pre_existing_thesis_flag = get_thesis_flag(obj)
if phd_flag:
pre_existing_thesis_flag = get_thesis_flag(obj)
teaching_credit_registration_course = Curriculum.objects.all().select_related().filter(batch = 2016, sem =6)
# Dues Check
#Initializing all due with -1 value , since generating no due certificate requires total due=0
lib_d, pc_d, hos_d, mess_d, acad_d = -1, -1, -1, -1, -1
if student_flag:
try:
obj = Dues.objects.select_related().get(student_id=Student.objects.select_related('id','id__user','id__department').get(id=request.user.username))
lib_d = obj.library_due
pc_d = obj.placement_cell_due
hos_d = obj.hostel_due
mess_d = obj.mess_due
acad_d = obj.academic_due
except ObjectDoesNotExist:
logging.warning("entry in DB not found for student")
tot_d = lib_d + acad_d + pc_d + hos_d + mess_d
obj = Student.objects.select_related('id','id__user','id__department').get(id=request.user.username)
course_list = []
for i in registers:
course_list.append(i.curr_id)
attendence = []
for i in course_list:
instructors = Curriculum_Instructor.objects.select_related('curriculum_id','instructor_id','curriculum_id__course_id','instructor_id__department','instructor_id__user').filter(curriculum_id=i)
pr,ab=0,0
for j in list(instructors):
presents = Student_attendance.objects.select_related('student_id','student_id__id','student_id__id__user','student_id__id__department','instructor_id','instructor_id__curriculum_id','instructor_id__curriculum_id__course_id','instructor_id__instructor_id','instructor_id__instructor_id__user','instructor_id__instructor_id__department').filter(student_id=obj,instructor_id=j, present=True)
absents = Student_attendance.objects.select_related('student_id','student_id__id','student_id__id__user','student_id__id__department','instructor_id','instructor_id__curriculum_id','instructor_id__curriculum_id__course_id','instructor_id__instructor_id','instructor_id__instructor_id__user','instructor_id__instructor_id__department').filter(student_id=obj,instructor_id=j, present=False)
pr += len(presents)
ab += len(absents)
attendence.append((i,pr,pr+ab))
cur_spi='Sem results not available' # To be fetched from db if result uploaded
Mess_bill = Monthly_bill.objects.filter(student_id = obj)
Mess_pay = Payments.objects.filter(student_id = obj)
# Branch Change Form save
if request.method=='POST':
if True:
# Processing Branch Change form
objb = BranchChange()
objb.branches=request.POST['branches']
objb.save()
return render(
request, '../templates/academic_procedures/academic.html',
{'details': details,
# 'calendar': calendar,
'currently_registered': currently_registered_course,
'pre_registered_course' : pre_registered_course,
'pre_registered_course_show' : pre_registered_course_show,
'final_registered_course' : final_registered_course,
'current_credits' : current_credits,
'courses_list': next_sem_branch_course,
'fee_payment_mode_list' : fee_payment_mode_list,
'next_sem_registration_courses': next_sem_registration_courses,
'final_registration_choice' : final_registration_choice,
'unavailable_courses_nextsem' : unavailable_courses_nextsem,
'performance_list' : performance_list,
'faculty_list' : faculty_list,
'thesis_request_list' : thesis_request_list,
'assistantship_list' : assistantship_list,
'next_sem': next_sem_id,
'curr_sem': curr_sem_id,
# 'final_register': final_register,
'student_flag' : student_flag,
'ug_flag' : ug_flag,
'masters_flag' : masters_flag,
'phd_flag' : phd_flag,
'fac_flag' : fac_flag,
'des_flag' : des_flag,
'result_announced' : result_announced,
'thesis_flag' : pre_existing_thesis_flag,
# 'change_branch': change_branch,
# 'add_course': add_course,
'add_courses_options': add_courses_options,
'drop_courses_options' : drop_courses_options,
# 'pre_register': pre_register,
'prd': pre_registration_date_flag,
'frd': final_registration_date_flag,
'adc_date_flag': add_or_drop_course_date_flag,
'pre_registration_flag' : pre_registration_flag,
'final_registration_flag': final_registration_flag,
# 'final_r': final_register_1,
'teaching_credit_registration_course' : teaching_credit_registration_course,
'cur_cpi': cur_cpi,
'cur_spi': cur_spi,
# 'mincr': minimum_credit,
'Mess_bill' : Mess_bill,
'Mess_pay' : Mess_pay,
'lib_d':lib_d,
'acad_d':acad_d,
'mess_d':mess_d,
'pc_d':pc_d,
'hos_d':hos_d,
'tot_d':tot_d,
'attendence':attendence,
'BranchChangeForm': BranchChangeForm(),
'BranchFlag':branchchange_flag,
'assistantship_flag' : student_status,
}
)
elif str(des.designation) == "Associate Professor" :
return HttpResponseRedirect('/academic-procedures/main/')
elif str(request.user) == "acadadmin" :
return HttpResponseRedirect('/academic-procedures/main/')
else:
return HttpResponse('user not found')
def dues_pdf(request):
template = get_template('academic_procedures/dues_pdf.html')
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.get(id = request.user)
des = HoldsDesignation.objects.all().filter(user = request.user).first()
name = ExtraInfo.objects.all().filter(id=request.user.username)[0].user
if str(des.designation) == "student":
obj = Student.objects.get(id = user_details.id)
context = {
'student_id' : request.user.username,
'degree' : obj.programme.upper(),
'name' : name.first_name +" "+ name.last_name,
'branch' : get_user_branch(user_details),
}
pdf = render_to_pdf('academic_procedures/dues_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=Bonafide.pdf'
return response
return HttpResponse("PDF could not be generated")
def facultyData(request):
current_value = request.POST['current_value']
try:
# students =ExtraInfo.objects.all().filter(user_type = "student")
faculty = ExtraInfo.objects.all().filter(user_type = "faculty")
facultyNames = []
for i in faculty:
name = i.user.first_name + " " + i.user.last_name
if current_value != "":
Lowname = name.lower()
Lowcurrent_value = current_value.lower()
if Lowcurrent_value in Lowname:
facultyNames.append(name)
else:
facultyNames.append(name)
faculty = json.dumps(facultyNames)
return HttpResponse(faculty)
except Exception as e:
return HttpResponse("error")
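# Hedged usage sketch: facultyData acts as an AJAX autocomplete endpoint. A client POSTs the
# text typed so far and receives a JSON list of matching faculty names. The URL below is an
# assumption about how this view is wired up in urls.py.
#
#     from django.test import Client
#     resp = Client().post('/academic-procedures/facultyData/', {'current_value': 'sha'})
#     names = json.loads(resp.content)   # e.g. a list of "First Last" strings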
def get_course_to_show_pg(initial_courses, final_register):
'''
    This function fetches the PG courses from the database and stores them in the list x.
@param:
initial_courses - The courses that the registered PG student has already selected.
final_register - Finally registered courses of the user.
@variables:
x - The courses that are not being finally registered.
'''
x = []
for i in initial_courses:
flag = 0
for j in final_register:
if(str(i.course_name) == str(j.course_id)):
flag = 1
if(flag == 0):
x.append(i)
return x
def get_pg_course(usersem, specialization):
'''
    This function fetches the PG Specialization courses from the database and stores them in the list result.
@param:
usersem - Current semester of the user.
specialization - has the specialization of the logged in PG student.
@variables:
result - The selected Specialization courses.
'''
usersem = 2
obj = CoursesMtech.objects.select_related().filter(specialization=specialization)
obj3 = CoursesMtech.objects.select_related().filter(specialization="all")
obj2 = Course.objects.filter(sem=usersem)
result = []
for i in obj:
p = i.c_id
for j in obj2:
if(str(j.course_name) == str(p)):
result.append(j)
for i in obj3:
p = i.c_id
for j in obj2:
if(str(j.course_name) == str(p)):
result.append(j)
return result
def get_add_course(branch, final):
'''
    This function returns the courses that can still be added after pre-registration.
    @param:
        branch - courses offered to the student's branch.
        final - courses already finally registered after pre-registration.
    @variables:
        x - course ids extracted from final.
        total_course - all the remaining branch courses that were not added.
'''
x = []
for i in final:
x.append(i.course_id)
total_course = []
for i in branch:
if i not in x:
total_course.append(i)
return total_course
@login_required(login_url='/accounts/login')
def apply_branch_change(request):
'''
This function is used to verify the details to apply for the branch change. It checks the requirement and tells the user if he/she can change the branch or not.
@param:
request - trivial
@variables:
branches - selected branches by the user.
student - details of the logged in user.
extraInfo_user - gets the user details from the extrainfo model.
cpi_data - cpi of the logged in user.
semester - user's semester.
label_for_change - boolean variable to check the eligibility.
'''
# Get all the departments
# branch_list = DepartmentInfo.objects.all()
branches = ['CSE', 'ME', 'ECE']
# Get the current logged in user
student = User.objects.all().filter(username=request.user).first()
# Get the current logged in user's cpi
extraInfo_user = ExtraInfo.objects.all().select_related('user','department').filter(user=student).first()
cpi_data = Student.objects.all().select_related('id','id__user','id__department').filter(id=extraInfo_user.id).first()
# for i in range(len(branch_list)):
# branch_cut = branch_list[i].name
# branches.append(branch_cut)
    label_for_change = False
    # ug_flag / masters_flag / phd_flag were undefined in this scope; derive them from the
    # student's programme so get_user_semester receives the arguments it expects.
    programme = cpi_data.programme.upper()
    ug_flag = programme in ("B.TECH", "B.DES")
    masters_flag = programme in ("M.TECH", "M.DES")
    phd_flag = programme == "PHD"
    semester = get_user_semester(extraInfo_user.id, ug_flag, masters_flag, phd_flag)
# semester = 2
if cpi_data.cpi >= 8 and semester >= 1 and semester <= 2:
label_for_change = True
context = {
'branches': branches,
'student': student,
'cpi_data': cpi_data,
'label_for_change': label_for_change,
}
return context
def branch_change_request(request):
'''
This function is used to apply the branch change request.
@param:
request - trivial
@variables:
current_user - details of the current user.
student - details of the logged in student.
extraInfo_user - gets the user details from the extrainfo model.
    department - branch the user applied for.
'''
if request.method == 'POST':
current_user = get_object_or_404(User, username=request.user.username)
extraInfo_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
student = Student.objects.all().select_related('id','id__user','id__department').filter(id=extraInfo_user.id).first()
department = DepartmentInfo.objects.all().filter(id=int(request.POST['branches'])).first()
change_save = BranchChange(
branches=department,
user=student
)
change_save.save()
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
@login_required(login_url='/acounts/login')
def approve_branch_change(request):
'''
This function is used to approve the branch change requests from acad admin's frame.
@param:
request - trivial
@variables:
choices - list of students who applied for the branch change.
    branches - branches selected by the student.
get_student - updating the student's branch after approval.
branch - branch of the current user.
'''
if request.method == 'POST':
values_length = len(request.POST.getlist('choice'))
choices = []
branches = []
for i in range(values_length):
for key, values in request.POST.lists():
if key == 'branch':
branches.append(values[i])
if key == 'choice':
choices.append(values[i])
else:
continue
changed_branch = []
for i in range(len(branches)):
get_student = ExtraInfo.objects.all().select_related('user','department').filter(id=choices[i][:7])
get_student = get_student[0]
branch = DepartmentInfo.objects.all().filter(name=branches[i])
get_student.department = branch[0]
changed_branch.append(get_student)
student = Student.objects.all().select_related('id','id__user','id__department').filter(id=choices[i][:7]).first()
change = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all().filter(user=student)
change = change[0]
change.delete()
try:
ExtraInfo.objects.bulk_update(changed_branch,['department'])
            messages.info(request, 'Apply for branch change successful')
except:
messages.info(request, 'Unable to proceed, we will get back to you very soon')
return HttpResponseRedirect('/academic-procedures/main')
else:
messages.info(request, 'Unable to proceed')
return HttpResponseRedirect('/academic-procedures/main')
# Function returning Branch , Banch data which was required many times
def get_batch_query_detail(month, year):
'''
    This function is used to get the batch detail; it simply returns the batch/branch query options which are required often.
@param:
month - current month
year - current year.
@variables:
stream1 - string BTech.
stream2 - string MTech.
    query_option1 - years to be shown on the acad admin's student course show page.
'''
stream1 = "B.Tech "
stream2 = "M.Tech "
query_option1 = {}
if(month >= 7):
query_option1 = {
stream1+str(year): stream1+str(year),
stream1+str(year-1): stream1+str(year-1),
stream1+str(year-2): stream1+str(year-2),
stream1+str(year-3): stream1+str(year-3),
stream1+str(year-4): stream1+str(year-4),
stream2+str(year): stream2+str(year),
            stream2+str(year-1): stream2+str(year-1)}
else:
query_option1 = {
stream1+str(year-1): stream1+str(year-1),
stream1+str(year-2): stream1+str(year-2),
stream1+str(year-3): stream1+str(year-3),
stream1+str(year-4): stream1+str(year-4),
stream1+str(year-5): stream1+str(year-5),
stream2+str(year-1): stream2+str(year-1),
stream2+str(year-2): stream2+str(year-2), }
return query_option1
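# Example (hypothetical date): for month=8, year=2021 this returns
# {'B.Tech 2021': 'B.Tech 2021', 'B.Tech 2020': 'B.Tech 2020', ..., 'B.Tech 2017': 'B.Tech 2017',
#  'M.Tech 2021': 'M.Tech 2021', 'M.Tech 2020': 'M.Tech 2020'}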
# view when Admin drops a user course
@login_required(login_url='/accounts/login')
def dropcourseadmin(request):
'''
This function is used to get the view when Acad Admin drops any course of any student.
@param:
request - trivial
@variables:
    data - student roll number and course code, joined by " - ".
    rid - Registration ID of Registers table
    response_data - data to be returned in the JSON response.
'''
data = request.GET.get('id')
data = data.split(" - ")
course_code = data[1]
# need to add batch and programme
curriculum_object = Curriculum.objects.all().filter(course_code = course_code)
try:
Register.objects.filter(curr_id = curriculum_object.first(),student_id=int(data[0])).delete()
    except Exception:
        logging.warning("could not drop course %s for student %s", course_code, data[0])
response_data = {}
return HttpResponse(json.dumps(response_data), content_type="application/json")
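# Hedged usage sketch: the admin page is assumed to call this endpoint via AJAX, passing the
# student roll number and course code joined by " - " (the roll number and URL below are
# hypothetical):
#
#     GET /academic-procedures/dropcourseadmin/?id=2018001 - CS315L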
@login_required(login_url='/accounts/login')
def gen_course_list(request):
if(request.POST):
try:
batch = request.POST['batch']
course_id = request.POST['course']
course = Courses.objects.get(id = course_id)
obj = course_registration.objects.all().filter(course_id = course)
except Exception as e:
batch=""
course=""
obj=""
students = []
for i in obj:
if i.student_id.batch_id.year == int(batch):
students.append(i.student_id)
html = render_to_string('academic_procedures/gen_course_list.html',
{'students': students, 'batch':batch, 'course':course_id}, request)
maindict = {'html': html}
obj = json.dumps(maindict)
return HttpResponse(obj, content_type='application/json')
# view where Admin verifies the registered courses of every student
@login_required(login_url='/accounts/login')
def verify_course(request):
'''
This function is used to get the view when Acad Admin verifies the registered courses of every student.
@param:
request - trivial
@variables:
current_user - details of current user.
desig_id - Finds the Acad admin whose designation is "Upper Division Clerk".
acadadmin - details of the acad person(logged in).
roll_no - roll number of all the students.
firstname - firstname of the students.
year - current year.
month - current month.
date - current date.
'''
if(request.POST):
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
desig_id = Designation.objects.all().filter(name='adminstrator').first()
temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
acadadmin = temp.working
k = str(user_details).split()
final_user = k[2]
if (str(acadadmin) != str(final_user)):
return HttpResponseRedirect('/academic-procedures/')
roll_no = request.POST["rollNo"]
obj = ExtraInfo.objects.all().select_related('user','department').filter(id=roll_no).first()
firstname = obj.user.first_name
lastname = obj.user.last_name
dict2 = {'roll_no': roll_no, 'firstname': firstname, 'lastname': lastname}
obj2 = Student.objects.all().select_related('id','id__user','id__department').filter(id=roll_no).first()
obj = Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = obj2)
curr_sem_id = obj2.curr_semester_no
details = []
current_sem_courses = get_currently_registered_course(roll_no,curr_sem_id)
idd = obj2
for z in current_sem_courses:
z=z[1]
course_code,course_name= str(z).split(" - ")
k = {}
            # reg_id has the course code appended to the roll number
            # so that when the registration is removed we can be redirected back to this view
k['reg_id'] = roll_no+" - "+course_code
k['rid'] = roll_no+" - "+course_code
            # Note: course name/ID naming is confusing here, be careful
courseobj2 = Courses.objects.all().filter(code = course_code)
# if(str(z.student_id) == str(idd)):
for p in courseobj2:
k['course_id'] = course_code
k['course_name'] = course_name
k['sem'] = curr_sem_id
k['credits'] = p.credit
details.append(k)
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
if(month >= 7):
semflag = 1
else:
semflag = 2
# TO DO Bdes
date = {'year': yearr, 'semflag': semflag}
html = render_to_string('academic_procedures/studentCourses.html',
{'details': details,
'dict2': dict2,
'date': date}, request)
maindict = {'html': html}
obj = json.dumps(maindict)
return HttpResponse(obj, content_type='application/json')
# view to generate all list of students
def acad_branch_change(request):
'''
This function is used to approve the branch changes requested by the students.
@param:
request - trivial
@variables:
current_user - logged in user
desig_id - Finds the Acad admin whose designation is "Upper Division Clerk".
acadadmin - details of the logged in acad admin.
user_details - details of the logged in user.
change_queries - gets all the details of branch changes from the database.
year - current year.
month - current month
date - current date.
    total_cse_seats - total available CSE seats.
    total_ece_seats - total available ECE seats.
    total_me_seats - total available ME seats.
    available_cse_seats - available CSE seats.
available_ece_seats - available ECE seats.
available_me_seats - available ME seats.
'''
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
acadadmin = temp.working
k = str(user_details).split()
final_user = k[2]
if (str(acadadmin) != str(final_user)):
return HttpResponseRedirect('/academic-procedures/')
# year = datetime.datetime.now().year
# month = datetime.datetime.now().month
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 0
query_option1 = get_batch_query_detail(month, year)
query_option2 = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
if(month >= 7):
semflag = 1
else:
semflag = 2
# TO DO Bdes
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
change_queries = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all()
# Total seats taken as some random value
total_cse_seats = 100
total_ece_seats = 100
total_me_seats = 100
total_cse_filled_seats = 98
total_ece_filled_seats = 98
total_me_filled_seats = 98
available_cse_seats = total_cse_seats - total_cse_filled_seats
available_ece_seats = total_ece_seats - total_ece_filled_seats
available_me_seats = total_me_seats - total_me_filled_seats
initial_branch = []
change_branch = []
available_seats = []
applied_by = []
cpi = []
for i in change_queries:
applied_by.append(i.user.id)
change_branch.append(i.branches.name)
students = Student.objects.all().select_related('id','id__user','id__department').filter(id=i.user.id).first()
user_branch = ExtraInfo.objects.all().select_related('user','department').filter(id=students.id.id).first()
initial_branch.append(user_branch.department.name)
cpi.append(students.cpi)
if i.branches.name == 'CSE':
available_seats.append(available_cse_seats)
elif i.branches.name == 'ECE':
available_seats.append(available_ece_seats)
elif i.branches.name == 'ME':
available_seats.append(available_me_seats)
else:
available_seats.append(0)
lists = zip(applied_by, change_branch, initial_branch, available_seats, cpi)
tag = False
if len(initial_branch) > 0:
tag = True
context = {
'list': lists,
'total': len(initial_branch),
'tag': tag
}
return render(
request,
'../templates/academic_procedures/academicadminforbranch.html',
{
'context': context,
'lists': lists,
'date': date,
'query_option1': query_option1,
'query_option2': query_option2,
'result_year' : result_year
}
)
@login_required(login_url='/accounts/login')
def phd_details(request):
'''
    This function is used to extract the PhD student's thesis details.
@param:
request - trivial
@variables:
current_user - logged in user
student - details of the logged in student.
thesis - gets the thesis details of the PhD student.
faculty - gets the chosen faculty's details.
user_details - details of the logged in user.
total_thesis - total number of applied thesis.
'''
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
student = Student.objects.all().select_related('id','id__user','id__department').filter(id=user_details.id).first()
thesis = Thesis.objects.all().filter(student_id=student).first()
#Professor = Designation.objects.all().filter(name='Professor')
#faculty = ExtraInfo.objects.all().filter(department=user_details.department,
# designation='Professor')
f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor"))
f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor"))
f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor"))
faculty = list(chain(f1,f2,f3))
faculties_list = []
for i in faculty:
faculties_list.append(str(i.user.first_name)+" "+str(i.user.last_name))
total_thesis = True
if(thesis is None):
total_thesis = False
context = {
'total_thesis': total_thesis,
'thesis': thesis,
}
return render(
request,
'../templates/academic_procedures/phdregistration.html',
{'context': context, 'faculty': faculties_list, 'student': student}
)
#
#
#
#
#
#
##
#
#
#
#
##
#
#
#
#
#
#
###
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
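# ---------------------------------------------------------------------------
# Helper/query functions used by the registration and result views below.
# ---------------------------------------------------------------------------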
def get_student_register(id):
return Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = id)
def get_pre_registration_eligibility(current_date):
try:
pre_registration_date = Calendar.objects.all().filter(description="Pre Registration").first()
prd_start_date = pre_registration_date.from_date
prd_end_date = pre_registration_date.to_date
if current_date>=prd_start_date and current_date<=prd_end_date:
return True
else :
return False
except Exception as e:
return False
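# This helper and the three below follow the same pattern: look up a Calendar entry by
# its description and report whether current_date lies inside [from_date, to_date].
# Example (with a hypothetical Calendar row: description="Pre Registration",
# from_date=2021-07-01, to_date=2021-07-10): passing date(2021, 7, 5) returns True.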
def get_final_registration_eligibility(current_date):
try:
frd = Calendar.objects.all().filter(description="Physical Reporting at the Institute").first()
frd_start_date = frd.from_date
frd_end_date = frd.to_date
if current_date>=frd_start_date and current_date<=frd_end_date:
return True
else :
return False
except Exception as e:
return False
def get_add_or_drop_course_date_eligibility(current_date):
try:
add_drop_course_date = Calendar.objects.all().filter(description="Last Date for Adding/Dropping of course").first()
adc_start_date = add_drop_course_date.from_date
adc_end_date = add_drop_course_date.to_date
if current_date>=adc_start_date and current_date<=adc_end_date:
return True
else :
return False
except Exception as e:
return False
def get_course_verification_date_eligibilty(current_date):
try:
course_verification_date = Calendar.objects.all().filter(description="course verification date").first()
verif_start_date = course_verification_date.from_date
verif_end_date = course_verification_date.to_date
if current_date>=verif_start_date and current_date<=verif_end_date:
return True
else :
return False
except Exception as e:
return False
def get_user_branch(user_details):
return user_details.department.name
def get_acad_year(user_sem, year):
if user_sem%2 == 1:
acad_year = str(year) + "-" + str(year+1)
elif user_sem%2 == 0:
acad_year = str(year-1) + "-" + str(year)
return acad_year
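# Example: get_acad_year(3, 2021) (odd semester) returns "2021-2022",
# while get_acad_year(4, 2021) (even semester) returns "2020-2021".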
def pre_registration(request):
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
count = request.POST.get('ct')
count = int(count)
reg_curr=[]
for i in range(1, count+1):
i = str(i)
choice = "choice["+i+"]"
slot = "slot["+i+"]"
if request.POST.get(choice)!='0':
course_id = Courses.objects.get(id = request.POST.get(choice))
courseslot_id = CourseSlot.objects.get(id = request.POST.get(slot))
p = InitialRegistration(
course_id = course_id,
semester_id = sem_id,
student_id = current_user,
course_slot_id = courseslot_id
)
else:
continue
reg_curr.append(p)
InitialRegistration.objects.bulk_create(reg_curr)
try:
check = StudentRegistrationChecks(
student_id = current_user,
pre_registration_flag = True,
final_registration_flag = False,
semester_id = sem_id
)
check.save()
messages.info(request, 'Pre-Registration Successful')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def get_student_registrtion_check(obj, sem):
return StudentRegistrationChecks.objects.all().filter(student_id = obj, semester_id = sem).first()
def final_registration(request):
if request.method == 'POST':
if request.POST.get('type_reg') == "register" :
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
choice = request.POST.getlist('choice')
slot = request.POST.getlist('slot')
values_length = 0
values_length = len(choice)
mode = str(request.POST.get('mode'))
transaction_id = str(request.POST.get('transaction_id'))
f_reg = []
for x in range(values_length):
if choice[x] != '0':
course_id = Courses.objects.get(id = choice[x])
courseslot_id = CourseSlot.objects.get(id = slot[x])
if FinalRegistration.objects.filter(student_id__batch_id__year = current_user.batch_id.year, course_id = course_id).count() < courseslot_id.max_registration_limit:
p = FinalRegistration(
course_id = course_id,
semester_id=sem_id,
student_id= current_user,
course_slot_id = courseslot_id,
verified = False
)
f_reg.append(p)
else:
messages.info(request, 'Final-Registration Failed\n'+course_id.code+'-'+course_id.name+' registration limit reached.')
return HttpResponseRedirect('/academic-procedures/main')
FinalRegistration.objects.bulk_create(f_reg)
obj = FeePayments(
student_id = current_user,
semester_id = sem_id,
mode = mode,
transaction_id = transaction_id
)
obj.save()
try:
StudentRegistrationChecks.objects.filter(student_id = current_user, semester_id = sem_id).update(final_registration_flag = True)
messages.info(request, 'Final-Registration Successful')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
elif request.POST.get('type_reg') == "change_register" :
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
FinalRegistration.objects.filter(student_id = current_user, semester_id = sem_id).delete()
count = request.POST.get('ct')
count = int(count)
mode = str(request.POST.get('mode'))
transaction_id = str(request.POST.get('transaction_id'))
f_reg=[]
for i in range(1, count+1):
i = str(i)
choice = "choice["+i+"]"
slot = "slot["+i+"]"
if request.POST.get(choice) != '0':
try:
course_id = Courses.objects.get(id = request.POST.get(choice))
courseslot_id = CourseSlot.objects.get(id = request.POST.get(slot))
if FinalRegistration.objects.filter(student_id__batch_id__year = current_user.batch_id.year, course_id = course_id).count() < courseslot_id.max_registration_limit:
p = FinalRegistration(
course_id = course_id,
semester_id=sem_id,
student_id= current_user,
course_slot_id = courseslot_id,
verified = False
)
f_reg.append(p)
else:
messages.info(request, 'Final-Registration Failed\n'+course_id.code+'-'+course_id.name+' registration limit reached.')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
FinalRegistration.objects.bulk_create(f_reg)
obj = FeePayments(
student_id = current_user,
semester_id = sem_id,
mode = mode,
transaction_id = transaction_id
)
obj.save()
try:
StudentRegistrationChecks.objects.filter(student_id = current_user, semester_id = sem_id).update(final_registration_flag = True)
messages.info(request, 'Registered course change successful')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def get_cpi(id):
obj = Student.objects.select_related('id','id__user','id__department').get(id = id)
return obj.cpi
def register(request):
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().select_related('id','id__user','id__department').filter(id=current_user.id).first()
values_length = 0
values_length = len(request.POST.getlist('choice'))
sem = request.POST.get('semester')
for x in range(values_length):
reg_curr=[]
for key, values in request.POST.lists():
if (key == 'choice'):
try:
last_id = Register.objects.all().aggregate(Max('r_id'))
last_id = last_id['r_id__max']+1
except Exception as e:
last_id = 1
curr_id = get_object_or_404(Curriculum, curriculum_id=values[x])
p = Register(
r_id=last_id,
curr_id=curr_id,
year=current_user.batch,
student_id=current_user,
semester=sem
)
reg_curr.append(p)
else:
continue
Register.objects.bulk_create(reg_curr)
messages.info(request, 'Pre-Registration Successful')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def add_courses(request):
"""
This function is used to add courses for the current semester
@param:
request - contains metadata about the requested page
@variables:
current_user - contains current logged in user
sem_id - contains current semester id
count - number of courses to be added
course_id - contains course id for a particular course
course_slot_id - contains course slot id for a particular course
reg_curr - list of registered courses object
choice - contains choice of a particular course
slot - contains slot of a particular course
# gg and cs
"""
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
count = request.POST.get('ct')
count = int(count)
reg_curr=[]
for i in range(1, count+1):
choice = "choice["+str(i)+"]"
slot = "slot["+str(i)+"]"
try:
course_id = Courses.objects.get(id = request.POST.get(choice))
courseslot_id = CourseSlot.objects.get(id = request.POST.get(slot))
# Check if maximum course registration limit has not reached and student has not already registered for that course
if course_registration.objects.filter(student_id__batch_id__year = current_user.batch_id.year, course_id = course_id).count() < courseslot_id.max_registration_limit and (course_registration.objects.filter(course_id=course_id, student_id=current_user).count() == 0):
p = course_registration(
course_id = course_id,
student_id=current_user,
course_slot_id = courseslot_id,
semester_id=sem_id
)
if p not in reg_curr:
reg_curr.append(p)
except Exception as e:
continue
course_registration.objects.bulk_create(reg_curr)
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def drop_course(request):
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().get(id=current_user.id)
values_length = 0
values_length = len(request.POST.getlist('choice'))
sem_id = request.POST.get('semester')
sem = Semester.objects.get(id = sem_id)
for x in range(values_length):
for key, values in request.POST.lists():
if (key == 'choice'):
course_id = get_object_or_404(Courses, id=values[x])
course_registration.objects.filter(course_id = course_id, student_id = current_user).delete()
else:
continue
messages.info(request, 'Course Successfully Dropped')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def add_thesis(request):
if request.method == 'POST':
try:
if(str(request.POST.get('by'))=="st"):
thesis_topic = request.POST.get('thesis_topic')
research_area = request.POST.get('research_area')
supervisor_faculty = get_object_or_404(User, username = request.POST.get('supervisor'))
supervisor_faculty = ExtraInfo.objects.select_related('user','department').get(user = supervisor_faculty)
supervisor_faculty = Faculty.objects.select_related('id','id__user','id__department').get(id = supervisor_faculty)
try:
co_supervisor_faculty = get_object_or_404(User, username = request.POST.get('co_supervisor'))
co_supervisor_faculty = ExtraInfo.objects.select_related('user','department').get(user = co_supervisor_faculty)
co_supervisor_faculty = Faculty.objects.select_related('id','id__user','id__department').get(id = co_supervisor_faculty)
except Exception as e:
co_supervisor_faculty = None
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().select_related('id','id__user','id__department').filter(id=current_user.id).first()
try:
curr_id = request.POST.get('curr_id')
curr_id = Curriculum.objects.select_related().get(curriculum_id = curr_id)
except Exception as e:
curr_id = None
p = ThesisTopicProcess(
student_id = current_user,
research_area = research_area,
thesis_topic = thesis_topic,
curr_id = curr_id,
supervisor_id = supervisor_faculty,
co_supervisor_id = co_supervisor_faculty,
submission_by_student = True,
pending_supervisor = True,
)
p.save()
messages.info(request, 'Thesis Successfully Added')
return HttpResponseRedirect('/academic-procedures/main/')
elif(str(request.POST.get('by'))=="fac"):
obj = request.POST.get('obj_id')
obj = ThesisTopicProcess.objects.get(id = obj)
member1 = get_object_or_404(User, username = request.POST.get('member1'))
member1 = ExtraInfo.objects.select_related('user','department').get(user = member1)
member1 = Faculty.objects.select_related('id','id__user','id__department').get(id = member1)
member2 = get_object_or_404(User, username = request.POST.get('member2'))
member2 = ExtraInfo.objects.select_related('user','department').get(user = member2)
member2 = Faculty.objects.select_related('id','id__user','id__department').get(id = member2)
try:
member3 = get_object_or_404(User, username = request.POST.get('member3'))
member3 = ExtraInfo.objects.select_related('user','department').get(user = member3)
member3 = Faculty.objects.select_related('id','id__user','id__department').get(id = member3)
except Exception as e:
member3 = None
if(str(request.POST.get('approval'))=="yes"):
obj.pending_supervisor = False
obj.member1 = member1
obj.member2 = member2
obj.member3 = member3
obj.approval_supervisor = True
obj.forwarded_to_hod = True
obj.pending_hod = True
obj.save()
elif(request.POST.get('approval')=="no"):
obj.pending_supervisor = False
obj.member1 = member1
obj.member2 = member2
obj.member3 = member3
obj.approval_supervisor = False
obj.forwarded_to_hod = False
obj.pending_hod = False
obj.save()
else:
logging.warning("Not approved till now")
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main/')
return HttpResponseRedirect('/academic-procedures/main/')
def get_final_registration_choices(branch_courses,batch):
course_option = []
unavailable_courses = []
for courseslot in branch_courses:
max_limit = courseslot.max_registration_limit
lis = []
for course in courseslot.courses.all():
if FinalRegistration.objects.filter(student_id__batch_id__year = batch, course_id = course).count() < max_limit:
lis.append(course)
else:
unavailable_courses.append(course)
course_option.append((courseslot, lis))
return course_option, unavailable_courses
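# Returns a pair: a list of (course_slot, [courses still below the slot's registration
# limit for this batch]) tuples, and a flat list of courses whose limit is already full.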
def get_add_course_options(branch_courses, current_register, batch):
course_option = []
courses = current_register
slots = []
for c in current_register:
slots.append(c[0])
for courseslot in branch_courses:
max_limit = courseslot.max_registration_limit
if courseslot not in slots:
lis = []
for course in courseslot.courses.all():
if course_registration.objects.filter(student_id__batch_id__year = batch, course_id = course).count() < max_limit:
lis.append(course)
course_option.append((courseslot, lis))
return course_option
def get_drop_course_options(current_register):
courses = []
for item in current_register:
if item[0].type != "Professional Core":
courses.append(item[1])
return courses
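# current_register is the list of (course_slot, course) pairs produced by
# get_currently_registered_course(); only non-"Professional Core" courses can be dropped.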
def get_user_semester(roll_no, ug_flag, masters_flag, phd_flag):
roll = str(roll_no)
now = demo_date
year, month = now.year, int(now.month)
y = str(year)
if(ug_flag):
if(roll[2].isdigit()):
roll = int(roll[:4])
else:
roll = int("20"+roll[:2])
user_year = year - roll
elif(masters_flag or phd_flag):
roll = int(roll[:2])
user_year = int(y[-2:]) - roll
sem = 'odd'
if month >= 7 and month<=12:
sem = 'odd'
else:
sem = 'even'
if sem == 'odd':
return user_year * 2 + 1
else:
return user_year * 2
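# Example (assuming demo_date falls in August 2021): a B.Tech roll number starting with
# "2019" gives user_year = 2 and an odd semester, so the function returns 5.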
def get_branch_courses(roll_no, user_sem, branch):
roll = str(roll_no)
year = int(roll[:4])
courses = Curriculum.objects.all().select_related().filter(batch=(year))
courses = courses.filter(sem = user_sem)
courses = courses.filter(floated = True)
course_list = []
for course in courses:
if branch.lower() == course.branch.lower() :
course_list.append(course)
elif course.branch.lower() == 'common':
course_list.append(course)
return course_list
def get_sem_courses(sem_id, batch):
courses = []
course_slots = CourseSlot.objects.all().filter(semester_id = sem_id)
for slot in course_slots:
courses.append(slot)
return courses
def get_currently_registered_courses(id, user_sem):
obj = Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id=id, semester=user_sem)
ans = []
for i in obj:
course = Curriculum.objects.select_related().get(curriculum_id=i.curr_id.curriculum_id)
ans.append(course)
return ans
def get_currently_registered_course(id, sem_id):
obj = course_registration.objects.all().filter(student_id = id, semester_id=sem_id)
courses = []
for i in obj:
courses.append((i.course_slot_id,i.course_id))
return courses
def get_current_credits(obj):
credits = 0
for i in obj:
credits = credits + i[1].credit
return credits
def get_faculty_list():
f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor"))
f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor"))
f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor"))
faculty = list(chain(f1,f2,f3))
faculty_list = []
for i in faculty:
faculty_list.append(i)
return faculty_list
def get_thesis_flag(student):
obj = ThesisTopicProcess.objects.all().select_related().filter(student_id = student)
if(obj):
return True
else:
return False
@login_required(login_url='/accounts/login')
def acad_person(request):
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.select_related('user','department').get(user = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
if str(des.designation) == "student":
return HttpResponseRedirect('/academic-procedures/main/')
elif str(des.designation) == "Associate Professor" :
return HttpResponseRedirect('/academic-procedures/main/')
elif str(request.user) == "acadadmin" :
# year = datetime.datetime.now().year
# month = datetime.datetime.now().month
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 0
query_option1 = get_batch_query_detail(month, year)
query_option2 = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
if(month >= 7):
semflag = 1
else:
semflag = 2
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
result_year = []
result_year = get_batch_all()
# result_year = [1,2]
change_queries = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all()
course_verification_date = get_course_verification_date_eligibilty(demo_date.date())
# Placeholder seat availability (100 total, 98 filled, mirroring the branch change view above)
available_cse_seats = available_ece_seats = available_me_seats = 2
initial_branch = []
change_branch = []
available_seats = []
applied_by = []
cpi = []
for i in change_queries:
applied_by.append(i.user.id)
change_branch.append(i.branches.name)
students = Student.objects.all().select_related('id','id__user','id__department').filter(id=i.user.id).first()
user_branch = ExtraInfo.objects.all().select_related('user','department').filter(id=students.id.id).first()
initial_branch.append(user_branch.department.name)
cpi.append(students.cpi)
if i.branches.name == 'CSE':
available_seats.append(available_cse_seats)
elif i.branches.name == 'ECE':
available_seats.append(available_ece_seats)
elif i.branches.name == 'ME':
available_seats.append(available_me_seats)
lists = zip(applied_by, change_branch, initial_branch, available_seats, cpi)
tag = False
if len(initial_branch) > 0:
tag = True
context = {
'list': lists,
'total': len(initial_branch),
'tag': tag
}
submitted_course_list = []
obj_list = MarkSubmissionCheck.objects.all().select_related().filter(verified= False,submitted = True)
for i in obj_list:
if int(i.curr_id.batch)+int(i.curr_id.sem)/2 == int(demo_date.year):
submitted_course_list.append(i.curr_id)
else:
continue
# submitted_course_list = SemesterMarks.objects.all().filter(curr_id__in = submitted_course_list)
batch_grade_data = get_batch_grade_verification_data(result_year)
return HttpResponseRedirect('/aims/')
else:
return HttpResponse('user not found')
def acad_proced_global_context():
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 0
query_option1 = get_batch_query_detail(month, year)
query_option2 = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
if(month >= 7):
semflag = 1
else:
semflag = 2
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
result_year = []
result_year = get_batch_all()
# result_year = [1,2]
change_queries = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all()
course_verification_date = get_course_verification_date_eligibilty(demo_date.date())
# Placeholder seat availability (100 total, 98 filled, mirroring the branch change view above)
available_cse_seats = available_ece_seats = available_me_seats = 2
initial_branch = []
change_branch = []
available_seats = []
applied_by = []
cpi = []
for i in change_queries:
applied_by.append(i.user.id)
change_branch.append(i.branches.name)
students = Student.objects.all().select_related('id','id__user','id__department').filter(id=i.user.id).first()
user_branch = ExtraInfo.objects.all().select_related('user','department').filter(id=students.id.id).first()
initial_branch.append(user_branch.department.name)
cpi.append(students.cpi)
if i.branches.name == 'CSE':
available_seats.append(available_cse_seats)
elif i.branches.name == 'ECE':
available_seats.append(available_ece_seats)
elif i.branches.name == 'ME':
available_seats.append(available_me_seats)
lists = zip(applied_by, change_branch, initial_branch, available_seats, cpi)
tag = False
if len(initial_branch) > 0:
tag = True
context = {
'list': lists,
'total': len(initial_branch),
'tag': tag
}
submitted_course_list = []
obj_list = MarkSubmissionCheck.objects.all().select_related().filter(verified= False,submitted = True)
for i in obj_list:
if int(i.curr_id.batch)+int(i.curr_id.sem)/2 == int(demo_date.year):
submitted_course_list.append(i.curr_id)
else:
submitted_course_list.append(i.curr_id)
#continue
# submitted_course_list = SemesterMarks.objects.all().filter(curr_id__in = submitted_course_list)
batch_grade_data = get_batch_grade_verification_data(result_year)
batch_branch_data = get_batch_branch_data(result_year)
return {
'context': context,
'lists': lists,
'date': date,
'query_option1': query_option1,
'query_option2': query_option2,
'course_verification_date' : course_verification_date,
'submitted_course_list' : submitted_course_list,
'result_year' : result_year,
'batch_grade_data' : batch_grade_data,
'batch_branch_data': batch_branch_data
}
def get_batch_all():
result_year = []
if demo_date.month >=7:
result_year = [demo_date.year, demo_date.year-1, demo_date.year-2, demo_date.year-3]
# result_year = [1,2]
else :
result_year = [demo_date.year-1,demo_date.year-2, demo_date.year-3, demo_date.year-4]
return result_year
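# Example: with demo_date in August 2021 this returns [2021, 2020, 2019, 2018];
# between January and June 2022 it returns the same four batch years.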
def announce_results(request):
i = int(request.POST.get('id'))
year = get_batch_all()
acad = get_object_or_404(User, username="acadadmin")
student_list = Student.objects.all().select_related('id','id__user','id__department').filter(batch = year[i-1])
# for obj in student_list:
# academics_module_notif(acad, obj.id.user, 'result_announced')
courses_list = Curriculum.objects.all().select_related().filter(batch = year[i-1])
rsl = []
for obj in courses_list:
try :
o = MarkSubmissionCheck.objects.select_related().get(curr_id = obj)
o.announced = True
rsl.append(o)
except Exception as e:
continue
MarkSubmissionCheck.objects.bulk_update(rsl,['announced'])
return JsonResponse({'status': 'success', 'message': 'Successfully Accepted'})
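# Builds, for each of the four result years, the floated Curriculum entries split by
# branch (CSE/ECE/ME) along with their MarkSubmissionCheck objects; the result feeds the
# 'batch_grade_data' entry returned by acad_proced_global_context() above.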
def get_batch_grade_verification_data(list):
semester_marks = []
batch_1_list_CSE = []
batch_2_list_CSE = []
batch_3_list_CSE = []
batch_4_list_CSE = []
batch_1_list_ECE = []
batch_2_list_ECE = []
batch_3_list_ECE = []
batch_4_list_ECE = []
batch_1_list_ME = []
batch_2_list_ME = []
batch_3_list_ME = []
batch_4_list_ME = []
c = Curriculum.objects.all().select_related().filter(batch = list[0]).filter(floated = True)
c_cse = c.filter(branch = 'CSE')
c_me = c.filter(branch = 'ME')
c_ece = c.filter(branch = 'ECE')
for i in c_cse:
batch_1_list_CSE.append(i)
for i in c_me:
batch_1_list_ME.append(i)
for i in c_ece:
batch_1_list_ECE.append(i)
for i in c:
try:
obj_sem = MarkSubmissionCheck.objects.select_related().get(curr_id = i)
if obj_sem:
semester_marks.append(obj_sem)
else:
continue
except Exception as e:
continue
c = Curriculum.objects.all().select_related().filter(batch = list[1]).filter(floated = True)
c_cse = c.filter(branch = 'CSE')
c_me = c.filter(branch = 'ME')
c_ece = c.filter(branch = 'ECE')
for i in c_cse:
batch_2_list_CSE.append(i)
for i in c_me:
batch_2_list_ME.append(i)
for i in c_ece:
batch_2_list_ECE.append(i)
for i in c:
try:
obj_sem = MarkSubmissionCheck.objects.select_related().get(curr_id = i)
if obj_sem:
semester_marks.append(obj_sem)
else:
continue
except Exception as e:
continue
c = Curriculum.objects.all().select_related().filter(batch = list[2]).filter(floated = True)
c_cse = c.filter(branch = 'CSE')
c_me = c.filter(branch = 'ME')
c_ece = c.filter(branch = 'ECE')
for i in c_cse:
batch_3_list_CSE.append(i)
for i in c_me:
batch_3_list_ME.append(i)
for i in c_ece:
batch_3_list_ECE.append(i)
for i in c:
try:
obj_sem = MarkSubmissionCheck.objects.select_related().get(curr_id = i)
if obj_sem:
semester_marks.append(obj_sem)
else:
continue
except Exception as e:
continue
c = Curriculum.objects.all().select_related().filter(batch = list[3]).filter(floated = True)
c_cse = c.filter(branch = 'CSE')
c_me = c.filter(branch = 'ME')
c_ece = c.filter(branch = 'ECE')
for i in c_cse:
batch_4_list_CSE.append(i)
for i in c_me:
batch_4_list_ME.append(i)
for i in c_ece:
batch_4_list_ECE.append(i)
for i in c:
try:
obj_sem = MarkSubmissionCheck.objects.select_related().get(curr_id = i)
if obj_sem:
semester_marks.append(obj_sem)
else:
continue
except Exception as e:
continue
batch_1_list = {
'batch_list_year' : list[0],
'batch_list_ME' : batch_1_list_ME,
'batch_list_ECE' : batch_1_list_ECE,
'batch_list_CSE' : batch_1_list_CSE
}
batch_2_list = {
'batch_list_year' : list[1],
'batch_list_ME' : batch_2_list_ME,
'batch_list_ECE' : batch_2_list_ECE,
'batch_list_CSE' : batch_2_list_CSE
}
batch_3_list = {
'batch_list_year' : list[2],
'batch_list_ME' : batch_3_list_ME,
'batch_list_ECE' : batch_3_list_ECE,
'batch_list_CSE' : batch_3_list_CSE
}
batch_4_list = {
'batch_list_year' : list[3],
'batch_list_ME' : batch_4_list_ME,
'batch_list_ECE' : batch_4_list_ECE,
'batch_list_CSE' : batch_4_list_CSE
}
batch_grade_data_set = {'batch_grade_data' : [batch_1_list, batch_2_list, batch_3_list, batch_4_list],
'batch_sub_check' : semester_marks}
return batch_grade_data_set
def get_batch_branch_data(result_year):
batches = []
for batch in Batch.objects.all():
if batch.year in result_year:
batches.append(batch)
return batches
@login_required(login_url='/accounts/login')
def student_list(request):
if(request.POST):
batch = request.POST["batch"]
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 1
if(month >= 7):
semflag = 1
else:
semflag = 2
batch_year_option = get_batch_query_detail(month, year)
branch_option = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
batch_id = Batch.objects.get(id = batch)
student_obj = Student.objects.all().filter(batch_id = batch_id)
student = []
for obj in student_obj:
curr_id = batch_id.curriculum
sem_id = Semester.objects.get(curriculum = curr_id, semester_no = obj.curr_semester_no + 1)
try:
reg = StudentRegistrationChecks.objects.all().filter(student_id = obj, semester_id = sem_id).first()
pay = FeePayments.objects.all().filter(student_id = obj, semester_id = sem_id).first()
final = FinalRegistration.objects.all().filter(student_id = obj, semester_id = sem_id,verified = False)
except Exception as e:
reg = None
pay = None
final = None
if reg:
if reg.final_registration_flag == True and final:
student.append((obj,pay,final))
else:
continue
else:
continue
html = render_to_string('academic_procedures/student_table.html',
{'student': student}, request)
maindict = {'date': date,
'query_option1': batch_year_option,
'query_option2': branch_option,
'html': html,
'queryflag': queryflag}
obj = json.dumps(maindict)
return HttpResponse(obj, content_type='application/json')
def process_verification_request(request):
if request.is_ajax():
return verify_registration(request)
return JsonResponse({'status': 'Failed'}, status=400)
@transaction.atomic
def verify_registration(request):
if request.POST.get('status_req') == "accept" :
student_id = request.POST.get('student_id')
student = Student.objects.get(id = student_id)
batch = student.batch_id
curr_id = batch.curriculum
sem_id = Semester.objects.get(curriculum = curr_id, semester_no = student.curr_semester_no+1)
final_register_list = FinalRegistration.objects.all().filter(student_id = student, verified = False, semester_id = sem_id)
sem_no = student.curr_semester_no + 1
with transaction.atomic():
ver_reg = []
for obj in final_register_list:
p = course_registration(
course_id=obj.course_id,
student_id=student,
semester_id=obj.semester_id,
course_slot_id = obj.course_slot_id
)
ver_reg.append(p)
o = FinalRegistration.objects.filter(id= obj.id).update(verified = True)
course_registration.objects.bulk_create(ver_reg)
academics_module_notif(request.user, student.id.user, 'registration_approved')
Student.objects.filter(id = student_id).update(curr_semester_no = sem_no)
return JsonResponse({'status': 'success', 'message': 'Successfully Accepted'})
elif request.POST.get('status_req') == "reject" :
reject_reason = request.POST.get('reason')
student_id = request.POST.get('student_id')
student_id = Student.objects.get(id = student_id)
batch = student_id.batch_id
curr_id = batch.curriculum
sem_id = Semester.objects.get(curriculum = curr_id, semester_no = student_id.curr_semester_no + 1)
with transaction.atomic():
academicadmin = get_object_or_404(User, username = "acadadmin")
FinalRegistration.objects.filter(student_id = student_id, verified = False, semester_id = sem_id).delete()
StudentRegistrationChecks.objects.filter(student_id = student_id, semester_id = sem_id).update(final_registration_flag = False)
FeePayments.objects.filter(student_id = student_id, semester_id = sem_id).delete()
academics_module_notif(academicadmin, student_id.id.user, 'Registration Declined - '+reject_reason)
return JsonResponse({'status': 'success', 'message': 'Successfully Rejected'})
def get_registration_courses(courses):
x = [[]]
for temp in courses:
flag = False
i = str(temp.course_code)
i = i[:5]
for j in x:
if j:
name = j[0]
name = str(name.course_code)
name = name[:5]
if i.upper() == name.upper():
j.append(temp)
flag = True
else :
continue
if not flag:
x.append([temp])
return x
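# Groups courses whose course_code shares the same first five characters; e.g. the
# hypothetical codes "CS101A" and "CS101B" would land in the same sub-list of the result.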
def teaching_credit_register(request) :
if request.method == 'POST':
try:
roll = request.POST.get('roll')
course1 = request.POST.get('course1')
roll = str(roll)
student_id = get_object_or_404(User, username=request.POST.get('roll'))
student_id = ExtraInfo.objects.all().select_related('user','department').filter(user=student_id).first()
student_id = Student.objects.all().select_related('id','id__user','id__department').filter(id=student_id.id).first()
course1 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course1'))
course2 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course2'))
course3 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course3'))
course4 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course4'))
p = TeachingCreditRegistration(
student_id = student_id,
curr_1 = course1,
curr_2 = course2,
curr_3 = course3,
curr_4 = course4
)
p.save()
messages.info(request, 'Teaching credit registration successful')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def course_marks_data(request):
try:
curriculum_id = request.POST.get('curriculum_id')
course = Curriculum.objects.select_related().get(curriculum_id = curriculum_id)
student_list = Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = course)
mrks = []
for obj in student_list:
o = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = obj.student_id).filter(curr_id = course).first()
if o :
continue
else :
p = SemesterMarks(
student_id = obj.student_id,
q1 = 0,
mid_term = 0,
q2 = 0,
end_term = 0,
other = 0,
grade = None,
curr_id = course
)
mrks.append(p)
SemesterMarks.objects.bulk_create(mrks)
enrolled_student_list = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = course)
grade_submission_date_eligibility = False
try :
d = Calendar.objects.get(description = "grade submission date")
if demo_date.date() >= d.from_date and demo_date.date() <= d.to_date :
grade_submission_date_eligibility = True
except Exception as e:
grade_submission_date_eligibility = False
data = render_to_string('academic_procedures/course_marks_data.html',
{'enrolled_student_list' : enrolled_student_list,
'course' : course,
'grade_submission_date_eligibility' : grade_submission_date_eligibility}, request)
obj = json.dumps({'data' : data})
return HttpResponse(obj, content_type = 'application/json')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
def submit_marks(request):
try:
user = request.POST.getlist('user')
q1 = request.POST.getlist('q1_marks')
mid = request.POST.getlist('mid_marks')
q2 = request.POST.getlist('q2_marks')
end = request.POST.getlist('end_marks')
other = request.POST.getlist('other_marks')
try:
grade = request.POST.getlist('grade')
except Exception as e:
grade = None
messages.info(request, ' Successful')
values_length = len(request.POST.getlist('user'))
curr_id = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('curriculum_id'))
for x in range(values_length):
student_id = get_object_or_404(User, username = user[x])
student_id = ExtraInfo.objects.select_related('user','department').get(id = student_id)
student_id = Student.objects.select_related('id','id__user','id__department').get(id = student_id)
if grade:
g = grade[x]
else :
g = None
st_existing = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = student_id).filter(curr_id = curr_id).first()
if st_existing :
st_existing.q1 = q1[x]
st_existing.mid_term = mid[x]
st_existing.q2 = q2[x]
st_existing.end_term = end[x]
st_existing.other = other[x]
st_existing.grade = g
st_existing.save()
else :
p = SemesterMarks(
student_id = student_id,
q1 = q1[x],
mid_term = mid[x],
q2 = q2[x],
end_term = end[x],
other = other[x],
grade = g,
curr_id = curr_id
)
p.save()
if request.POST.get('final_submit') == "True":
try:
o_sub = MarkSubmissionCheck.objects.select_related().get(curr_id = curr_id)
except Exception as e:
o_sub = None
if o_sub:
o_sub.submitted = True
o_sub.save()
else:
o_sub_create = MarkSubmissionCheck(
curr_id = curr_id,
verified = False,
submitted =True,
announced = False,)
o_sub_create.save()
if request.POST.get('final_submit') == "False":
try:
sub_obj = MarkSubmissionCheck.objects.select_related().get(curr_id = curr_id)
except Exception as e:
sub_obj = None
if sub_obj:
continue
else :
sub_obj_create = MarkSubmissionCheck(
curr_id = curr_id,
verified = False,
submitted =False,
announced = False)
sub_obj_create.save()
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
def verify_course_marks_data(request):
try:
curriculum_id = request.POST.get('curriculum_id')
course = Curriculum.objects.select_related().get(curriculum_id = curriculum_id)
enrolled_student_list = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = course)
grade_verification_date_eligibility = False
try :
d = Calendar.objects.get(description = "grade verification date")
if demo_date.date() >= d.from_date and demo_date.date() <= d.to_date :
grade_verification_date_eligibility = True
except Exception as e:
grade_verification_date_eligibility = False
data = render_to_string('academic_procedures/verify_course_marks_data.html',
{'enrolled_student_list' : enrolled_student_list,
'course' : course,
'grade_verification_date_eligibility' : grade_verification_date_eligibility}, request)
obj = json.dumps({'data' : data})
return HttpResponse(obj, content_type = 'application/json')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
########################################
##########GLOBAL VARIABLE###############
########################################
verified_marks_students = [[]]
verified_marks_students_curr = None
########################################
##########GLOBAL VARIABLE###############
########################################
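# These module-level globals hold the students and grades of the most recently verified
# course (set in verify_marks below) so that generate_grade_pdf() can render them.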
def verify_marks(request):
try:
global verified_marks_students
global verified_marks_students_curr
verified_marks_students = [[]]
verified_marks_students_curr = None
user = request.POST.getlist('user')
curr_id = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('curriculum_id'))
grade = request.POST.getlist('grade')
values_length = len(request.POST.getlist('user'))
ver_gr = []
for x in range(values_length):
student_id = get_object_or_404(User, username = user[x])
student_id = ExtraInfo.objects.select_related('user','department').get(id = student_id)
student_id = Student.objects.select_related('id','id__user','id__department').get(id = student_id)
if grade:
g = grade[x]
else :
g = None
st_existing = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = student_id).filter(curr_id = curr_id).first()
st_existing.grade = g
ver_gr.append(st_existing)
verified_marks_students.append([student_id,g])
SemesterMarks.objects.bulk_update(ver_gr,['grade'])
verified_marks_students_curr = curr_id
obj = MarkSubmissionCheck.objects.select_related().get(curr_id = curr_id)
obj.verified = True
obj.save()
return HttpResponseRedirect('/aims/')
except Exception as e:
return HttpResponseRedirect('/aims/')
def render_to_pdf(template_src, context_dict):
template = get_template(template_src)
html = template.render(context_dict)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return None
def generate_grade_pdf(request):
instructor = Curriculum_Instructor.objects.all().select_related('curriculum_id','instructor_id','curriculum_id__course_id','instructor_id__department','instructor_id__user').filter(curriculum_id = verified_marks_students_curr).first()
context = {'verified_marks_students' : verified_marks_students,
'verified_marks_students_curr' : verified_marks_students_curr,
'instructor' : instructor}
pdf = render_to_pdf('academic_procedures/generate_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' %(verified_marks_students_curr.course_code)
return response
return HttpResponse("PDF could not be generated")
def generate_result_pdf(request):
batch = request.POST.get('batch')
branch = request.POST.get('branch')
programme = request.POST.get('programme')
student_list = []
branch_list = []
result_list = [[]]
curriculum_list = []
if programme == "":
return HttpResponse("please insert programme")
student_obj = Student.objects.all().select_related('id','id__user','id__department').filter(programme = programme)
if batch == "":
return HttpResponse("please insert batch")
else:
student_obj = student_obj.filter(batch = int(batch))
if branch == "" :
return HttpResponse("please insert branch")
else :
dep_objects = DepartmentInfo.objects.get(name = str(branch))
branch_objects = ExtraInfo.objects.all().select_related('user','department').filter(department = dep_objects)
for i in branch_objects:
branch_list.append(i)
for i in student_obj:
if i.id in branch_list:
student_list.append(i)
else:
continue
curriculum_obj = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = str(branch)).filter(programme = programme)
curriculum_obj_common = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = 'Common').filter(programme = programme)
for i in curriculum_obj:
curriculum_list.append(i)
for i in curriculum_obj_common:
curriculum_list.append(i)
for i in student_list :
x = []
x.append(i.id.user.username)
x.append(i.id.user.first_name+" "+i.id.user.last_name)
for j in curriculum_list :
grade_obj = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = j).filter(student_id = i).first()
if grade_obj :
x.append(grade_obj.grade)
else :
x.append("-")
spi = get_spi(curriculum_list ,x)
x.append(spi)
result_list.append(x)
context = {'batch' : batch,
'branch' : branch,
'programme' : programme,
'course_list' : curriculum_list,
'result_list' : result_list}
pdf = render_to_pdf('academic_procedures/generate_result_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' %(programme + batch + branch)
return response
return HttpResponse("PDF could not be generated")
def generate_grade_sheet_pdf(request):
batch = request.POST.get('batch')
branch = request.POST.get('branch')
programme = request.POST.get('programme')
student_list = []
branch_list = []
result_list = [[]]
curriculum_list = []
if programme == "":
return HttpResponse("please insert programme")
student_obj = Student.objects.all().select_related('id','id__user','id__department').filter(programme = programme)
if batch == "":
return HttpResponse("please insert batch")
else:
student_obj = student_obj.filter(batch = int(batch))
if branch == "" :
return HttpResponse("please insert branch")
else :
dep_objects = DepartmentInfo.objects.get(name = str(branch))
branch_objects = ExtraInfo.objects.all().select_related('user','department').filter(department = dep_objects)
for i in branch_objects:
branch_list.append(i)
for i in student_obj:
if i.id in branch_list:
student_list.append(i)
else:
continue
curriculum_obj = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = str(branch)).filter(programme = programme)
curriculum_obj_common = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = 'Common').filter(programme = programme)
for i in curriculum_obj:
curriculum_list.append(i)
for i in curriculum_obj_common:
curriculum_list.append(i)
for i in student_list :
x = []
x.append(i.id.user.username)
x.append(i.id.user.first_name+" "+i.id.user.last_name)
for j in curriculum_list :
grade_obj = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = j).filter(student_id = i).first()
if grade_obj :
x.append(grade_obj.grade)
else :
x.append("-")
spi = get_spi(curriculum_list ,x)
x.append(spi)
result_list.append(x)
context = {'batch' : batch,
'branch' : branch,
'programme' : programme,
'course_list' : curriculum_list,
'result_list' : result_list}
pdf = render_to_pdf('academic_procedures/generate_sheet.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' %(programme + batch + branch)
return response
return HttpResponse("PDF could not be generated")
def get_spi(course_list,grade_list):
spi = 0.0
credits = 0
total = 0
earned = 0
y = []
for i in range(2,len(grade_list)) :
x = {
'grade' : grade_list[i],
'credits' : None
}
y.append(x)
for i in range(0,len(course_list)):
y[i]['credits'] = course_list[i].credits
for obj in y:
if obj['grade'] == 'O':
total = total + 10*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'A+':
total = total + 10*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'A':
total = total + 9*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'B+':
total = total + 8*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'B':
total = total + 7*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'C+':
total = total + 6*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'C':
total = total + 5*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'D+':
total = total + 4*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'D':
total = total + 3*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'F':
total = total + 2*obj['credits']
credits = credits+ obj['credits']
earned = earned + obj['credits']
elif obj['grade'] == 'S':
total = total
credits = credits
earned = earned + obj['credits']
elif obj['grade'] == 'X':
total = total
credits = credits
earned = earned
elif obj['grade'] == '-':
total = total
credits = credits
earned = earned
if credits == 0:
return 0.0
spi = total/credits
return spi
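# Worked example: two courses of 4 and 3 credits graded 'A' (9 points) and 'B' (7 points)
# give total = 9*4 + 7*3 = 57 over 7 credits, i.e. an SPI of roughly 8.14.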
def manual_grade_submission(request):
if request.method == 'POST' and request.FILES:
manual_grade_xsl=request.FILES['manual_grade_xsl']
excel = xlrd.open_workbook(file_contents=manual_grade_xsl.read())
sheet=excel.sheet_by_index(0)
course_code = str(sheet.cell(0,1).value)
course_name = str(sheet.cell(1,1).value)
instructor = str(sheet.cell(2,1).value)
batch = int(sheet.cell(3,1).value)
sem = int(sheet.cell(4,1).value)
branch = str(sheet.cell(5,1).value)
programme = str(sheet.cell(6,1).value)
credits = int(sheet.cell(7,1).value)
curriculum_obj = Curriculum.objects.all().select_related().filter(course_code = course_code).filter(batch = batch).filter(programme = programme).first()
if not curriculum_obj:
course_obj = Course.objects.all().filter(course_name = course_name).first()
if not course_obj :
course_obj_create = Course(
course_name = course_name,
course_details = instructor)
course_obj_create.save()
course_obj = Course.objects.all().filter(course_name = course_name).first()
curriculum_obj_create = Curriculum(
course_code = course_code,
course_id = course_obj,
credits = credits,
course_type = 'Professional Core',
programme = programme,
branch = branch,
batch = batch,
sem = sem,
floated = True)
curriculum_obj_create.save()
curriculum_obj = Curriculum.objects.all().select_related().filter(course_code = course_code).filter(batch = batch).filter(programme = programme).first()
marks_check_obj = MarkSubmissionCheck.objects.select_related().all().filter(curr_id = curriculum_obj).first()
if marks_check_obj :
marks_check_obj.submitted = True
marks_check_obj.verified = True
marks_check_obj.save()
elif not marks_check_obj :
marks_check_obj_create = MarkSubmissionCheck(
curr_id = curriculum_obj,
submitted = True,
verified = False,
announced = False)
marks_check_obj_create.save()
for i in range(11,sheet.nrows):
roll = str(int(sheet.cell(i,0).value))
q1 = float(sheet.cell(i,2).value)
mid = float(sheet.cell(i,3).value)
q2 = float(sheet.cell(i,4).value)
end = float(sheet.cell(i,5).value)
others = float(sheet.cell(i,6).value)
grade = str(sheet.cell(i,8).value).strip()
user = get_object_or_404(User, username = roll)
extrainfo = ExtraInfo.objects.select_related('user','department').get(user = user)
dep_objects = DepartmentInfo.objects.get(name = str(branch))
extrainfo.department = dep_objects
extrainfo.save()
extrainfo = ExtraInfo.objects.select_related('user','department').get(user = user)
student_obj = Student.objects.select_related('id','id__user','id__department').get(id = extrainfo)
student_obj.programme = programme
student_obj.batch = batch
student_obj.category = 'GEN'
student_obj.save()
student_obj = Student.objects.select_related('id','id__user','id__department').get(id = extrainfo)
register_obj = Register.objects.all().filter(curr_id = curriculum_obj, student_id = student_obj).first()
if not register_obj:
register_obj_create = Register(
curr_id = curriculum_obj,
year = batch,
student_id = student_obj,
semester = sem)
register_obj_create.save()
register_obj = Register.objects.all().filter(curr_id = curriculum_obj, student_id = student_obj).first()
st_existing = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = student_obj).filter(curr_id = curriculum_obj).first()
if st_existing :
st_existing.grade = str(sheet.cell(i,8).value)
st_existing.save()
else :
p = SemesterMarks(
student_id = student_obj,
q1 = q1,
mid_term = mid,
q2 = q2,
end_term = end,
other = others,
grade = grade,
curr_id = curriculum_obj
)
p.save()
return HttpResponseRedirect('/academic-procedures/')
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
##
def test(request):
br_up = []
st_list = Student.objects.select_related('id','id__user','id__department').all()
for i in st_list :
roll = i.id.user.username
roll = str(roll)
if i.programme.upper() == "B.DES" or i.programme.upper() == "B.TECH":
batch = int(roll[:4])
i.batch = batch
elif i.programme.upper() == "M.DES" or i.programme.upper() == "M.TECH" or i.programme.upper() == "PH.D":
batch = int('20'+roll[:2])
i.batch = batch
br_up.append(i)
Student.objects.bulk_update(br_up,['batch'])
return render(request,'../templates/academic_procedures/test.html',{})
def test_ret(request):
try:
data = render_to_string('academic_procedures/test_render.html',
{}, request)
obj = json.dumps({'d' : data})
return HttpResponse(obj, content_type = 'application/json')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
def Bonafide_form(request):
template = get_template('academic_procedures/bonafide_pdf.html')
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.select_related('user','department').get(id = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
name = ExtraInfo.objects.all().select_related('user','department').filter(id=request.user.username)[0].user
if str(des.designation) == "student":
obj = Student.objects.select_related('id','id__user','id__department').get(id = user_details.id)
context = {
'student_id' : request.user.username,
'degree' : obj.programme.upper(),
'name' : name.first_name +" "+ name.last_name,
'branch' : get_user_branch(user_details),
'purpose' : request.POST['purpose']
}
pdf = render_to_pdf('academic_procedures/bonafide_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=Bonafide.pdf'
return response
return HttpResponse("PDF could not be generated")
# def bonafide(request):
# # if this is a POST request we need to process the form data
# if request.method == 'POST':
# # create a form instance and populate it with data from the request:
# form = BonafideForm(request.POST)
# # check whether it's valid:
# if form.is_valid():
# # process the data in form.cleaned_data as required
# # ...
# # redirect to a new URL:
# print("vaild")
# # if a GET (or any other method) we'll create a blank form
# else:
# form = BonafideForm()
# return render(request, 'bonafide.html', {'form': form})
@login_required
def ACF(request):
stu = Student.objects.get(id=request.user.username)
month = request.POST.get('month')
year= request.POST.get('year')
account = request.POST.get('bank_account')
thesis = request.POST.get('thesis_supervisor')
ta = request.POST.get('ta_supervisor')
appli = request.POST.get('applicability')
FACUL1 = None
FACUL2 = None
message = ""
faculties = ExtraInfo.objects.all().filter(user_type = "faculty")
res = "error"
for j in range(2):
for i in faculties:
checkName = i.user.first_name + " " + i.user.last_name
if j==0 and ta == checkName:
res = "success"
FACUL1 = i
elif j==1 and thesis == checkName:
res = "success"
FACUL2 = i
if (res == "error"):
message = message + "The entered faculty incharge does not exist"
content = {
'status' : res,
'message' : message
}
content = json.dumps(content)
return HttpResponse(content)
faculty_inc1 = get_object_or_404(Faculty, id = FACUL1)
faculty_inc2 = get_object_or_404(Faculty, id = FACUL2)
acf = AssistantshipClaim(student=stu,month=month, year=year, bank_account=account, thesis_supervisor=faculty_inc2, ta_supervisor=faculty_inc1, applicability= appli)
acf.save()
message= message + "Form submitted successfully"
content = {
'status' : res,
'message' : message
}
sender1 = ExtraInfo.objects.get(id = str(FACUL1)[:4]).user
sender2 = ExtraInfo.objects.get(id = str(FACUL2)[:4]).user
content = json.dumps(content)
AssistantshipClaim_faculty_notify(request.user,sender1)
AssistantshipClaim_faculty_notify(request.user,sender2)
return HttpResponse(content)
def update_assistantship(request):
if request.method == 'POST':
r = request.POST.get('remark')
i = request.POST.get('obj_id')
user = ExtraInfo.objects.get(user = request.user)
recipient = User.objects.get(username = "acadadmin")
assistantship_object = AssistantshipClaim.objects.get(id = i)
sender = User.objects.get(username = assistantship_object.student)
if user == assistantship_object.ta_supervisor.id and r == "Satisfactory":
assistantship_object.ta_supervisor_remark=True
elif user == assistantship_object.ta_supervisor.id and r == "Unsatisfactory":
assistantship_object.ta_supervisor_remark=False
if user == assistantship_object.thesis_supervisor.id and r == "Satisfactory":
assistantship_object.thesis_supervisor_remark=True
elif user == assistantship_object.thesis_supervisor.id and r == "Unsatisfactory" :
assistantship_object.thesis_supervisor_remark=False
assistantship_object.save()
if assistantship_object.thesis_supervisor_remark == True and assistantship_object.ta_supervisor_remark == True :
AssistantshipClaim_acad_notify(sender,recipient)
return HttpResponseRedirect('/academic-procedures/main/')
def update_hod_assistantship(request):
if request.method == 'POST':
d = request.POST.get('dict')
dic = json.loads(d)
assisobj = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval = False)
for obj in assisobj:
if str(obj.student) in dic.keys():
obj.hod_approval =True
obj.save()
return HttpResponse('success')
def update_acad_assis(request):
if request.method == 'POST':
d = request.POST.get('dict')
dic = json.loads(d)
aobj= AssistantshipClaim.objects.all()
for obj in aobj:
if obj.acad_approval == False and str(obj.student) in dic.keys():
obj.stipend = dic[str(obj.student)]
obj.acad_approval=True
obj.save()
return HttpResponse('success')
def update_account_assistantship(request):
if request.method == 'POST':
di = request.POST.get('dict')
dic = json.loads(di)
acobj= AssistantshipClaim.objects.all()
for obj in acobj:
if obj.account_approval == False and str(obj.student) in dic.keys():
obj.account_approval = True
obj.save()
recipient = User.objects.get(username = obj.student)
AssistantshipClaim_notify(request.user,recipient,obj.month,obj.year)
return HttpResponse('success')
def assis_stat(request):
if request.method == 'POST':
flag= request.POST.get('flag')
assis_status = Assistantship_status.objects.all()
for obj in assis_status:
if flag == "studenttrue" :
obj.student_status= True
elif flag == "studentfalse":
obj.student_status = False
elif flag == "hodtrue" :
obj.hod_status= True
elif flag == "hodfalse":
obj.hod_status = False
elif flag == "accounttrue" :
obj.account_status= True
elif flag == "accountfalse":
obj.account_status = False
obj.save()
return HttpResponse('success')
@login_required
def MTSGF(request):
if request.method == 'POST':
stu= Student.objects.get(id=request.user.username)
theme = request.POST.get('theme_of_work')
date = request.POST.get('date')
place = request.POST.get('place')
time = request.POST.get('time')
work = request.POST.get('workdone')
contribution = request.POST.get('specificcontri')
future = request.POST.get('futureplan')
report = request.POST.get('briefreport')
publication_submitted = request.POST.get('publicationsubmitted')
publication_accepted = request.POST.get('publicationaccepted')
paper_presented = request.POST.get('paperpresented')
paper_under_review = request.POST.get('paperunderreview')
form=MTechGraduateSeminarReport(student=stu, theme_of_work=theme, date=date, place=place, time=time, work_done_till_previous_sem=work,
specific_contri_in_cur_sem=contribution, future_plan=future, brief_report=report, publication_submitted=publication_submitted,
publication_accepted=publication_accepted, paper_presented=paper_presented, papers_under_review=paper_under_review)
form.save()
message= "Form submitted succesfully"
res="success"
content = {
'status' : res,
'message' : message
}
content = json.dumps(content)
return HttpResponse(content)
@login_required
def PHDPE(request):
if request.method == 'POST':
stu= Student.objects.get(id=request.user.username)
theme = request.POST.get('theme_of_work')
dateandtime = request.POST.get('date')
place = request.POST.get('place')
work = request.POST.get('workdone')
contribution = request.POST.get('specificcontri')
future = request.POST.get('futureplan')
uploadfile = request.POST.get('Attachments')
paper_submitted = request.POST.get('papersubmitted')
paper_published = request.POST.get('paperaccepted')
paper_presented = request.POST.get('paperpresented')
form=PhDProgressExamination(student=stu, theme=theme, seminar_date_time=dateandtime, place=place, work_done=work,
specific_contri_curr_semester=contribution, future_plan=future,details=uploadfile,
papers_published=paper_published, presented_papers=paper_presented,papers_submitted=paper_submitted)
form.save()
message= "Form submitted succesfully"
res="success"
content = {
'status' : res,
'message' : message
}
content = json.dumps(content)
return HttpResponse(content)
def update_mtechsg(request):
if request.method == 'POST':
i = request.POST.get('obj_id')
ql=request.POST.get('quality')
qn=request.POST.get('quantity')
gr=request.POST.get('grade')
pr=request.POST.get('panel_report')
sg=request.POST.get('suggestion')
mtech_object=MTechGraduateSeminarReport.objects.get(id = i)
mtech_object.quality_of_work=ql
mtech_object.quantity_of_work=qn
mtech_object.Overall_grade=gr
mtech_object.panel_report=pr
mtech_object.suggestion=sg
mtech_object.save()
return HttpResponseRedirect('/academic-procedures/main/')
def update_phdform(request):
if request.method == 'POST':
i = request.POST.get('obj_id')
ql = request.POST.get('quality')
qn = request.POST.get('quantity')
gr = request.POST.get('grade')
continuationa = request.POST.get('continuationa')
enhancementa = request.POST.get('enhancementa')
completionperiod = request.POST.get('completionperiod')
pr = request.POST.get('pr')
annualp = request.POST.get('annualp')
sugg = request.POST.get('sugg')
phd_object = PhDProgressExamination.objects.get(id = i)
phd_object.quality_of_work=ql
phd_object.quantity_of_work=qn
phd_object.Overall_grade=gr
phd_object.continuation_enhancement_assistantship=continuationa
phd_object.enhancement_assistantship=enhancementa
phd_object.completion_period=completionperiod
phd_object.panel_report=pr
phd_object.annual_progress_seminar=annualp
phd_object.commments=sugg
phd_object.save()
content="success"
content = json.dumps(content)
return HttpResponse(content)
def update_dues(request):
if request.method == "POST":
i = request.POST.get('obj_id')
md =int(request.POST.get('md'))
hd = int(request.POST.get('hd'))
ld = int(request.POST.get('ld'))
pd = int(request.POST.get('pd'))
ad = int(request.POST.get('ad'))
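        # md/hd/ld/pd/ad are signed adjustments posted by the form: positive values
        # add to the corresponding due, negative values pay it off, and a negative
        # adjustment larger than the outstanding due is rejected below.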
dues_object = Dues.objects.get(id = i)
message = ""
if md < 0 and -1*md > dues_object.mess_due :
message = message + "Subtracting more value than existing mess due<br>"
if hd < 0 and -1*hd > dues_object.hostel_due :
message = message + "Subtracting more value than existing hostel due<br>"
if ld < 0 and -1*ld > dues_object.library_due :
message = message + "Subtracting more value than existing library due<br>"
if pd < 0 and -1*pd > dues_object.placement_cell_due :
message = message + "Subtracting more value than existing placement cell due<br>"
if ad < 0 and -1*ad > dues_object.academic_due :
message = message + "Subtracting more value than existing academic due<br>"
if (not message):
message = "success"
if message != "success":
content = json.dumps(message)
return HttpResponse(content)
md += dues_object.mess_due
hd += dues_object.hostel_due
ld += dues_object.library_due
pd += dues_object.placement_cell_due
ad += dues_object.academic_due
dues_object.mess_due = md
dues_object.hostel_due = hd
dues_object.library_due = ld
dues_object.placement_cell_due = pd
dues_object.academic_due = ad
dues_object.save()
content = json.dumps(message)
return HttpResponse(content)
def mdue(request):
if request.method == 'POST':
rollno = request.POST.get('rollno')
year = request.POST.get('year')
month = request.POST.get('month')
amount = int(request.POST.get('amount'))
desc = request.POST.get('desc')
amount1 = amount
if desc == "due":
amount1 = -1*amount
Dues_mess = amount
student = Student.objects.get(id = rollno)
messdue_list=MessDue.objects.all().filter(student = student)
duesobj = Dues.objects.get(student_id = student)
if(messdue_list):
new_remaining = messdue_list[len(messdue_list)-1].remaining_amount + amount1
Dues_mess = new_remaining
messdueobj = MessDue(student = student, month = month, year = year,description = desc, amount = amount, remaining_amount = new_remaining)
else:
messdueobj=MessDue(student = student, month = month, year = year,description = desc, amount = amount, remaining_amount = amount1)
messdueobj.save()
if Dues_mess >= 0 :
duesobj.mess_due = 0
else :
duesobj.mess_due = -1*Dues_mess
duesobj.save()
content = json.dumps("success")
return HttpResponse(content)
|
the-stack_0_1193 | from typing import Literal, List, Tuple, Union, Optional, Dict
import numpy as np
import scipy.linalg as la
from scipy import stats
Indices = Union[str, List[str]]
def std_basis_vector(size: int, index: int,
shape: Literal["row", "col", "flat"] = "col"):
"""Create a vector of {size} values where all values are zero except at
position {index} which is one. The shape can be specified as 'row', 'col',
or 'flat' to generate vectors of shape (1, {size}), ({size}, 1), or
({size}, ) respectively. The default shape is 'col'."""
e = np.zeros(size)
e[index] = 1
if shape.lower() == "col":
e = np.reshape(e, (size, 1))
elif shape.lower() == "row":
e = np.reshape(e, (1, size))
elif shape.lower() == "flat":
pass
else:
raise ValueError(f"Cannot understand vector shape: '{shape}', use "
f"'row', 'col', or 'flat'")
return(e)
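# Illustrative usage (not part of the original module):
#   std_basis_vector(4, 2, "flat") -> array([0., 0., 1., 0.])
#   std_basis_vector(3, 0, "col")  -> array([[1.], [0.], [0.]])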
class GenericDiagnosisMethod:
def __init__(self) -> None:
self.sample_size = 0
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
"""Return the error contribution of a variable in a sample"""
raise NotImplementedError
def expectation(self, variable_index: int) -> float:
"""Return the expected error contribution of a variable"""
raise NotImplementedError
def limits(self, variable_index: int,
alpha: float) -> Tuple[float, float]:
"""Return the lower and upper limits of a variable at a given alpha"""
e_contrib = self.expectation(variable_index)
lower = stats.chi2.ppf(alpha, 1) * e_contrib
upper = stats.chi2.ppf(1 - alpha, 1) * e_contrib
return(lower, upper)
def rel_contribution(self, sample: np.ndarray,
variable_index: int) -> float:
"""Return the relative error contribution of a variable in a sample"""
c = self.contribution(sample, variable_index)
E_c = self.expectation(variable_index)
return(c / E_c)
def all_contributions(self, sample: np.ndarray) -> np.ndarray:
"""Return the error contributions for all variables in a sample"""
contribs = np.zeros(self.sample_size)
for i in range(self.sample_size):
contribs[i] = self.contribution(sample, i)
return(contribs)
def all_rel_contributions(self, sample: np.ndarray) -> np.ndarray:
"""Return the relative error contributions for all variables in a
sample"""
rel_contribs = np.zeros(self.sample_size)
for i in range(self.sample_size):
rel_contribs[i] = self.rel_contribution(sample, i)
return(rel_contribs)
def all_expectations(self) -> np.ndarray:
"""Return the expected error contribution for all variables"""
e_contribs = np.zeros(self.sample_size)
for i in range(self.sample_size):
e_contribs[i] = self.expectation(i)
return(e_contribs)
def all_limits(self, alpha: float) -> np.ndarray:
"""Return the lower and upper limits for all variables at a given
alpha"""
lower_upper_limits = np.zeros((self.sample_size, 2))
for i in range(self.sample_size):
lower_upper_limits[i] = self.limits(i, alpha)
return(lower_upper_limits)
class CDC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Complete Decomposition Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sqrt_M = np.real(la.fractional_matrix_power(M, 0.5))
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
contrib = (e_i.T @ self.sqrt_M @ sample) ** 2
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
e_contrib = e_i.T @ self.S @ self.M @ e_i
return(e_contrib)
class PDC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Partial Decomposition Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(sample.size, variable_index, 'col')
contrib = sample.T @ self.M @ e_i @ e_i.T @ sample
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
e_contrib = e_i.T @ self.S @ self.M @ e_i
return(e_contrib)
def limits(self, variable_index: int,
alpha: float) -> Tuple[float, float]:
e_contrib = self.expectation(variable_index)
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
stdv_contrib = ((e_contrib) ** 2
+ e_i.T @ self.S @ self.M
@ self.M @ e_i @ e_i.T @ self.S @ e_i) ** 0.5
# Assumes n>=30 to use normal distribution rather than t distribution
lower, upper = stats.norm.interval(alpha, e_contrib, stdv_contrib)
return(lower, upper)
class DC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Diagonal Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
contrib = sample.T @ e_i @ e_i.T @ self.M @ e_i @ e_i.T @ sample
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.M.shape[1], variable_index, 'col')
e_contrib = e_i.T @ self.S @ e_i @ e_i.T @ self.M @ e_i
return(e_contrib)
class RBC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Reconstruction Based Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
contrib = (e_i.T @ self.M @ sample) ** 2 / (e_i.T @ self.M @ e_i)
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
e_contrib = (e_i.T @ self.M @ self.S @ self.M @ e_i
/ (e_i.T @ self.M @ e_i))
return(e_contrib)
class GenericFaultDiagnosisModel:
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Generic Fault Diagnosis Model for any test statistic"""
if S is not None:
if not (M.shape[0] == M.shape[1] == S.shape[0] == S.shape[1]):
raise ValueError("M and S need to be [n x n] matrices")
else:
if not (M.shape[0] == M.shape[1]):
raise ValueError("M needs to be an [n x n] matrix")
self.diagnosis_methods = {
"CDC": CDC(M, S),
"PDC": PDC(M, S),
"DC": DC(M, S),
"RBC": RBC(M, S)
}
self.sample_size = M.shape[0]
indices = list(self.diagnosis_methods.keys())
rel_indices = [f"r{i}" for i in indices]
self.valid_indices = indices + rel_indices
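        # Valid index names are the diagnosis methods ('CDC', 'PDC', 'DC', 'RBC')
        # plus their 'r'-prefixed variants (e.g. 'rCDC'), which request relative
        # contributions (a contribution divided by its expected value).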
def validate_indices(self, indices: Indices) -> List[str]:
"""Validate list of requested indices"""
if type(indices) == str:
indices = [indices]
for ind in indices:
if ind not in self.valid_indices:
raise ValueError(f"No contribution index {ind} exists")
return(indices)
def validate_sample(self, sample: np.ndarray) -> np.ndarray:
"""Validate passed sample"""
if not isinstance(sample, np.ndarray):
raise TypeError("Expected numpy array inputs for sample")
if not (self.sample_size == sample.size):
raise ValueError("M needs to be an [n x n] matrix and x needs to "
"be an [n x 1] vector")
sample = np.reshape(sample, (-1, 1)) # Makes sure it's a column vector
return(sample)
def get_contributions(self, sample: np.ndarray,
indices: Indices = ['CDC']) -> Dict[str, np.ndarray]:
"""Get the fault contributions for the sample for each index passed"""
indices = self.validate_indices(indices)
sample = self.validate_sample(sample)
index_values = dict()
for ind in indices:
if ind[0] == 'r':
fd_method = self.diagnosis_methods[ind[1:]]
index_values[ind] = fd_method.all_rel_contributions(sample)
else:
fd_method = self.diagnosis_methods[ind]
index_values[ind] = fd_method.all_contributions(sample)
return(index_values)
def get_limits(self, alpha: float = 0.05,
indices: Indices = ['CDC']) -> Dict[str, np.ndarray]:
"""Get the lower and upper control limits for any non-relative
contribution indices"""
indices = self.validate_indices(indices)
limits = dict()
for ind in indices:
if ind[0] == 'r':
raise ValueError("Control limits are not defined for relative "
"contribution indices")
else:
fd_method = self.diagnosis_methods[ind]
limits[ind] = fd_method.all_limits(alpha)
return(limits)
if __name__ == "__main__":
import random
print("Module ran as script: Running example fault diagnosis with PCA")
def example_process_model(num_samples):
A = [
[-0.3441, 0.4815, 0.6637],
[-0.2313, -0.5936, 0.3545],
[-0.5060, 0.2495, 0.0739],
[-0.5552, -0.2405, -0.1123],
[-0.3371, 0.3822, -0.6115],
[-0.3877, -0.3868, -0.2045]
]
A = np.asarray(A)
num_vars = 6
# Generate inputs t
t1 = 2.0 * stats.uniform.rvs(size=num_samples)
t2 = 1.6 * stats.uniform.rvs(size=num_samples)
t3 = 1.2 * stats.uniform.rvs(size=num_samples)
t = np.asarray([t1, t2, t3])
# Generate noise
noise = [None] * num_vars
for i in range(num_vars):
noise[i] = stats.norm.rvs(size=num_samples, scale=0.2)
noise = np.asarray(noise)
# Create samples
X = A @ t + noise
return(X)
num_samples = 3000
num_faults = 2000
num_vars = 6
X = example_process_model(num_samples)
""" PCA Model """
# Shift to 0 mean
xmean = np.mean(X, 1).reshape((-1, 1))
X = X - xmean
# Scale to unit variance
xstd = np.std(X, 1).reshape((-1, 1))
X = X / xstd
assert np.allclose(np.mean(X, 1), 0)
assert np.allclose(np.std(X, 1), 1)
S = np.cov(X)
Lam, P = la.eig(S)
Lam = np.real_if_close(Lam)
order = np.argsort(-1 * Lam)
Lam = Lam[order]
P = P[:, order]
# Plot cumulative variance of eigenvectors
# cum_eig = np.cumsum(Lam) / np.sum(Lam)
# plt.plot(cum_eig)
# plt.show()
principal_vectors = 3
alpha = 0.01 # Confidence = (1 - alpha) x 100%
P_resid = P[:, principal_vectors:]
Lam_resid = Lam[principal_vectors:]
P = P[:, :principal_vectors]
Lam = Lam[:principal_vectors]
D = P @ np.diag(Lam ** -1) @ P.T
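    # D = P diag(Lam)^-1 P.T restricts the inverse covariance to the retained
    # principal components; it is used below to compute the T^2 statistic.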
# Generate faults
faults = np.zeros((num_vars, num_faults))
for fault_sample in range(num_faults):
fault_var = random.sample(range(num_vars), 1)[0]
faults[fault_var, fault_sample] = 5.0 * stats.uniform.rvs()
X_faulty = example_process_model(num_faults) + faults
X_faulty = (X_faulty - xmean) / xstd
T_sqr = [0] * num_faults
for i in range(num_faults):
T_sqr[i] = X_faulty[:, i].T @ D @ X_faulty[:, i]
T_sqr_limit = [stats.chi2.ppf(1 - alpha, principal_vectors)] * num_faults
detected_faults = []
for i in range(num_faults):
if T_sqr[i] > T_sqr_limit[i]:
detected_faults.append(i)
fault_detect_rate = len(detected_faults) / num_faults * 100
print(f"T^2 Detected Faults: {fault_detect_rate:.2f} %")
# plt.plot(T_sqr, label="\$T^2\$")
# plt.plot(T_sqr_limit, label="Limit")
# plt.legend()
# plt.show()
all_indices = ['CDC', 'rCDC', 'PDC', 'rPDC', 'DC', 'rDC', 'RBC', 'rRBC']
FDModel = GenericFaultDiagnosisModel(D, S)
cont_rates = dict()
for ind in all_indices:
        # Tracks counts of correct, false, and missed diagnoses (the missed slot is currently unused)
cont_rates[ind] = [0, 0, 0]
for i in detected_faults:
# Get index and limit for each fault sample
cont = FDModel.get_contributions(X_faulty[:, i], all_indices)
for ind in all_indices:
highest_contrib = np.argmax(cont[ind])
if highest_contrib == np.argmax(faults[:, i]):
cont_rates[ind][0] += 1
else:
cont_rates[ind][1] += 1
for ind in all_indices:
diag_rate = cont_rates[ind][0] / len(detected_faults) * 100
false_diag_rate = cont_rates[ind][1] / len(detected_faults) * 100
# missed_rate = cont_rates[ind][2] / len(detected_faults) * 100
print("--------------------------------")
print(f"{ind} correct diagnosis: {diag_rate:.2f} %")
print(f"{ind} false diagnosis: {false_diag_rate:.2f} %")
# print(f"{ind} missed diagnosis: {missed_rate:.2f} %")
|
the-stack_0_1194 | import sys
if (sys.version_info[0] == 2 and sys.version_info[:2] >= (2,7)) or \
(sys.version_info[0] == 3 and sys.version_info[:2] >= (3,2)):
import unittest
else:
import unittest2 as unittest
import subprocess
import shutil
import time
import os
import signal
from distutils.sysconfig import get_config_var
import py2app
import platform
DIR_NAME=os.path.dirname(os.path.abspath(__file__))
class TestBasicPlugin (unittest.TestCase):
plugin_dir = os.path.join(DIR_NAME, 'plugin_with_scripts')
py2app_args = []
# Basic setup code
#
# The code in this block needs to be moved to
# a base-class.
@classmethod
def setUpClass(cls):
try:
if os.path.exists(os.path.join(cls.plugin_dir, 'build')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'build'))
if os.path.exists(os.path.join(cls.plugin_dir, 'dist')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'dist'))
cmd = [ sys.executable, 'setup.py', 'py2app'] + cls.py2app_args
env=os.environ.copy()
pp = os.path.dirname(os.path.dirname(py2app.__file__))
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = pp + ':' + env['PYTHONPATH']
else:
env['PYTHONPATH'] = pp
if 'LANG' not in env:
env['LANG'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd,
cwd = cls.plugin_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
env=env)
lines = p.communicate()[0]
if p.wait() != 0:
print (lines)
raise AssertionError("Creating basic_plugin bundle failed")
p = subprocess.Popen([
'xcode-select', '-print-path'
], stdout = subprocess.PIPE)
lines = p.communicate()[0]
xit = p.wait()
            if xit != 0:
raise AssertionError("Fetching Xcode root failed")
root = lines.strip()
if sys.version_info[0] != 2:
root = root.decode('utf-8')
if platform.mac_ver()[0] < '10.7.':
cc = [get_config_var('CC')]
env = dict(os.environ)
env['MACOSX_DEPLOYMENT_TARGET'] = get_config_var('MACOSX_DEPLOYMENT_TARGET')
else:
cc = ['xcrun', 'clang']
env = dict(os.environ)
p = subprocess.Popen(cc
+ get_config_var('LDFLAGS').split() + get_config_var('CFLAGS').split() + [
'-o', 'bundle_loader', os.path.join(DIR_NAME, 'bundle_loader.m'),
'-framework', 'Foundation'],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
lines = p.communicate()[0]
if p.wait() != 0:
print (lines)
raise AssertionError("Creating bundle_loader failed")
except:
cls.tearDownClass()
raise
@classmethod
def tearDownClass(cls):
if os.path.exists('bundle_loader'):
os.unlink('bundle_loader')
if os.path.exists(os.path.join(cls.plugin_dir, 'build')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'build'))
if os.path.exists(os.path.join(cls.plugin_dir, 'dist')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'dist'))
def start_app(self):
# Start the test app, return a subprocess object where
# stdin and stdout are connected to pipes.
cmd = ['./bundle_loader',
os.path.join(self.plugin_dir,
'dist/BasicPlugin.bundle'),
]
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
)
#stderr=subprocess.STDOUT)
return p
def wait_with_timeout(self, proc, timeout=10):
for i in range(timeout):
x = proc.poll()
if x is None:
time.sleep(1)
else:
return x
os.kill(proc.pid, signal.SIGKILL)
return proc.wait()
def run_script(self, name):
path = os.path.join(
self.plugin_dir,
'dist/BasicPlugin.bundle/Contents/MacOS/%s'%(name,))
p = subprocess.Popen([path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
)
#stderr=subprocess.STDOUT)
return p
#
# End of setup code
#
def test_helper1(self):
p = self.run_script('helper1')
lines = p.communicate()[0]
p.wait()
self.assertEqual(lines, b'Helper 1\n')
def test_helper2(self):
p = self.run_script('helper2')
lines = p.communicate()[0]
p.wait()
self.assertEqual(lines, b'Helper 2\n')
def test_basic_start(self):
p = self.start_app()
v = p.stdout.readline()
self.assertFalse(v.startswith(b'** Cannot load bundle'))
p.stdin.write('BasicPlugin.bundle:test startup\n'.encode('latin1'))
p.stdin.flush()
v = p.stdout.readline()
self.assertEqual(v.strip(), b'+ test startup')
p.stdin.close()
p.stdout.close()
exit = self.wait_with_timeout(p)
self.assertEqual(exit, 0)
class TestBasicAliasPlugin (TestBasicPlugin):
py2app_args = [ '--alias' ]
class TestBasicSemiStandalonePlugin (TestBasicPlugin):
py2app_args = [ '--semi-standalone' ]
class TestBasicPluginUnicodePath (TestBasicPlugin):
if sys.version_info[0] == 2:
plugin_dir = os.path.join(DIR_NAME, 'basic_plugin ' + unichr(2744).encode('utf-8'))
else:
plugin_dir = os.path.join(DIR_NAME, 'basic_plugin ' + chr(2744))
@classmethod
def setUpClass(cls):
try:
if os.path.exists(cls.plugin_dir):
shutil.rmtree(cls.plugin_dir)
assert not os.path.exists(cls.plugin_dir)
shutil.copytree(TestBasicPlugin.plugin_dir, cls.plugin_dir)
super(TestBasicPluginUnicodePath, cls).setUpClass()
except:
if os.path.exists(cls.plugin_dir):
shutil.rmtree(cls.plugin_dir)
raise
@classmethod
def tearDownClass(cls):
if os.path.exists(cls.plugin_dir):
shutil.rmtree(cls.plugin_dir)
super(TestBasicPluginUnicodePath, cls).tearDownClass()
class TestBasicAliasPluginUnicodePath (TestBasicPluginUnicodePath):
py2app_args = [ '--alias' ]
class TestBasicSemiStandalonePluginUnicodePath (TestBasicPluginUnicodePath):
py2app_args = [ '--semi-standalone' ]
if __name__ == "__main__":
unittest.main()
|
the-stack_0_1195 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 15:59:51 2019
@author: 939035
Classifiers
"""
# %% 1)Importing packages
import seaborn as sns
import pandas as pd
import numpy as np
# Handling SSL error when trying to connect from the office!
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Handing sns not showing plot error
import matplotlib.pyplot as plt
# ML models
# kernel SVM
from sklearn.svm import SVC
# RandomForestModel
from sklearn.ensemble import RandomForestClassifier
# MLPClassifier (neural_network)
from sklearn.neural_network import MLPClassifier
# Gradient Boosting Tree
from sklearn.ensemble import GradientBoostingClassifier
# Training the model (speed)
# Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
# Logistic Regression
from sklearn.linear_model import LogisticRegression
# Data Is too Large
##Import Gaussian Naive Bayes model
from sklearn.naive_bayes import GaussianNB
# other random ones
# KNN
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
class machine_learning_classifier():
''' A class that contains a classifier loop '''
def __init__(self):
# Variables to alter
self.df = sns.load_dataset('iris')
# Give the string of the y variable
self.y_var = 'species'
# Do not alter
self.df_feat = pd.DataFrame()
        self.dummies = pd.DataFrame()
def inital_variable_removal(self, inital_vars_to_drop):
# Dropping duplicate variable e.g qualitative variable Class and quantitative equivalent pclass
self.df = self.df.drop(inital_vars_to_drop, axis=1)
return self.df
def remove_na(self):
# Dropping nan or na rows
self.df = self.df.dropna().reset_index().drop('index', axis=1)
return self.df
def exploring_data(self, y_var_category, var1, var2):
# ## Basic Pairplot
pp = sns.pairplot(self.df, hue=self.y_var)
plt.show()
        # creating kde plot of sepal_length vs sepal_width for the setosa species of flower
kde = self.df[self.df[self.y_var] == y_var_category]
        kdeplot = sns.kdeplot(kde[var1], kde[var2], cmap='plasma', shade=True
, shade_lowest=False)
plt.show()
return pp, kdeplot
def creating_dummies(self):
        # 4) Creating dummies from qualitative variables (optional)
self.dummies = pd.get_dummies(self.df[self.qualitative_vars])
### dropping qualitative variables before standardising
self.df = self.df.drop(self.qualitative_vars, axis=1)
return self.df
def standardising(self):
        # Splitting the DataFrame into the dummies and then the standard variables
from sklearn.preprocessing import StandardScaler
# standardising the data to the same scale
# why - larger scale data will have a greater effect on the results
scaler = StandardScaler()
# fitting the data minus the dependent variable
scaler.fit(self.df.drop(self.y_var, axis=1))
        # creating the scaled features (returns an array)
scaled_features = scaler.transform(self.df.drop(self.y_var, axis=1))
# Creating a df of the array'd scaled features
self.df_feat = pd.DataFrame(scaled_features, columns=self.df.drop(self.y_var, axis=1).columns)
return self.df_feat
def readding_dummies(self):
# %% 6) Re adding dummies after standardising
        ## adding dummies back on after standardisation of the rest of the data
self.df_feat = pd.concat([self.df_feat, self.dummies], axis=1)
return self.df_feat
def correlations(self):
# %% 7) Find correlation among variables.
# after standardising
correlation_matrix = self.df_feat.corr()
return correlation_matrix
def dropping_highly_correlated_variables(self, list_of_vars_to_drop):
self.df_feat = self.df_feat.drop(list_of_vars_to_drop, axis=1)
return self.df_feat
def setting_y(self):
# Setting X and y
self.y = self.df[self.y_var]
return self.y
def feature_selection(self):
# https://scikit-learn.org/stable/modules/feature_selection.html
import sklearn.feature_selection
def model_loop(self):
# model selection by cross validation.
from sklearn.model_selection import cross_val_score
models = [SVC(),
RandomForestClassifier(),
MLPClassifier(),
GradientBoostingClassifier(),
DecisionTreeClassifier(),
LogisticRegression(),
GaussianNB(),
KNeighborsClassifier()]
classification_results = pd.DataFrame(columns=['model',
                                                       'cross_val_scores',
'cvs_mean'
])
for m in models:
model = m
cvs = cross_val_score(model, self.df_feat, self.y, cv=10, scoring='accuracy')
cvsm = cvs.mean()
classification_results = classification_results.append({'model': m,
                                                                    'cross_val_scores': cvs,
'cvs_mean': cvsm,
}
, ignore_index=True)
return classification_results
def model_tuning(self):
param_grid = {'C': [0.1, 1, 10, 100],
'gamma': [1, 0.1, 0.01, 0.001]}
grid = GridSearchCV(SVC(), param_grid, verbose=2)
grid.fit(self.df_feat, self.y)
grid_predictions = grid.predict(self.df_feat)
cm = (confusion_matrix(self.y, grid_predictions))
cr = (classification_report(self.y, grid_predictions))
return cm, cr
def main():
mlc = machine_learning_classifier()
# mlc.inital_variable_removal(inital_vars_to_drop = [''])
mlc.remove_na()
mlc.exploring_data(y_var_category = 'setosa', var1 = 'sepal_width', var2 = 'sepal_length')
mlc.standardising()
correlation_matrix = mlc.correlations()
# mlc.dropping_highly_correlated_variables(list_of_vars_to_drop=['who_man'])
mlc.setting_y()
classification_results = mlc.model_loop()
confusion_matrix, classification_report = mlc.model_tuning() |
the-stack_0_1197 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from typing import Dict, Iterable, Optional, Set, TypeVar
import numpy as np
from compiler_gym.datasets.benchmark import Benchmark
from compiler_gym.datasets.dataset import Dataset
from compiler_gym.datasets.uri import BENCHMARK_URI_RE, resolve_uri_protocol
T = TypeVar("T")
def round_robin_iterables(iters: Iterable[Iterable[T]]) -> Iterable[T]:
"""Yield from the given iterators in round robin order."""
# Use a queue of iterators to iterate over. Repeatedly pop an iterator from
# the queue, yield the next value from it, then put it at the back of the
# queue. The iterator is discarded once exhausted.
iters = deque(iters)
while len(iters) > 1:
it = iters.popleft()
try:
yield next(it)
iters.append(it)
except StopIteration:
pass
# Once we have only a single iterator left, return it directly rather
# continuing with the round robin.
if len(iters) == 1:
yield from iters.popleft()
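# Illustrative behaviour (not part of the original module):
#   round_robin_iterables([iter("ab"), iter("123")]) yields 'a', '1', 'b', '2', '3'.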
class Datasets:
"""A collection of datasets.
This class provides a dictionary-like interface for indexing and iterating
over multiple :class:`Dataset <compiler_gym.datasets.Dataset>` objects.
Select a dataset by URI using:
>>> env.datasets["benchmark://cbench-v1"]
Check whether a dataset exists using:
>>> "benchmark://cbench-v1" in env.datasets
True
Or iterate over the datasets using:
>>> for dataset in env.datasets:
... print(dataset.name)
benchmark://cbench-v1
benchmark://github-v0
benchmark://npb-v0
To select a benchmark from the datasets, use :meth:`benchmark()`:
>>> env.datasets.benchmark("benchmark://a-v0/a")
Use the :meth:`benchmarks()` method to iterate over every benchmark in the
datasets in a stable round robin order:
>>> for benchmark in env.datasets.benchmarks():
... print(benchmark)
benchmark://cbench-v1/1
benchmark://github-v0/1
benchmark://npb-v0/1
benchmark://cbench-v1/2
...
If you want to exclude a dataset, delete it:
>>> del env.datasets["benchmark://b-v0"]
"""
def __init__(
self, datasets: Iterable[Dataset],
):
self._datasets: Dict[str, Dataset] = {d.name: d for d in datasets}
self._visible_datasets: Set[str] = set(
name for name, dataset in self._datasets.items() if not dataset.deprecated
)
def datasets(self, with_deprecated: bool = False) -> Iterable[Dataset]:
"""Enumerate the datasets.
Dataset order is consistent across runs.
:param with_deprecated: If :code:`True`, include datasets that have been
marked as deprecated.
:return: An iterable sequence of :meth:`Dataset
<compiler_gym.datasets.Dataset>` instances.
"""
datasets = self._datasets.values()
if not with_deprecated:
datasets = (d for d in datasets if not d.deprecated)
yield from sorted(datasets, key=lambda d: (d.sort_order, d.name))
def __iter__(self) -> Iterable[Dataset]:
"""Iterate over the datasets.
Dataset order is consistent across runs.
Equivalent to :meth:`datasets.datasets()
<compiler_gym.datasets.Dataset.datasets>`, but without the ability to
iterate over the deprecated datasets.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:return: An iterable sequence of :meth:`Dataset
<compiler_gym.datasets.Dataset>` instances.
"""
return self.datasets()
def dataset(self, dataset: str) -> Dataset:
"""Get a dataset.
Return the corresponding :meth:`Dataset
<compiler_gym.datasets.Dataset>`. Name lookup will succeed whether or
not the dataset is deprecated.
:param dataset: A dataset name.
:return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
:raises LookupError: If :code:`dataset` is not found.
"""
dataset_name = resolve_uri_protocol(dataset)
if dataset_name not in self._datasets:
raise LookupError(f"Dataset not found: {dataset_name}")
return self._datasets[dataset_name]
def __getitem__(self, dataset: str) -> Dataset:
"""Lookup a dataset.
:param dataset: A dataset name.
:return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
:raises LookupError: If :code:`dataset` is not found.
"""
return self.dataset(dataset)
def __setitem__(self, key: str, dataset: Dataset):
"""Add a dataset to the collection.
:param key: The name of the dataset.
:param dataset: The dataset to add.
"""
dataset_name = resolve_uri_protocol(key)
self._datasets[dataset_name] = dataset
if not dataset.deprecated:
self._visible_datasets.add(dataset_name)
def __delitem__(self, dataset: str):
"""Remove a dataset from the collection.
This does not affect any underlying storage used by dataset. See
:meth:`uninstall() <compiler_gym.datasets.Datasets.uninstall>` to clean
up.
:param dataset: The name of a dataset.
:return: :code:`True` if the dataset was removed, :code:`False` if it
was already removed.
"""
dataset_name = resolve_uri_protocol(dataset)
if dataset_name in self._visible_datasets:
self._visible_datasets.remove(dataset_name)
del self._datasets[dataset_name]
def __contains__(self, dataset: str) -> bool:
"""Returns whether the dataset is contained."""
try:
self.dataset(dataset)
return True
except LookupError:
return False
def benchmarks(self, with_deprecated: bool = False) -> Iterable[Benchmark]:
"""Enumerate the (possibly infinite) benchmarks lazily.
Benchmarks order is consistent across runs. One benchmark from each
dataset is returned in round robin order until all datasets have been
fully enumerated. The order of :meth:`benchmarks()
<compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
<compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:param with_deprecated: If :code:`True`, include benchmarks from
datasets that have been marked deprecated.
:return: An iterable sequence of :class:`Benchmark
<compiler_gym.datasets.Benchmark>` instances.
"""
return round_robin_iterables(
(d.benchmarks() for d in self.datasets(with_deprecated=with_deprecated))
)
def benchmark_uris(self, with_deprecated: bool = False) -> Iterable[str]:
"""Enumerate the (possibly infinite) benchmark URIs.
Benchmark URI order is consistent across runs. URIs from datasets are
returned in round robin order. The order of :meth:`benchmarks()
<compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
<compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:param with_deprecated: If :code:`True`, include benchmarks from
datasets that have been marked deprecated.
:return: An iterable sequence of benchmark URI strings.
"""
return round_robin_iterables(
(d.benchmark_uris() for d in self.datasets(with_deprecated=with_deprecated))
)
def benchmark(self, uri: str) -> Benchmark:
"""Select a benchmark.
Returns the corresponding :class:`Benchmark
<compiler_gym.datasets.Benchmark>`, regardless of whether the containing
dataset is installed or deprecated.
:param uri: The URI of the benchmark to return.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
uri = resolve_uri_protocol(uri)
match = BENCHMARK_URI_RE.match(uri)
if not match:
raise ValueError(f"Invalid benchmark URI: '{uri}'")
dataset_name = match.group("dataset")
dataset = self._datasets[dataset_name]
return dataset.benchmark(uri)
def random_benchmark(
self, random_state: Optional[np.random.Generator] = None
) -> Benchmark:
"""Select a benchmark randomly.
First, a dataset is selected uniformly randomly using
:code:`random_state.choice(list(datasets))`. The
:meth:`random_benchmark()
<compiler_gym.datasets.Dataset.random_benchmark>` method of that dataset
is then called to select a benchmark.
Note that the distribution of benchmarks selected by this method is not
biased by the size of each dataset, since datasets are selected
uniformly. This means that datasets with a small number of benchmarks
will be overrepresented compared to datasets with many benchmarks. To
correct for this bias, use the number of benchmarks in each dataset as
a weight for the random selection:
>>> rng = np.random.default_rng()
>>> finite_datasets = [d for d in env.datasets if len(d) != math.inf]
>>> dataset = rng.choice(
finite_datasets,
p=[len(d) for d in finite_datasets]
)
>>> dataset.random_benchmark(random_state=rng)
:param random_state: A random number generator. If not provided, a
default :code:`np.random.default_rng()` is used.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
random_state = random_state or np.random.default_rng()
dataset = random_state.choice(list(self._visible_datasets))
return self[dataset].random_benchmark(random_state=random_state)
@property
def size(self) -> int:
return len(self._visible_datasets)
def __len__(self) -> int:
"""The number of datasets in the collection."""
return self.size
|
the-stack_0_1199 | #!/usr/bin/env python
# coding: utf-8
import argparse
import os
import ray
from dotenv import load_dotenv
from tqdm import tqdm
from birdfsd_yolov5.label_studio_helpers.utils import get_all_projects_tasks
from birdfsd_yolov5.model_utils.handlers import catch_keyboard_interrupt
from birdfsd_yolov5.model_utils.utils import api_request
@ray.remote
def patch_anno(task, _from, to):
for _entry in task['annotations']:
entry_id = _entry['id']
for entry in _entry['result']:
value = entry['value']
if not _from == value['rectanglelabels'][0]:
print(f'Could not find the label `{_from}` in task '
f'`{task["id"]}`! Skipping...')
return
entry['value']['rectanglelabels'] = [to]
url = f'{os.environ["LS_HOST"]}/api/annotations/{entry_id}/'
api_request(url, method='patch', data=_entry)
return
@ray.remote
def patch_pred(pred, _from, to):
for result in pred['result']:
label = result['value']['rectanglelabels']
if not _from == label[0]:
print(f'Could not find the label `{_from}` in pred '
f'`{pred["id"]}`! Skipping...')
return
result['value']['rectanglelabels'] = [to]
url = f'{os.environ["LS_HOST"]}/api/predictions/{pred["id"]}/'
api_request(url, method='patch', data=pred)
return
def check_if_label_exists_in_task_annotations(task, label):
labels = []
if not task.get('annotations'):
return
results = sum([x['result'] for x in task['annotations']], [])
for result in results:
labels.append(result['value']['rectanglelabels'])
labels = sum(labels, [])
if label in labels:
return task
return
def opts() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('-f',
'--from-label',
help='Label to find and change (i.e., old label)',
type=str,
required=True)
parser.add_argument(
'-t',
'--to-label',
help='Label to use instead of the old label (i.e., new label)',
type=str,
required=True)
return parser.parse_args()
def patch(from_label, to_label):
catch_keyboard_interrupt()
# --------------------------------------------------------------
tasks = get_all_projects_tasks()
tasks_with_label = []
for task in tqdm(tasks, desc='Scan tasks'):
task = check_if_label_exists_in_task_annotations(task,
label=from_label)
if task:
tasks_with_label.append(task)
futures = []
for task in tasks_with_label:
futures.append(patch_anno.remote(task, from_label, to_label))
for future in tqdm(futures, desc='Futures'):
ray.get(future)
# --------------------------------------------------------------
preds = get_all_projects_tasks(get_predictions_instead=True)
preds_with_label = []
for pred in tqdm(preds, desc='Scan preds'):
for result in pred['result']:
label = result['value']['rectanglelabels']
if from_label in label:
preds_with_label.append(pred)
futures = []
for pred in preds_with_label:
futures.append(patch_pred.remote(pred, from_label, to_label))
for future in tqdm(futures, desc='Futures'):
ray.get(future)
# --------------------------------------------------------------
ray.shutdown()
if __name__ == '__main__':
load_dotenv()
args = opts()
patch(from_label=args.from_label, to_label=args.to_label)
|
the-stack_0_1201 | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.parser.pattern.nodes.base import PatternNode
from programy.parser.pattern.equalsmatch import EqualsMatch
from programy.parser.exceptions import ParserException
class PatternBotNode(PatternNode):
def __init__(self, attribs, text, userid='*'):
PatternNode.__init__(self, userid)
if 'name' in attribs:
self._property = attribs['name']
elif 'property' in attribs:
self._property = attribs['property']
elif text:
self._property = text
else:
raise ParserException("Invalid bot node, neither name or property specified as attribute or text")
def is_bot(self):
return True
@property
def property(self):
return self._property
def to_xml(self, client_context, include_user=False):
string = ""
if include_user is True:
string += '<bot userid="%s" property="%s">\n'%(self.userid, self.property)
else:
string += '<bot property="%s">\n' % self.property
string += super(PatternBotNode, self).to_xml(client_context)
string += "</bot>"
return string
def to_string(self, verbose=True):
if verbose is True:
return "BOT [%s] [%s] property=[%s]" % (self.userid, self._child_count(verbose), self.property)
return "BOT property=[%s]" % (self.property)
def equivalent(self, other):
if other.is_bot():
if self.userid == other.userid:
if self.property == other.property:
return True
return False
def equals(self, client_context, words, word_no):
word = words.word(word_no)
if self.userid != '*':
if self.userid != client_context.userid:
return EqualsMatch(False, word_no)
if client_context.brain.properties.has_property(self.property):
if word == client_context.brain.properties.property(self.property):
YLogger.debug(client_context, "Found word [%s] as bot property", word)
return EqualsMatch(True, word_no, word)
return EqualsMatch(False, word_no)
|
the-stack_0_1202 | from django.test import TestCase
from django.utils.timezone import now, timedelta
from polls.models import Choice, Question
from django.contrib.auth.models import User
from django.urls import reverse
# Create your tests here.
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
time = now() + timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
time = now() - timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
time = now() - timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
class IndexViewTests(TestCase):
def test_get_no_question(self):
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
# self.assertContains(response, 'No polls are available.')
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_get_question(self):
Question.objects.create(question_text='Demo question.')
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['latest_question_list'],
['<Question: Demo question.>'])
class DetailViewTests(TestCase):
def setUp(self) -> None:
self.user = User.objects.create_user(username='libin', password='123')
self.question = Question.objects.create(
question_text='unit_test question?')
self.choice_good = Choice.objects.create(question=self.question,
choice_text='good',
votes=0)
self.choice_soso = Choice.objects.create(question=self.question,
choice_text='soso',
votes=0)
self.choice_bad = Choice.objects.create(question=self.question,
choice_text='bad',
votes=0)
def tearDown(self) -> None:
self.question.delete()
self.user.delete()
def test_get(self):
self.client.login(username='libin', password='123')
response = self.client.get(
reverse('polls:detail', kwargs={'id': self.question.id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(str(response.context['question']),
self.question.question_text)
def test_post(self):
self.client.login(username='libin', password='123')
response = self.client.post(reverse('polls:detail',
kwargs={'id': self.question.id}),
data={
'choice': self.choice_good.id,
})
self.assertEqual(response.status_code, 302)
good_choice_votes = Choice.objects.get(id=self.choice_good.id).votes
self.assertEqual(good_choice_votes, 1)
|
the-stack_0_1204 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 [email protected]
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from sexpr import sexp
import pprint
import copy
import hexdump
DEBUG = 0
def u8(x):
return x & 0xff
def i16(x):
return x & 0xffff
class LEDVMError(Exception):
pass
class OpCodeInfo(object):
def __init__(self, name, data_len, arg_type):
self.name = name
self.data_len = data_len
self.arg_type = arg_type
ARG_NONE = 0
ARG_REFRENCES = 1
class OpCode(object):
SHOW_HSV = 0x00
SHOW_RGB = 0x01
LOAD_PIXEL = 0x02
ADD_VEC3 = 0x03
SUB_VEC3 = 0x04
IF_EQ = 0x05
OP_CODE_TABLE = {
        # CODE , MNEMONIC , DATA_SIZE
SHOW_HSV : OpCodeInfo("SHOW_HSV" , 0 , OpCodeInfo.ARG_NONE) ,
SHOW_RGB : OpCodeInfo("SHOW_RGB" , 0 , OpCodeInfo.ARG_NONE) ,
LOAD_PIXEL : OpCodeInfo("LOAD_PIXEL" , 3 , OpCodeInfo.ARG_REFRENCES) ,
ADD_VEC3 : OpCodeInfo("ADD_VEC3" , 3 , OpCodeInfo.ARG_REFRENCES) ,
SUB_VEC3 : OpCodeInfo("SUB_VEC3" , 3 , OpCodeInfo.ARG_REFRENCES) ,
IF_EQ : OpCodeInfo("IF_EQ" , 3 , OpCodeInfo.ARG_REFRENCES) ,
}
@staticmethod
def to_string(code):
if code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
return "{}<{}>".format(name, code)
else:
return "{}<{}>".format("UnknownOpCode", code)
def __init__(self, name, data_len=0):
self.name = name
self.data_len = data_len
class Register(object):
# Register codes
PIXEL_NUM = 0
OUTPUT_TYPE = 1
KEY_STATE = 2
MOUSE_X = 3
MOUSE_Y = 4
OUTPUT_TYPE_RGB = 0
OUTPUT_TYPE_HSV = 1
def __init__(self, name, default_value=0):
self.name = name
self.value = default_value
self.default_value = default_value
class LEDEffectVM(object):
REGISTER_TABLE = {
Register.PIXEL_NUM : Register("PIXEL_NUM", 0),
Register.OUTPUT_TYPE : Register("OUTPUT_TYPE", 0),
Register.KEY_STATE : Register("KEY_STATE", 0),
Register.MOUSE_X : Register("MOUSE_X", 0),
Register.MOUSE_Y : Register("MOUSE_Y", 0),
}
def __init__(self, led_program_table={'main': []}, num_pixels=None):
self.pixels = [(0, 0, 0)] * num_pixels
self.led_program_table = led_program_table
self.set_active_progarm('main')
self.instr_ptr = 0
self.registers = {}
for reg in self.REGISTER_TABLE:
self.registers[reg] = self.REGISTER_TABLE[reg].default_value
def set_active_progarm(self, name):
self._current_program_name = name
self.current_program = self.led_program_table[name]
def goto_start(self):
self.instr_ptr = 0
def rel_jump(self, offset):
self.instr_ptr += (offset)
def get_next_word(self):
if self.instr_ptr >= len(self.current_program):
return None
result = self.current_program[self.instr_ptr]
self.instr_ptr += 1
return result
def read_op_code(self):
code = self.get_next_word()
if code == None:
return None, None
self.vm_assert(code in OpCode.OP_CODE_TABLE, "Invalid OpCode: {}".format(code))
op_code = OpCode.OP_CODE_TABLE[code]
data = []
for i in range(op_code.data_len):
data.append(self.get_next_word())
# if DEBUG >= 1
if DEBUG >= 5:
print("Instruction: {}".format(self.instr_ptr))
print("Current code: {}, data:{}".format(
OpCode.to_string(code), data
)
)
return code, data
REFERENCE_TYPE_IMMEDIATE = 0
REFERENCE_TYPE_REGISTER = 1
REFERENCE_TYPE_PIXEL = 2
def lookup_refrence(self, ref):
        # References either an immediate value, a register, or a pixel component.
        # Format of reference values (in hex):
        # * 00xx -> single byte immediate value
        # * 01xx -> register reference (xx is a register code)
        # * 02xx -> pixel component reference (xx selects component 0/1/2 of the current pixel)
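        # Worked examples (derived from the decoding below):
        #   0x00ff -> immediate value 255
        #   0x0100 -> value of the PIXEL_NUM register
        #   0x0201 -> component 1 (g / s) of the current pixel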
value = (ref >> 0) & 0xff
ref_type = (ref >> 8) & 0xff
if ref_type == self.REFERENCE_TYPE_IMMEDIATE:
return value
elif ref_type == self.REFERENCE_TYPE_PIXEL:
assert(value < 3)
return self.get_current_pixel()[value]
elif ref_type == self.REFERENCE_TYPE_REGISTER:
assert(value in self.REGISTER_TABLE)
return self.registers[value]
def get_pixel(self, pixel_num):
return self.pixels[pixel_num]
def get_pixel_type(self, pixel_num):
return self.registers[Register.OUTPUT_TYPE]
def get_current_pixel(self):
return self.pixels[self.registers[Register.PIXEL_NUM]]
def set_current_pixel(self, x, y, z):
self.pixels[self.registers[Register.PIXEL_NUM]] = (x, y, z)
def execute_op_code(self, code, data):
"""
Return True if the program has finished executing
"""
if code == OpCode.SHOW_HSV:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_HSV
return True
elif code == OpCode.SHOW_RGB:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_RGB
return True
elif code == OpCode.LOAD_PIXEL:
self.set_current_pixel(
self.lookup_refrence(data[0]),
self.lookup_refrence(data[1]),
self.lookup_refrence(data[2])
)
elif code == OpCode.ADD_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] + self.lookup_refrence(data[0])),
u8(old_value[1] + self.lookup_refrence(data[1])),
u8(old_value[2] + self.lookup_refrence(data[2]))
)
elif code == OpCode.SUB_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] - self.lookup_refrence(data[0])),
u8(old_value[1] - self.lookup_refrence(data[1])),
u8(old_value[2] - self.lookup_refrence(data[2]))
)
elif code == OpCode.IF_EQ:
lhs = self.lookup_refrence(data[0])
rhs = self.lookup_refrence(data[1])
jmp_pos = self.lookup_refrence(data[2])
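            # The if-block body only executes when lhs == rhs; otherwise the
            # interpreter skips ahead by jmp_pos words (the block length written
            # out by the parser).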
if DEBUG >= 5:
print("lhs, rhs, == :", lhs, rhs, lhs == rhs)
if lhs != rhs:
self.rel_jump(jmp_pos)
else:
raise LEDVMError("Unknown opcode {}".format(code))
return False
def execute_program(self, program_name):
self.set_active_progarm(program_name)
for (pixel_i, _) in enumerate(self.pixels):
self.execute_program_pixel(pixel_i)
def execute_program_pixel(self, pixel_number):
self.goto_start()
self.registers[Register.PIXEL_NUM] = pixel_number
is_running = True
if DEBUG:
print("Starting program for pixel: {}".format(pixel_number))
while is_running:
(code, data) = self.read_op_code()
if code == None:
break;
if DEBUG:
print("(OpCode {}, Data {})".format(code, data))
is_running = not self.execute_op_code(code, data)
def vm_assert(self, exp, msg=""):
if exp != True:
self.print_core_dump(msg)
if msg == "":
LEDVMError("LEDVMError: unspecified error")
else:
LEDVMError("LEDVMError: {}".format(msg))
def print_core_dump(self, error_msg):
print(
"\n"
"Core dump while executing program '{}':\n"
"Error message: {}\n"
"instr_ptr: {}\n"
"program: {}\n"
.format(
self._current_program_name,
error_msg,
self.instr_ptr,
self.current_program
)
)
class LEDEffectVMParser(object):
def __init__(self):
# The Parser needs the inverse mappings of the op_code/register lookup
# tables, so generate them here
self.op_code_lookup_table = {}
for code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
self.op_code_lookup_table[name] = code
self.register_lookup_table = {}
for reg in LEDEffectVM.REGISTER_TABLE:
name = LEDEffectVM.REGISTER_TABLE[reg].name
self.register_lookup_table[name] = reg
# def exp_as_arrays(self, exp):
# print(exp)
# arr = exp[0]
# result = []
# for child in arr:
# result.append(self.exp_as_arrays(child))
# return result
def parse_asm(self, program_str):
sexpression = sexp.parseString(program_str, parseAll=True)
if DEBUG:
print(sexpression)
pprint.pprint(sexpression)
# sexpression = self.exp_as_arrays(sexpression)
byte_code = []
byte_code += self.parse_program(sexpression)
return byte_code
def generate_ref(self, ref):
if isinstance(ref, int):
assert(ref <= 255)
ref_type = LEDEffectVM.REFERENCE_TYPE_IMMEDIATE
value = ref
elif isinstance(ref, str):
if ref in self.register_lookup_table:
ref_type = LEDEffectVM.REFERENCE_TYPE_REGISTER
value = self.register_lookup_table[ref]
elif ref in ('r', 'g', 'b', 'h', 's', 'v'):
ref_type = LEDEffectVM.REFERENCE_TYPE_PIXEL
value = {
'r': 0,
'h': 0,
'g': 1,
's': 1,
'b': 2,
'v': 2,
}[ref]
else:
raise LEDVMError("Unknown reference: {}".format(ref))
else:
return None
lo_byte = (value << 0)
hi_byte = (ref_type << 8)
return [lo_byte | hi_byte]
def parse_instruction(self, exp):
if DEBUG:
print("Parse Instruction: ", exp)
name = exp[0]
result = []
if not name in self.op_code_lookup_table:
raise LEDVMError("Unknown opcode menomic: {}".format(name))
op_code = self.op_code_lookup_table[name]
op_info = OpCode.OP_CODE_TABLE[op_code]
# Add the op_code to the result
result += [op_code]
OP_CODE_POS = 1
data = exp[OP_CODE_POS:]
if len(data) != op_info.data_len:
raise LEDVMError("Expected {} arguments to opcode {}, got {}".format(
op_info.data_len,
name,
len(data)
)
)
if op_code == OpCode.IF_EQ:
print(data)
print(data[0], data[1], data[2])
LHS_POS = 0
RHS_POS = 1
JUMP_POS = 2
result += self.generate_ref(data[LHS_POS])
result += self.generate_ref(data[RHS_POS])
if_block_exp = data[JUMP_POS]
ref_data = self.generate_ref(if_block_exp)
if ref_data != None:
result += ref_data
else:
print('ifblock:', if_block_exp)
if_block = self.parse_instruction_list(if_block_exp)
jmp_offset = i16(len(if_block))
result += [jmp_offset]
result += if_block
print('ifBlockResult:', result)
elif op_info.arg_type == OpCodeInfo.ARG_NONE:
pass # Don't need to add data
elif op_info.arg_type == OpCodeInfo.ARG_REFRENCES:
for ref in data:
result += self.generate_ref(ref)
return result
def parse_instruction_list(self, instruction_list):
result = []
for instruction in instruction_list:
result += self.parse_instruction(instruction)
return result
def parse_program(self, exp):
if DEBUG:
print("Parse program: ", exp)
exp = exp[0]
# pprint.pprint(exp)
return self.parse_instruction_list(exp)
if __name__ == "__main__":
init_prog = """
(
(LOAD_PIXEL PIXEL_NUM 255 200)
)
"""
# main_prog = """
# (
# (LOAD_PIXEL r 255 200)
# (ADD_VEC3 1 0 0)
# (IF_EQ v 199
# (
# (ADD_VEC3 1 0 0)
# )
# )
# (IF_EQ v 200
# (
# (SUB_VEC3 1 0 0)
# )
# )
# (SHOW_HSV)
# )
# """
main_prog = """
(
(IF_EQ h 0
(
(LOAD_PIXEL h 255 199)
)
)
(IF_EQ h 255
(
(LOAD_PIXEL h 255 200)
)
)
(IF_EQ v 200
(
(SUB_VEC3 1 0 0)
)
)
(IF_EQ v 199
(
(ADD_VEC3 1 0 0)
)
)
(SHOW_HSV)
)
"""
vm_parser = LEDEffectVMParser()
led_programs = {
"init": vm_parser.parse_asm(init_prog),
"main": vm_parser.parse_asm(main_prog),
}
vm = LEDEffectVM(led_programs, num_pixels=64)
for prog in led_programs:
print(prog, led_programs[prog])
byte_code_as_bytes = bytes([])
for word in led_programs[prog]:
byte_code_as_bytes += bytes([word & 0xff, word>>8 & 0xff])
hexdump.hexdump(byte_code_as_bytes)
vm.execute_program('init')
for i in range(300):
vm.execute_program('main')
print(vm.pixels)
|
the-stack_0_1205 | import torch.nn as nn
import torch.nn.functional as F
import torch
class Classifier(nn.Module):
def __init__(self, input_nc=3, ndf=64, norm_layer=nn.BatchNorm2d):
super(Classifier, self).__init__()
kw = 3
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(3):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2),
norm_layer(ndf * nf_mult, affine=True),
nn.LeakyReLU(0.2, True)
]
self.before_linear = nn.Sequential(*sequence)
sequence = [
nn.Linear(ndf * nf_mult, 1024),
nn.Linear(1024, 10)
]
self.after_linear = nn.Sequential(*sequence)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.criterionCLS = torch.nn.modules.CrossEntropyLoss()
def forward(self, x, lbl=None, ita=1.5):
bs = x.size(0)
out = self.after_linear(self.before_linear(x).view(bs, -1))
x = out
        P = F.softmax(x, dim=1)  # [B, 10]
        logP = F.log_softmax(x, dim=1)  # [B, 10]
        PlogP = P * logP  # [B, 10]
        ent = -1.0 * PlogP.sum(dim=1)  # [B]
        ent = ent / 2.9444  # normalise by log(19) ~= 2.9444; change when the number of classes is not 19
# compute robust entropy
ent = ent ** 2.0 + 1e-8
ent = ent ** ita
self.loss_ent = ent.mean()
if lbl is not None:
self.loss_cls = self.criterionCLS(x, lbl)
return x
def get_lr_params(self):
b = []
b.append(self.before_linear.parameters())
b.append(self.after_linear.parameters())
for j in range(len(b)):
for i in b[j]:
yield i
def optim_parameters(self, args):
return [{'params': self.get_lr_params(), 'lr': args.learning_rate}]
def adjust_learning_rate(self, args, optimizer, i):
lr = args.learning_rate * ( (1-float(i)/args.num_steps) ** (args.power) )
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) > 1:
optimizer.param_groups[1]['lr'] = lr * 10
def CLSNet(restore_from=None):
model = Classifier()
if restore_from is not None:
model.load_state_dict(torch.load(restore_from + '.pth', map_location=lambda storage, loc: storage))
return model
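# Minimal usage sketch (not part of the original file; the 32x32 input size and the label
# tensor are assumptions chosen so the conv stack collapses to 1x1 before the linear layers):
#   model = CLSNet()                        # or CLSNet('/path/to/weights') to restore a checkpoint
#   images = torch.randn(4, 3, 32, 32)      # batch of RGB images
#   logits = model(images, lbl=torch.zeros(4, dtype=torch.long))
#   loss = model.loss_cls + model.loss_ent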
|
the-stack_0_1206 | from Common import *
from save_to_mysql import Save_MySQL
# file_date_time = "2019-10-17"
# stif_time = "201910170900"
# Generate the individual (personal) customer table
def make_stan_person(num):
    """Field list
    "busi_reg_no": "customer number",
    "ctnm": "customer name",
    "cten": "pinyin / English name",
    "client_tp": "customer category (1 = customer, 2 = merchant)",
    "account_tp": "account classification (1/2/3 = class 1/2/3 account)",
    "busi_type": "business type",
    "smid": "subject's contracted-merchant number",
    "citp": "certificate type",
    "citp_ori": "original certificate type value",
    "citp_nt": "certificate type description",
    "ctid": "certificate number",
    "ctid_edt": "certificate expiry date",
    "sex": "gender",
    "country": "nationality",
    "nation": "ethnic group",
    "birthday": "date of birth",
    "education": "education level",
    "ctvc": "subject's occupation category",
    "picm": "personal annual income",
    "ficm": "household annual income",
    "marriage": "marital status",
    "ceml": "e-mail",
    "rgdt": "account opening date",
    "cls_dt": "account closing date",
    "remark": "remarks",
    "indu_code": "industry code",
    "stat_flag_ori": "original customer status value",
    "stat_flag": "customer status",
    "mer_prov": "province",
    "mer_city": "city",
    "mer_area": "district/county",
    "address": "detailed address",
    "tel": "contact phone",
    "mer_unit": "managing institution",
    "is_line": "whether registered online",
    "certification": "onboarding channel",
    "cer_num": "number of identity-verification channels passed",
    "con_acc_name": "business (trading) name",
    "bord_flag": "domestic/overseas flag",
    "web_info": "online-payment merchant website information",
    "con_nation": "merchant's country or region",
    "bind_card": "bank-card binding flag",
    "ip_code": "registration IP address",
    "mac_info": "MAC or IMEI of the registration device",
    "self_acc_no": "contracted-merchant acquiring settlement account",
    "acc_type1": "account type",
    "bank_acc_name": "bank account name",
    "reals": "customer authenticity",
    "batch_pay": "batch payout flag",
    "statement_type": "settlement type"
:return:
"""
busi_reg_no = "p_{}".format(num)
ctnm = make_name_data()
cten = word_to_pinyin(ctnm)
client_tp = random.choice(["1", "2"])
busi_type = make_busi_type_data()
account_tp = make_account_tp_data(busi_type)
if client_tp == "2":
        smid = random.randint(1000, 9999)  # value of this field still to be confirmed
smid = str(smid)
else:
smid = ""
citp = make_citp_data()
    citp_ori = citp  # provisional value
citp_nt = "有效证件"
ctid = make_ctid_data()
ctid_edt = make_Card_valid_date(ctid)
sex = make_sex(ctid)
country = choice_contry()
nation = str(random.randint(1, 57))
birthday = ctid[6:14]
education = str(random.randint(1, 7))
ctvc = random.choice(["1A", "1B", "1C", "1D", "1E", "1F", "1G", "1H"])
picm = "300000"
ficm = "500000"
marriage = make_marriage_data(ctid)
ceml = make_email_data()
rgdt = make_register_date()
cls_dt = make_cls_dt_data(busi_reg_no)
remark = "这是一个备注"
indu_code = make_indu_code_data()
stat_flag_ori = "888888"
stat_flag = make_stat_flag_data(cls_dt)
# mer_prov = get_province_data(ctid[:6])
mer_prov = get_province_code_data(ctid[:6])
# mer_city = make_province_city_data(ctid[:6])[0]
mer_city = make_province_city_code_data(ctid[:6])
# mer_area = make_province_city_data(ctid[:6])[-1]
mer_area = ctid[:6]
address = make_address(ctid[:6])
tel = make_tel_num()
mer_unit = make_mer_unit_data()
is_line = random.choice(["0", "1"])
certification = random.choice(["1", "2", "3"])
cer_num = str(random.randint(0, 6))
    con_acc_name = "默认经营名称"  # required for online payment, prepaid card and card acquiring; placeholder for now
    bord_flag = make_bord_flag_data()
    web_info = make_web_info_data(busi_type)  # optional for non-online-payment business or merchants without a website
    con_nation = make_con_nation_data(bord_flag)
    bind_card = make_bind_card_data(busi_type)  # only required for online payment
    ip_code = make_ip_data(busi_type)  # only required for online payment
    mac_info = make_mac_info_data(busi_type)  # MAC for PCs, IMEI for mobile terminals (online payment and prepaid card); empty for now
    self_acc_no = make_self_acc_no_data(client_tp)  # empty for non-merchants; required for online payment, prepaid card and card acquiring
    acc_type1 = make_acc_type1_data(client_tp)  # empty for non-merchants; required for online payment, prepaid card and card acquiring
bank_acc_name = make_bank_acc_name_data(acc_type1)
reals = make_reals_data()
batch_pay = make_batch_pay_data(busi_type, client_tp)
statement_type = make_statement_type_data(client_tp)
# print(busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, citp_nt, ctid, ctid_edt, sex,
# country, nation, birthday, education, ctvc, picm, ficm, marriage, ceml, rgdt, cls_dt, remark, indu_code,
# stat_flag_ori, stat_flag, mer_prov, mer_city, mer_area, address, tel, mer_unit, is_line, certification,
# cer_num, con_acc_name, bord_flag, web_info, con_nation, bind_card, ip_code, mac_info, self_acc_no, acc_type1,
# bank_acc_name, reals, batch_pay, statement_type)
# contect_data = make_connect_data([
# busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, citp_nt, ctid, ctid_edt, sex,
# country, nation, birthday, education, ctvc, picm, ficm, marriage, ceml, rgdt, cls_dt, remark, indu_code,
# stat_flag_ori, stat_flag, mer_prov, mer_city, mer_area, address, tel, mer_unit, is_line, certification,
# cer_num, con_acc_name, bord_flag, web_info, con_nation, bind_card, ip_code, mac_info, self_acc_no, acc_type1,
# bank_acc_name, reals, batch_pay, statement_type
# ])
contect_data = "busi_reg_no,ctnm,cten,client_tp,account_tp,busi_type,smid,citp,citp_ori,citp_nt,ctid,ctid_edt,sex,country,nation,birthday,education,ctvc,picm,ficm,marriage,ceml,rgdt,cls_dt,remark,indu_code,stat_flag_ori,stat_flag,mer_prov,mer_city,mer_area,address,tel,mer_unit,is_line,certification,cer_num,con_acc_name,bord_flag,web_info,con_nation,bind_card,ip_code,mac_info,self_acc_no,acc_type1,bank_acc_name,reals,batch_pay,statement_type"
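    # contect_data lists the column names in the same order as the dict returned below;
    # person()/org() pass it to Save_MySQL.save() together with the value dict.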
return {
"busi_reg_no": busi_reg_no,
"ctnm": ctnm,
"cten": cten,
"client_tp": client_tp,
"account_tp": account_tp,
"busi_type": busi_type,
"smid": smid,
"citp": citp,
"citp_ori": citp_ori,
"citp_nt": citp_nt,
"ctid": ctid,
"ctid_edt": ctid_edt,
"sex": sex,
"country": country,
"nation": nation,
"birthday": birthday,
"education": education,
"ctvc": ctvc,
"picm": picm,
"ficm": ficm,
"marriage": marriage,
"ceml": ceml,
"rgdt": rgdt,
"cls_dt": cls_dt,
"remark": remark,
"indu_code": indu_code,
"stat_flag_ori": stat_flag_ori,
"stat_flag": stat_flag,
"mer_prov": mer_prov,
"mer_city": mer_city,
"mer_area": mer_area,
"address": address,
"tel": tel,
"mer_unit": mer_unit,
"is_line": is_line,
"certification": certification,
"cer_num": cer_num,
"con_acc_name": con_acc_name,
"bord_flag": bord_flag,
"web_info": web_info,
"con_nation": con_nation,
"bind_card": bind_card,
"ip_code": ip_code,
"mac_info": mac_info,
"self_acc_no": self_acc_no,
"acc_type1": acc_type1,
"bank_acc_name": bank_acc_name,
"reals": reals,
"batch_pay": batch_pay,
"statement_type": statement_type
}, contect_data
# 生成机构表
def make_stan_org(num):
"""
    busi_reg_no: customer number
    ctnm: customer name
    cten: pinyin / English name
    client_tp: customer category
    account_tp: account classification
    busi_type: business type
    smid: subject's contracted-merchant number
    citp: certificate type (as reported)
    citp_ori: original certificate type value
    ctid: certificate number
    ctid_edt: certificate expiry date
    citp_nt: certificate type description
    id_type: certificate type (on-site inspection)
    org_no: organisation code
    linkman: contact person name
    linktel: contact person mobile number
    linkjob: contact person job title
    linkmail: contact person e-mail
    linkphone: contact person landline
    ceml: e-mail
    ctvc: subject's industry category
    crnm: legal representative name
    crit: legal representative certificate type
    crit_ori: original legal representative certificate type value
    crit_nt: legal representative certificate type description
    crid: legal representative certificate number
    crid_edt: legal representative certificate expiry date
    rgdt: account opening date
    cls_dt: account closing date
    scale: enterprise size
    country: country of registration
    crp_type: organisation category
    fud_date: date of establishment
    reg_cptl: registered capital
    remark_ctvc: business scope
    agency_ctnm: agent name
    agency_citp: agent certificate type
    agency_ctid: agent certificate number
    agency_edt: agent certificate expiry date
    remark: remarks
    indu_code: industry code
    stat_flag_ori: original customer status value
    stat_flag: customer status
    mer_prov: province
    mer_city: city
    mer_area: district/county
    address: detailed address
    tel: contact phone
    mer_unit: managing institution
    is_line: whether registered online
    certification: onboarding channel
    cer_num: number of identity-verification channels passed
    con_acc_name: business (trading) name
    bord_flag: domestic/overseas flag
    web_info: online-payment merchant website information
    con_nation: merchant's country or region
    majority_shareholder_ctnm: controlling shareholder / actual controller name
    majority_shareholder_citp: controlling shareholder / actual controller certificate type
    majority_shareholder_citp_ori: original certificate type value of the controlling shareholder / actual controller
    majority_shareholder_ctid: controlling shareholder / actual controller certificate number
    majority_shareholder_edt: controlling shareholder / actual controller certificate expiry date
    reg_cptl_code: registered capital currency
    bind_card: bank-card binding flag
    ip_code: registration IP address
    mac_info: MAC or IMEI of the registration device
    self_acc_no: contracted-merchant acquiring settlement account
    acc_type1: account type
    bank_acc_name: bank account name
    reals: customer authenticity
    complex: structural complexity of the non-natural person
    clear: traceability of the non-natural person's equity
    batch_pay: batch payout flag
    statement_type: settlement type
:return:
"""
busi_reg_no = "o_{}".format(num)
ctnm = make_name_data()
cten = word_to_pinyin(ctnm)
client_tp = random.choice(["1", "2"])
busi_type = make_busi_type_data()
account_tp = make_account_tp_data(busi_type)
if client_tp == "2":
        smid = make_random_str(20)  # value of this field still to be confirmed
else:
smid = ""
citp = random.choice(["21", "29"])
    citp_ori = citp  # provisional value
ctid = make_ctid_data()
ctid_edt = make_Card_valid_date(ctid)
if citp == "29":
citp_nt = random.choice(["营业执照", "统一社会信用代码"])
else:
citp_nt = "证件类型"
    if citp_nt == "营业执照":  # citp_ori only ever holds a code ("21"/"29"), so test the descriptive field instead
        id_type = "11"
    else:
        id_type = "12"
    org_no = make_random_num(9)  # unified social credit code is 9-17 digits
linkman = make_name_data()
linktel = make_tel_num()
linkjob = "联系人职务"
linkmail = make_email_data()
linkphone = make_random_num(9)
ceml = make_email_data()
ctvc = make_org_ctvc_data()
crnm = make_name_data()
crit = make_citp_data()
crit_ori = "证件原值"
if crit == "19":
crit_nt = "证件类型说明"
else:
crit_nt = ""
crid = make_ctid_data()
crid_edt = make_Card_valid_date(crid)
rgdt = make_register_date()
cls_dt = make_cls_dt_data(busi_reg_no)
scale = make_scale_data()
country = make_country_data()
crp_type = make_crp_type_data()
    fud_date = "20151111"  # date of establishment, hard-coded for now
    reg_cptl = "1000000.00"  # registered capital, hard-coded for now
remark_ctvc = "经营范围"
agency_ctnm = make_name_data()
agency_citp = make_citp_data()
agency_ctid = make_ctid_data()
agency_edt = make_Card_valid_date(agency_ctid)
remark = "备注,暂时不填"
    indu_code = make_indu_code_data()  # payment-institution industry code, defaults to 11111 for now
    stat_flag_ori = "11111"  # original customer status value; could come from the payment-system code table, adjust per the customer's business system
stat_flag = make_stat_flag_data(busi_reg_no)
mer_prov = get_province_code_data(ctid[:6])
mer_city = make_province_city_code_data(ctid[:6])
mer_area = ctid[:6]
address = make_address(ctid[:6])
tel = make_tel_num()
mer_unit = make_mer_unit_data()
is_line = random.choice(["0", "1"])
certification = random.choice(["1", "2", "3"])
cer_num = str(random.randint(0, 6))
    con_acc_name = "默认经营名称"  # required for online payment, prepaid card and card acquiring; placeholder for now
    bord_flag = make_bord_flag_data()  # required for online payment, prepaid card and card acquiring
    web_info = make_web_info_data(busi_type)  # optional for non-online-payment business or merchants without a website
    con_nation = make_con_nation_data(bord_flag)  # required for online payment, prepaid card and card acquiring
majority_shareholder_ctnm = make_name_data()
majority_shareholder_citp = make_citp_data()
majority_shareholder_citp_ori = "控股股东或实际控制人证件类型原值"
majority_shareholder_ctid = make_ctid_data()
majority_shareholder_edt = make_Card_valid_date(majority_shareholder_ctid)
reg_cptl_code = "CNY"
    bind_card = make_bind_card_data(busi_type)  # only required for online payment
    ip_code = make_ip_data(busi_type)  # only required for online payment
    mac_info = make_mac_info_data(busi_type)  # MAC for PCs, IMEI for mobile terminals (online payment and prepaid card); empty for now
    self_acc_no = make_self_acc_no_data(client_tp)  # empty for non-merchants; required for online payment, prepaid card and card acquiring
    acc_type1 = make_acc_type1_data(client_tp)  # empty for non-merchants; required for online payment, prepaid card and card acquiring
    bank_acc_name = make_bank_acc_name_data(acc_type1)  # filled when acc_type1 == 12; account name of the bank account (required for online payment, prepaid card and card acquiring)
reals = str(random.randint(1, 5))
complex = make_complex_data()
clear = make_clear_data()
batch_pay = make_batch_pay_data(busi_type, client_tp)
statement_type = random.choice(["0", "1"])
# print(busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, ctid, ctid_edt, citp_nt,
# id_type, org_no, linkman, linktel, linkjob, linkmail, linkphone, ceml, ctvc, crnm, crit, crit_ori, crit_nt,
# crid, crid_edt, rgdt, cls_dt, scale, country, crp_type, fud_date, reg_cptl, remark_ctvc, agency_ctnm,
# agency_citp, agency_ctid, agency_edt, remark, indu_code, stat_flag_ori, stat_flag, mer_prov, mer_city,
# mer_area, address, tel, mer_unit, is_line, certification, cer_num, con_acc_name, bord_flag, web_info,
# con_nation, majority_shareholder_ctnm, majority_shareholder_citp, majority_shareholder_citp_ori,
# majority_shareholder_ctid, majority_shareholder_edt, reg_cptl_code, bind_card, ip_code, mac_info, self_acc_no,
# acc_type1, bank_acc_name, reals, complex, clear, batch_pay, statement_type)
# contect_data = make_connect_data([
# busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, ctid, ctid_edt, citp_nt,
# id_type, org_no, linkman, linktel, linkjob, linkmail, linkphone, ceml, ctvc, crnm, crit, crit_ori, crit_nt,
# crid, crid_edt, rgdt, cls_dt, scale, country, crp_type, fud_date, reg_cptl, remark_ctvc, agency_ctnm,
# agency_citp, agency_ctid, agency_edt, remark, indu_code, stat_flag_ori, stat_flag, mer_prov, mer_city, mer_area,
# address, tel, mer_unit, is_line, certification, cer_num, con_acc_name, bord_flag, web_info, con_nation,
# majority_shareholder_ctnm, majority_shareholder_citp, majority_shareholder_citp_ori, majority_shareholder_ctid,
# majority_shareholder_edt, reg_cptl_code, bind_card, ip_code, mac_info, self_acc_no, acc_type1, bank_acc_name,
# reals, complex, clear, batch_pay, statement_type
# ])
contect_data = "busi_reg_no,ctnm,cten,client_tp,account_tp,busi_type,smid,citp,citp_ori,ctid,ctid_edt,citp_nt,id_type,org_no,linkman,linktel,linkjob,linkmail,linkphone,ceml,ctvc,crnm,crit,crit_ori,crit_nt,crid,crid_edt,rgdt,cls_dt,scale,country,crp_type,fud_date,reg_cptl,remark_ctvc,agency_ctnm,agency_citp,agency_ctid,agency_edt,remark,indu_code,stat_flag_ori,stat_flag,mer_prov,mer_city,mer_area,address,tel,mer_unit,is_line,certification,cer_num,con_acc_name,bord_flag,web_info,con_nation,majority_shareholder_ctnm,majority_shareholder_citp,majority_shareholder_citp_ori,majority_shareholder_ctid,majority_shareholder_edt,reg_cptl_code,bind_card,ip_code,mac_info,self_acc_no,acc_type1,bank_acc_name,reals,complex,clear,batch_pay,statement_type"
return {
"busi_reg_no": busi_reg_no,
"ctnm": ctnm,
"cten": cten,
"client_tp": client_tp,
"account_tp": account_tp,
"busi_type": busi_type,
"smid": smid,
"citp": citp,
"citp_ori": citp_ori,
"ctid": ctid,
"ctid_edt": ctid_edt,
"citp_nt": citp_nt,
"id_type": id_type,
"org_no": org_no,
"linkman": linkman,
"linktel": linktel,
"linkjob": linkjob,
"linkmail": linkmail,
"linkphone": linkphone,
"ceml": ceml,
"ctvc": ctvc,
"crnm": crnm,
"crit": crit,
"crit_ori": crit_ori,
"crit_nt": crit_nt,
"crid": crid,
"crid_edt": crid_edt,
"rgdt": rgdt,
"cls_dt": cls_dt,
"scale": scale,
"country": country,
"crp_type": crp_type,
"fud_date": fud_date,
"reg_cptl": reg_cptl,
"remark_ctvc": remark_ctvc,
"agency_ctnm": agency_ctnm,
"agency_citp": agency_citp,
"agency_ctid": agency_ctid,
"agency_edt": agency_edt,
"remark": remark,
"indu_code": indu_code,
"stat_flag_ori": stat_flag_ori,
"stat_flag": stat_flag,
"mer_prov": mer_prov,
"mer_city": mer_city,
"mer_area": mer_area,
"address": address,
"tel": tel,
"mer_unit": mer_unit,
"is_line": is_line,
"certification": certification,
"cer_num": cer_num,
"con_acc_name": con_acc_name,
"bord_flag": bord_flag,
"web_info": web_info,
"con_nation": con_nation,
"majority_shareholder_ctnm": majority_shareholder_ctnm,
"majority_shareholder_citp": majority_shareholder_citp,
"majority_shareholder_citp_ori": majority_shareholder_citp_ori,
"majority_shareholder_ctid": majority_shareholder_ctid,
"majority_shareholder_edt": majority_shareholder_edt,
"reg_cptl_code": reg_cptl_code,
"bind_card": bind_card,
"ip_code": ip_code,
"mac_info": mac_info,
"self_acc_no": self_acc_no,
"acc_type1": acc_type1,
"bank_acc_name": bank_acc_name,
"reals": reals,
"complex": complex,
"clear": clear,
"batch_pay": batch_pay,
"statement_type": statement_type
}, contect_data
# Customer certificate table
def make_stan_cert(infos):
"""
    ctif_id: customer number
    ctif_tp: subject type
    citp: certificate type
    citp_ori: original certificate type value
    citp_nt: certificate type description
    ctid: certificate number
    iss_unt: issuing authority
    address: address on the certificate
    ctid_edt: subject certificate expiry date
    iss_dt: certificate issue date
    iss_ctry: issuing country
    is_rp: whether this is the primary certificate
:return:
"""
    ctif_id = infos.get("busi_reg_no")  # taken from the input record
    ctif_tp = "1"
    citp = infos.get("citp")  # taken from the input record
    citp_ori = infos.get("citp_ori")  # taken from the input record
    citp_nt = infos.get("citp_nt")  # taken from the input record
    ctid = infos.get("ctid")  # taken from the input record
    iss_unt = make_province_city_process_data(ctid[:6])[:16] + "公安局"  # county-level public security bureau of the registered residence
    address = infos.get("address")  # taken from the input record
    ctid_edt = infos.get("ctid_edt")  # taken from the input record
    iss_dt = make_iss_dt_data(ctid_edt)
    iss_ctry = infos.get("country")  # taken from the input record
    is_rp = "1"  # consider adding a secondary certificate later
# print(ctif_id, ctif_tp, citp, citp_ori, citp_nt, ctid, iss_unt, address, ctid_edt, iss_dt, iss_ctry, is_rp)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, citp, citp_ori, citp_nt, ctid, iss_unt, address, ctid_edt, iss_dt, iss_ctry, is_rp
# ])
contect_data = "ctif_id,ctif_tp,citp,citp_ori,citp_nt,ctid,iss_unt,address,ctid_edt,iss_dt,iss_ctry,is_rp"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"citp": citp,
"citp_ori": citp_ori,
"citp_nt": citp_nt,
"ctid": ctid,
"iss_unt": iss_unt,
"address": address,
"ctid_edt": ctid_edt,
"iss_dt": iss_dt,
"iss_ctry": iss_ctry,
"is_rp": is_rp
}, contect_data
# Customer address table
def make_stan_address(infos, ctif_tp_data):
"""
    ctif_id: customer number
    ctif_tp: subject type
    address_tp: address type
    address: detailed address
    ctry: country code
    county: administrative division code
    prvc: province
    city: city
    area: district/county
    postcode: postal code
    exp_dt: address expiry date
    is_rp: whether this is the primary address
:return:
"""
    ctif_id = infos.get("busi_reg_no")  # taken from the input record
    ctif_tp = ctif_tp_data  # taken from the caller
    address_tp = make_address_tp_data()
    address = infos.get("address")
    ctry = infos.get("country")  # taken from the input record
    # county = make_make_province_city_process_data(infos.get("ctid")[:6])  # removed from the latest interface spec
    prvc = infos.get("mer_prov")  # taken from the input record
    city = infos.get("mer_city")  # taken from the input record
    area = infos.get("mer_area")  # taken from the input record
postcode = ""
exp_dt = ""
is_rp = "1"
# print(ctif_id, ctif_tp, address_tp, address, ctry, prvc, city, area, postcode, exp_dt, is_rp)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, address_tp, address, ctry, prvc, city, area, postcode, exp_dt, is_rp
# ])
contect_data = "ctif_id,ctif_tp,address_tp,address,ctry,prvc,city,area,postcode,exp_dt,is_rp"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"address_tp": address_tp,
"address": address,
"ctry": ctry,
"prvc": prvc,
"city": city,
"area": area,
"postcode": postcode,
"exp_dt": exp_dt,
"is_rp": is_rp
}, contect_data
# Customer contact table
def make_stan_tel(infos):
"""
    ctif_id: customer number
    ctif_tp: subject type
    tel_tp: phone type
    tel: contact phone
    is_rp: whether this is the primary phone
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
tel_tp = random.choice(["11", "12", "21", "22", "23"])
tel = make_tel_num()
is_rp = "1"
# print(ctif_id, ctif_tp, tel_tp, tel, is_rp)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, tel_tp, tel, is_rp
# ])
contect_data = 'ctif_id,ctif_tp,tel_tp,tel,is_rp'
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"tel_tp": tel_tp,
"tel": tel,
"is_rp": is_rp
}, contect_data
# Related-party table
def make_stan_relation(infos):
"""
    Customer relationships
    ctif_id: customer number
    ctif_tp: subject type
    rel_tp: relationship type
    rel_layer: related-party level
    rel_ctif: related-party customer number
    rel_cstp: related-party category
    rel_name: related-party name
    rcnt: related-party nationality/country
    citp: related-party certificate type
    citp_ori: original related-party certificate type value
    ctid: related-party certificate number
    citp_nt: related-party certificate type description
    hold_per: shareholding ratio
    hold_amt: shareholding amount
    ctid_edt: related-party certificate expiry date
    rel_prov: related-party province
    rel_city: related-party city
    rel_area: related-party district/county
    rear: related-party detailed address
    retl: related-party contact phone
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
rel_tp = make_rel_tp_data()
rel_layer = random.choice(["-1", "0", "1", "2", "3", "4", "5"])
rel_ctif = make_random_num(6)
rel_cstp = random.choice(["1", "2"])
rel_name = make_name_data()
    rcnt = "CHN"  # make_country_data(); defaults to China
citp = make_citp_data()
citp_ori = "证件类型原值"
ctid = make_ctid_data()
citp_nt = "证件类型说明"
    hold_per = 0.05  # shareholding ratio
    hold_amt = 0.05  # shareholding amount
ctid_edt = make_Card_valid_date(ctid)
rel_prov = get_province_code_data(ctid[:6])
rel_city = make_province_city_code_data(ctid[:6])
rel_area = ctid[:6]
rear = make_address(ctid[:6])
retl = make_tel_num()
# print(ctif_id, ctif_tp, rel_tp, rel_layer, rel_ctif, rel_cstp, rel_name, rcnt, citp, citp_ori, ctid, citp_nt,
# hold_per, hold_amt, ctid_edt, rel_prov, rel_city, rel_area, rear, retl)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, rel_tp, rel_layer, rel_ctif, rel_cstp, rel_name, rcnt, citp, citp_ori, ctid, citp_nt,
# hold_per, hold_amt, ctid_edt, rel_prov, rel_city, rel_area, rear, retl
# ])
contect_data = "ctif_id,ctif_tp,rel_tp,rel_layer,rel_ctif,rel_cstp,rel_name,rcnt,citp,citp_ori,ctid,citp_nt,hold_per,hold_amt,ctid_edt,rel_prov,rel_city,rel_area,rear,retl"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"rel_tp": rel_tp,
"rel_layer": rel_layer,
"rel_ctif": rel_ctif,
"rel_cstp": rel_cstp,
"rel_name": rel_name,
"rcnt": rcnt,
"citp": citp,
"citp_ori": citp_ori,
"ctid": ctid,
"citp_nt": citp_nt,
"hold_per": hold_per,
"hold_amt": hold_amt,
"ctid_edt": ctid_edt,
"rel_prov": rel_prov,
"rel_city": rel_city,
"rel_area": rel_area,
"rear": rear,
"retl": retl
}, contect_data
# Payment account table
def make_stan_pact(infos):
"""
    ctif_id: customer number
    ctif_tp: subject type
    act_tp: account type
    act_cd: payment account number
    act_typ: account class
    act_limit: payment account transaction limit
    is_self_acc: whether it is the contracted-merchant acquiring settlement account
    sales_name: prepaid-card purchaser
    cst_sex: prepaid-card purchaser gender
    nation: prepaid-card purchaser nationality
    occupation: prepaid-card purchaser occupation
    id_type: prepaid-card purchaser certificate kind
    id_type_ori: original prepaid-card purchaser certificate kind value
    id_no: prepaid-card purchaser certificate number
    id_deadline: prepaid-card purchaser certificate expiry date
    contact: prepaid-card purchaser contact information
    address: prepaid-card purchaser home or employer address
    sales_flag: prepaid-card agency/direct-sale flag
    bind_mob: bound mobile number
    mer_unit: managing institution
    cls_dt: account status
    rgdt: account opening date
    cls_stat: account closing date
:param infos:
:return:
"""
if infos.get("busi_type") == "02":
ctif_id = ""
ctif_tp = ""
act_tp = ""
act_cd = ""
act_typ = ""
act_limit = 0
is_self_acc = ""
sales_name = ""
cst_sex = ""
nation = ""
occupation = ""
id_type = ""
id_type_ori = ""
id_no = ""
id_deadline = ""
contact = ""
address = ""
sales_flag = ""
bind_mob = ""
mer_unit = ""
cls_dt = ""
rgdt = ""
cls_stat = ""
else:
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
act_tp = random.choice(['11', "211", "212"])
act_cd = make_act_cd_data(act_tp)
act_typ = make_act_type_data(act_tp)
act_limit = make_act_limit_data(act_tp, act_typ)
is_self_acc = random.choice(["0", "1"])
sales_name, cst_sex, nation, occupation, id_type, id_type_ori, id_no, id_deadline, contact, address, sales_flag \
= make_prepaid_card_data(infos)
bind_mob = make_bind_mob_data(infos)
mer_unit = make_mer_unit_data()
cls_dt = make_cls_dt_data(infos.get("busi_reg_no"))
rgdt = make_register_date()
if cls_dt == "C":
cls_stat = make_register_date()
else:
cls_stat = ""
# print(ctif_id, ctif_tp, act_tp, act_cd, act_typ, act_limit, is_self_acc, sales_name, "性别:", cst_sex, nation,
# occupation, id_type, id_type_ori, id_no, id_deadline, contact, address, sales_flag, bind_mob, mer_unit,
# cls_dt, rgdt, cls_stat)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, act_tp, act_cd, act_typ, act_limit, is_self_acc, sales_name, cst_sex, nation, occupation,
# id_type, id_type_ori, id_no, id_deadline, contact, address, sales_flag, bind_mob, mer_unit, cls_dt, rgdt,
# cls_stat
# ])
contect_data = "ctif_id,ctif_tp,act_tp,act_cd,act_typ,act_limit,is_self_acc,sales_name,cst_sex,nation,occupation,id_type,id_type_ori,id_no,id_deadline,contact,address,sales_flag,bind_mob,mer_unit,cls_dt,rgdt,cls_stat"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"act_tp": act_tp,
"act_cd": act_cd,
"act_typ": act_typ,
"act_limit": act_limit,
"is_self_acc": is_self_acc,
"sales_name": sales_name,
"cst_sex": cst_sex,
"nation": nation,
"occupation": occupation,
"id_type": id_type,
"id_type_ori": id_type_ori,
"id_no": id_no,
"id_deadline": id_deadline,
"contact": contact,
"address": address,
"sales_flag": sales_flag,
"bind_mob": bind_mob,
"mer_unit": mer_unit,
"cls_dt": cls_dt,
"rgdt": rgdt,
"cls_stat": cls_stat
}, contect_data
# Bank account table
def make_stan_bact(infos, t_stan_pact):
"""
    ctif_id: customer number
    ctif_tp: subject type
    act_tp: bank account kind
    act_flag: bank account kind (on-site inspection)
    act_cd: bank account number
    cabm: opening bank of the bank account
    pay_id: linked payment account
    is_self_acc: whether it is the contracted-merchant acquiring settlement account
    bank_acc_name: bank account name
    mer_unit: managing institution
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
act_tp = make_bank_act_tp_data(ctif_tp)
act_flag = random.choice(["11", "12"])
act_cd = "62" + make_random_num(17)
cabm = make_cabm_data(infos.get("ctid")[:6])
pay_id = make_pay_id_data(infos.get("busi_type"), t_stan_pact.get("act_cd"))
is_self_acc = t_stan_pact.get("is_self_acc")
    bank_acc_name = ""  # unclear what this should hold; left empty for now
mer_unit = t_stan_pact.get("mer_unit")
# print(ctif_id, ctif_tp, act_tp, act_flag, act_cd, cabm, pay_id, is_self_acc, bank_acc_name, mer_unit)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, act_tp, act_flag, act_cd, cabm, pay_id, is_self_acc, bank_acc_name, mer_unit
# ])
contect_data = "ctif_id,ctif_tp,act_tp,act_flag,act_cd,cabm,pay_id,is_self_acc,bank_acc_name,mer_unit"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"act_tp": act_tp,
"act_flag": act_flag,
"act_cd": act_cd,
"cabm": cabm,
"pay_id": pay_id,
"is_self_acc": is_self_acc,
"bank_acc_name": bank_acc_name,
"mer_unit": mer_unit
}, contect_data
# Standard transaction table
def make_stan_stif(infos, stan_bact, ctif_tp_num, stif_time):
"""
    ctif_id: subject customer number
    ctif_tp: subject type
    client_tp: customer category
    smid: subject's contracted-merchant code
    ctnm: subject name
    citp: subject identity document type
    citp_ori: original subject identity document type value
    citp_nt: subject identity document type description
    ctid: subject identity document number
    cbat: kind of the subject's bank account
    cbac: subject's bank account number
    cabm: opening bank of the subject's bank account
    ctat: kind of the subject's transaction account
    ctac: subject's transaction account number
    cpin: name of the subject's payment institution
    cpba: bank account of the subject's payment institution
    cpbn: opening bank of the payment institution's bank account
    ctip: subject's transaction IP address
    tstm: transaction time
    cttp: funds transfer method
    tsdr: funds receipt/payment flag
    crpp: purpose of funds
    crtp: transaction currency
    crat: transaction amount
    tcif_id: counterparty ID
    tcnm: counterparty name
    tsmi: counterparty contracted-merchant code
    tcit: counterparty identity document type
    tcit_ori: original counterparty identity document type value
    tcit_nt: counterparty identity document type description
    tcid: counterparty identity document number
    tcat: kind of the counterparty's bank account
    tcba: counterparty's bank account number
    tcbn: opening bank of the counterparty's bank account
    tctt: kind of the counterparty's transaction account
    tcta: counterparty's transaction account number
    tcpn: name of the counterparty's payment institution
    tcpa: bank account of the counterparty's payment institution
    tpbn: opening bank of the counterparty payment institution's bank account
    tcip: counterparty's transaction IP address
    tmnm: name of the traded goods
    bptc: business transaction code between bank and payment institution
    pmtc: business transaction code between payment institution and merchant
    ticd: business identifier
    busi_type: business type
    trans_type: transaction type
    pos_dev_id: terminal number, IMEI or other device identifier
    trans_stat: transaction status
    bank_stat: bank status
    mer_prov: region province
    mer_area: region county
    pos_prov: transaction province
    pos_area: transaction county
    mer_unit: managing institution
    extend1: conversion flag
    iofg: domestic/overseas flag
    trans_channel: transaction channel
    ctmac: MAC address where the transaction took place
    balance: balance of the subject's payment account
    acc_flag: counterparty account type
    ctid_edt: expiry date of the subject's identity document
    tran_flag: counterparty account flag
    trans_order: transaction order number
    trans_cst_type: transaction type (customer-defined)
    crat_u: transaction amount converted to USD
    crat_c: transaction amount converted to CNY
    trans_way: transaction method
    agency_ctnm: agent name
    agency_citp: agent identity document type
    agency_ctid: agent identity document number
    agency_country: agent nationality
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = ctif_tp_num
client_tp = infos.get("client_tp")
smid = infos.get("smid")
ctnm = infos.get("ctnm")
citp = infos.get("citp")
citp_ori = infos.get("citp_ori")
citp_nt = infos.get("citp_nt")
ctid = infos.get("ctid")
cbat = stan_bact.get("act_tp")
cbac = stan_bact.get("act_cd")
cabm = stan_bact.get("cabm")
busi_type = make_busi_type_data()
ctat = make_ctat_data(busi_type)
ctac = make_random_num(17)
cpin = "默认机构名称"
cpba = make_random_num(17)
cpbn = make_cabm_data(make_province_code_data())
ctip = make_ip_data(busi_type)
# tstm = make_trade_time_data()
tstm = stif_time
cttp = make_cttp_data()
tsdr = random.choice(["01", "02"])
crpp = "资金用途"
crtp = "CNY"
crat = make_crat_data()
tcif_id = make_tcif_id_data(busi_type)
tcnm = make_name_data()
tsmi = make_random_num(20)
tcit = make_cert_type_data()
tcit_ori = "证件原值,需提供支付系统码表?"
tcit_nt = "证件类型说明"
tcid = make_random_num(20)
tcat = random.choice(["01", "02", "03"])
tcba = make_random_num(19)
tcbn = make_cabm_data(make_province_code_data())
tctt = random.choice(["01", "02"])
tcta = make_random_num(19)
tcpn = "默认支付机构名称"
tcpa = make_random_num(19)
tpbn = make_cabm_data(make_province_code_data())
tcip = make_ip_data(busi_type)
tmnm = "默认商品名称"
bptc = make_random_num(25)
pmtc = make_random_num(25)
ticd = make_ticd_data()
trans_type = make_trans_type_data(busi_type)
pos_dev_id = make_pos_dev_id_data(busi_type)
    trans_stat = "交易状态"  # transaction status; needs the payment-system code table
    bank_stat = "银行状态"  # bank status; needs the payment-system code table
province_code = make_province_code_data()
mer_prov = province_code
mer_area = make_province_city_code_data(province_code)
province_code2 = make_province_code_data()
pos_prov = province_code2
pos_area = make_province_city_code_data(province_code2)
    mer_unit = make_mer_unit_data()  # needs the payment-system code table
    extend1 = ""
    # rate_rmb = ""  # field from the old interface
    # rate_usa = ""  # field from the old interface
    iofg = "0"  # default to a domestic transaction for now
trans_channel = make_trans_channel_data()
ctmac = make_mac_info_data(busi_type)
balance = "10000"
acc_flag = make_acc_flag_data(busi_type)
ctid_edt = infos.get("ctid_edt")
tran_flag = make_tran_flag_data(busi_type)
trans_order = make_trans_order_data(busi_type)
trans_cst_type = make_trans_cst_type_data()
crat_u = make_crat_u_data(crat)
crat_c = make_crat_r_data(crat)
    trans_way = make_random_str(6)  # see the transaction-method code table (PBOC interface file not received yet; provisionally 6 characters)
agency_ctnm = make_name_data()
agency_citp = make_citp_data()
agency_ctid = make_ctid_data()
agency_country = "CHN"
# print(ctif_id, ctif_tp, client_tp, smid, ctnm, citp, citp_ori, citp_nt, ctid, cbat, cbac, cabm, ctat, ctac, cpin,
# cpba, cpbn, ctip, tstm, cttp, tsdr, crpp, crtp, crat, tcif_id, tcnm, tsmi, tcit, tcit_ori, tcit_nt, tcid,
# tcat,
# tcba, tcbn, tctt, tcta, tcpn, tcpa, tpbn, tcip, tmnm, bptc, pmtc, ticd, busi_type, trans_type, pos_dev_id,
# trans_stat, bank_stat, mer_prov, mer_area, pos_prov, pos_area, mer_unit, extend1, iofg, trans_channel, ctmac,
# balance, acc_flag, ctid_edt, tran_flag, trans_order, trans_cst_type, crat_u, crat_c, trans_way, agency_ctnm,
# agency_citp, agency_ctid, agency_country)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, client_tp, smid, ctnm, citp, citp_ori, citp_nt, ctid, cbat, cbac, cabm, ctat, ctac, cpin,
# cpba, cpbn, ctip, tstm, cttp, tsdr, crpp, crtp, crat, tcif_id, tcnm, tsmi, tcit, tcit_ori, tcit_nt, tcid, tcat,
# tcba, tcbn, tctt, tcta, tcpn, tcpa, tpbn, tcip, tmnm, bptc, pmtc, ticd, busi_type, trans_type, pos_dev_id,
# trans_stat, bank_stat, mer_prov, mer_area, pos_prov, pos_area, mer_unit, extend1, iofg, trans_channel, ctmac,
# balance, acc_flag, ctid_edt, tran_flag, trans_order, trans_cst_type, crat_u, crat_c, trans_way, agency_ctnm,
# agency_citp, agency_ctid, agency_country
# ])
contect_data = "ctif_id,ctif_tp,client_tp,smid,ctnm,citp,citp_ori,citp_nt,ctid,cbat,cbac,cabm,ctat,ctac,cpin,cpba,cpbn,ctip,tstm,cttp,tsdr,crpp,crtp,crat,tcif_id,tcnm,tsmi,tcit,tcit_ori,tcit_nt,tcid,tcat,tcba,tcbn,tctt,tcta,tcpn,tcpa,tpbn,tcip,tmnm,bptc,pmtc,ticd,busi_type,trans_type,pos_dev_id,trans_stat,bank_stat,mer_prov,mer_area,pos_prov,pos_area,mer_unit,extend1,iofg,trans_channel,ctmac,balance,acc_flag,ctid_edt,tran_flag,trans_order,trans_cst_type,crat_u,crat_c,trans_way,agency_ctnm,agency_citp,agency_ctid,agency_country"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"client_tp": client_tp,
"smid": smid,
"ctnm": ctnm,
"citp": citp,
"citp_ori": citp_ori,
"citp_nt": citp_nt,
"ctid": ctid,
"cbat": cbat,
"cbac": cbac,
"cabm": cabm,
"ctat": ctat,
"ctac": ctac,
"cpin": cpin,
"cpba": cpba,
"cpbn": cpbn,
"ctip": ctip,
"tstm": tstm,
"cttp": cttp,
"tsdr": tsdr,
"crpp": crpp,
"crtp": crtp,
"crat": crat,
"tcif_id": tcif_id,
"tcnm": tcnm,
"tsmi": tsmi,
"tcit": tcit,
"tcit_ori": tcit_ori,
"tcit_nt": tcit_nt,
"tcid": tcid,
"tcat": tcat,
"tcba": tcba,
"tcbn": tcbn,
"tctt": tctt,
"tcta": tcta,
"tcpn": tcpn,
"tcpa": tcpa,
"tpbn": tpbn,
"tcip": tcip,
"tmnm": tmnm,
"bptc": bptc,
"pmtc": pmtc,
"ticd": ticd,
"busi_type": busi_type,
"trans_type": trans_type,
"pos_dev_id": pos_dev_id,
"trans_stat": trans_stat,
"bank_stat": bank_stat,
"mer_prov": mer_prov,
"mer_area": mer_area,
"pos_prov": pos_prov,
"pos_area": pos_area,
"mer_unit": mer_unit,
"extend1": extend1,
"iofg": iofg,
"trans_channel": trans_channel,
"ctmac": ctmac,
"balance": balance,
"acc_flag": acc_flag,
"ctid_edt": ctid_edt,
"tran_flag": tran_flag,
"trans_order": trans_order,
"trans_cst_type": trans_cst_type,
"crat_u": crat_u,
"crat_c": crat_c,
"trans_way": trans_way,
"agency_ctnm": agency_ctnm,
"agency_citp": agency_citp,
"agency_ctid": agency_ctid,
"agency_country": agency_country
}, contect_data
def person(num, connect, stif_num, stif_time):
# print("个人")
t_stan_person, stan_person_connect = make_stan_person(num)
t_stan_cert, stan_cert_connect = make_stan_cert(t_stan_person)
t_stan_address, stan_address_connect = make_stan_address(t_stan_person, "1")
t_stan_tel, stan_tel_connect = make_stan_tel(t_stan_person)
t_stan_pact, stan_pact_connect = make_stan_pact(t_stan_person)
t_stan_bact, stan_bact_connect = make_stan_bact(t_stan_person, t_stan_pact)
t_stan_relation, stan_relation_connect = make_stan_relation(t_stan_person)
    # transaction-table rows are written separately: 10 rows per subject
# for num in range(10):
# t_stan_stif, stan_stif_connect = make_stan_stif(t_stan_person, t_stan_bact, '1')
# # data = eval("t_stan_stif"[2:] + "_connect")
# data = stan_stif_connect
# file_name = "t_stan_stif".split("_")[-1] + "_" + file_date_time
# print(stan_stif_connect)
# write_to_csv(file_name + ".csv", data)
# for num in range(stif_num):
# t_stan_stif, stan_stif_connect = make_stan_stif(t_stan_person, t_stan_bact, '1', stif_time)
# print("t_stan_stif", t_stan_stif)
# connect.save("t_stan_stif", stan_stif_connect, t_stan_stif)
# connect.commit()
# print("stan_person_connect", stan_person_connect)
# print("stan_cert_connect", stan_cert_connect)
# print("stan_address_connect", stan_address_connect)
# print("stan_tel_connect", stan_tel_connect)
# print("stan_pact_connect", stan_pact_connect)
# print("stan_bact_connect", stan_bact_connect)
# print("stan_relation_connect", stan_relation_connect)
name = ["t_stan_person", "t_stan_cert", "t_stan_address", "t_stan_tel", "t_stan_relation", "t_stan_pact",
"t_stan_bact"]
for file_name in name:
data = eval(file_name[2:] + "_connect")
# file_name = file_name.split("_")[-1] + "_" + file_date_time
# write_to_csv(file_name + ".csv", data)
# write_to_csv(file_name + ".txt", data)
connect.save(file_name, data, eval(file_name))
connect.commit()
def org(num, connect,stif_num, stif_time):
# print("机构")
t_stan_org, stan_org_connect = make_stan_org(num)
t_stan_cert, stan_cert_connect = make_stan_cert(t_stan_org)
t_stan_address, stan_address_connect = make_stan_address(t_stan_org, "2")
t_stan_tel, stan_tel_connect = make_stan_tel(t_stan_org)
t_stan_pact, stan_pact_connect = make_stan_pact(t_stan_org)
t_stan_bact, stan_bact_connect = make_stan_bact(t_stan_org, t_stan_pact)
t_stan_relation, stan_relation_connect = make_stan_relation(t_stan_org)
    # transaction-table rows are written separately: 10 rows per subject
# for num in range(stif_num):
# t_stan_stif, stan_stif_connect = make_stan_stif(t_stan_org, t_stan_bact, '2', stif_time)
# print("t_stan_stif", t_stan_stif)
# # data = eval("t_stan_stif"[2:] + "_connect")
# data = stan_stif_connect
# file_name = "t_stan_stif".split("_")[-1] + "_" + file_date_time
# print(stan_stif_connect)
# write_to_csv(file_name + ".csv", data)
# connect.save("t_stan_stif", stan_stif_connect, t_stan_stif)
# connect.commit()
# print("stan_org_connect", stan_org_connect)
# print("stan_cert_connect", stan_cert_connect)
# print("stan_address_connect", stan_address_connect)
# print("stan_tel_connect", stan_tel_connect)
# print("stan_pact_connect", stan_pact_connect)
# print("stan_bact_connect", stan_bact_connect)
# print("stan_relation_connect", stan_relation_connect)
name = ["t_stan_org", "t_stan_cert", "t_stan_address", "t_stan_tel", "t_stan_relation", "t_stan_pact",
"t_stan_bact"]
for file_name in name:
data = eval(file_name[2:] + "_connect")
# file_name = file_name.split("_")[-1] + "_" + file_date_time
# write_to_csv(file_name + ".csv", data)
# write_to_csv(file_name + ".txt", data)
connect.save(file_name, data, eval(file_name))
connect.commit()
# def main(num):
# person(num)
# org(num)
def main(begin, end, stif_num, stif_time):
connect = Save_MySQL()
for num in range(begin, end):
person(num, connect, stif_num, stif_time)
org(num, connect, stif_num, stif_time)
connect.quit()
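# Example invocation (a sketch; the argument values are illustrative only):
#   main(0, 100, stif_num=10, stif_time="201910170900")
# generates 100 individual and 100 organisation customers and persists them via Save_MySQL.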
# modification date
# trade_date
if __name__ == "__main__":
# pinyin = word_to_pinyin("张三")
# print(pinyin)
# res = make_ctid_data()
# print(res)
# res = read_province_data()
# print(res)
# add = make_make_province_city_process_data("150722")
# print(add)
# address = make_address("230183")
# print(address)
# trade_data = make_trade_amount_data()
# print(trade_data)
# ticd = make_ticd_data()
# print(ticd)
# data = make_make_province_city_process_data(make_province_code_data())
# make_province_city_data(data)[-1]
# data2 = make_province_code_data()
# province = get_province_data(data2[:2])
# print(province)
# read_excel()
# header = "&#@".join(t_stan_tel_header)
# write_to_csv("t_stan_tel.csv", header)
# date = time.strftime("%Y-%m-%d", time.localtime())
# ------------------------- multi-threaded
from threading import Thread
# make_trade_time_data()
start_time = time.time()
# threads = []
# for count in range(10):
# t = Thread(target=main, args=(count*10, (count+1)*10))
# t.start()
# threads.append(t)
# for t in threads:
# t.join()
# ------------------------- single-threaded
# file_date_time = "2019-10-17"
# stif_time = "201910170900"
# main(1000, 1500)
end_time = time.time()
print(end_time-start_time) # 13
# for i in range(100):
# # tt = make_register_date()
# ss = random.choice([
# "01", # 互联网支付
# "02", # 银行卡收单
# "03", # 预付卡发行与受理
# "04", # 移动电话支付
# "05", # 固定电话支付
# "06", # 数字电视支付
# "07" # 货币汇兑
# ])
# print(ss)
# tt = make_tcif_id_data(ss)
#
# print(tt)
# ctid_edt = "20170506"
# ctid_edt = "99991231"
# tt = make_iss_dt_data(ctid_edt)
# print(tt)
#
# dd = make_country_data()
# print(dd)
# tt = make_province_city_process_data("412825")
# print(tt)
|
the-stack_0_1207 | import platform
import sys
from helper import executable_exists
PackageManager = {
    "macos": "brew install",
    "linux": {
        "redhat": "sudo yum install",
        "arch": "sudo pacman -S",
"gentoo": "sudo emerge --ask --verbose",
"suse": "sudo zypper install",
"debian": "sudo apt-get install"
}
}
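# LinuxDistroRecognition maps the package-manager executable detected on the system to the
# distro-family key used in PackageManager above and in each requirement's package_guess.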
LinuxDistroRecognition = {
    "yum": "redhat",
    "pacman": "arch",
"emerge": "gentoo",
"zypper": "suse",
"apt-get": "debian"
}
PortAudio = {
"name": "Voice Recorder",
"pip": [
'SpeechRecognition',
"pyaudio --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib'"],
"package_guess": {
"macos": 'portaudio',
"linux": {
'redhat': 'python3-pyaudio python3-devel',
'arch': 'python-pyaudio',
'gentoo': 'pyaudio',
'suse': 'python3-PyAudio python3-devel',
'debian': 'python3-pyaudio python3-dev'
}},
"description": "Required for voice control",
"instruction": """\
Please install the python binding 'pyaudio' manually.
For more details go to the below link:
https://people.csail.mit.edu/hubert/pyaudio/"""}
RequestsSecurity = {
"name": "Requests security",
"pip": ['requests[security]'],
"description": "Better/saver https",
"instruction": "https://stackoverflow.com/questions/31811949/pip-install-requestssecurity-vs-pip-install-requests-difference"
}
NativeNotification = {
"name": "Notification",
"executable": ['notify-send'],
"description": "Native linux notifications",
"instruction": "Please install 'notify-send' manually using your local package manager!",
"package_guess": {
"linux": {
'redhat': 'libnotify',
'arch': 'libnotify',
'gentoo': 'eselect-notify-send',
'suse': 'libnotify-tools',
'debian': 'libnotify-bin'
}
}
}
FFMPEG = {
"name": "ffmpeg",
"executable": ['ffmpeg'],
"description": "Download music as .mp3 instead .webm",
"instruction": "Please install 'ffmpeg' manually using your local package manager!",
"package_guess": {
"macos": "ffmpeg",
"linux": {
'redhat': 'ffmpeg',
'arch': 'ffmpeg',
'gentoo': 'ffmpeg',
'suse': 'ffmpeg',
'debian': 'ffmpeg'
}
}
}
ESPEAK = {
"name": "espeak",
"executable": ['espeak'],
"description": "Text To Speech for Jarvis to talk out loud (alternatives: sapi5 or nsss will work, too)",
"instruction": "Please install 'espeak' manually using your local package manager!",
"package_guess": {
"linux": {
'redhat': 'espeak',
'arch': 'espeak',
'gentoo': 'espeak',
'suse': 'espeak',
'debian': 'espeak'
}
}
}
OPTIONAL_REQUIREMENTS = [PortAudio, RequestsSecurity, FFMPEG, ESPEAK]
if sys.platform != "darwin":
OPTIONAL_REQUIREMENTS += [NativeNotification]
def get_guess(data):
if sys.platform == "darwin":
if 'macos' in data:
return data['macos']
else:
return False
elif platform.system().lower() == "linux":
if 'linux' in data:
data = data['linux']
else:
return False
for executable, distro in LinuxDistroRecognition.items():
if executable_exists(executable):
if distro in data:
return data[distro]
return False
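# Usage sketch (assumed caller, not part of this module): resolve a suggested package name
# for every optional requirement that provides a package_guess mapping.
#   for requirement in OPTIONAL_REQUIREMENTS:
#       guess = get_guess(requirement.get('package_guess', {}))
#       if guess:
#           print("{}: try installing '{}'".format(requirement['name'], guess))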
|
the-stack_0_1208 | import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import pandas as pd
EXAMPLE_ROWS = 5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = pd.DataFrame(
{'icol': [1, 2, 3, 4, 5], 'xcol2': ['A', 'A', 'B', 'B', 'C'], \
'xcol3': ['K', 'L', 'M', 'N', 'O'], 'xcol4': ['a1', 'a1', 'b1', 'b1', 'b1']})
default_msg = api.Message(attributes = {'format': 'pandas', 'name': 'test','process_list':[]}, body=df)
callback(default_msg)
class config:
## Meta data
config_params = dict()
version = '0.0.17'
tags = {'pandas': '','sdi_utils':''}
operator_description = "Sample from Dataframe"
operator_description_long = "Sampling over a DataFrame but keeps datasets with the same value of the \
defined column as set and not splitting them, e.g. sampling with the invariant_column='date' samples \
but ensures that all datasets of a certain date are taken or none. This leads to the fact that the \
sample_size is only a guiding target. Depending on the size of the datasets with the same value of \
the *invariant_column* compared to the *sample_size* this could deviate a lot. "
add_readme = dict()
add_readme["References"] = "[pandas doc: sample](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html)"
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
sample_size = 0.1
config_params['sample_size'] = {'title': 'Sample size', 'description': 'Sample size', 'type': 'number'}
random_state = 1
config_params['random_state'] = {'title': 'Random state', 'description': 'Random state', 'type': 'integer'}
invariant_column = ''
config_params['invariant_column'] = {'title': 'Invariant column', 'description': 'Column where all the same value records should be kept as a whole in a sample', 'type': 'string'}
def process(msg) :
att_dict = msg.attributes
att_dict['operator'] = 'sample'
if api.config.debug_mode == True:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='DEBUG')
else:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='INFO')
logger.info("Process started")
time_monitor = tp.progress()
# start custom process definition
# test if body refers to a DataFrame type
prev_att = msg.attributes
df = msg.body
if not isinstance(df, pd.DataFrame):
logger.error('Message body does not contain a pandas DataFrame')
raise TypeError('Message body does not contain a pandas DataFrame')
###### start calculation
sample_size = api.config.sample_size
if sample_size < 1 :
sample_size = int(sample_size * df.shape[0])
if sample_size < 1 :
sample_size = 1
logger.warning("Fraction of sample size too small. Set sample size to 1.")
elif sample_size > df.shape[0]:
logger.warning("Sample size larger than number of rows")
    logger.debug("Sample_size: {}/{} ({})".format(sample_size,df.shape[0],sample_size/df.shape[0]))
random_state = api.config.random_state
invariant_column = tfp.read_value(api.config.invariant_column)
if invariant_column and sample_size < df.shape[0]:
# get the average number of records for each value of invariant
sc_df = df.groupby(invariant_column)[invariant_column].count()
sample_size_invariant = int(sample_size / sc_df.mean())
sample_size_invariant = 1 if sample_size_invariant == 0 else sample_size_invariant # ensure minimum
sc_df = sc_df.sample(n=sample_size_invariant, random_state=random_state).to_frame()
sc_df.rename(columns={invariant_column: 'sum'}, inplace=True)
# sample the df by merge 2 df
df = pd.merge(df, sc_df, how='inner', right_index=True, left_on=invariant_column)
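        # the inner join keeps every row whose invariant value was sampled, so each group
        # is either taken completely or left out entirely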
df.drop(columns=['sum'], inplace=True)
else:
df = df.sample(n=sample_size, random_state=random_state)
# end custom process definition
if df.empty :
raise ValueError('DataFrame is empty')
logger.debug('Columns: {}'.format(str(df.columns)))
logger.debug('Shape (#rows - #columns): {} - {}'.format(df.shape[0],df.shape[1]))
    logger.debug('Memory: {} MB'.format(df.memory_usage(deep=True).sum() / 1024 ** 2))
example_rows = EXAMPLE_ROWS if df.shape[0] > EXAMPLE_ROWS else df.shape[0]
for i in range(0, example_rows):
logger.debug('Row {}: {}'.format(i,str([str(i)[:10].ljust(10) for i in df.iloc[i, :].tolist()])))
progress_str = '<BATCH ENDED><1>'
if 'storage.fileIndex' in att_dict and 'storage.fileCount' in att_dict and 'storage.endOfSequence' in att_dict:
if att_dict['storage.fileIndex'] + 1 == att_dict['storage.fileCount']:
progress_str = '<BATCH ENDED><{}>'.format(att_dict['storage.fileCount'])
else:
progress_str = '<BATCH IN-PROCESS><{}/{}>'.format(att_dict['storage.fileIndex'] + 1,
att_dict['storage.fileCount'])
att_dict['process_list'].append(att_dict['operator'])
logger.debug('Process ended: {} - {} '.format(progress_str, time_monitor.elapsed_time()))
logger.debug('Past process steps: {}'.format(att_dict['process_list']))
return log_stream.getvalue(), api.Message(attributes=att_dict,body=df)
inports = [{'name': 'data', 'type': 'message.DataFrame',"description":"Input data"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
{'name': 'data', 'type': 'message.DataFrame',"description":"Output data"}]
def call_on_input(msg) :
log, msg = process(msg)
api.send(outports[0]['name'], log)
api.send(outports[1]['name'], msg)
#api.set_port_callback([inports[0]['name']], call_on_input)
def main() :
print('Test: Default')
api.set_port_callback([inports[0]['name']], call_on_input)
if __name__ == '__main__':
main()
#gs.gensolution(os.path.realpath(__file__), config, inports, outports)
|
the-stack_0_1209 | import os
import sys
import time
from circus.process import Process, RUNNING
from circus.tests.support import TestCircus, skipIf, EasyTestSuite
import circus.py3compat
from circus.py3compat import StringIO, PY3
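# RLIMIT is a small helper script executed in a child process by test_rlimits: it writes the
# NOFILE/NPROC limits the child actually received to the file passed as argv[1].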
RLIMIT = """\
import resource, sys
with open(sys.argv[1], 'w') as f:
for limit in ('NOFILE', 'NPROC'):
res = getattr(resource, 'RLIMIT_%s' % limit)
f.write('%s=%s\\n' % (limit, resource.getrlimit(res)))
"""
VERBOSE = """\
import sys
for i in range(1000):
for stream in (sys.stdout, sys.stderr):
stream.write(str(i))
stream.flush()
"""
def _nose_no_s():
if PY3:
return isinstance(sys.stdout, StringIO)
else:
return not hasattr(sys.stdout, 'fileno')
class TestProcess(TestCircus):
def test_base(self):
cmd = sys.executable
args = "-c 'import time; time.sleep(2)'"
process = Process('test', cmd, args=args, shell=False)
try:
info = process.info()
self.assertEqual(process.pid, info['pid'])
age = process.age()
self.assertTrue(age > 0.)
self.assertFalse(process.is_child(0))
finally:
process.stop()
def test_rlimits(self):
script_file = self.get_tmpfile(RLIMIT)
output_file = self.get_tmpfile()
cmd = sys.executable
args = [script_file, output_file]
rlimits = {'nofile': 20,
'nproc': 20}
process = Process('test', cmd, args=args, rlimits=rlimits)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
finally:
process.stop()
with open(output_file, 'r') as f:
output = {}
for line in f.readlines():
limit, value = line.rstrip().split('=', 1)
output[limit] = value
        def str2ints(val):
            return [circus.py3compat.long(key) for key in val[1:-1].split(',')]
        wanted = [circus.py3compat.long(20), circus.py3compat.long(20)]
        self.assertEqual(str2ints(output['NOFILE']), wanted)
        self.assertEqual(str2ints(output['NPROC']), wanted)
def test_comparison(self):
cmd = sys.executable
args = ['import time; time.sleep(2)', ]
p1 = Process('1', cmd, args=args)
p2 = Process('2', cmd, args=args)
self.assertTrue(p1 < p2)
self.assertFalse(p1 == p2)
self.assertTrue(p1 == p1)
p1.stop()
p2.stop()
def test_process_parameters(self):
# all the options passed to the process should be available by the
# command / process
p1 = Process('1', 'make-me-a-coffee',
'$(circus.wid) --type $(circus.env.type)',
shell=False, spawn=False, env={'type': 'macchiato'})
self.assertEqual(['make-me-a-coffee', '1', '--type', 'macchiato'],
p1.format_args())
p2 = Process('1', 'yeah $(CIRCUS.WID)', spawn=False)
self.assertEqual(['yeah', '1'], p2.format_args())
os.environ['coffee_type'] = 'american'
p3 = Process('1', 'yeah $(circus.env.type)', shell=False, spawn=False,
env={'type': 'macchiato'})
self.assertEqual(['yeah', 'macchiato'], p3.format_args())
os.environ.pop('coffee_type')
@skipIf(_nose_no_s(), 'Nose runs without -s')
def test_streams(self):
script_file = self.get_tmpfile(VERBOSE)
cmd = sys.executable
args = [script_file]
# 1. streams sent to /dev/null
process = Process('test', cmd, args=args, close_child_stdout=True,
close_child_stderr=True)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
# the pipes should be empty
self.assertEqual(process.stdout.read(), b'')
self.assertEqual(process.stderr.read(), b'')
finally:
process.stop()
# 2. streams sent to /dev/null, no PIPEs
process = Process('test', cmd, args=args, close_child_stdout=True,
close_child_stderr=True, pipe_stdout=False,
pipe_stderr=False)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
            # the pipes should not exist
self.assertTrue(process.stdout is None)
self.assertTrue(process.stderr is None)
finally:
process.stop()
# 3. streams & pipes open
process = Process('test', cmd, args=args)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
            # the pipes should contain the full child output
self.assertEqual(len(process.stdout.read()), 2890)
self.assertEqual(len(process.stderr.read()), 2890)
finally:
process.stop()
test_suite = EasyTestSuite(__name__)
|
the-stack_0_1210 | """
# Mobius Software LTD
# Copyright 2015-2018, Mobius Software LTD
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
"""
from iot.classes.IoTClient import *
from iot.mqtt.MQParser import MQParser
from iot.mqtt.mqtt_classes.MQConnackCode import *
from iot.mqtt.mqtt_classes.MQTopic import *
from iot.mqtt.mqtt_classes.Will import *
from iot.mqtt.mqtt_messages.MQConnect import *
from iot.mqtt.mqtt_messages.MQDisconnect import *
from iot.mqtt.mqtt_messages.MQPuback import *
from iot.mqtt.mqtt_messages.MQPubcomp import *
from iot.mqtt.mqtt_messages.MQPubrec import *
from iot.mqtt.mqtt_messages.MQPubrel import *
from iot.mqtt.mqtt_messages.MQSubscribe import *
from iot.mqtt.mqtt_messages.MQUnsubscribe import *
from iot.network.TCPClient import *
from iot.timers.TimersMap import *
class MQTTclient(IoTClient):
def __init__(self, account, client):
self.account = account
self.clientGUI = client
self.parser = MQParser(None)
self.resendperiod = 3000
self.connectionState = None
self.data = None
self.timers = TimersMap(self)
self.publishPackets = {}
self.can_connect = True
def send(self, message):
if self.connectionState == ConnectionState.CONNECTION_ESTABLISHED:
self.parser.setMessage(message)
message = self.parser.encode()
self.clientFactory.send(message)
else:
return False
def dataReceived(self, data):
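        # The incoming buffer may hold several MQTT packets back to back: walk it with the
        # parser, decode each packet and dispatch it to the handler for its message type.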
messages = []
index = 1
while len(data) - index > 0:
length = self.parser.next(data, index)
if length < 0:
break
part = data[index - 1:index + length]
message = self.parser.decode(part)
messages.append(message)
index += length
for message in messages:
process_messageType_method(self, message.getType(), message)
def setState(self, ConnectionState):
self.connectionState = ConnectionState
def isConnected(self):
return self.connectionState == ConnectionState.CONNECTION_ESTABLISHED
def closeChannel(self):
if self.client is not None:
self.client.stop()
def goConnect(self):
self.setState(ConnectionState.CONNECTING)
if self.account.willTopic is not None:
topic = MQTopic(self.account.willTopic, self.account.qos)
will = Will(topic, self.account.will, self.account.isRetain)
else:
will = None
connect = MQConnect(self.account.username, self.account.password, self.account.clientID, self.account.cleanSession, self.account.keepAlive, will)
if self.timers is not None:
self.timers.stopAllTimers()
self.timers.goConnectTimer(connect)
self.parser.setMessage(connect)
self.clientFactory = ClientFactory(self.parser.encode(), self)
if self.account.isSecure:
ctx = CtxFactory(self.account.certificate, self.account.certPasw)
self.connector = reactor.connectSSL(self.account.serverHost, self.account.port, self.clientFactory, ctx)
else:
self.connector = reactor.connectTCP(self.account.serverHost, self.account.port, self.clientFactory)
def publish(self, name, qos, content, retain, dup):
topic = MQTopic(name, qos)
publish = MQPublish(0, topic, content, retain, dup)
if (qos == 0):
self.send(publish)
else:
if (qos in [1, 2]):
self.timers.goMessageTimer(publish)
def unsubscribeFrom(self, topicName):
listTopics = []
listTopics.append(topicName)
unsubscribe = MQUnsubscribe(0, listTopics)
self.timers.goMessageTimer(unsubscribe)
def subscribeTo(self, name, qos):
topic = MQTopic(name, qos)
listMQTopics = [topic]
subscribe = MQSubscribe(0, listMQTopics)
self.timers.goMessageTimer(subscribe)
def pingreq(self):
self.send(MQPingreq())
def disconnectWith(self, duration):
self.send(MQDisconnect())
self.timers.stopAllTimers()
self.clientFactory.client_close_connection()
def timeoutMethod(self):
if self.can_connect:
self.can_connect = False
self.timers.stopAllTimers()
reactor.callFromThread(self.clientGUI.timeout)
def connectTimeoutMethod(self):
if self.can_connect:
self.can_connect = False
self.timers.stopAllTimers()
reactor.callFromThread(self.clientGUI.show_error_message, "Connect Error", "Connection timeout")
reactor.callFromThread(self.clientGUI.timeout)
def ConnectionLost(self):
if self.can_connect:
self.can_connect = False
if self.timers is not None:
self.timers.stopAllTimers()
self.connector.disconnect()
reactor.callFromThread(self.clientGUI.errorReceived)
# _____________________________________________________________________________________
def processConnack(self, message):
self.timers.stopConnectTimer()
if message.returnCode == 0: # MQ_ACCEPTED
self.setState(ConnectionState.CONNECTION_ESTABLISHED)
self.timers.goPingTimer(MQPingreq(), self.account.keepAlive)
self.clientGUI.connackReceived(message.returnCode)
else:
messagebox.showinfo("Connect error", MQConnackCode(message.returnCode).readable_name())
self.clientGUI.errorReceived()
def processSuback(self, message):
subscribe = self.timers.removeTimer(message.packetID)
if subscribe is not None:
size = len(subscribe.listMQTopics)
topic = subscribe.listMQTopics[size - 1]
qos = topic.getQoS()
self.clientGUI.subackReceived(topic, qos, 0)
def processUnsuback(self, message):
unsubscribe = self.timers.removeTimer(message.packetID)
if unsubscribe is not None:
self.clientGUI.unsubackReceived(unsubscribe.listTopics)
def processPublish(self, message):
# getValue() already yields the integer QoS level of the incoming PUBLISH
publisherQoS = message.topic.qos.getValue()
if publisherQoS == 0: # AT_MOST_ONCE
self.clientGUI.publishReceived(message.topic, publisherQoS, message.content, message.dup, message.retain)
if publisherQoS == 1: # AT_LEAST_ONCE
puback = MQPuback(message.packetID)
self.send(puback)
self.clientGUI.publishReceived(message.topic, publisherQoS, message.content, message.dup, message.retain)
if publisherQoS == 2: # EXACTLY_ONCE
pubrec = MQPubrec(message.packetID)
self.send(pubrec)
self.publishPackets[message.packetID] = message
def processPuback(self, message):
publish = self.timers.removeTimer(message.packetID)
if publish is not None:
self.clientGUI.pubackReceived(publish.topic, publish.topic.getQoS(), publish.content, publish.dup, publish.retain, 0)
def processPubrec(self, message):
publish = self.timers.removeTimer(message.packetID)
if publish is not None:
self.timers.goMessageTimer(MQPubrel(publish.packetID))
self.publishPackets[publish.packetID] = publish
def processPubrel(self, message):
publish = self.publishPackets.get(message.packetID)
if publish is not None:
self.clientGUI.publishReceived(publish.topic, publish.topic.getQoS().getValue(), publish.content, publish.dup, publish.retain)
self.send(MQPubcomp(message.packetID))
def processPubcomp(self, message):
pubrel = self.timers.removeTimer(message.packetID)
if pubrel is not None:
publish = self.publishPackets.get(message.packetID)
self.clientGUI.pubackReceived(publish.topic, publish.topic.getQoS(), publish.content, publish.dup, publish.retain, 0)
def processPingresp(self, message):
self.clientGUI.pingrespReceived(False)
def processSubscribe(self, message):
self.clientGUI.errorReceived('received invalid message subscribe')
def processConnect(self, message):
self.clientGUI.errorReceived('received invalid message connect')
def processPingreq(self, message):
self.clientGUI.errorReceived('received invalid message pingreq')
def processDisconnect(self, message):
self.timers.stopAllTimers()
self.clientGUI.disconnectReceived()
def processUnsubscribe(self, message):
raise ValueError('received invalid message unsubscribe')
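# MQTT control-packet type codes (1=CONNECT ... 14=DISCONNECT) mapped to their
# handlers; dataReceived dispatches every decoded message through this table.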
switcherProcess = {
1: processConnect,
2: processConnack,
3: processPublish,
4: processPuback,
5: processPubrec,
6: processPubrel,
7: processPubcomp,
8: processSubscribe,
9: processSuback,
10: processUnsubscribe,
11: processUnsuback,
12: processPingreq,
13: processPingresp,
14: processDisconnect,
}
def process_messageType_method(self, argument, message):
return switcherProcess[argument].__call__(self, message)
|
the-stack_0_1212 | # Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def add_subparser(subparsers):
_parser = subparsers.add_parser("countdown", description="QuantRocket cron service CLI", help="Manage crontabs")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
examples = """
Upload a new crontab, or return the current crontab.
Examples:
Upload a new crontab to a service called countdown-australia (replaces
current crontab):
quantrocket countdown crontab mycron.crontab -s countdown-australia
Show current crontab for a service called countdown-australia:
quantrocket countdown crontab -s countdown-australia
"""
parser = _subparsers.add_parser(
"crontab",
help="upload a new crontab, or return the current crontab",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"filename",
nargs="?",
metavar="FILENAME",
help="the crontab file to upload (if omitted, return the current crontab)")
parser.add_argument(
"-s", "--service",
metavar="SERVICE_NAME",
help="the name of the countdown service (default 'countdown')")
parser.set_defaults(func="quantrocket.countdown._load_or_show_crontab")
examples = """
Set or show the countdown service timezone.
Examples:
Set the timezone of the countdown service to America/New_York:
quantrocket countdown timezone America/New_York
Show the current timezone of the countdown service:
quantrocket countdown timezone
Show the timezone for a service called countdown-australia:
quantrocket countdown timezone -s countdown-australia
"""
parser = _subparsers.add_parser(
"timezone",
help="set or show the countdown service timezone",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"tz",
nargs="?",
metavar="TZ",
help="the timezone to set (pass a partial timezone string such as 'newyork' "
"or 'europe' to see close matches, or pass '?' to see all choices)")
parser.add_argument(
"-s", "--service",
metavar="SERVICE_NAME",
help="the name of the countdown service, (default 'countdown')")
parser.set_defaults(func="quantrocket.countdown._cli_get_or_set_timezone")
|
the-stack_0_1213 | __author__ = "Junhee Yoon"
__version__ = "1.0.0"
__maintainer__ = "Junhee Yoon"
__email__ = "[email protected]"
"""
Description: This is a batch job that transforms RSEM count data into DESeq2 input files.
"""
import pandas as pd
import numpy as np
import os
import glob
import argparse
from libraries.botoClass import botoHandler
from libraries.externalHandler import handlers as dataHandler
## argparse setting
parser = argparse.ArgumentParser(prog='step1_get_DESeq2_input.py')
parser.add_argument('-c','--ctype', type=str, dest='celltype', required=True,\
choices=['CD4','CD8','CD14'], help='Cell type for extraction (CD4, CD8 or CD14)')
parser.add_argument('-v','--condcolumn', type=str, dest='condcolumn', required=True,\
help='Column name which is using for condition value')
parser.add_argument('-x','--cond1', type=str, dest='cond1', required=True,\
help='condition1 for metadata')
parser.add_argument('-y','--cond2', type=str, dest='cond2', required=True,\
help='condition2 for metadata')
args = parser.parse_args()
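# Example invocation (the condition column and values below are hypothetical):
#   mainbucket=openkbc-ms-maindata-bucket metafile=EPIC_HCvB_metadata_baseline_updated-share.csv efspoint=/output/ \
#   python step1_get_DESeq2_input.py -c CD8 -v DiseaseStatus -x HC -y MS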
# Main function
if __name__ == "__main__":
### Get ENV variables
mainDataBucket = os.environ['mainbucket'] # openkbc-ms-maindata-bucket
metaName = os.environ['metafile'] # EPIC_HCvB_metadata_baseline_updated-share.csv
outputPath = os.environ['efspoint'] # /output/
### Error handling here
### Data prepration
s3 = botoHandler(mainDataBucket) # Call boto3
COUNT_PATH = "/data/" # Main data path
META_PATH = s3.getFile([metaName]) ## This is FIXED parameter
s3.getDirFiles('rsem_counts/', destpath=COUNT_PATH) # Download all count files
filelist = glob.glob(COUNT_PATH+"*-"+args.celltype+".genes.results") # File path
filelist = [os.path.basename(cursor) for cursor in filelist] # Extracting base file name
sampleName = dataHandler.get_samplename(filelist)
result_arr = [] # result array
# sampleName and filelist have same order, and appending to result array
for filename in filelist:
sampleValues = dataHandler.get_column(COUNT_PATH+filename, 'expected_count')
result_arr.append(sampleValues)
result_df = pd.concat(result_arr, axis=1)
result_df.columns = sampleName # Change column name by using sample names
metadata = pd.read_csv(META_PATH) # read meta data
# get meta result
meta_result_df = dataHandler.get_condtionMatrix_by_category(metadata, 'HCVB_ID', args.condcolumn, [args.cond1, args.cond2])
overlapped_samples = list(set(meta_result_df.index.tolist()).intersection(set(result_df.columns.tolist()))) # Overlapped samples
# Extract overlapped samples
meta_result_df = meta_result_df.loc[overlapped_samples]
result_df = result_df[overlapped_samples]
result_df.astype(int).to_csv(outputPath+args.celltype+"_output.csv") # Output
meta_result_df.to_csv(outputPath+args.celltype+"_meta_output.csv") |
the-stack_0_1214 | from rest_framework.views import *
from apps.core.exceptions import CustomAPIException
from apps.utils.response import simple_response
from apps.core import response_code
##
# Override the default exception handler so responses follow the project's
# existing response format and are easier to work with.
###
def custom_exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, Http404):
exc = exceptions.NotFound()
elif isinstance(exc, PermissionDenied):
exc = exceptions.PermissionDenied()
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait
if isinstance(exc.detail, (list, dict)):
data = exc.detail
else:
data = {'detail': exc.detail}
set_rollback()
if exc.status_code == 400:
code, message = response_code.ERR_PARAM_ERROR
elif exc.status_code == 401:
code, message = response_code.ERR_AUTH_ERROR
elif exc.status_code == 403:
code, message = response_code.ERR_PERMISSION_ERROR
elif exc.status_code == 404:
code, message = response_code.ERR_NOT_FOUND_ERROR
elif exc.status_code == 500:
code, message = response_code.ERR_SERVER_ERROR
elif exc.status_code == 405:
code, message = response_code.ERR_METHOD_NOT_ALLOWED
else:
code, message = response_code.ERR_UNKNOWN_ERROR
return simple_response(code=code, data=data, message=message, headers=headers)
elif isinstance(exc, CustomAPIException): # catch our custom exceptions
set_rollback()
return simple_response(code=exc.get_code(), message=exc.get_message(), data=exc.get_data())
return None
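# To route DRF errors through this handler, register it in settings
# (the dotted module path below is assumed, not confirmed by this file):
# REST_FRAMEWORK = {
#     'EXCEPTION_HANDLER': 'apps.core.handlers.custom_exception_handler',
# }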
|
the-stack_0_1217 | from collections import namedtuple
import itertools
import networkx as nx
import numpy as np
from pgmpy.factors.discrete import factor_product
from pgmpy.inference import Inference
from pgmpy.models import BayesianModel, MarkovChain, MarkovModel
from pgmpy.utils.mathext import sample_discrete
from pgmpy.extern.six.moves import map, range
from pgmpy.sampling import _return_samples
State = namedtuple('State', ['var', 'state'])
class BayesianModelSampling(Inference):
"""
Class for sampling methods specific to Bayesian Models
Parameters
----------
model: instance of BayesianModel
model on which inference queries will be computed
Public Methods
--------------
forward_sample(size)
"""
def __init__(self, model):
if not isinstance(model, BayesianModel):
raise TypeError("Model expected type: BayesianModel, got type: ", type(model))
self.topological_order = list(nx.topological_sort(model))
super(BayesianModelSampling, self).__init__(model)
def forward_sample(self, size=1, return_type='dataframe'):
"""
Generates sample(s) from joint distribution of the bayesian network.
Parameters
----------
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> inference.forward_sample(size=2, return_type='recarray')
rec.array([(0, 0, 1), (1, 0, 2)],
dtype=[('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8')])
"""
types = [(var_name, 'int') for var_name in self.topological_order]
sampled = np.zeros(size, dtype=types).view(np.recarray)
for node in self.topological_order:
cpd = self.model.get_cpds(node)
states = range(self.cardinality[node])
evidence = cpd.variables[:0:-1]
if evidence:
cached_values = self.pre_compute_reduce(variable=node)
evidence = np.vstack([sampled[i] for i in evidence])
weights = list(map(lambda t: cached_values[tuple(t)], evidence.T))
else:
weights = cpd.values
sampled[node] = sample_discrete(states, weights, size)
return _return_samples(return_type, sampled)
def pre_compute_reduce(self, variable):
variable_cpd = self.model.get_cpds(variable)
variable_evid = variable_cpd.variables[:0:-1]
cached_values = {}
for state_combination in itertools.product(*[range(self.cardinality[var]) for var in variable_evid]):
states = list(zip(variable_evid, state_combination))
cached_values[state_combination] = variable_cpd.reduce(states, inplace=False).values
return cached_values
def rejection_sample(self, evidence=None, size=1, return_type="dataframe"):
"""
Generates sample(s) from joint distribution of the bayesian network,
given the evidence.
Parameters
----------
evidence: list of `pgmpy.factor.State` namedtuples
None if no evidence
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.factors.discrete import State
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> evidence = [State(var='diff', state=0)]
>>> inference.rejection_sample(evidence=evidence, size=2, return_type='dataframe')
intel diff grade
0 0 0 1
1 0 0 1
"""
if evidence is None:
return self.forward_sample(size)
types = [(var_name, 'int') for var_name in self.topological_order]
sampled = np.zeros(0, dtype=types).view(np.recarray)
prob = 1
i = 0
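# Keep drawing forward samples until `size` of them agree with the evidence.
# The batch size is inflated by ~1/(acceptance probability) with a 1.5 safety
# factor so enough samples survive the evidence filter each round.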
while i < size:
_size = int(((size - i) / prob) * 1.5)
_sampled = self.forward_sample(_size, 'recarray')
for evid in evidence:
_sampled = _sampled[_sampled[evid[0]] == evid[1]]
prob = max(len(_sampled) / _size, 0.01)
sampled = np.append(sampled, _sampled)[:size]
i += len(_sampled)
return _return_samples(return_type, sampled)
def likelihood_weighted_sample(self, evidence=None, size=1, return_type="dataframe"):
"""
Generates weighted sample(s) from joint distribution of the bayesian
network, that comply with the given evidence.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Algorithm 12.2 pp 493.
Parameters
----------
evidence: list of `pgmpy.factor.State` namedtuples
None if no evidence
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples with corresponding weights
Examples
--------
>>> from pgmpy.factors.discrete import State
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> evidence = [State('diff', 0)]
>>> inference.likelihood_weighted_sample(evidence=evidence, size=2, return_type='recarray')
rec.array([(0, 0, 1, 0.6), (0, 0, 2, 0.6)],
dtype=[('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8'), ('_weight', '<f8')])
"""
types = [(var_name, 'int') for var_name in self.topological_order]
types.append(('_weight', 'float'))
sampled = np.zeros(size, dtype=types).view(np.recarray)
sampled['_weight'] = np.ones(size)
evidence_dict = {var: st for var, st in evidence} if evidence else {}
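# Evidence variables are clamped to their observed states; each sample's
# weight is multiplied by the likelihood of that evidence under its CPD.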
for node in self.topological_order:
cpd = self.model.get_cpds(node)
states = range(self.cardinality[node])
evidence = cpd.get_evidence()
if evidence:
evidence_values = np.vstack([sampled[i] for i in evidence])
cached_values = self.pre_compute_reduce(node)
weights = list(map(lambda t: cached_values[tuple(t)], evidence_values.T))
if node in evidence_dict:
sampled[node] = evidence_dict[node]
for i in range(size):
sampled['_weight'][i] *= weights[i][evidence_dict[node]]
else:
sampled[node] = sample_discrete(states, weights)
else:
if node in evidence_dict:
sampled[node] = evidence_dict[node]
for i in range(size):
sampled['_weight'][i] *= cpd.values[evidence_dict[node]]
else:
sampled[node] = sample_discrete(states, cpd.values, size)
return _return_samples(return_type, sampled)
class GibbsSampling(MarkovChain):
"""
Class for performing Gibbs sampling.
Parameters:
-----------
model: BayesianModel or MarkovModel
Model from which variables are inherited and transition probabilities are computed.
Public Methods:
---------------
set_start_state(state)
sample(start_state, size)
generate_sample(start_state, size)
Examples:
---------
Initialization from a BayesianModel object:
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> intel_cpd = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> sat_cpd = TabularCPD('sat', 2, [[0.95, 0.2], [0.05, 0.8]], evidence=['intel'], evidence_card=[2])
>>> student = BayesianModel()
>>> student.add_nodes_from(['intel', 'sat'])
>>> student.add_edge('intel', 'sat')
>>> student.add_cpds(intel_cpd, sat_cpd)
>>> from pgmpy.inference import GibbsSampling
>>> gibbs_chain = GibbsSampling(student)
Sample from it:
>>> gibbs_chain.sample(size=3)
intel sat
0 0 0
1 0 0
2 1 1
"""
def __init__(self, model=None):
super(GibbsSampling, self).__init__()
if isinstance(model, BayesianModel):
self._get_kernel_from_bayesian_model(model)
elif isinstance(model, MarkovModel):
self._get_kernel_from_markov_model(model)
def _get_kernel_from_bayesian_model(self, model):
"""
Computes the Gibbs transition models from a Bayesian Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: BayesianModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
self.cardinalities = {var: model.get_cpds(var).variable_card for var in self.variables}
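# The Gibbs kernel for each variable is P(var | all other variables): multiply
# every CPD that mentions the variable, then reduce over each joint assignment
# of the remaining variables and renormalise.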
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
cpds = [cpd for cpd in model.cpds if var in cpd.scope()]
prod_cpd = factor_product(*cpds)
kernel = {}
scope = set(prod_cpd.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(v, s) for v, s in zip(other_vars, tup) if v in scope]
prod_cpd_reduced = prod_cpd.reduce(states, inplace=False)
kernel[tup] = prod_cpd_reduced.values / sum(prod_cpd_reduced.values)
self.transition_models[var] = kernel
def _get_kernel_from_markov_model(self, model):
"""
Computes the Gibbs transition models from a Markov Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: MarkovModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
factors_dict = {var: [] for var in self.variables}
for factor in model.get_factors():
for var in factor.scope():
factors_dict[var].append(factor)
# Take factor product
factors_dict = {var: factor_product(*factors) if len(factors) > 1 else factors[0]
for var, factors in factors_dict.items()}
self.cardinalities = {var: factors_dict[var].get_cardinality([var])[var] for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
kernel = {}
factor = factors_dict[var]
scope = set(factor.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(var, s) for var, s in zip(other_vars, tup) if var in scope]
reduced_factor = factor.reduce(states, inplace=False)
kernel[tup] = reduced_factor.values / sum(reduced_factor.values)
self.transition_models[var] = kernel
def sample(self, start_state=None, size=1, return_type="dataframe"):
"""
Sample from the Markov Chain.
Parameters:
-----------
start_state: dict or array-like iterable
Representing the starting states of the variables. If None is passed, a random start_state is chosen.
size: int
Number of samples to be generated.
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples:
---------
>>> from pgmpy.factors import DiscreteFactor
>>> from pgmpy.inference import GibbsSampling
>>> from pgmpy.models import MarkovModel
>>> model = MarkovModel([('A', 'B'), ('C', 'B')])
>>> factor_ab = DiscreteFactor(['A', 'B'], [2, 2], [1, 2, 3, 4])
>>> factor_cb = DiscreteFactor(['C', 'B'], [2, 2], [5, 6, 7, 8])
>>> model.add_factors(factor_ab, factor_cb)
>>> gibbs = GibbsSampling(model)
>>> gibbs.sample(size=4, return_type='dataframe')
A B C
0 0 1 1
1 1 0 0
2 1 1 0
3 1 1 1
"""
if start_state is None and self.state is None:
self.state = self.random_state()
elif start_state is not None:
self.set_start_state(start_state)
types = [(var_name, 'int') for var_name in self.variables]
sampled = np.zeros(size, dtype=types).view(np.recarray)
sampled[0] = np.array([st for var, st in self.state])
for i in range(size - 1):
for j, (var, st) in enumerate(self.state):
other_st = tuple(st for v, st in self.state if var != v)
next_st = sample_discrete(list(range(self.cardinalities[var])),
self.transition_models[var][other_st])[0]
self.state[j] = State(var, next_st)
sampled[i + 1] = np.array([st for var, st in self.state])
return _return_samples(return_type, sampled)
def generate_sample(self, start_state=None, size=1):
"""
Generator version of self.sample
Return Type:
------------
List of State namedtuples, representing the assignment to all variables of the model.
Examples:
---------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.sampling import GibbsSampling
>>> from pgmpy.models import MarkovModel
>>> model = MarkovModel([('A', 'B'), ('C', 'B')])
>>> factor_ab = DiscreteFactor(['A', 'B'], [2, 2], [1, 2, 3, 4])
>>> factor_cb = DiscreteFactor(['C', 'B'], [2, 2], [5, 6, 7, 8])
>>> model.add_factors(factor_ab, factor_cb)
>>> gibbs = GibbsSampling(model)
>>> gen = gibbs.generate_sample(size=2)
>>> [sample for sample in gen]
[[State(var='C', state=1), State(var='B', state=1), State(var='A', state=0)],
[State(var='C', state=0), State(var='B', state=1), State(var='A', state=1)]]
"""
if start_state is None and self.state is None:
self.state = self.random_state()
elif start_state is not None:
self.set_start_state(start_state)
for i in range(size):
for j, (var, st) in enumerate(self.state):
other_st = tuple(st for v, st in self.state if var != v)
next_st = sample_discrete(list(range(self.cardinalities[var])),
self.transition_models[var][other_st])[0]
self.state[j] = State(var, next_st)
yield self.state[:]
|
the-stack_0_1219 | """
=============================================
:mod:`archivers` -- Solution archival methods
=============================================
This module provides pre-defined archivers for evolutionary computations.
All archiver functions have the following arguments:
- *random* -- the random number generator object
- *population* -- the population of individuals
- *archive* -- the current archive of individuals
- *args* -- a dictionary of keyword arguments
Each archiver function returns the updated archive.
.. note::
The *population* is really a shallow copy of the actual population of
the evolutionary computation. This means that any activities like
sorting will not affect the actual population.
.. Copyright 2012 Aaron Garrett
.. Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
.. The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
.. module:: archivers
.. moduleauthor:: Aaron Garrett <[email protected]>
"""
import math
def default_archiver(random, population, archive, args):
"""Do nothing.
This function just returns the existing archive (which is
probably empty) with no changes.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
"""
return archive
def population_archiver(random, population, archive, args):
"""Archive the current population.
This function replaces the archive with the individuals
of the current population.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
"""
new_archive = []
for ind in population:
new_archive.append(ind)
return new_archive
def best_archiver(random, population, archive, args):
"""Archive only the best individual(s).
This function archives the best solutions and removes inferior ones.
If the comparison operators have been overloaded to define Pareto
preference (as in the ``Pareto`` class), then this archiver will form
a Pareto archive.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
"""
new_archive = archive
for ind in population:
if len(new_archive) == 0:
new_archive.append(ind)
else:
should_remove = []
should_add = True
for a in new_archive:
if ind.candidate == a.candidate:
should_add = False
break
elif ind < a:
should_add = False
elif ind > a:
should_remove.append(a)
for r in should_remove:
new_archive.remove(r)
if should_add:
new_archive.append(ind)
return new_archive
def adaptive_grid_archiver(random, population, archive, args):
"""Archive only the best individual(s) using a fixed size grid.
This function archives the best solutions by using a fixed-size grid
to determine which existing solutions should be removed in order to
make room for new ones. This archiver is designed specifically for
use with the Pareto Archived Evolution Strategy (PAES).
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *max_archive_size* -- the maximum number of individuals in the archive
(default len(population))
- *num_grid_divisions* -- the number of grid divisions (default 1)
"""
def get_grid_location(fitness, num_grid_divisions, global_smallest, global_largest):
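# Locate the grid cell for a fitness vector by bisecting the objective-space
# bounding box num_grid_divisions times; returns -1 when the fitness falls
# outside the current global bounds.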
loc = 0
n = 1
num_objectives = len(fitness)
inc = [0 for _ in range(num_objectives)]
width = [0 for _ in range(num_objectives)]
local_smallest = global_smallest[:]
for i, f in enumerate(fitness):
if f < local_smallest[i] or f > local_smallest[i] + global_largest[i] - global_smallest[i]:
return -1
for i in range(num_objectives):
inc[i] = n
n *= 2
width[i] = global_largest[i] - global_smallest[i]
for d in range(num_grid_divisions):
for i, f in enumerate(fitness):
if f < width[i] / 2.0 + local_smallest[i]:
loc += inc[i]
else:
local_smallest[i] += width[i] / 2.0
for i in range(num_objectives):
inc[i] *= num_objectives * 2
width[i] /= 2.0
return loc
def update_grid(individual, archive, num_grid_divisions, global_smallest, global_largest, grid_population):
if len(archive) == 0:
num_objectives = len(individual.fitness)
smallest = [individual.fitness[o] for o in range(num_objectives)]
largest = [individual.fitness[o] for o in range(num_objectives)]
else:
num_objectives = min(min([len(a.fitness) for a in archive]), len(individual.fitness))
smallest = [min(min([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)]
largest = [max(max([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)]
for i in range(num_objectives):
global_smallest[i] = smallest[i] - abs(0.2 * smallest[i])
global_largest[i] = largest[i] + abs(0.2 * largest[i])
for i in range(len(grid_population)):
grid_population[i] = 0
for a in archive:
loc = get_grid_location(a.fitness, num_grid_divisions, global_smallest, global_largest)
a.grid_location = loc
grid_population[loc] += 1
loc = get_grid_location(individual.fitness, num_grid_divisions, global_smallest, global_largest)
individual.grid_location = loc
grid_population[loc] += 1
max_archive_size = args.setdefault('max_archive_size', len(population))
num_grid_divisions = args.setdefault('num_grid_divisions', 1)
if not 'grid_population' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.grid_population = [0 for _ in range(2**(min([len(p.fitness) for p in population]) * num_grid_divisions))]
if not 'global_smallest' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.global_smallest = [0 for _ in range(min([len(p.fitness) for p in population]))]
if not 'global_largest' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.global_largest = [0 for _ in range(min([len(p.fitness) for p in population]))]
new_archive = archive
for ind in population:
update_grid(ind, new_archive, num_grid_divisions, adaptive_grid_archiver.global_smallest,
adaptive_grid_archiver.global_largest, adaptive_grid_archiver.grid_population)
should_be_added = True
for a in new_archive:
if ind == a or a > ind:
should_be_added = False
if should_be_added:
if len(new_archive) == 0:
new_archive.append(ind)
else:
join = False
nondominated = True
removal_set = []
for i, a in enumerate(new_archive):
if ind > a and not join:
new_archive[i] = ind
join = True
elif ind > a:
if not a in removal_set:
removal_set.append(a)
# Otherwise, the individual is nondominated against this archive member.
# We can't use set difference because Individual objects are not hashable.
# We'd like to say...
# new_archive = list(set(new_archive) - set(removal_set))
# So this code gets that same result without using sets.
temp_archive = []
for ind in new_archive:
if ind not in removal_set:
temp_archive.append(ind)
new_archive = temp_archive
if not join and nondominated:
if len(new_archive) == max_archive_size:
replaced_index = 0
found_replacement = False
loc = get_grid_location(ind.fitness, num_grid_divisions,
adaptive_grid_archiver.global_smallest,
adaptive_grid_archiver.global_largest)
ind.grid_location = loc
if ind.grid_location >= 0:
most = adaptive_grid_archiver.grid_population[ind.grid_location]
else:
most = -1
for i, a in enumerate(new_archive):
pop_at_a = adaptive_grid_archiver.grid_population[a.grid_location]
if pop_at_a > most:
most = pop_at_a
replaced_index = i
found_replacement = True
if found_replacement:
new_archive[replaced_index] = ind
else:
new_archive.append(ind)
return new_archive
|
the-stack_0_1220 | import pandas as pd
import numpy as np
if __name__ == '__main__':
df = pd.read_csv(snakemake.input[0], sep="\t", index_col=0)
mc_df = pd.read_csv(snakemake.input[1], sep="\t", index_col=0)
# sum the rows of the mutation count matrix to get the number of mutations per sample
n_mutations = mc_df.sum(axis=1)
# df is samples-by-signatures
# n_mutations is vector of length samples
df = df.transpose().multiply(n_mutations).transpose()
#df.index = [i-1 for i in df.index]
df.to_csv(snakemake.output[0], sep="\t")
|
the-stack_0_1221 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A setuptools based setup module.
See:
https://packaging.python.org/tutorials/packaging-projects/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
__version__ = "1.0.6"
description = "Analysis Correlation Engine (ACE) API Python Bindings."
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ace_api',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
# The project's main homepage.
url='https://github.com/IntegralDefense/ACE/_api_package',
# Author details
author='John Davison',
author_email='[email protected]',
# Choose your license
license='Apache-2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
"Intended Audience :: Information Technology",
"Intended Audience :: Telecommunications Industry",
'Operating System :: OS Independent',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
#'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='Cyber Security,Information Security,InfoSec,Detection,Response,SOAR',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=[],
include_package_data=True,
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
py_modules=["ace_api"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['tzlocal', 'requests', 'pytz'],
entry_points={
'console_scripts': ['ace_api=ace_api:main'],
}
)
|
the-stack_0_1222 | """
The batch module implements the :class:`~fastNLP.core.batch.DataSetIter` class used throughout fastNLP.
"""
__all__ = [
"BatchIter",
"DataSetIter",
"TorchLoaderIter",
]
import atexit
from numbers import Number
import numpy as np
import torch
import torch.utils.data
from ._logger import logger
from .dataset import DataSet
from .sampler import SequentialSampler
_python_is_exit = False
def _set_python_is_exit():
global _python_is_exit
_python_is_exit = True
atexit.register(_set_python_is_exit)
class DataSetGetter:
def __init__(self, dataset: DataSet, as_numpy=False):
self.dataset = dataset
self.inputs = {n: f for n, f in dataset.get_all_fields().items() if f.is_input}
self.targets = {n: f for n, f in dataset.get_all_fields().items() if f.is_target}
self.as_numpy = as_numpy
self.idx_list = list(range(len(dataset)))
def __getitem__(self, idx: int):
# mapping idx to sampled idx
idx = self.idx_list[idx]
inputs = {n:f.get(idx) for n, f in self.inputs.items()}
targets = {n:f.get(idx) for n, f in self.targets.items()}
return idx, inputs, targets
def __len__(self):
return len(self.dataset)
def collate_fn(self, batch: list):
"""
:param batch: [[idx1, x_dict1, y_dict1], [idx2, x_dict2, y_dict2], [xx, xx, xx]]
:return:
"""
# TODO: support defining collate_fn on the DataSet itself, since some scenarios
# (e.g. BERT) need different fields to be merged.
batch_x = {n:[] for n in self.inputs.keys()}
batch_y = {n:[] for n in self.targets.keys()}
indices = []
for idx, x, y in batch:
indices.append(idx)
for n, v in x.items():
batch_x[n].append(v)
for n, v in y.items():
batch_y[n].append(v)
def pad_batch(batch_dict, field_array):
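# Pad every field's instances to a common shape with that field's padder,
# then (unless as_numpy is requested) convert the padded array to a tensor.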
for n, vlist in batch_dict.items():
f = field_array[n]
if f.padder is None:
batch_dict[n] = np.array(vlist)
else:
data = f.pad(vlist)
if not self.as_numpy:
try:
data, flag = _to_tensor(data, f.dtype)
except TypeError as e:
logger.error(f"Field {n} cannot be converted to torch.tensor.")
raise e
batch_dict[n] = data
return batch_dict
return (indices,
pad_batch(batch_x, self.inputs),
pad_batch(batch_y, self.targets))
def set_idx_list(self, idx_list):
if len(idx_list) != len(self.idx_list):
raise ValueError
self.idx_list = idx_list
def __getattr__(self, item):
if hasattr(self.dataset, item):
return getattr(self.dataset, item)
else:
raise AttributeError("'DataSetGetter' object has no attribute '{}'".format(item))
class SamplerAdapter(torch.utils.data.Sampler):
def __init__(self, sampler, dataset):
super().__init__(dataset)
self.sampler = sampler
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __iter__(self):
return iter(self.sampler(self.dataset))
class BatchIter:
def __init__(self):
self.dataiter = None
self.num_batches = None
self.cur_batch_indices = None
self.batch_size = None
def init_iter(self):
pass
@staticmethod
def get_num_batches(num_samples, batch_size, drop_last):
num_batches = num_samples // batch_size
if not drop_last and (num_samples % batch_size > 0):
num_batches += 1
return num_batches
def __iter__(self):
self.init_iter()
for indices, batch_x, batch_y in self.dataiter:
self.cur_batch_indices = indices
yield batch_x, batch_y
def get_batch_indices(self):
return self.cur_batch_indices
def __len__(self):
return self.num_batches
@property
def dataset(self):
return self.dataiter.dataset
class DataSetIter(BatchIter):
"""
DataSetIter pulls data from a `DataSet` in the order given by the sampler, ``batch_size`` examples
at a time, and assembles them into `x` and `y`::
batch = DataSetIter(data_set, batch_size=16, sampler=SequentialSampler())
num_batch = len(batch)
for batch_x, batch_y in batch:
# do stuff ...
"""
def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False,
num_workers=0, pin_memory=False, drop_last=False,
timeout=0, worker_init_fn=None):
"""
:param dataset: a :class:`~fastNLP.DataSet` object, the dataset to iterate over
:param int batch_size: number of examples per batch
:param sampler: the :class:`~fastNLP.Sampler` to use. If ``None``, :class:`~fastNLP.SequentialSampler` is used.
Default: ``None``
:param bool as_numpy: if ``True``, batches are returned as numpy.array; otherwise as :class:`torch.Tensor`.
Default: ``False``
:param int num_workers: number of worker processes used to preprocess the data
:param bool pin_memory: whether to place the produced tensors in pinned memory, which may speed things up
:param bool drop_last: if the last batch has fewer than batch_size samples, drop it
:param timeout:
:param worker_init_fn: called once per worker at startup; it receives the worker's index.
"""
super().__init__()
assert isinstance(dataset, DataSet)
if not isinstance(sampler, torch.utils.data.Sampler):
self.sampler = SamplerAdapter(sampler=sampler or SequentialSampler(), dataset=dataset)
else:
self.sampler = sampler
dataset = DataSetGetter(dataset, as_numpy)
collate_fn = dataset.collate_fn if hasattr(dataset, 'collate_fn') else None
self.dataiter = torch.utils.data.DataLoader(
dataset=dataset, batch_size=batch_size, sampler=self.sampler,
collate_fn=collate_fn, num_workers=num_workers,
pin_memory=pin_memory, drop_last=drop_last,
timeout=timeout, worker_init_fn=worker_init_fn)
# Use the sampler's length as the reference: with a DistributedSampler each process only sees a subset of the data.
self.num_batches = self.get_num_batches(len(self.dataiter.sampler), batch_size, drop_last)
self.batch_size = batch_size
class TorchLoaderIter(BatchIter):
def __init__(self, dataset):
super().__init__()
assert isinstance(dataset, torch.utils.data.DataLoader)
self.dataiter = dataset
self.num_batches = self.get_num_batches(len(dataset.sampler), dataset.batch_size, dataset.drop_last)
self.batch_size = dataset.batch_size
def _to_tensor(batch, field_dtype):
"""
:param batch: np.array()
:param field_dtype: the dtype of the field
:return: batch, flag. If the data can be converted to a tensor, the returned batch is that tensor and flag is True;
otherwise the original data is returned unchanged and flag is False.
"""
try:
if field_dtype is not None and isinstance(field_dtype, type)\
and issubclass(field_dtype, Number) \
and not isinstance(batch, torch.Tensor):
if issubclass(batch.dtype.type, np.floating):
new_batch = torch.as_tensor(batch).float() # float32 by default
elif issubclass(batch.dtype.type, np.integer):
new_batch = torch.as_tensor(batch).long() # reuses the underlying memory, avoiding a copy
else:
new_batch = torch.as_tensor(batch)
return new_batch, True
else:
return batch, False
except Exception as e:
raise e
|
the-stack_0_1224 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
ax = fig.add_subplot(111)
line, = ax.plot(np.random.rand(10))
ax.set_ylim(0, 1)
def update(data):
line.set_ydata(data)
return line,
def data_gen():
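# Endless generator of fresh random series; FuncAnimation feeds each yielded
# array to update() every 100 ms.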
while True: yield np.random.rand(10)
ani = animation.FuncAnimation(fig, update, data_gen, interval=100)
plt.show()
|
the-stack_0_1225 | from .views import *
from django.urls import path
app_name = 'product'
urlpatterns = [
# Category
path('category/', CategoryList.as_view(), name='categoryList'),
path('category/<int:pk>/', CategoryDetail.as_view(), name='categoryDetail'),
# Subject
path('subject/', SubjectList.as_view(), name='subjectList'),
path('subject/<int:pk>/', SubjectDetail.as_view(), name='subjectDetail'),
# Product
path('product/', ProductList.as_view(), name='productList'),
path('product/<int:pk>/', ProductDetail.as_view(), name='productDetail'),
# Nested (Category)
path(
'category/<int:cat>/product/',
CategoryProductList.as_view(),
name='categoryProductList'
),
# Nested (Subject)
path(
'subject/<int:sub>/product/',
SubjectProductList.as_view(),
name='subjectProductList'
),
# Nested (Category Subject)
path(
'category/<int:cat>/subject/<int:sub>/product/',
CategorySubjectProductList.as_view(),
name='categorySubjectProductList'
),
]
|
the-stack_0_1228 | import os
from string import Template
import psycopg2
import argparse
parser = argparse.ArgumentParser()
args_general = parser.add_argument_group(title="General Options")
args_general.add_argument('-t', '--table_name', default='test', help='table to be copied. target table with have _copy as a suffix')
args_general.add_argument('-c', '--columns', default='id', help='list the columns to be converted to bigint eg. col1,col2,col3')
args_general.add_argument('-p', '--primary_key', default='id', help='primary key of the table for which a new sequence is created in the copy table')
args = parser.parse_args()
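# Example (hypothetical table and script name): generate the SQL for copying
# "events" with columns id and user_id widened to bigint:
#   python clone_bigint.py -t events -c id,user_id -p id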
table_name = args.table_name
if not os.path.exists('generated/{}'.format(table_name)):
os.makedirs('generated/{}'.format(table_name))
pk = args.primary_key
columns = args.columns
raw_convert = columns.split(',')
convert = []
for c in raw_convert:
convert.append('ALTER TABLE {} ALTER COLUMN {} TYPE bigint'.format(table_name+"_copy", c))
def create_clone(table_name, pk, convert):
template = open('templates/clone.sql')
src = Template(template.read())
d = {'source': table_name, 'target': table_name+"_copy", 'pk': pk, 'seq_name': table_name+"_copy"+"_id"+"_seq", 'convert': '\n'.join(convert)}
result = src.substitute(d)
f = open("generated/{0}/{0}_clone.sql".format(table_name), "a")
f.write(result)
f.close()
def create_indexes(table_name):
try:
conn = psycopg2.connect("dbname=postgres")
except psycopg2.Error as e:
print("Failed to connect to the database: ", e.pgcode)
query = "select replace(indexdef,'INDEX', 'INDEX CONCURRENTLY') from pg_indexes where tablename = '{}'".format(table_name)
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()
template = open('templates/indexes.sql')
src = Template(template.read())
d = {'indexes': '\n'.join(row[0] for row in rows)}
result = src.substitute(d)
f = open("generated/{0}/{0}_indexes.sql".format(table_name), "a")
f.write(result)
f.close()
def create_trigger(table_name, pk):
template = open('templates/trigger.sql')
src = Template(template.read())
d = {'source': table_name, 'target': table_name+"_copy", 'pk': pk, 'tname': table_name+"_trig", 'b': "$BODY$"}
result = src.substitute(d)
f = open("generated/{0}/{0}_trig.sql".format(table_name), "a")
f.write(result)
f.close()
def grant_acl(table_name):
try:
conn = psycopg2.connect("dbname=postgres")
except psycopg2.Error as e:
print("Failed to connect to the database: ", e.pgcode)
query = "select 'GRANT ' || privilege_type || ' ON ' || table_name || ' TO ' || grantee || ';' from information_schema.role_table_grants where table_name = '{}' and grantee <> grantor;".format(table_name, table_name)
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()
template = open('templates/acl.sql')
src = Template(template.read())
d = {'acl': '\n'.join(row[0] for row in rows)}
result = src.substitute(d)
f = open("generated/{0}/{0}_acl.sql".format(table_name), "a")
f.write(result)
f.close()
def main():
create_trigger(table_name, pk)
create_clone(table_name, pk, convert)
grant_acl(table_name)
create_indexes(table_name)
if __name__ == "__main__":
main()
|
the-stack_0_1229 | """
weasyprint.tests.test_presentational_hints
------------------------------------------
Test the HTML presentational hints.
"""
from weasyprint import CSS, HTML
from .testing_utils import BASE_URL, assert_no_logs
PH_TESTING_CSS = CSS(string='''
@page {margin: 0; size: 1000px 1000px}
body {margin: 0}
''')
@assert_no_logs
def test_no_ph():
# Test both CSS and non-CSS rules
document = HTML(string='''
<hr size=100 />
<table align=right width=100><td>0</td></table>
''').render(stylesheets=[PH_TESTING_CSS])
page, = document.pages
html, = page._page_box.children
body, = html.children
hr, table = body.children
assert hr.border_height() != 100
assert table.position_x == 0
@assert_no_logs
def test_ph_page():
document = HTML(string='''
<body marginheight=2 topmargin=3 leftmargin=5
bgcolor=red text=blue />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
assert body.margin_top == 2
assert body.margin_bottom == 2
assert body.margin_left == 5
assert body.margin_right == 0
assert body.style['background_color'] == (1, 0, 0, 1)
assert body.style['color'] == (0, 0, 1, 1)
@assert_no_logs
def test_ph_flow():
document = HTML(string='''
<pre wrap></pre>
<center></center>
<div align=center></div>
<div align=middle></div>
<div align=left></div>
<div align=right></div>
<div align=justify></div>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
pre, center, div1, div2, div3, div4, div5 = body.children
assert pre.style['white_space'] == 'pre-wrap'
assert center.style['text_align'] == 'center'
assert div1.style['text_align'] == 'center'
assert div2.style['text_align'] == 'center'
assert div3.style['text_align'] == 'left'
assert div4.style['text_align'] == 'right'
assert div5.style['text_align'] == 'justify'
@assert_no_logs
def test_ph_phrasing():
document = HTML(string='''
<style>@font-face { src: url(AHEM____.TTF); font-family: ahem }</style>
<br clear=left>
<br clear=right />
<br clear=both />
<br clear=all />
<font color=red face=ahem size=7></font>
<Font size=4></Font>
<font size=+5 ></font>
<font size=-5 ></font>
''', base_url=BASE_URL).render(
stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
line1, line2, line3, line4, line5 = body.children
br1, = line1.children
br2, = line2.children
br3, = line3.children
br4, = line4.children
font1, font2, font3, font4 = line5.children
assert br1.style['clear'] == 'left'
assert br2.style['clear'] == 'right'
assert br3.style['clear'] == 'both'
assert br4.style['clear'] == 'both'
assert font1.style['color'] == (1, 0, 0, 1)
assert font1.style['font_family'] == ('ahem',)
assert font1.style['font_size'] == 1.5 * 2 * 16
assert font2.style['font_size'] == 6 / 5 * 16
assert font3.style['font_size'] == 1.5 * 2 * 16
assert font4.style['font_size'] == 8 / 9 * 16
@assert_no_logs
def test_ph_lists():
document = HTML(string='''
<ol>
<li type=A></li>
<li type=1></li>
<li type=a></li>
<li type=i></li>
<li type=I></li>
</ol>
<ul>
<li type=circle></li>
<li type=disc></li>
<li type=square></li>
</ul>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
ol, ul = body.children
oli1, oli2, oli3, oli4, oli5 = ol.children
uli1, uli2, uli3 = ul.children
assert oli1.style['list_style_type'] == 'upper-alpha'
assert oli2.style['list_style_type'] == 'decimal'
assert oli3.style['list_style_type'] == 'lower-alpha'
assert oli4.style['list_style_type'] == 'lower-roman'
assert oli5.style['list_style_type'] == 'upper-roman'
assert uli1.style['list_style_type'] == 'circle'
assert uli2.style['list_style_type'] == 'disc'
assert uli3.style['list_style_type'] == 'square'
@assert_no_logs
def test_ph_lists_types():
document = HTML(string='''
<ol type=A></ol>
<ol type=1></ol>
<ol type=a></ol>
<ol type=i></ol>
<ol type=I></ol>
<ul type=circle></ul>
<ul type=disc></ul>
<ul type=square></ul>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
ol1, ol2, ol3, ol4, ol5, ul1, ul2, ul3 = body.children
assert ol1.style['list_style_type'] == 'upper-alpha'
assert ol2.style['list_style_type'] == 'decimal'
assert ol3.style['list_style_type'] == 'lower-alpha'
assert ol4.style['list_style_type'] == 'lower-roman'
assert ol5.style['list_style_type'] == 'upper-roman'
assert ul1.style['list_style_type'] == 'circle'
assert ul2.style['list_style_type'] == 'disc'
assert ul3.style['list_style_type'] == 'square'
@assert_no_logs
def test_ph_tables():
document = HTML(string='''
<table align=left rules=none></table>
<table align=right rules=groups></table>
<table align=center rules=rows></table>
<table border=10 cellspacing=3 bordercolor=green>
<thead>
<tr>
<th valign=top></th>
</tr>
</thead>
<tr>
<td nowrap><h1 align=right></h1><p align=center></p></td>
</tr>
<tr>
</tr>
<tfoot align=justify>
<tr>
<td></td>
</tr>
</tfoot>
</table>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
wrapper1, wrapper2, wrapper3, wrapper4, = body.children
assert wrapper1.style['float'] == 'left'
assert wrapper2.style['float'] == 'right'
assert wrapper3.style['margin_left'] == 'auto'
assert wrapper3.style['margin_right'] == 'auto'
assert wrapper1.children[0].style['border_left_style'] == 'hidden'
assert wrapper1.style['border_collapse'] == 'collapse'
assert wrapper2.children[0].style['border_left_style'] == 'hidden'
assert wrapper2.style['border_collapse'] == 'collapse'
assert wrapper3.children[0].style['border_left_style'] == 'hidden'
assert wrapper3.style['border_collapse'] == 'collapse'
table4, = wrapper4.children
assert table4.style['border_top_style'] == 'outset'
assert table4.style['border_top_width'] == 10
assert table4.style['border_spacing'] == (3, 3)
r, g, b, a = table4.style['border_left_color']
assert g > r and g > b
head_group, rows_group, foot_group = table4.children
head, = head_group.children
th, = head.children
assert th.style['vertical_align'] == 'top'
line1, line2 = rows_group.children
td, = line1.children
assert td.style['white_space'] == 'nowrap'
assert td.style['border_top_width'] == 1
assert td.style['border_top_style'] == 'inset'
h1, p = td.children
assert h1.style['text_align'] == 'right'
assert p.style['text_align'] == 'center'
foot, = foot_group.children
tr, = foot.children
assert tr.style['text_align'] == 'justify'
@assert_no_logs
def test_ph_hr():
document = HTML(string='''
<hr align=left>
<hr align=right />
<hr align=both color=red />
<hr align=center noshade size=10 />
<hr align=all size=8 width=100 />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
hr1, hr2, hr3, hr4, hr5 = body.children
assert hr1.margin_left == 0
assert hr1.style['margin_right'] == 'auto'
assert hr2.style['margin_left'] == 'auto'
assert hr2.margin_right == 0
assert hr3.style['margin_left'] == 'auto'
assert hr3.style['margin_right'] == 'auto'
assert hr3.style['color'] == (1, 0, 0, 1)
assert hr4.style['margin_left'] == 'auto'
assert hr4.style['margin_right'] == 'auto'
assert hr4.border_height() == 10
assert hr4.style['border_top_width'] == 5
assert hr5.border_height() == 8
assert hr5.height == 6
assert hr5.width == 100
assert hr5.style['border_top_width'] == 1
@assert_no_logs
def test_ph_embedded():
document = HTML(string='''
<object data="data:image/svg+xml,<svg></svg>"
align=top hspace=10 vspace=20></object>
<img src="data:image/svg+xml,<svg></svg>" alt=text
align=right width=10 height=20 />
<embed src="data:image/svg+xml,<svg></svg>" align=texttop />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
line, = body.children
object_, text1, img, embed, text2 = line.children
assert embed.style['vertical_align'] == 'text-top'
assert object_.style['vertical_align'] == 'top'
assert object_.margin_top == 20
assert object_.margin_left == 10
assert img.style['float'] == 'right'
assert img.width == 10
assert img.height == 20
|
the-stack_0_1230 |
import subprocess
import sys
import re
import os
import setup_util
from os.path import expanduser
home = expanduser("~")
def start(args, logfile, errfile):
setup_util.replace_text("sinatra/hello_world.rb", ":host => '.*'", ":host => '" + args.database_host + "'")
try:
subprocess.check_call("rvm ruby-2.0.0-p0 do bundle install --gemfile=Gemfile-ruby", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("cp Gemfile-ruby Gemfile", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("cp Gemfile-ruby.lock Gemfile.lock", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c " + home + "/FrameworkBenchmarks/sinatra/config/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen("rvm ruby-2.0.0-p0 do bundle exec unicorn_rails -E production -c config/unicorn.rb", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
try:
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'unicorn' in line and 'master' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
# subprocess.check_call("rvm ruby-2.0.0-p0 do bundle exec passenger stop --pid-file=$HOME/FrameworkBenchmarks/rack/rack.pid", shell=True, cwd='rack')
subprocess.check_call("rm Gemfile", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("rm Gemfile.lock", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
|
the-stack_0_1231 | # Copyright (c) 2020 Rocky Bernstein
from uncompyle6.parsers.treenode import SyntaxTree
def tryelsestmtl3(self, lhs, n, rule, ast, tokens, first, last):
# Check the end of the except handler that there isn't a jump from
# inside the except handler to the end. If that happens
# then this is a "try" with no "else".
except_handler = ast[3]
if except_handler == "except_handler_else":
except_handler = except_handler[0]
come_from = except_handler[-1]
    # We only care about the *first* come_from because that is the
    # innermost one. So if the "tryelse" is invalid (should be a "try")
# it will be invalid here.
if come_from == "COME_FROM":
first_come_from = except_handler[-1]
elif come_from == "END_FINALLY":
return False
elif come_from == "except_return":
return False
else:
assert come_from in ("come_froms", "opt_come_from_except")
first_come_from = come_from[0]
if not hasattr(first_come_from, "attr"):
# optional come from
return False
leading_jump = except_handler[0]
if not hasattr(leading_jump, "offset"):
return False
# We really don't care that this is a jump per-se. But
# we could also check that this jumps to the end of the except if
# desired.
if isinstance(leading_jump, SyntaxTree):
except_handler_first_offset = leading_jump.first_child().off2int()
else:
except_handler_first_offset = leading_jump.off2int()
return first_come_from.attr > except_handler_first_offset
|
the-stack_0_1232 | from __future__ import unicode_literals
import time
import hmac
import hashlib
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
urlencode_postdata,
xpath_text,
)
class AtresPlayerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?atresplayer\.com/television/[^/]+/[^/]+/[^/]+/(?P<id>.+?)_\d+\.html'
_NETRC_MACHINE = 'atresplayer'
_TESTS = [
{
'url': 'http://www.atresplayer.com/television/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_2014122100174.html',
'md5': 'efd56753cda1bb64df52a3074f62e38a',
'info_dict': {
'id': 'capitulo-10-especial-solidario-nochebuena',
'ext': 'mp4',
'title': 'Especial Solidario de Nochebuena',
'description': 'md5:e2d52ff12214fa937107d21064075bf1',
'duration': 5527.6,
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'This video is only available for registered users'
},
{
'url': 'http://www.atresplayer.com/television/especial/videoencuentros/temporada-1/capitulo-112-david-bustamante_2014121600375.html',
'md5': '6e52cbb513c405e403dbacb7aacf8747',
'info_dict': {
'id': 'capitulo-112-david-bustamante',
'ext': 'flv',
'title': 'David Bustamante',
'description': 'md5:f33f1c0a05be57f6708d4dd83a3b81c6',
'duration': 1439.0,
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html',
'only_matching': True,
},
]
_USER_AGENT = 'Dalvik/1.6.0 (Linux; U; Android 4.3; GT-I9300 Build/JSS15J'
_MAGIC = 'QWtMLXs414Yo+c#_+Q#K@NN)'
_TIMESTAMP_SHIFT = 30000
_TIME_API_URL = 'http://servicios.atresplayer.com/api/admin/time.json'
_URL_VIDEO_TEMPLATE = 'https://servicios.atresplayer.com/api/urlVideo/{1}/{0}/{1}|{2}|{3}.json'
_PLAYER_URL_TEMPLATE = 'https://servicios.atresplayer.com/episode/getplayer.json?episodePk=%s'
_EPISODE_URL_TEMPLATE = 'http://www.atresplayer.com/episodexml/%s'
_LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'
_ERRORS = {
'UNPUBLISHED': 'We\'re sorry, but this video is not yet available.',
'DELETED': 'This video has expired and is no longer available for online streaming.',
'GEOUNPUBLISHED': 'We\'re sorry, but this video is not available in your region due to right restrictions.',
# 'PREMIUM': 'PREMIUM',
}
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_form = {
'j_username': username,
'j_password': password,
}
request = sanitized_Request(
self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = self._download_webpage(
request, None, 'Logging in')
error = self._html_search_regex(
r'(?s)<ul[^>]+class="[^"]*\blist_error\b[^"]*">(.+?)</ul>',
response, 'error', default=None)
if error:
raise ExtractorError(
'Unable to login: %s' % error, expected=True)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
episode_id = self._search_regex(
r'episode="([^"]+)"', webpage, 'episode id')
request = sanitized_Request(
self._PLAYER_URL_TEMPLATE % episode_id,
headers={'User-Agent': self._USER_AGENT})
player = self._download_json(request, episode_id, 'Downloading player JSON')
episode_type = player.get('typeOfEpisode')
error_message = self._ERRORS.get(episode_type)
if error_message:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
formats = []
video_url = player.get('urlVideo')
if video_url:
format_info = {
'url': video_url,
'format_id': 'http',
}
mobj = re.search(r'(?P<bitrate>\d+)K_(?P<width>\d+)x(?P<height>\d+)', video_url)
if mobj:
format_info.update({
'width': int_or_none(mobj.group('width')),
'height': int_or_none(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
formats.append(format_info)
timestamp = int_or_none(self._download_webpage(
self._TIME_API_URL,
video_id, 'Downloading timestamp', fatal=False), 1000, time.time())
timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT)
token = hmac.new(
self._MAGIC.encode('ascii'),
(episode_id + timestamp_shifted).encode('utf-8'), hashlib.md5
).hexdigest()
request = sanitized_Request(
self._URL_VIDEO_TEMPLATE.format('windows', episode_id, timestamp_shifted, token),
headers={'User-Agent': self._USER_AGENT})
fmt_json = self._download_json(
request, video_id, 'Downloading windows video JSON')
result = fmt_json.get('resultDes')
if result.lower() != 'ok':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, result), expected=True)
for format_id, video_url in fmt_json['resultObject'].items():
if format_id == 'token' or not video_url.startswith('http'):
continue
if 'geodeswowsmpra3player' in video_url:
# f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
# f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
# this videos are protected by DRM, the f4m downloader doesn't support them
continue
video_url_hd = video_url.replace('free_es', 'es')
formats.extend(self._extract_f4m_formats(
video_url_hd[:-9] + '/manifest.f4m', video_id, f4m_id='hds',
fatal=False))
formats.extend(self._extract_mpd_formats(
video_url_hd[:-9] + '/manifest.mpd', video_id, mpd_id='dash',
fatal=False))
self._sort_formats(formats)
path_data = player.get('pathData')
episode = self._download_xml(
self._EPISODE_URL_TEMPLATE % path_data, video_id,
'Downloading episode XML')
duration = float_or_none(xpath_text(
episode, './media/asset/info/technical/contentDuration', 'duration'))
art = episode.find('./media/asset/info/art')
title = xpath_text(art, './name', 'title')
description = xpath_text(art, './description', 'description')
thumbnail = xpath_text(episode, './media/asset/files/background', 'thumbnail')
subtitles = {}
subtitle_url = xpath_text(episode, './media/asset/files/subtitle', 'subtitle')
if subtitle_url:
subtitles['es'] = [{
'ext': 'srt',
'url': subtitle_url,
}]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
|
the-stack_0_1235 | #!/usr/bin/env python3
import sys
with open(sys.argv[1]) as input:
lines = input.readlines()
numbers = [int(line.strip()) for line in lines]
# Part 1
print(sum(y > x for x, y in zip(numbers[:-1], numbers[1:])))
# Part 2
print(sum(y > x for x, y in zip(numbers[:-3], numbers[3:])))
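# A hedged aside, not part of the original solution: the Part 2 one-liner works
# because consecutive three-item windows share two elements, so comparing
# (a+b+c) with (b+c+d) reduces to comparing a with d. The helper below only
# illustrates that equivalence on arbitrary sample values; it is never called.
def _window_sum_equivalence_demo(values=(199, 200, 208, 210, 200, 207, 240, 269, 260, 263)):
    windows = [sum(values[i:i + 3]) for i in range(len(values) - 2)]
    by_sums = sum(y > x for x, y in zip(windows[:-1], windows[1:]))
    by_shift = sum(y > x for x, y in zip(values[:-3], values[3:]))
    assert by_sums == by_shift
    return by_sums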
|
the-stack_0_1236 | def evalRPN(tokens):
OPS = {
"-" : lambda x,y : x-y,
"+" : lambda x,y : x+y,
"*" : lambda x,y : x*y,
"/" : lambda x,y : int(x/y)
}
s= []
for item in tokens:
if len(s)>=2 and item in OPS:
y = s.pop()
x = s.pop()
s.append(OPS[item](x,y))
else:
s.append(int(item))
#print(s)
return s.pop()
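# Side note on the OPS division lambda above (an observation, not original
# code): int(x / y) truncates toward zero, which this evaluator relies on;
# floor division would disagree for mixed signs. The third example in the
# __main__ block below contains the step 6 / -132, which must give 0 (not -1)
# for the final result of 22.
def _truncating_division_examples():
    assert int(6 / -132) == 0      # truncation toward zero
    assert 6 // -132 == -1         # floor division differs for mixed signs
    assert evalRPN(["6", "-132", "/"]) == 0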
if __name__=='__main__':
tokens = ["2","1","+","3","*"]
#Output: 9
#print(evalRPN(tokens))
tokens = ["4","13","5","/","+"] #6
print(evalRPN(tokens))
tokens = ["10","6","9","3","+","-11","*","/","*","17","+","5","+"]
#Output: 22
print(evalRPN(tokens)) |
the-stack_0_1239 | try:
from cyordereddict import OrderedDict
except ImportError:
from collections import OrderedDict
from . import util
from .pprint import PrettyPrinter
class AttrTree(object):
"""
An AttrTree offers convenient, multi-level attribute access for
collections of objects. AttrTree objects may also be combined
together using the update method or merge classmethod. Here is an
example of adding a ViewableElement to an AttrTree and accessing it:
>>> t = AttrTree()
>>> t.Example.Path = 1
>>> t.Example.Path #doctest: +ELLIPSIS
1
"""
    _disabled_prefixes = []  # Prefixes of attribute names for which subtree access is disabled
_sanitizer = util.sanitize_identifier
@classmethod
def merge(cls, trees):
"""
Merge a collection of AttrTree objects.
"""
first = trees[0]
for tree in trees:
first.update(tree)
return first
def __dir__(self):
"""
The _dir_mode may be set to 'default' or 'user' in which case
only the child nodes added by the user are listed.
"""
dict_keys = self.__dict__.keys()
if self.__dict__['_dir_mode'] == 'user':
return self.__dict__['children']
else:
return dir(type(self)) + list(dict_keys)
def __init__(self, items=None, identifier=None, parent=None, dir_mode='default'):
"""
identifier: A string identifier for the current node (if any)
parent: The parent node (if any)
items: Items as (path, value) pairs to construct
(sub)tree down to given leaf values.
Note that the root node does not have a parent and does not
require an identifier.
"""
self.__dict__['parent'] = parent
self.__dict__['identifier'] = type(self)._sanitizer(identifier, escape=False)
self.__dict__['children'] = []
self.__dict__['_fixed'] = False
self.__dict__['_dir_mode'] = dir_mode # Either 'default' or 'user'
fixed_error = 'No attribute %r in this AttrTree, and none can be added because fixed=True'
self.__dict__['_fixed_error'] = fixed_error
self.__dict__['data'] = OrderedDict()
items = items.items() if isinstance(items, OrderedDict) else items
# Python 3
items = list(items) if items else items
items = [] if not items else items
for path, item in items:
self.set_path(path, item)
@property
def path(self):
"Returns the path up to the root for the current node."
if self.parent:
return '.'.join([self.parent.path, str(self.identifier)])
else:
return self.identifier if self.identifier else self.__class__.__name__
@property
def fixed(self):
"If fixed, no new paths can be created via attribute access"
return self.__dict__['_fixed']
@fixed.setter
def fixed(self, val):
self.__dict__['_fixed'] = val
def update(self, other):
"""
Updated the contents of the current AttrTree with the
contents of a second AttrTree.
"""
if not isinstance(other, AttrTree):
raise Exception('Can only update with another AttrTree type.')
fixed_status = (self.fixed, other.fixed)
(self.fixed, other.fixed) = (False, False)
for identifier, element in other.items():
if identifier not in self.data:
self[identifier] = element
else:
self[identifier].update(element)
(self.fixed, other.fixed) = fixed_status
def set_path(self, path, val):
"""
Set the given value at the supplied path where path is either
a tuple of strings or a string in A.B.C format.
"""
path = tuple(path.split('.')) if isinstance(path , str) else tuple(path)
disallowed = [p for p in path if not type(self)._sanitizer.allowable(p)]
if any(disallowed):
raise Exception("Attribute strings in path elements cannot be "
"correctly escaped : %s" % ','.join(repr(el) for el in disallowed))
if len(path) > 1:
attrtree = self.__getattr__(path[0])
attrtree.set_path(path[1:], val)
else:
self.__setattr__(path[0], val)
def filter(self, path_filters):
"""
Filters the loaded AttrTree using the supplied path_filters.
"""
if not path_filters: return self
# Convert string path filters
path_filters = [tuple(pf.split('.')) if not isinstance(pf, tuple)
else pf for pf in path_filters]
# Search for substring matches between paths and path filters
new_attrtree = self.__class__()
for path, item in self.data.items():
if any([all([subpath in path for subpath in pf]) for pf in path_filters]):
new_attrtree.set_path(path, item)
return new_attrtree
def _propagate(self, path, val):
"""
Propagate the value up to the root node.
"""
self.data[path] = val
if self.parent is not None:
self.parent._propagate((self.identifier,)+path, val)
def __setitem__(self, identifier, val):
"""
Set a value at a child node with given identifier. If at a root
node, multi-level path specifications is allowed (i.e. 'A.B.C'
format or tuple format) in which case the behaviour matches
that of set_path.
"""
if isinstance(identifier, str) and '.' not in identifier:
self.__setattr__(identifier, val)
elif isinstance(identifier, str) and self.parent is None:
self.set_path(tuple(identifier.split('.')), val)
elif isinstance(identifier, tuple) and self.parent is None:
self.set_path(identifier, val)
else:
raise Exception("Multi-level item setting only allowed from root node.")
def __getitem__(self, identifier):
"""
For a given non-root node, access a child element by identifier.
If the node is a root node, you may also access elements using
either tuple format or the 'A.B.C' string format.
"""
split_label = (tuple(identifier.split('.'))
if isinstance(identifier, str) else tuple(identifier))
if len(split_label) == 1:
identifier = split_label[0]
if identifier in self.children:
return self.__dict__[identifier]
else:
raise KeyError(identifier)
path_item = self
for identifier in split_label:
path_item = path_item[identifier]
return path_item
def __setattr__(self, identifier, val):
# Getattr is skipped for root and first set of children
shallow = (self.parent is None or self.parent.parent is None)
if identifier[0].isupper() and self.fixed and shallow:
raise AttributeError(self._fixed_error % identifier)
super(AttrTree, self).__setattr__(identifier, val)
if identifier[0].isupper():
if not identifier in self.children:
self.children.append(identifier)
self._propagate((identifier,), val)
def __getattr__(self, identifier):
"""
        Access an identifier from the AttrTree or generate a new AttrTree
with the chosen attribute path.
"""
try:
return super(AttrTree, self).__getattr__(identifier)
except AttributeError: pass
# Attributes starting with __ get name mangled
if identifier.startswith('_' + type(self).__name__) or identifier.startswith('__'):
raise AttributeError('Attribute %s not found.' % identifier)
elif self.fixed==True:
raise AttributeError(self._fixed_error % identifier)
if not any(identifier.startswith(prefix)
for prefix in type(self)._disabled_prefixes):
identifier = type(self)._sanitizer(identifier, escape=False)
if identifier in self.children:
return self.__dict__[identifier]
if not identifier.startswith('_'):
self.children.append(identifier)
dir_mode = self.__dict__['_dir_mode']
child_tree = self.__class__(identifier=identifier,
parent=self, dir_mode=dir_mode)
self.__dict__[identifier] = child_tree
return child_tree
else:
raise AttributeError
def __iter__(self):
return iter(self.data.values())
def __contains__(self, name):
return name in self.children or name in self.data
def __len__(self):
return len(self.data)
def get(self, identifier, default=None):
split_label = (tuple(identifier.split('.'))
if isinstance(identifier, str) else tuple(identifier))
if len(split_label) == 1:
identifier = split_label[0]
return self.__dict__.get(identifier, default)
path_item = self
for identifier in split_label:
if path_item == default or path_item is None:
return default
path_item = path_item.get(identifier, default)
return path_item
def keys(self):
return list(self.data.keys())
def items(self):
return list(self.data.items())
def values(self):
return list(self.data.values())
def pop(self, identifier, default=None):
if identifier in self.children:
item = self[identifier]
self.__delitem__(identifier)
return item
else:
return default
def __repr__(self):
return PrettyPrinter.pprint(self)
__all__ = ['AttrTree']
|
the-stack_0_1240 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc
class PredictionServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.automl.v1beta1 PredictionService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="automl.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub(
channel
)
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel
)
@classmethod
def create_channel(cls, address="automl.googleapis.com:443", credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def predict(self):
"""Return the gRPC stub for :meth:`PredictionServiceClient.predict`.
Perform an online prediction. The prediction result will be directly
returned in the response. Available for following ML problems, and their
expected request payloads:
- Image Classification - Image in .JPEG, .GIF or .PNG format,
image\_bytes up to 30MB.
- Image Object Detection - Image in .JPEG, .GIF or .PNG format,
image\_bytes up to 30MB.
- Text Classification - TextSnippet, content up to 10,000 characters,
UTF-8 encoded.
        - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8
          NFC encoded.
        - Translation - TextSnippet, content up to 25,000 characters, UTF-8
          encoded.
- Tables - Row, with column values matching the columns of the model,
up to 5MB.
        - Text Sentiment - TextSnippet, content up to 500 characters, UTF-8
encoded.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["prediction_service_stub"].Predict
@property
def batch_predict(self):
"""Return the gRPC stub for :meth:`PredictionServiceClient.batch_predict`.
Perform a batch prediction. Unlike the online ``Predict``, batch
prediction result won't be immediately available in the response.
Instead, a long running operation object is returned. User can poll the
operation result via ``GetOperation`` method. Once the operation is
done, ``BatchPredictResult`` is returned in the ``response`` field.
Available for following ML problems:
- Video Classification
- Text Extraction
- Tables
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["prediction_service_stub"].BatchPredict
|
the-stack_0_1244 | """ Tests for AttMap. """
import itertools
import os
import pickle
import numpy as np
import pytest
import yaml
from attmap import AttMap, AttMapEcho
__author__ = "Vince Reuter"
__email__ = "[email protected]"
# Provide some basic atomic-type data for models tests.
_BASE_KEYS = ("epigenomics", "H3K", "ac", "EWS", "FLI1")
_BASE_VALUES = \
("topic", "residue", "acetylation", "RNA binding protein", "FLI1")
_ENTRIES_PROVISION_MODES = ["gen", "dict", "zip", "list", "items"]
_SEASON_HIERARCHY = {
"spring": {"February": 28, "March": 31, "April": 30, "May": 31},
"summer": {"June": 30, "July": 31, "August": 31},
"fall": {"September": 30, "October": 31, "November": 30},
"winter": {"December": 31, "January": 31}
}
ADDITIONAL_NON_NESTED = {"West Complex": {"CPHG": 6}, "BIG": {"MR-4": 6}}
ADDITIONAL_NESTED = {"JPA": {"West Complex": {"CPHG": 6}},
"Lane": {"BIG": {"MR-4": 6}}}
ADDITIONAL_VALUES_BY_NESTING = {
False: ADDITIONAL_NON_NESTED,
True: ADDITIONAL_NESTED
}
COMPARISON_FUNCTIONS = ["__eq__", "__ne__", "__len__",
"keys", "values", "items"]
def pytest_generate_tests(metafunc):
""" Centralize dynamic test case parameterization. """
if "empty_collection" in metafunc.fixturenames:
# Test case strives to validate expected behavior on empty container.
collection_types = [tuple, list, set, dict]
metafunc.parametrize(
"empty_collection",
argvalues=[ctype() for ctype in collection_types],
ids=[ctype.__name__ for ctype in collection_types])
def basic_entries():
""" AttMap data that lack nested structure. """
for k, v in zip(_BASE_KEYS, _BASE_VALUES):
yield k, v
def nested_entries():
""" AttributeDict data with some nesting going on. """
for k, v in _SEASON_HIERARCHY.items():
yield k, v
@pytest.mark.parametrize("base", ["random", "irrelevant", "arbitrary"])
@pytest.mark.parametrize("protect", [False, True])
def test_echo_is_conditional(base, protect):
""" Protected member isn't echoed. """
m = AttMapEcho({})
if protect:
with pytest.raises(AttributeError):
m.__getattr__("__{}__".format(base))
else:
assert base == m.__getattr__(base)
class AttributeConstructionDictTests:
"""Tests for the AttMap ADT.
Note that the implementation of the equality comparison operator
is tested indirectly via the mechanism of many of the assertion
statements used throughout these test cases. Some test cases are
parameterized by comparison function to test for equivalence, rather
    than via input data as is typically the case. This avoids some overhead
    and ensures that the implemented `collections.MutableMapping`
or `collections.abc.MutableMapping` methods are valid.
"""
# Refer to tail of class definition for
# data and fixtures specific to this class.
def test_null_construction(self):
""" Null entries value creates empty AttMap. """
assert AttMap({}) == AttMap(None)
def test_empty_construction(self, empty_collection):
""" Empty entries container create empty AttMap. """
m = AttMap(empty_collection)
assert AttMap(None) == m
assert m != dict()
@pytest.mark.parametrize(
argnames="entries_gen,entries_provision_type",
argvalues=itertools.product([basic_entries, nested_entries],
_ENTRIES_PROVISION_MODES),
ids=["{entries}-{mode}".format(entries=gen.__name__, mode=mode)
for gen, mode in
itertools.product([basic_entries, nested_entries],
_ENTRIES_PROVISION_MODES)]
)
def test_construction_modes_supported(
self, entries_gen, entries_provision_type):
""" Construction wants key-value pairs; wrapping doesn't matter. """
entries_mapping = dict(entries_gen())
if entries_provision_type == "dict":
entries = entries_mapping
elif entries_provision_type == "zip":
keys, values = zip(*entries_gen())
entries = zip(keys, values)
elif entries_provision_type == "items":
entries = entries_mapping.items()
elif entries_provision_type == "list":
entries = list(entries_gen())
elif entries_provision_type == "gen":
entries = entries_gen
else:
raise ValueError("Unexpected entries type: {}".
format(entries_provision_type))
expected = AttMap(entries_mapping)
observed = AttMap(entries)
assert expected == observed
@staticmethod
def _validate_mapping_function_implementation(entries_gen, name_comp_func):
data = dict(entries_gen())
attrdict = AttMap(data)
        if name_comp_func in ["__eq__", "__ne__"]:
            are_equal = getattr(attrdict, name_comp_func).__call__(data)
            assert are_equal if name_comp_func == "__eq__" \
                else (not are_equal)
        else:
            raw_dict_comp_func = getattr(data, name_comp_func)
            attrdict_comp_func = getattr(attrdict, name_comp_func)
            expected = raw_dict_comp_func.__call__()
            observed = attrdict_comp_func.__call__()
            try:
                # Most comparison methods are returning iterables.
                assert set(expected) == set(observed)
            except TypeError:
                # Could be int or other non-iterable that we're comparing.
                assert expected == observed
class AttMapUpdateTests:
"""Validate behavior of post-construction addition of entries.
Though entries may and often will be provided at instantiation time,
AttMap is motivated to support inheritance by domain-specific
data types for which use cases are likely to be unable to provide
all relevant data at construction time. So let's verify that we get the
expected behavior when entries are added after initial construction.
"""
_TOTALLY_ARBITRARY_VALUES = [
"abc", 123,
(4, "text", ("nes", "ted")), list("-101")
]
_GETTERS = ["__getattr__", "__getitem__"]
_SETTERS = ["__setattr__", "__setitem__"]
@pytest.mark.parametrize(
argnames="setter_name,getter_name,is_novel",
argvalues=itertools.product(_SETTERS, _GETTERS, (False, True)))
def test_set_get_atomic(self, setter_name, getter_name, is_novel):
""" For new and existing items, validate set/get behavior. """
# Establish the AttMap for the test case.
data = dict(basic_entries())
ad = AttMap(basic_entries())
# Establish a ground truth and select name/value(s) based on
# whether or not the test case wants to test a new or existing item.
if is_novel:
item_name = "awesome_novel_attribute"
assert item_name not in ad
with pytest.raises(AttributeError):
getattr(ad, item_name)
item_values = self._TOTALLY_ARBITRARY_VALUES
else:
item_name = np.random.choice(a=list(data.keys()), size=1)[0]
item_value = data[item_name]
assert ad[item_name] == item_value
assert getattr(ad, item_name) == item_value
item_values = [item_value]
# Determine which functions to use to make the set/get calls.
setter = getattr(ad, setter_name)
getter = getattr(ad, getter_name)
# Validate set/get for each value.
for value in item_values:
setter(item_name, value)
assert getter(item_name) == value
class AttMapCollisionTests:
""" Tests for proper merging and type conversion of mappings.
AttMap converts a mapping being inserted as a value to an
AttMap. """
@pytest.mark.parametrize(
argnames="name_update_func",
argvalues=["add_entries", "__setattr__", "__setitem__"])
def test_squash_existing(self, name_update_func):
""" When a value that's a mapping is assigned to existing key with
non-mapping value, the new value overwrites the old. """
ad = AttMap({"MR": 4})
assert 4 == ad.MR
assert 4 == ad["MR"]
new_value = [4, 5, 6]
args = ("MR", new_value)
setter = getattr(ad, name_update_func)
if name_update_func == "add_entries":
setter([args])
else:
setter(*args)
assert new_value == ad.MR
assert new_value == ad["MR"]
@pytest.mark.parametrize(
argnames="name_update_func",
argvalues=["add_entries", "__setattr__", "__setitem__"])
@pytest.mark.parametrize(
argnames="name_fetch_func",
argvalues=["__getattr__", "__getitem__"])
class AttMapNullTests:
""" AttMap has configurable behavior regarding null values. """
def test_new_null(self, name_update_func, name_fetch_func):
""" When a key/item, isn't known, null is allowed. """
ad = AttMap()
setter = getattr(ad, name_update_func)
args = ("new_key", None)
self._do_update(name_update_func, setter, args)
getter = getattr(ad, name_fetch_func)
assert getter("new_key") is None
def test_replace_null(self, name_update_func, name_fetch_func):
""" Null can be replaced by non-null. """
ad = AttMap({"lone_attr": None})
assert getattr(ad, name_fetch_func)("lone_attr") is None
setter = getattr(ad, name_update_func)
non_null_value = AttMap({"was_null": "not_now"})
self._do_update(name_update_func, setter,
("lone_attr", non_null_value))
assert non_null_value == getattr(ad, name_fetch_func)("lone_attr")
@staticmethod
def _do_update(name_setter_func, setter_bound_method, args):
if name_setter_func == "add_entries":
setter_bound_method([args])
else:
setter_bound_method(*args)
class AttMapItemAccessTests:
""" Tests for access of items (key- or attribute- style). """
@pytest.mark.parametrize(argnames="missing", argvalues=["att", ""])
def test_missing_getattr(self, missing):
attrd = AttMap()
with pytest.raises(AttributeError):
getattr(attrd, missing)
@pytest.mark.parametrize(argnames="missing", argvalues=["", "b", "missing"])
def test_missing_getitem(self, missing):
attrd = AttMap()
with pytest.raises(KeyError):
attrd[missing]
def test_numeric_key(self):
""" Attribute request must be string. """
ad = AttMap({1: 'a'})
assert 'a' == ad[1]
with pytest.raises(TypeError):
getattr(ad, 1)
class AttMapSerializationTests:
""" Tests for AttMap serialization. """
DATA_PAIRS = [('a', 1), ('b', False), ('c', range(5)),
('d', {'A': None, 'T': []}),
('e', AttMap({'G': 1, 'C': [False, None]})),
('f', [AttMap({"DNA": "deoxyribose", "RNA": "ribose"}),
AttMap({"DNA": "thymine", "RNA": "uracil"})])]
@pytest.mark.parametrize(
argnames="data",
argvalues=itertools.combinations(DATA_PAIRS, 2),
ids=lambda data: " data = {}".format(str(data)))
@pytest.mark.parametrize(
argnames="data_type", argvalues=[list, dict],
ids=lambda data_type: " data_type = {}".format(data_type))
def test_pickle_restoration(self, tmpdir, data, data_type):
""" Pickled and restored AttMap objects are identical. """
# Type the AttMap input data argument according to parameter.
data = data_type(data)
original_attrdict = AttMap(data)
filename = "attrdict-test.pkl"
# Allow either Path or raw string.
try:
dirpath = tmpdir.strpath
except AttributeError:
dirpath = tmpdir
# Serialize AttMap and write to disk.
filepath = os.path.join(dirpath, filename)
with open(filepath, 'wb') as pkl:
pickle.dump(original_attrdict, pkl)
# Validate equivalence between original and restored versions.
with open(filepath, 'rb') as pkl:
restored_attrdict = AttMap(pickle.load(pkl))
assert restored_attrdict == original_attrdict
class AttMapObjectSyntaxAccessTests:
""" Test behavior of dot attribute access / identity setting. """
DEFAULT_VALUE = "totally-arbitrary"
NORMAL_ITEM_ARG_VALUES = \
["__getattr__", "__getitem__", "__dict__", "__repr__", "__str__"]
PICKLE_ITEM_ARG_VALUES = ["__getstate__", "__setstate__"]
ATTR_DICT_DATA = {"a": 0, "b": range(1, 3), "c": {"CO": 70, "WA": 5}}
UNMAPPED = ["arb-att-1", "random-attribute-2"]
@pytest.fixture(scope="function")
def attrdict(self, request):
""" Provide a test case with an AttMap. """
d = self.ATTR_DICT_DATA
return AttMapEcho(d) if request.getfixturevalue("return_identity") \
else AttMap(d)
@pytest.mark.parametrize(
argnames="return_identity", argvalues=[False, True],
ids=lambda ret_id: " identity setting={} ".format(ret_id))
@pytest.mark.parametrize(
argnames="attr_to_request",
argvalues=NORMAL_ITEM_ARG_VALUES + PICKLE_ITEM_ARG_VALUES +
UNMAPPED + list(ATTR_DICT_DATA.keys()),
ids=lambda attr: " requested={} ".format(attr))
def test_attribute_access(
self, return_identity, attr_to_request, attrdict):
""" Access behavior depends on request and behavior toggle. """
if attr_to_request == "__dict__":
# The underlying mapping is still accessible.
assert attrdict.__dict__ is getattr(attrdict, "__dict__")
elif attr_to_request in self.NORMAL_ITEM_ARG_VALUES:
# Request for common protected function returns the function.
assert callable(getattr(attrdict, attr_to_request))
elif attr_to_request in self.PICKLE_ITEM_ARG_VALUES:
# We don't tinker with the pickle-relevant attributes.
with pytest.raises(AttributeError):
print("Should have failed, but got result: {}".
format(getattr(attrdict, attr_to_request)))
elif attr_to_request in self.UNMAPPED:
# Unmapped request behavior depends on parameterization.
if return_identity:
assert attr_to_request == getattr(attrdict, attr_to_request)
else:
with pytest.raises(AttributeError):
getattr(attrdict, attr_to_request)
else:
# A mapped attribute returns its known value.
expected = self.ATTR_DICT_DATA[attr_to_request]
if isinstance(expected, dict):
expected = type(attrdict)(expected)
observed = getattr(attrdict, attr_to_request)
print("AD (below):\n{}".format(attrdict))
assert expected == observed
class NullityTests:
""" Tests of null/non-null values """
_KEYNAMES = ["sample_name", "protocol", "arbitrary_attribute"]
@pytest.mark.parametrize(argnames="item", argvalues=_KEYNAMES)
def test_missing_is_neither_null_nor_non_null(self, item):
""" Value of absent key is neither null nor non-null """
ad = AttMap()
assert not ad.is_null(item) and not ad.non_null(item)
@pytest.mark.parametrize(argnames="item", argvalues=_KEYNAMES)
def test_is_null(self, item):
""" Null-valued key/item evaluates as such. """
ad = AttMap()
ad[item] = None
assert ad.is_null(item) and not ad.non_null(item)
@pytest.mark.parametrize(
argnames=["k", "v"],
argvalues=list(zip(_KEYNAMES, ["sampleA", "WGBS", "random"])))
def test_non_null(self, k, v):
""" AD is sensitive to value updates """
ad = AttMap()
assert not ad.is_null(k) and not ad.non_null(k)
ad[k] = None
assert ad.is_null(k) and not ad.non_null(k)
ad[k] = v
assert not ad.is_null(k) and ad.non_null(k)
@pytest.mark.usefixtures("write_project_files")
class SampleYamlTests:
""" AttMap metadata only appear in YAML if non-default. """
@staticmethod
def _yaml_data(sample, filepath, section_to_change=None,
attr_to_change=None, newval=None):
"""
Serialize a Sample, possibly tweaking it first, write, and parse.
:param models.Sample sample: what to serialize and write
:param str filepath: where to write the data
:param str section_to_change: name of section
in which to change attribute
:param str attr_to_change: name of attribute to change
:param object newval: value to set for targeted attribute
:return (Iterable[str], dict): raw lines and parsed version (YAML)
"""
if section_to_change:
getattr(sample, section_to_change)[attr_to_change] = newval
sample.to_yaml(filepath)
with open(filepath, 'r') as f:
data = yaml.safe_load(f)
with open(filepath, 'r') as f:
lines = f.readlines()
return lines, data
@pytest.mark.parametrize(
["func", "exp"], [(repr, "AttMap: {}"), (str, "AttMap: {}")])
def test_text_repr_empty(func, exp):
""" Empty AttMap is correctly represented as text. """
assert exp == func(AttMap())
|
the-stack_0_1245 | # Copyright (c) 2020 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .core import (
require_version as _require_version,
int2bytes,
bytes2int,
Version,
Tlv,
AID,
CommandError,
NotSupportedError,
BadResponseError,
)
from .core.smartcard import (
SmartCardConnection,
SmartCardProtocol,
ApduError,
SW,
ApduFormat,
)
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.constant_time import bytes_eq
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from cryptography.hazmat.primitives.asymmetric import rsa, ec
from cryptography.hazmat.primitives.asymmetric.padding import AsymmetricPadding
from cryptography.hazmat.backends import default_backend
from dataclasses import dataclass
from enum import Enum, IntEnum, unique
from typing import Optional, Union, Type, cast
import logging
import os
import re
logger = logging.getLogger(__name__)
@unique
class ALGORITHM(str, Enum):
EC = "ec"
RSA = "rsa"
# Don't treat pre 1.0 versions as "developer builds".
def require_version(my_version: Version, *args, **kwargs):
if my_version <= (0, 1, 3): # Last pre 1.0 release of ykneo-piv
my_version = Version(1, 0, 0)
_require_version(my_version, *args, **kwargs)
@unique
class KEY_TYPE(IntEnum):
RSA1024 = 0x06
RSA2048 = 0x07
ECCP256 = 0x11
ECCP384 = 0x14
@property
def algorithm(self):
return ALGORITHM.EC if self.name.startswith("ECC") else ALGORITHM.RSA
@property
def bit_len(self):
match = re.search(r"\d+$", self.name)
if match:
return int(match.group())
raise ValueError("No bit_len")
@classmethod
def from_public_key(cls, key):
if isinstance(key, rsa.RSAPublicKey):
try:
return getattr(cls, "RSA%d" % key.key_size)
except AttributeError:
raise ValueError("Unsupported RSA key size: %d" % key.key_size)
pass # Fall through to ValueError
elif isinstance(key, ec.EllipticCurvePublicKey):
curve_name = key.curve.name
if curve_name == "secp256r1":
return cls.ECCP256
elif curve_name == "secp384r1":
return cls.ECCP384
raise ValueError(f"Unsupported EC curve: {curve_name}")
raise ValueError(f"Unsupported key type: {type(key).__name__}")
@unique
class MANAGEMENT_KEY_TYPE(IntEnum):
TDES = 0x03
AES128 = 0x08
AES192 = 0x0A
AES256 = 0x0C
@property
def key_len(self):
if self.name == "TDES":
return 24
# AES
return int(self.name[3:]) // 8
@property
def challenge_len(self):
if self.name == "TDES":
return 8
return 16
def _parse_management_key(key_type, management_key):
if key_type == MANAGEMENT_KEY_TYPE.TDES:
return algorithms.TripleDES(management_key)
else:
return algorithms.AES(management_key)
# The card management slot is special, we don't include it in SLOT below
SLOT_CARD_MANAGEMENT = 0x9B
@unique
class SLOT(IntEnum):
AUTHENTICATION = 0x9A
SIGNATURE = 0x9C
KEY_MANAGEMENT = 0x9D
CARD_AUTH = 0x9E
RETIRED1 = 0x82
RETIRED2 = 0x83
RETIRED3 = 0x84
RETIRED4 = 0x85
RETIRED5 = 0x86
RETIRED6 = 0x87
RETIRED7 = 0x88
RETIRED8 = 0x89
RETIRED9 = 0x8A
RETIRED10 = 0x8B
RETIRED11 = 0x8C
RETIRED12 = 0x8D
RETIRED13 = 0x8E
RETIRED14 = 0x8F
RETIRED15 = 0x90
RETIRED16 = 0x91
RETIRED17 = 0x92
RETIRED18 = 0x93
RETIRED19 = 0x94
RETIRED20 = 0x95
ATTESTATION = 0xF9
@unique
class OBJECT_ID(IntEnum):
CAPABILITY = 0x5FC107
CHUID = 0x5FC102
AUTHENTICATION = 0x5FC105 # cert for 9a key
FINGERPRINTS = 0x5FC103
SECURITY = 0x5FC106
FACIAL = 0x5FC108
PRINTED = 0x5FC109
SIGNATURE = 0x5FC10A # cert for 9c key
KEY_MANAGEMENT = 0x5FC10B # cert for 9d key
CARD_AUTH = 0x5FC101 # cert for 9e key
DISCOVERY = 0x7E
KEY_HISTORY = 0x5FC10C
IRIS = 0x5FC121
RETIRED1 = 0x5FC10D
RETIRED2 = 0x5FC10E
RETIRED3 = 0x5FC10F
RETIRED4 = 0x5FC110
RETIRED5 = 0x5FC111
RETIRED6 = 0x5FC112
RETIRED7 = 0x5FC113
RETIRED8 = 0x5FC114
RETIRED9 = 0x5FC115
RETIRED10 = 0x5FC116
RETIRED11 = 0x5FC117
RETIRED12 = 0x5FC118
RETIRED13 = 0x5FC119
RETIRED14 = 0x5FC11A
RETIRED15 = 0x5FC11B
RETIRED16 = 0x5FC11C
RETIRED17 = 0x5FC11D
RETIRED18 = 0x5FC11E
RETIRED19 = 0x5FC11F
RETIRED20 = 0x5FC120
ATTESTATION = 0x5FFF01
@classmethod
def from_slot(cls, slot):
return getattr(cls, SLOT(slot).name)
@unique
class PIN_POLICY(IntEnum):
DEFAULT = 0x0
NEVER = 0x1
ONCE = 0x2
ALWAYS = 0x3
@unique
class TOUCH_POLICY(IntEnum):
DEFAULT = 0x0
NEVER = 0x1
ALWAYS = 0x2
CACHED = 0x3
# 010203040506070801020304050607080102030405060708
DEFAULT_MANAGEMENT_KEY = (
b"\x01\x02\x03\x04\x05\x06\x07\x08"
+ b"\x01\x02\x03\x04\x05\x06\x07\x08"
+ b"\x01\x02\x03\x04\x05\x06\x07\x08"
)
PIN_LEN = 8
# Instruction set
INS_VERIFY = 0x20
INS_CHANGE_REFERENCE = 0x24
INS_RESET_RETRY = 0x2C
INS_GENERATE_ASYMMETRIC = 0x47
INS_AUTHENTICATE = 0x87
INS_GET_DATA = 0xCB
INS_PUT_DATA = 0xDB
INS_GET_METADATA = 0xF7
INS_ATTEST = 0xF9
INS_SET_PIN_RETRIES = 0xFA
INS_RESET = 0xFB
INS_GET_VERSION = 0xFD
INS_IMPORT_KEY = 0xFE
INS_SET_MGMKEY = 0xFF
# Tags for parsing responses and preparing requests
TAG_AUTH_WITNESS = 0x80
TAG_AUTH_CHALLENGE = 0x81
TAG_AUTH_RESPONSE = 0x82
TAG_AUTH_EXPONENTIATION = 0x85
TAG_GEN_ALGORITHM = 0x80
TAG_OBJ_DATA = 0x53
TAG_OBJ_ID = 0x5C
TAG_CERTIFICATE = 0x70
TAG_CERT_INFO = 0x71
TAG_DYN_AUTH = 0x7C
TAG_LRC = 0xFE
TAG_PIN_POLICY = 0xAA
TAG_TOUCH_POLICY = 0xAB
# Metadata tags
TAG_METADATA_ALGO = 0x01
TAG_METADATA_POLICY = 0x02
TAG_METADATA_ORIGIN = 0x03
TAG_METADATA_PUBLIC_KEY = 0x04
TAG_METADATA_IS_DEFAULT = 0x05
TAG_METADATA_RETRIES = 0x06
ORIGIN_GENERATED = 1
ORIGIN_IMPORTED = 2
INDEX_PIN_POLICY = 0
INDEX_TOUCH_POLICY = 1
INDEX_RETRIES_TOTAL = 0
INDEX_RETRIES_REMAINING = 1
PIN_P2 = 0x80
PUK_P2 = 0x81
class InvalidPinError(CommandError):
def __init__(self, attempts_remaining):
super(InvalidPinError, self).__init__(
"Invalid PIN/PUK. Remaining attempts: %d" % attempts_remaining
)
self.attempts_remaining = attempts_remaining
def _pin_bytes(pin):
pin = pin.encode()
if len(pin) > PIN_LEN:
raise ValueError("PIN/PUK must be no longer than 8 bytes")
return pin.ljust(PIN_LEN, b"\xff")
def _retries_from_sw(version, sw):
if sw == SW.AUTH_METHOD_BLOCKED:
return 0
if version < (1, 0, 4):
if 0x6300 <= sw <= 0x63FF:
return sw & 0xFF
else:
if 0x63C0 <= sw <= 0x63CF:
return sw & 0x0F
return None
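# Illustrative decodings of _retries_from_sw (status words below are example
# values, not captured from a device): firmware 1.0.4+ carries the remaining
# attempts in the low nibble of a 0x63Cx status word, older firmware in the
# whole low byte, and AUTH_METHOD_BLOCKED always means zero attempts left.
def _retries_from_sw_examples() -> None:
    assert _retries_from_sw(Version(5, 2, 4), 0x63C5) == 5
    assert _retries_from_sw(Version(1, 0, 2), 0x6305) == 5
    assert _retries_from_sw(Version(5, 2, 4), SW.AUTH_METHOD_BLOCKED) == 0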
@dataclass
class PinMetadata:
default_value: bool
total_attempts: int
attempts_remaining: int
@dataclass
class ManagementKeyMetadata:
key_type: MANAGEMENT_KEY_TYPE
default_value: bool
touch_policy: TOUCH_POLICY
@dataclass
class SlotMetadata:
key_type: KEY_TYPE
pin_policy: PIN_POLICY
touch_policy: TOUCH_POLICY
generated: bool
public_key_encoded: bytes
@property
def public_key(self):
return _parse_device_public_key(self.key_type, self.public_key_encoded)
def _pad_message(key_type, message, hash_algorithm, padding):
if key_type.algorithm == ALGORITHM.EC:
h = hashes.Hash(hash_algorithm, default_backend())
h.update(message)
hashed = h.finalize()
byte_len = key_type.bit_len // 8
if len(hashed) < byte_len:
            return hashed.rjust(byte_len, b"\0")
return hashed[:byte_len]
elif key_type.algorithm == ALGORITHM.RSA:
# Sign with a dummy key, then encrypt the signature to get the padded message
e = 65537
dummy = rsa.generate_private_key(e, key_type.bit_len, default_backend())
signature = dummy.sign(message, padding, hash_algorithm)
# Raw (textbook) RSA encrypt
n = dummy.public_key().public_numbers().n
return int2bytes(pow(bytes2int(signature), e, n), key_type.bit_len // 8)
def _unpad_message(padded, padding):
e = 65537
dummy = rsa.generate_private_key(e, len(padded) * 8, default_backend())
# Raw (textbook) RSA encrypt
n = dummy.public_key().public_numbers().n
encrypted = int2bytes(pow(bytes2int(padded), e, n), len(padded))
return dummy.decrypt(encrypted, padding)
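# Illustrative note on the "dummy key" trick used above (not called by the
# session code): textbook RSA satisfies (m**d)**e == m (mod n), so signing
# with a throwaway key and then re-applying the public exponent leaves just
# the library-generated padding around the message. The toy parameters below
# (p=61, q=53, e=17, d=2753) are a standard classroom example, far too small
# for real use.
def _textbook_rsa_identity_demo() -> bool:
    n, e, d = 61 * 53, 17, 2753
    m = 42
    return pow(pow(m, d, n), e, n) == m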
def check_key_support(
version: Version,
key_type: KEY_TYPE,
pin_policy: PIN_POLICY,
touch_policy: TOUCH_POLICY,
generate: bool = True,
) -> None:
"""Check if a key type is supported by a specific YubiKey firmware version.
This method will return None if the key (with PIN and touch policies) is supported,
or it will raise a NotSupportedError if it is not.
"""
if version[0] == 0 and version > (0, 1, 3):
return # Development build, skip version checks
if version < (4, 0, 0):
if key_type == KEY_TYPE.ECCP384:
raise NotSupportedError("ECCP384 requires YubiKey 4 or later")
if touch_policy != TOUCH_POLICY.DEFAULT or pin_policy != PIN_POLICY.DEFAULT:
raise NotSupportedError("PIN/Touch policy requires YubiKey 4 or later")
if version < (4, 3, 0) and touch_policy == TOUCH_POLICY.CACHED:
raise NotSupportedError("Cached touch policy requires YubiKey 4.3 or later")
# ROCA
if (4, 2, 0) <= version < (4, 3, 5):
if generate and key_type.algorithm == ALGORITHM.RSA:
raise NotSupportedError("RSA key generation not supported on this YubiKey")
# FIPS
if (4, 4, 0) <= version < (4, 5, 0):
if key_type == KEY_TYPE.RSA1024:
raise NotSupportedError("RSA 1024 not supported on YubiKey FIPS")
if pin_policy == PIN_POLICY.NEVER:
raise NotSupportedError("PIN_POLICY.NEVER not allowed on YubiKey FIPS")
def _parse_device_public_key(key_type, encoded):
data = Tlv.parse_dict(encoded)
if key_type.algorithm == ALGORITHM.RSA:
modulus = bytes2int(data[0x81])
exponent = bytes2int(data[0x82])
return rsa.RSAPublicNumbers(exponent, modulus).public_key(default_backend())
else:
if key_type == KEY_TYPE.ECCP256:
curve: Type[ec.EllipticCurve] = ec.SECP256R1
else:
curve = ec.SECP384R1
try:
# Added in cryptography 2.5
return ec.EllipticCurvePublicKey.from_encoded_point(curve(), data[0x86])
except AttributeError:
return ec.EllipticCurvePublicNumbers.from_encoded_point(
curve(), data[0x86]
).public_key(default_backend())
class PivSession:
def __init__(self, connection: SmartCardConnection):
self.protocol = SmartCardProtocol(connection)
self.protocol.select(AID.PIV)
self._version = Version.from_bytes(
self.protocol.send_apdu(0, INS_GET_VERSION, 0, 0)
)
self.protocol.enable_touch_workaround(self.version)
if self.version >= (4, 0, 0):
self.protocol.apdu_format = ApduFormat.EXTENDED
self._current_pin_retries = 3
self._max_pin_retries = 3
@property
def version(self) -> Version:
return self._version
def reset(self) -> None:
# Block PIN
counter = self.get_pin_attempts()
while counter > 0:
try:
self.verify_pin("")
except InvalidPinError as e:
counter = e.attempts_remaining
# Block PUK
counter = 1
while counter > 0:
try:
self._change_reference(INS_RESET_RETRY, PIN_P2, "", "")
except InvalidPinError as e:
counter = e.attempts_remaining
# Reset
self.protocol.send_apdu(0, INS_RESET, 0, 0)
self._current_pin_retries = 3
self._max_pin_retries = 3
def authenticate(
self, key_type: MANAGEMENT_KEY_TYPE, management_key: bytes
) -> None:
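        # The exchange below is a mutual authentication against the management
        # slot (9B): fetch an encrypted witness from the card, decrypt it with
        # the supplied management key and return it together with a fresh host
        # challenge, then verify that the card encrypts our challenge under the
        # same key. A wrong key typically surfaces as an ApduError from the
        # card, or as BadResponseError if the returned challenge mismatches.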
key_type = MANAGEMENT_KEY_TYPE(key_type)
response = self.protocol.send_apdu(
0,
INS_AUTHENTICATE,
key_type,
SLOT_CARD_MANAGEMENT,
Tlv(TAG_DYN_AUTH, Tlv(TAG_AUTH_WITNESS)),
)
witness = Tlv.unpack(TAG_AUTH_WITNESS, Tlv.unpack(TAG_DYN_AUTH, response))
challenge = os.urandom(key_type.challenge_len)
backend = default_backend()
cipher_key = _parse_management_key(key_type, management_key)
cipher = Cipher(cipher_key, modes.ECB(), backend) # nosec
decryptor = cipher.decryptor()
decrypted = decryptor.update(witness) + decryptor.finalize()
response = self.protocol.send_apdu(
0,
INS_AUTHENTICATE,
key_type,
SLOT_CARD_MANAGEMENT,
Tlv(
TAG_DYN_AUTH,
Tlv(TAG_AUTH_WITNESS, decrypted) + Tlv(TAG_AUTH_CHALLENGE, challenge),
),
)
encrypted = Tlv.unpack(TAG_AUTH_RESPONSE, Tlv.unpack(TAG_DYN_AUTH, response))
encryptor = cipher.encryptor()
expected = encryptor.update(challenge) + encryptor.finalize()
if not bytes_eq(expected, encrypted):
raise BadResponseError("Device response is incorrect")
def set_management_key(
self,
key_type: MANAGEMENT_KEY_TYPE,
management_key: bytes,
require_touch: bool = False,
) -> None:
key_type = MANAGEMENT_KEY_TYPE(key_type)
if key_type != MANAGEMENT_KEY_TYPE.TDES:
require_version(self.version, (5, 4, 0))
if len(management_key) != key_type.key_len:
raise ValueError("Management key must be %d bytes" % key_type.key_len)
self.protocol.send_apdu(
0,
INS_SET_MGMKEY,
0xFF,
0xFE if require_touch else 0xFF,
int2bytes(key_type) + Tlv(SLOT_CARD_MANAGEMENT, management_key),
)
def verify_pin(self, pin: str) -> None:
try:
self.protocol.send_apdu(0, INS_VERIFY, 0, PIN_P2, _pin_bytes(pin))
self._current_pin_retries = self._max_pin_retries
except ApduError as e:
retries = _retries_from_sw(self.version, e.sw)
if retries is None:
raise
self._current_pin_retries = retries
raise InvalidPinError(retries)
def get_pin_attempts(self) -> int:
try:
return self.get_pin_metadata().attempts_remaining
except NotSupportedError:
try:
self.protocol.send_apdu(0, INS_VERIFY, 0, PIN_P2)
# Already verified, no way to know true count
return self._current_pin_retries
except ApduError as e:
retries = _retries_from_sw(self.version, e.sw)
if retries is None:
raise
self._current_pin_retries = retries
return retries
def change_pin(self, old_pin: str, new_pin: str) -> None:
self._change_reference(INS_CHANGE_REFERENCE, PIN_P2, old_pin, new_pin)
def change_puk(self, old_puk: str, new_puk: str) -> None:
self._change_reference(INS_CHANGE_REFERENCE, PUK_P2, old_puk, new_puk)
def unblock_pin(self, puk: str, new_pin: str) -> None:
self._change_reference(INS_RESET_RETRY, PIN_P2, puk, new_pin)
def set_pin_attempts(self, pin_attempts: int, puk_attempts: int) -> None:
self.protocol.send_apdu(0, INS_SET_PIN_RETRIES, pin_attempts, puk_attempts)
self._max_pin_retries = pin_attempts
self._current_pin_retries = pin_attempts
def get_pin_metadata(self) -> PinMetadata:
return self._get_pin_puk_metadata(PIN_P2)
def get_puk_metadata(self) -> PinMetadata:
return self._get_pin_puk_metadata(PUK_P2)
def get_management_key_metadata(self) -> ManagementKeyMetadata:
require_version(self.version, (5, 3, 0))
data = Tlv.parse_dict(
self.protocol.send_apdu(0, INS_GET_METADATA, 0, SLOT_CARD_MANAGEMENT)
)
policy = data[TAG_METADATA_POLICY]
return ManagementKeyMetadata(
MANAGEMENT_KEY_TYPE(data.get(TAG_METADATA_ALGO, b"\x03")[0]),
data[TAG_METADATA_IS_DEFAULT] != b"\0",
TOUCH_POLICY(policy[INDEX_TOUCH_POLICY]),
)
def get_slot_metadata(self, slot: SLOT) -> SlotMetadata:
require_version(self.version, (5, 3, 0))
data = Tlv.parse_dict(self.protocol.send_apdu(0, INS_GET_METADATA, 0, slot))
policy = data[TAG_METADATA_POLICY]
return SlotMetadata(
KEY_TYPE(data[TAG_METADATA_ALGO][0]),
PIN_POLICY(policy[INDEX_PIN_POLICY]),
TOUCH_POLICY(policy[INDEX_TOUCH_POLICY]),
data[TAG_METADATA_ORIGIN][0] == ORIGIN_GENERATED,
data[TAG_METADATA_PUBLIC_KEY],
)
def sign(
self,
slot: SLOT,
key_type: KEY_TYPE,
message: bytes,
hash_algorithm: hashes.HashAlgorithm,
padding: Optional[AsymmetricPadding] = None,
) -> bytes:
key_type = KEY_TYPE(key_type)
padded = _pad_message(key_type, message, hash_algorithm, padding)
return self._use_private_key(slot, key_type, padded, False)
def decrypt(
self, slot: SLOT, cipher_text: bytes, padding: AsymmetricPadding
) -> bytes:
if len(cipher_text) == 1024 // 8:
key_type = KEY_TYPE.RSA1024
elif len(cipher_text) == 2048 // 8:
key_type = KEY_TYPE.RSA2048
else:
raise ValueError("Invalid length of ciphertext")
padded = self._use_private_key(slot, key_type, cipher_text, False)
return _unpad_message(padded, padding)
def calculate_secret(
self, slot: SLOT, peer_public_key: ec.EllipticCurvePublicKey
) -> bytes:
key_type = KEY_TYPE.from_public_key(peer_public_key)
if key_type.algorithm != ALGORITHM.EC:
raise ValueError("Unsupported key type")
data = peer_public_key.public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
)
return self._use_private_key(slot, key_type, data, True)
def get_object(self, object_id: int) -> bytes:
if object_id == OBJECT_ID.DISCOVERY:
expected: int = OBJECT_ID.DISCOVERY
else:
expected = TAG_OBJ_DATA
try:
return Tlv.unpack(
expected,
self.protocol.send_apdu(
0,
INS_GET_DATA,
0x3F,
0xFF,
Tlv(TAG_OBJ_ID, int2bytes(object_id)),
),
)
except ValueError as e:
raise BadResponseError("Malformed object data", e)
def put_object(self, object_id: int, data: Optional[bytes] = None) -> None:
self.protocol.send_apdu(
0,
INS_PUT_DATA,
0x3F,
0xFF,
Tlv(TAG_OBJ_ID, int2bytes(object_id)) + Tlv(TAG_OBJ_DATA, data or b""),
)
def get_certificate(self, slot: SLOT) -> x509.Certificate:
try:
data = Tlv.parse_dict(self.get_object(OBJECT_ID.from_slot(slot)))
except ValueError:
raise BadResponseError("Malformed certificate data object")
cert_info = data.get(TAG_CERT_INFO)
if cert_info and cert_info[0] != 0:
raise NotSupportedError("Compressed certificates are not supported")
try:
return x509.load_der_x509_certificate(
data[TAG_CERTIFICATE], default_backend()
)
except Exception as e:
raise BadResponseError("Invalid certificate", e)
def put_certificate(self, slot: SLOT, certificate: x509.Certificate) -> None:
cert_data = certificate.public_bytes(Encoding.DER)
data = (
Tlv(TAG_CERTIFICATE, cert_data) + Tlv(TAG_CERT_INFO, b"\0") + Tlv(TAG_LRC)
)
self.put_object(OBJECT_ID.from_slot(slot), data)
def delete_certificate(self, slot: SLOT) -> None:
self.put_object(OBJECT_ID.from_slot(slot))
def put_key(
self,
slot: SLOT,
private_key: Union[
rsa.RSAPrivateKeyWithSerialization,
ec.EllipticCurvePrivateKeyWithSerialization,
],
pin_policy: PIN_POLICY = PIN_POLICY.DEFAULT,
touch_policy: TOUCH_POLICY = TOUCH_POLICY.DEFAULT,
) -> None:
key_type = KEY_TYPE.from_public_key(private_key.public_key())
check_key_support(self.version, key_type, pin_policy, touch_policy, False)
ln = key_type.bit_len // 8
numbers = private_key.private_numbers()
if key_type.algorithm == ALGORITHM.RSA:
numbers = cast(rsa.RSAPrivateNumbers, numbers)
if numbers.public_numbers.e != 65537:
raise NotSupportedError("RSA exponent must be 65537")
ln //= 2
data = (
Tlv(0x01, int2bytes(numbers.p, ln))
+ Tlv(0x02, int2bytes(numbers.q, ln))
+ Tlv(0x03, int2bytes(numbers.dmp1, ln))
+ Tlv(0x04, int2bytes(numbers.dmq1, ln))
+ Tlv(0x05, int2bytes(numbers.iqmp, ln))
)
else:
numbers = cast(ec.EllipticCurvePrivateNumbers, numbers)
data = Tlv(0x06, int2bytes(numbers.private_value, ln))
if pin_policy:
data += Tlv(TAG_PIN_POLICY, int2bytes(pin_policy))
if touch_policy:
data += Tlv(TAG_TOUCH_POLICY, int2bytes(touch_policy))
self.protocol.send_apdu(0, INS_IMPORT_KEY, key_type, slot, data)
return key_type
def generate_key(
self,
slot: SLOT,
key_type: KEY_TYPE,
pin_policy: PIN_POLICY = PIN_POLICY.DEFAULT,
touch_policy: TOUCH_POLICY = TOUCH_POLICY.DEFAULT,
) -> Union[rsa.RSAPublicKey, ec.EllipticCurvePublicKey]:
key_type = KEY_TYPE(key_type)
check_key_support(self.version, key_type, pin_policy, touch_policy, True)
data: bytes = Tlv(TAG_GEN_ALGORITHM, int2bytes(key_type))
if pin_policy:
data += Tlv(TAG_PIN_POLICY, int2bytes(pin_policy))
if touch_policy:
data += Tlv(TAG_TOUCH_POLICY, int2bytes(touch_policy))
response = self.protocol.send_apdu(
0, INS_GENERATE_ASYMMETRIC, 0, slot, Tlv(0xAC, data)
)
return _parse_device_public_key(key_type, Tlv.unpack(0x7F49, response))
def attest_key(self, slot: SLOT) -> x509.Certificate:
require_version(self.version, (4, 3, 0))
response = self.protocol.send_apdu(0, INS_ATTEST, slot, 0)
return x509.load_der_x509_certificate(response, default_backend())
def _change_reference(self, ins, p2, value1, value2):
try:
self.protocol.send_apdu(
0, ins, 0, p2, _pin_bytes(value1) + _pin_bytes(value2)
)
except ApduError as e:
retries = _retries_from_sw(self.version, e.sw)
if retries is None:
raise
if p2 == PIN_P2:
self._current_pin_retries = retries
raise InvalidPinError(retries)
def _get_pin_puk_metadata(self, p2):
require_version(self.version, (5, 3, 0))
data = Tlv.parse_dict(self.protocol.send_apdu(0, INS_GET_METADATA, 0, p2))
attempts = data[TAG_METADATA_RETRIES]
return PinMetadata(
data[TAG_METADATA_IS_DEFAULT] != b"\0",
attempts[INDEX_RETRIES_TOTAL],
attempts[INDEX_RETRIES_REMAINING],
)
def _use_private_key(self, slot, key_type, message, exponentiation):
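        # Wraps the message in a dynamic-authentication template: an empty response
        # TLV plus either a challenge TLV (sign/decrypt) or an exponentiation TLV
        # (key agreement), then unwraps the response TLV from the card's reply.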
try:
response = self.protocol.send_apdu(
0,
INS_AUTHENTICATE,
key_type,
slot,
Tlv(
TAG_DYN_AUTH,
Tlv(TAG_AUTH_RESPONSE)
+ Tlv(
TAG_AUTH_EXPONENTIATION
if exponentiation
else TAG_AUTH_CHALLENGE,
message,
),
),
)
return Tlv.unpack(
TAG_AUTH_RESPONSE,
Tlv.unpack(
TAG_DYN_AUTH,
response,
),
)
except ApduError as e:
if e.sw == SW.INCORRECT_PARAMETERS:
raise e # TODO: Different error, No key?
raise
|
the-stack_0_1247 | # Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torchvision.models as models
def test():
net = models.resnet18().half().float()
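    # round-trip the weights through fp16 (presumably so the PyTorch reference
    # output matches ncnn's fp16 weight precision)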
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 3, 224, 224)
a = net(x)
# export torchscript
mod = torch.jit.trace(net, x)
mod.save("test_resnet18.pt")
# torchscript to pnnx
import os
os.system("../../src/pnnx test_resnet18.pt inputshape=[1,3,224,224]")
# ncnn inference
import test_resnet18_ncnn
b = test_resnet18_ncnn.test_inference()
return torch.allclose(a, b, 1e-2, 1e-2)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
the-stack_0_1248 | from pydyn.operations.binary_tree import has_nested_add
from pydyn.operations.geometry import Dot, Cross, Vee, Hat
from pydyn.operations.addition import Add, VAdd, MAdd
from pydyn.operations.multiplication import Mul, SMMul, SVMul, MVMul, VVMul, MMMul
from pydyn.base.matrices import MatrixExpr
from pydyn.base.scalars import ScalarExpr
from pydyn.base.vectors import VectorExpr
from pydyn.utils.errors import UndefinedCaseError
def expand_scalar(expr):
if isinstance(expr, Add):
expanded_expr = Add()
for n in expr.nodes:
expanded_expr += expand(n)
return expanded_expr
elif isinstance(expr, Mul):
if isinstance(expr.left, Add) and isinstance(expr.right, Add):
"""(a+b)(c+d) = ac + ad + bc + bd"""
expanded_expr = Add()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(nl * nr)
return expanded_expr
elif isinstance(expr.left, Add):
"""(a+b)c = ac + bc"""
expanded_expr = Add()
for n in expr.left.nodes:
expanded_expr += expand(n * expr.right)
return expanded_expr
elif isinstance(expr.right, Add):
"""a(b+c) = ab + ac"""
expanded_expr = Add()
for n in expr.right.nodes:
expanded_expr += expand(expr.left * n)
return expanded_expr
else:
if has_nested_add(expr):
return expand(expand(expr.left) * expand(expr.right))
else:
return expr
elif isinstance(expr, Dot):
if isinstance(expr.left, VAdd) and isinstance(expr.right, VAdd):
"""(x+y).(u+v) = x.u + x.v + y.u + y.v"""
expanded_expr = Add()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(Dot(nl, nr))
return expanded_expr
elif isinstance(expr.right, VAdd):
"""x.(u+v) = x.u + x.v"""
expanded_expr = Add()
for n in expr.right.nodes:
expanded_expr += expand(Dot(expr.left, n))
return expanded_expr
elif isinstance(expr.left, VAdd):
"""(x+y).u = x.u + y.u"""
expanded_expr = Add()
for n in expr.left.nodes:
expanded_expr += expand(Dot(n, expr.right))
return expanded_expr
else:
if has_nested_add(expr):
return expand(Dot(expand(expr.left), expand(expr.right)))
else:
return Dot(expand(expr.left), expand(expr.right))
elif isinstance(expr, VVMul):
raise NotImplementedError
return expr
def expand_vector(expr):
if isinstance(expr, VAdd):
expanded_expr = VAdd()
for n in expr.nodes:
expanded_expr += expand(n)
return expanded_expr
elif isinstance(expr, MVMul):
if isinstance(expr.left, MAdd):
"""(A+B)x = Ax+Bx"""
expanded_expr = VAdd()
for n in expr.left.nodes:
expanded_expr += expand(MVMul(n, expr.right))
return expanded_expr
elif isinstance(expr.right, VAdd):
"""A(x+y) = Ax + Ay"""
expanded_expr = VAdd()
for n in expr.right.nodes:
expanded_expr += expand(MVMul(expr.left, n))
return expanded_expr
else:
if has_nested_add(expr):
return expand(MVMul(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, SVMul):
if isinstance(expr.left, VAdd):
"""(x+y)a=xa+ya"""
expanded_expr = VAdd()
for n in expr.left.nodes:
expanded_expr += expand(SVMul(n, expr.right))
return expanded_expr
else:
if has_nested_add(expr):
return expand(SVMul(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, Cross):
if isinstance(expr.left, VAdd) and isinstance(expr.right, VAdd):
expanded_expr = VAdd()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(Cross(nl, nr))
return expanded_expr
elif isinstance(expr.left, VAdd):
expanded_expr = VAdd()
for n in expr.left.nodes:
expanded_expr += expand(Cross(n, expr.right))
return expanded_expr
elif isinstance(expr.right, VAdd):
"""x.(u+v) = x.u + x.v"""
expanded_expr = VAdd()
for n in expr.right.nodes:
expanded_expr += expand(Cross(expr.left, n))
return expanded_expr
else:
if has_nested_add(expr):
return expand(Cross(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, Vee):
return Vee(expand(expr))
return expr
def expand_matrix(expr):
if isinstance(expr, MAdd):
expanded_expr = MAdd()
for n in expr.nodes:
expanded_expr += expand(n)
return expanded_expr
elif isinstance(expr, MMMul):
if isinstance(expr.left, MAdd) and isinstance(expr.right, MAdd):
expanded_expr = MAdd()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(nl * nr)
return expanded_expr
elif isinstance(expr.left, MAdd):
expanded_expr = MAdd()
for nl in expr.left.nodes:
expanded_expr += expand(nl * expr.right)
return expanded_expr
elif isinstance(expr.right, MAdd):
expanded_expr = MAdd()
for nr in expr.right.nodes:
expanded_expr += expand(expr.left * nr)
return expanded_expr
else:
if has_nested_add(expr):
return expand(MMMul(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, SMMul):
        raise Exception('SMMul in expand_matrix is not implemented')
elif isinstance(expr, VVMul):
raise Exception('VVMul in expand_matrix is not implemented')
elif isinstance(expr, Hat):
return Hat(expand(expr.expr))
return expr
def expand(expr):
# TODO add expand functionality to the Expr class directly
if isinstance(expr, ScalarExpr):
return expand_scalar(expr)
elif isinstance(expr, VectorExpr):
return expand_vector(expr)
elif isinstance(expr, MatrixExpr):
return expand_matrix(expr)
else:
raise UndefinedCaseError
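# Minimal usage sketch (hypothetical symbols; the actual constructors live in pydyn.base):
#   x, y, u = ...  # VectorExpr instances
#   expand(Cross(x + y, u))  # distributes to Cross(x, u) + Cross(y, u)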
|
the-stack_0_1249 | import matplotlib.pyplot as plt
import pprint
from string import ascii_lowercase as letters
def read_and_count_letters(to_read):
# Open the file that is passed in as an argument to the function
with open(to_read, encoding='utf-8') as f:
text = f.read().lower()
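    # Count how many times each lowercase ASCII letter appears in the text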
text_count = dict((l, text.count(l)) for l in letters)
    # Sort the letter counts in descending order of frequency
text_sort = sorted(text_count.items(), key=lambda x: x[1], reverse=True)
# Printing the returned data to the command line
pprint.pprint(text_sort)
# Initialising a new matplotlib bar graph illustrating the occurrences of letters detected in the text file
plt.bar(*zip(*text_count.items()))
plt.show()
# Enter the path of the .txt file you wish to process between the quotes below
to_read = ''
read_and_count_letters(to_read)
|
the-stack_0_1250 | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import copy
import unicodedata
from typing import (
Any,
ClassVar,
Dict,
List,
NamedTuple,
Sequence,
Set,
Literal,
Optional,
TYPE_CHECKING,
Tuple,
Union,
overload,
)
from . import utils, abc
from .role import Role
from .member import Member, VoiceState
from .emoji import Emoji
from .errors import InvalidData
from .permissions import PermissionOverwrite
from .colour import Colour
from .errors import InvalidArgument, ClientException
from .channel import *
from .channel import _guild_channel_factory
from .channel import _threaded_guild_channel_factory
from .enums import (
AuditLogAction,
VideoQualityMode,
VoiceRegion,
ChannelType,
try_enum,
VerificationLevel,
ContentFilter,
NotificationLevel,
NSFWLevel,
)
from .mixins import Hashable
from .user import User
from .invite import Invite
from .iterators import AuditLogIterator, MemberIterator
from .widget import Widget
from .asset import Asset
from .flags import SystemChannelFlags
from .integrations import Integration, _integration_factory
from .stage_instance import StageInstance
from .threads import Thread, ThreadMember
from .sticker import GuildSticker
from .file import File
from .welcome_screen import WelcomeScreen, WelcomeScreenChannel
__all__ = (
'Guild',
)
MISSING = utils.MISSING
if TYPE_CHECKING:
from .abc import Snowflake, SnowflakeTime
from .types.guild import Ban as BanPayload, Guild as GuildPayload, MFALevel, GuildFeature
from .types.threads import (
Thread as ThreadPayload,
)
from .types.voice import GuildVoiceState
from .permissions import Permissions
from .channel import VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel
from .template import Template
from .webhook import Webhook
from .state import ConnectionState
from .voice_client import VoiceProtocol
import datetime
VocalGuildChannel = Union[VoiceChannel, StageChannel]
GuildChannel = Union[VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel]
ByCategoryItem = Tuple[Optional[CategoryChannel], List[GuildChannel]]
class BanEntry(NamedTuple):
reason: Optional[str]
user: User
class _GuildLimit(NamedTuple):
emoji: int
stickers: int
bitrate: float
filesize: int
class Guild(Hashable):
"""Represents a Discord guild.
This is referred to as a "server" in the official Discord UI.
.. container:: operations
.. describe:: x == y
Checks if two guilds are equal.
.. describe:: x != y
Checks if two guilds are not equal.
.. describe:: hash(x)
Returns the guild's hash.
.. describe:: str(x)
Returns the guild's name.
Attributes
----------
name: :class:`str`
The guild name.
emojis: Tuple[:class:`Emoji`, ...]
All emojis that the guild owns.
stickers: Tuple[:class:`GuildSticker`, ...]
All stickers that the guild owns.
.. versionadded:: 2.0
region: :class:`VoiceRegion`
The region the guild belongs on. There is a chance that the region
will be a :class:`str` if the value is not recognised by the enumerator.
afk_timeout: :class:`int`
The timeout to get sent to the AFK channel.
afk_channel: Optional[:class:`VoiceChannel`]
The channel that denotes the AFK channel. ``None`` if it doesn't exist.
id: :class:`int`
The guild's ID.
owner_id: :class:`int`
The guild owner's ID. Use :attr:`Guild.owner` instead.
unavailable: :class:`bool`
Indicates if the guild is unavailable. If this is ``True`` then the
reliability of other attributes outside of :attr:`Guild.id` is slim and they might
all be ``None``. It is best to not do anything with the guild if it is unavailable.
Check the :func:`on_guild_unavailable` and :func:`on_guild_available` events.
max_presences: Optional[:class:`int`]
The maximum amount of presences for the guild.
max_members: Optional[:class:`int`]
The maximum amount of members for the guild.
.. note::
This attribute is only available via :meth:`.Client.fetch_guild`.
max_video_channel_users: Optional[:class:`int`]
The maximum amount of users in a video channel.
.. versionadded:: 1.4
description: Optional[:class:`str`]
The guild's description.
mfa_level: :class:`int`
Indicates the guild's two factor authorisation level. If this value is 0 then
the guild does not require 2FA for their administrative members. If the value is
1 then they do.
verification_level: :class:`VerificationLevel`
The guild's verification level.
explicit_content_filter: :class:`ContentFilter`
The guild's explicit content filter.
default_notifications: :class:`NotificationLevel`
The guild's notification settings.
features: List[:class:`str`]
A list of features that the guild has. The features that a guild can have are
subject to arbitrary change by Discord.
They are currently as follows:
- ``ANIMATED_BANNER``: Guild can upload an animated banner.
- ``ANIMATED_ICON``: Guild can upload an animated icon.
- ``BANNER``: Guild can upload and use a banner. (i.e. :attr:`.banner`)
- ``CHANNEL_BANNER``: Guild can upload and use a channel banners.
- ``COMMERCE``: Guild can sell things using store channels.
- ``COMMUNITY``: Guild is a community server.
- ``DISCOVERABLE``: Guild shows up in Server Discovery.
- ``FEATURABLE``: Guild is able to be featured in Server Discovery.
- ``HAS_DIRECTORY_ENTRY``: Unknown.
        - ``HUB``: Hubs contain a directory channel that lets you find school-related, student-run servers for your school or university.
- ``INTERNAL_EMPLOYEE_ONLY``: Indicates that only users with the staff badge can join the guild.
- ``INVITE_SPLASH``: Guild's invite page can have a special splash.
        - ``LINKED_TO_HUB``: Guild is linked to a hub.
- ``MEMBER_PROFILES``: Unknown.
- ``MEMBER_VERIFICATION_GATE_ENABLED``: Guild has Membership Screening enabled.
- ``MONETIZATION_ENABLED``: Guild has enabled monetization.
- ``MORE_EMOJI``: Guild has increased custom emoji slots.
- ``MORE_STICKERS``: Guild has increased custom sticker slots.
- ``NEWS``: Guild can create news channels.
- ``NEW_THREAD_PERMISSIONS``: Guild has new thread permissions.
- ``PARTNERED``: Guild is a partnered server.
- ``PREMIUM_TIER_3_OVERRIDE``: Forces the server to server boosting level 3 (specifically created by Discord Staff Member "Jethro" for their personal server).
- ``PREVIEW_ENABLED``: Guild can be viewed before being accepted via Membership Screening.
- ``PRIVATE_THREADS``: Guild has access to create private threads.
- ``ROLE_ICONS``: Guild can set an image or emoji as a role icon.
- ``ROLE_SUBSCRIPTIONS_AVAILABLE_FOR_PURCHASE``: Role subscriptions are available for purchasing.
- ``ROLE_SUBSCRIPTIONS_ENABLED``: Guild is able to view and manage role subscriptions.
- ``SEVEN_DAY_THREAD_ARCHIVE``: Guild has access to the seven day archive time for threads.
- ``TEXT_IN_VOICE_ENABLED``: Guild has a chat button inside voice channels that opens a dedicated text channel in a sidebar similar to thread view.
        - ``THREAD_DEFAULT_AUTO_ARCHIVE_DURATION``: Unknown, presumably used for testing changes to the thread default auto archive duration.
- ``THREADS_ENABLED_TESTING``: Used by bot developers to test their bots with threads in guilds with 5 or less members and a bot. Also gives the premium thread features.
- ``THREE_DAY_THREAD_ARCHIVE``: Guild has access to the three day archive time for threads.
- ``TICKETED_EVENTS_ENABLED``: Guild has enabled ticketed events.
- ``VANITY_URL``: Guild can have a vanity invite URL (e.g. discord.gg/discord-api).
- ``VERIFIED``: Guild is a verified server.
- ``VIP_REGIONS``: Guild has VIP voice regions.
- ``WELCOME_SCREEN_ENABLED``: Guild has enabled the welcome screen.
premium_tier: :class:`int`
The premium tier for this guild. Corresponds to "Nitro Server" in the official UI.
The number goes from 0 to 3 inclusive.
premium_subscription_count: :class:`int`
The number of "boosts" this guild currently has.
premium_progress_bar_enabled: :class:`bool`
Indicates if the guild has premium progress bar enabled.
.. versionadded:: 2.0
preferred_locale: Optional[:class:`str`]
The preferred locale for the guild. Used when filtering Server Discovery
results to a specific language.
nsfw_level: :class:`NSFWLevel`
The guild's NSFW level.
.. versionadded:: 2.0
approximate_member_count: Optional[:class:`int`]
The approximate number of members in the guild. This is ``None`` unless the guild is obtained
using :meth:`Client.fetch_guild` with ``with_counts=True``.
.. versionadded:: 2.0
approximate_presence_count: Optional[:class:`int`]
The approximate number of members currently active in the guild.
This includes idle, dnd, online, and invisible members. Offline members are excluded.
This is ``None`` unless the guild is obtained using :meth:`Client.fetch_guild`
with ``with_counts=True``.
.. versionadded:: 2.0
"""
__slots__ = (
'afk_timeout',
'afk_channel',
'name',
'id',
'unavailable',
'region',
'owner_id',
'mfa_level',
'emojis',
'stickers',
'features',
'verification_level',
'explicit_content_filter',
'default_notifications',
'description',
'max_presences',
'max_members',
'max_video_channel_users',
'premium_tier',
'premium_subscription_count',
'premium_progress_bar_enabled',
'preferred_locale',
'nsfw_level',
'_members',
'_channels',
'_icon',
'_banner',
'_state',
'_roles',
'_member_count',
'_large',
'_splash',
'_voice_states',
'_system_channel_id',
'_system_channel_flags',
'_discovery_splash',
'_rules_channel_id',
'_public_updates_channel_id',
'_stage_instances',
'_threads',
"approximate_member_count",
"approximate_presence_count",
)
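    # Guild limits per premium (boost) tier: emoji slots, sticker slots, max voice
    # bitrate (bps) and max upload size (bytes); the ``None`` key is a defensive
    # fallback for an unset premium tier.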
_PREMIUM_GUILD_LIMITS: ClassVar[Dict[Optional[int], _GuildLimit]] = {
None: _GuildLimit(emoji=50, stickers=0, bitrate=96e3, filesize=8388608),
0: _GuildLimit(emoji=50, stickers=0, bitrate=96e3, filesize=8388608),
1: _GuildLimit(emoji=100, stickers=15, bitrate=128e3, filesize=8388608),
2: _GuildLimit(emoji=150, stickers=30, bitrate=256e3, filesize=52428800),
3: _GuildLimit(emoji=250, stickers=60, bitrate=384e3, filesize=104857600),
}
def __init__(self, *, data: GuildPayload, state: ConnectionState):
self._channels: Dict[int, GuildChannel] = {}
self._members: Dict[int, Member] = {}
self._voice_states: Dict[int, VoiceState] = {}
self._threads: Dict[int, Thread] = {}
self._state: ConnectionState = state
self._from_data(data)
def _add_channel(self, channel: GuildChannel, /) -> None:
self._channels[channel.id] = channel
def _remove_channel(self, channel: Snowflake, /) -> None:
self._channels.pop(channel.id, None)
def _voice_state_for(self, user_id: int, /) -> Optional[VoiceState]:
return self._voice_states.get(user_id)
def _add_member(self, member: Member, /) -> None:
self._members[member.id] = member
def _store_thread(self, payload: ThreadPayload, /) -> Thread:
thread = Thread(guild=self, state=self._state, data=payload)
self._threads[thread.id] = thread
return thread
def _remove_member(self, member: Snowflake, /) -> None:
self._members.pop(member.id, None)
def _add_thread(self, thread: Thread, /) -> None:
self._threads[thread.id] = thread
def _remove_thread(self, thread: Snowflake, /) -> None:
self._threads.pop(thread.id, None)
def _clear_threads(self) -> None:
self._threads.clear()
def _remove_threads_by_channel(self, channel_id: int) -> None:
to_remove = [k for k, t in self._threads.items() if t.parent_id == channel_id]
for k in to_remove:
del self._threads[k]
def _filter_threads(self, channel_ids: Set[int]) -> Dict[int, Thread]:
to_remove: Dict[int, Thread] = {k: t for k, t in self._threads.items() if t.parent_id in channel_ids}
for k in to_remove:
del self._threads[k]
return to_remove
def __str__(self) -> str:
return self.name or ''
def __repr__(self) -> str:
attrs = (
('id', self.id),
('name', self.name),
('shard_id', self.shard_id),
('chunked', self.chunked),
('member_count', getattr(self, '_member_count', None)),
)
inner = ' '.join('%s=%r' % t for t in attrs)
return f'<Guild {inner}>'
def _update_voice_state(self, data: GuildVoiceState, channel_id: int) -> Tuple[Optional[Member], VoiceState, VoiceState]:
user_id = int(data['user_id'])
channel = self.get_channel(channel_id)
try:
# check if we should remove the voice state from cache
if channel is None:
after = self._voice_states.pop(user_id)
else:
after = self._voice_states[user_id]
before = copy.copy(after)
after._update(data, channel)
except KeyError:
# if we're here then we're getting added into the cache
after = VoiceState(data=data, channel=channel)
before = VoiceState(data=data, channel=None)
self._voice_states[user_id] = after
member = self.get_member(user_id)
if member is None:
try:
member = Member(data=data['member'], state=self._state, guild=self)
except KeyError:
member = None
return member, before, after
def _add_role(self, role: Role, /) -> None:
# roles get added to the bottom (position 1, pos 0 is @everyone)
# so since self.roles has the @everyone role, we can't increment
# its position because it's stuck at position 0. Luckily x += False
# is equivalent to adding 0. So we cast the position to a bool and
# increment it.
for r in self._roles.values():
r.position += not r.is_default()
self._roles[role.id] = role
def _remove_role(self, role_id: int, /) -> Role:
# this raises KeyError if it fails..
role = self._roles.pop(role_id)
# since it didn't, we can change the positions now
# basically the same as above except we only decrement
# the position if we're above the role we deleted.
for r in self._roles.values():
r.position -= r.position > role.position
return role
def _from_data(self, guild: GuildPayload) -> None:
# according to Stan, this is always available even if the guild is unavailable
# I don't have this guarantee when someone updates the guild.
member_count = guild.get('member_count', None)
if member_count is not None:
self._member_count: int = member_count
self.name: str = guild.get('name')
self.region: VoiceRegion = try_enum(VoiceRegion, guild.get('region'))
self.verification_level: VerificationLevel = try_enum(VerificationLevel, guild.get('verification_level'))
self.default_notifications: NotificationLevel = try_enum(
NotificationLevel, guild.get('default_message_notifications')
)
self.explicit_content_filter: ContentFilter = try_enum(ContentFilter, guild.get('explicit_content_filter', 0))
self.afk_timeout: int = guild.get('afk_timeout')
self._icon: Optional[str] = guild.get('icon')
self._banner: Optional[str] = guild.get('banner')
self.unavailable: bool = guild.get('unavailable', False)
self.id: int = int(guild['id'])
self._roles: Dict[int, Role] = {}
state = self._state # speed up attribute access
for r in guild.get('roles', []):
role = Role(guild=self, data=r, state=state)
self._roles[role.id] = role
self.mfa_level: MFALevel = guild.get('mfa_level')
self.emojis: Tuple[Emoji, ...] = tuple(map(lambda d: state.store_emoji(self, d), guild.get('emojis', [])))
self.stickers: Tuple[GuildSticker, ...] = tuple(
map(lambda d: state.store_sticker(self, d), guild.get('stickers', []))
)
self.features: List[GuildFeature] = guild.get('features', [])
self._splash: Optional[str] = guild.get('splash')
self._system_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'system_channel_id')
self.description: Optional[str] = guild.get('description')
self.max_presences: Optional[int] = guild.get('max_presences')
self.max_members: Optional[int] = guild.get('max_members')
self.max_video_channel_users: Optional[int] = guild.get('max_video_channel_users')
self.premium_tier: int = guild.get('premium_tier', 0)
self.premium_subscription_count: int = guild.get('premium_subscription_count') or 0
self.premium_progress_bar_enabled: bool = guild.get('premium_progress_bar_enabled') or False
self._system_channel_flags: int = guild.get('system_channel_flags', 0)
self.preferred_locale: Optional[str] = guild.get('preferred_locale')
self._discovery_splash: Optional[str] = guild.get('discovery_splash')
self._rules_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'rules_channel_id')
self._public_updates_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'public_updates_channel_id')
self.nsfw_level: NSFWLevel = try_enum(NSFWLevel, guild.get('nsfw_level', 0))
self.approximate_presence_count = guild.get('approximate_presence_count')
self.approximate_member_count = guild.get('approximate_member_count')
self._stage_instances: Dict[int, StageInstance] = {}
for s in guild.get('stage_instances', []):
stage_instance = StageInstance(guild=self, data=s, state=state)
self._stage_instances[stage_instance.id] = stage_instance
cache_joined = self._state.member_cache_flags.joined
self_id = self._state.self_id
for mdata in guild.get('members', []):
member = Member(data=mdata, guild=self, state=state)
if cache_joined or member.id == self_id:
self._add_member(member)
self._sync(guild)
self._large: Optional[bool] = None if member_count is None else self._member_count >= 250
self.owner_id: Optional[int] = utils._get_as_snowflake(guild, 'owner_id')
self.afk_channel: Optional[VocalGuildChannel] = self.get_channel(utils._get_as_snowflake(guild, 'afk_channel_id')) # type: ignore
for obj in guild.get('voice_states', []):
self._update_voice_state(obj, int(obj['channel_id']))
# TODO: refactor/remove?
def _sync(self, data: GuildPayload) -> None:
try:
self._large = data['large']
except KeyError:
pass
empty_tuple = tuple()
for presence in data.get('presences', []):
user_id = int(presence['user']['id'])
member = self.get_member(user_id)
if member is not None:
member._presence_update(presence, empty_tuple) # type: ignore
if 'channels' in data:
channels = data['channels']
for c in channels:
factory, ch_type = _guild_channel_factory(c['type'])
if factory:
self._add_channel(factory(guild=self, data=c, state=self._state)) # type: ignore
if 'threads' in data:
threads = data['threads']
for thread in threads:
self._add_thread(Thread(guild=self, state=self._state, data=thread))
@property
def channels(self) -> List[GuildChannel]:
"""List[:class:`abc.GuildChannel`]: A list of channels that belongs to this guild."""
return list(self._channels.values())
@property
def threads(self) -> List[Thread]:
"""List[:class:`Thread`]: A list of threads that you have permission to view.
.. versionadded:: 2.0
"""
return list(self._threads.values())
@property
def large(self) -> bool:
""":class:`bool`: Indicates if the guild is a 'large' guild.
        A large guild is defined as having more than ``large_threshold`` members,
        which for this library is set to the maximum of 250.
"""
if self._large is None:
try:
return self._member_count >= 250
except AttributeError:
return len(self._members) >= 250
return self._large
@property
def voice_channels(self) -> List[VoiceChannel]:
"""List[:class:`VoiceChannel`]: A list of voice channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, VoiceChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def stage_channels(self) -> List[StageChannel]:
"""List[:class:`StageChannel`]: A list of stage channels that belongs to this guild.
.. versionadded:: 1.7
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, StageChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def me(self) -> Member:
""":class:`Member`: Similar to :attr:`Client.user` except an instance of :class:`Member`.
This is essentially used to get the member version of yourself.
"""
self_id = self._state.user.id
# The self member is *always* cached
return self.get_member(self_id) # type: ignore
@property
def voice_client(self) -> Optional[VoiceProtocol]:
"""Optional[:class:`VoiceProtocol`]: Returns the :class:`VoiceProtocol` associated with this guild, if any."""
return self._state._get_voice_client(self.id)
@property
def text_channels(self) -> List[TextChannel]:
"""List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, TextChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def categories(self) -> List[CategoryChannel]:
"""List[:class:`CategoryChannel`]: A list of categories that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, CategoryChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
def by_category(self) -> List[ByCategoryItem]:
"""Returns every :class:`CategoryChannel` and their associated channels.
These channels and categories are sorted in the official Discord UI order.
If the channels do not have a category, then the first element of the tuple is
``None``.
Returns
--------
List[Tuple[Optional[:class:`CategoryChannel`], List[:class:`abc.GuildChannel`]]]:
The categories and their associated channels.
"""
grouped: Dict[Optional[int], List[GuildChannel]] = {}
for channel in self._channels.values():
if isinstance(channel, CategoryChannel):
grouped.setdefault(channel.id, [])
continue
try:
grouped[channel.category_id].append(channel)
except KeyError:
grouped[channel.category_id] = [channel]
def key(t: ByCategoryItem) -> Tuple[Tuple[int, int], List[GuildChannel]]:
k, v = t
return ((k.position, k.id) if k else (-1, -1), v)
_get = self._channels.get
as_list: List[ByCategoryItem] = [(_get(k), v) for k, v in grouped.items()] # type: ignore
as_list.sort(key=key)
for _, channels in as_list:
channels.sort(key=lambda c: (c._sorting_bucket, c.position, c.id))
return as_list
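    # Example (sketch): walk channels in UI order
    #   for category, channels in guild.by_category():
    #       names = [c.name for c in channels]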
def _resolve_channel(self, id: Optional[int], /) -> Optional[Union[GuildChannel, Thread]]:
if id is None:
return
return self._channels.get(id) or self._threads.get(id)
def get_channel_or_thread(self, channel_id: int, /) -> Optional[Union[Thread, GuildChannel]]:
"""Returns a channel or thread with the given ID.
.. versionadded:: 2.0
Parameters
-----------
channel_id: :class:`int`
The ID to search for.
Returns
--------
Optional[Union[:class:`Thread`, :class:`.abc.GuildChannel`]]
The returned channel or thread or ``None`` if not found.
"""
return self._channels.get(channel_id) or self._threads.get(channel_id)
def get_channel(self, channel_id: int, /) -> Optional[GuildChannel]:
"""Returns a channel with the given ID.
.. note::
This does *not* search for threads.
Parameters
-----------
channel_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.abc.GuildChannel`]
The returned channel or ``None`` if not found.
"""
return self._channels.get(channel_id)
def get_thread(self, thread_id: int, /) -> Optional[Thread]:
"""Returns a thread with the given ID.
.. versionadded:: 2.0
Parameters
-----------
thread_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Thread`]
The returned thread or ``None`` if not found.
"""
return self._threads.get(thread_id)
@property
def system_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Returns the guild's channel used for system messages.
If no channel is set, then this returns ``None``.
"""
channel_id = self._system_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def system_channel_flags(self) -> SystemChannelFlags:
""":class:`SystemChannelFlags`: Returns the guild's system channel settings."""
return SystemChannelFlags._from_value(self._system_channel_flags)
@property
def rules_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Return's the guild's channel used for the rules.
The guild must be a Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.3
"""
channel_id = self._rules_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def public_updates_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Return's the guild's channel where admins and
moderators of the guilds receive notices from Discord. The guild must be a
Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.4
"""
channel_id = self._public_updates_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def emoji_limit(self) -> int:
""":class:`int`: The maximum number of emoji slots this guild has."""
more_emoji = 200 if 'MORE_EMOJI' in self.features else 50
return max(more_emoji, self._PREMIUM_GUILD_LIMITS[self.premium_tier].emoji)
@property
def sticker_limit(self) -> int:
""":class:`int`: The maximum number of sticker slots this guild has.
.. versionadded:: 2.0
"""
more_stickers = 60 if 'MORE_STICKERS' in self.features else 0
return max(more_stickers, self._PREMIUM_GUILD_LIMITS[self.premium_tier].stickers)
@property
def bitrate_limit(self) -> float:
""":class:`float`: The maximum bitrate for voice channels this guild can have."""
vip_guild = self._PREMIUM_GUILD_LIMITS[1].bitrate if 'VIP_REGIONS' in self.features else 96e3
return max(vip_guild, self._PREMIUM_GUILD_LIMITS[self.premium_tier].bitrate)
@property
def filesize_limit(self) -> int:
""":class:`int`: The maximum number of bytes files can have when uploaded to this guild."""
return self._PREMIUM_GUILD_LIMITS[self.premium_tier].filesize
@property
def members(self) -> List[Member]:
"""List[:class:`Member`]: A list of members that belong to this guild."""
return list(self._members.values())
def get_member(self, user_id: int, /) -> Optional[Member]:
"""Returns a member with the given ID.
Parameters
-----------
user_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Member`]
The member or ``None`` if not found.
"""
return self._members.get(user_id)
@property
def premium_subscribers(self) -> List[Member]:
"""List[:class:`Member`]: A list of members who have "boosted" this guild."""
return [member for member in self.members if member.premium_since is not None]
@property
def roles(self) -> List[Role]:
"""List[:class:`Role`]: Returns a :class:`list` of the guild's roles in hierarchy order.
The first element of this list will be the lowest role in the
hierarchy.
"""
return sorted(self._roles.values())
def get_role(self, role_id: int, /) -> Optional[Role]:
"""Returns a role with the given ID.
Parameters
-----------
role_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Role`]
The role or ``None`` if not found.
"""
return self._roles.get(role_id)
@property
def default_role(self) -> Role:
""":class:`Role`: Gets the @everyone role that all members have by default."""
# The @everyone role is *always* given
return self.get_role(self.id) # type: ignore
@property
def premium_subscriber_role(self) -> Optional[Role]:
"""Optional[:class:`Role`]: Gets the premium subscriber role, AKA "boost" role, in this guild.
.. versionadded:: 1.6
"""
for role in self._roles.values():
if role.is_premium_subscriber():
return role
return None
@property
def self_role(self) -> Optional[Role]:
"""Optional[:class:`Role`]: Gets the role associated with this client's user, if any.
.. versionadded:: 1.6
"""
self_id = self._state.self_id
for role in self._roles.values():
tags = role.tags
if tags and tags.bot_id == self_id:
return role
return None
@property
def stage_instances(self) -> List[StageInstance]:
"""List[:class:`StageInstance`]: Returns a :class:`list` of the guild's stage instances that
are currently running.
.. versionadded:: 2.0
"""
return list(self._stage_instances.values())
def get_stage_instance(self, stage_instance_id: int, /) -> Optional[StageInstance]:
"""Returns a stage instance with the given ID.
.. versionadded:: 2.0
Parameters
-----------
stage_instance_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`StageInstance`]
The stage instance or ``None`` if not found.
"""
return self._stage_instances.get(stage_instance_id)
@property
def owner(self) -> Optional[Member]:
"""Optional[:class:`Member`]: The member that owns the guild."""
return self.get_member(self.owner_id) # type: ignore
@property
def icon(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return Asset._from_guild_icon(self._state, self.id, self._icon)
@property
def banner(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's banner asset, if available."""
if self._banner is None:
return None
return Asset._from_guild_image(self._state, self.id, self._banner, path='banners')
@property
def splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's invite splash asset, if available."""
if self._splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._splash, path='splashes')
@property
def discovery_splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's discovery splash asset, if available."""
if self._discovery_splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._discovery_splash, path='discovery-splashes')
@property
def member_count(self) -> int:
""":class:`int`: Returns the true member count regardless of it being loaded fully or not.
.. warning::
Due to a Discord limitation, in order for this attribute to remain up-to-date and
accurate, it requires :attr:`Intents.members` to be specified.
"""
return self._member_count
@property
def chunked(self) -> bool:
""":class:`bool`: Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
"""
count = getattr(self, '_member_count', None)
if count is None:
return False
return count == len(self._members)
@property
def shard_id(self) -> int:
""":class:`int`: Returns the shard ID for this guild if applicable."""
count = self._state.shard_count
if count is None:
return 0
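        # Discord's sharding formula: drop the low 22 snowflake bits and bucket by shard count.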
return (self.id >> 22) % count
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the guild's creation time in UTC."""
return utils.snowflake_time(self.id)
def get_member_named(self, name: str, /) -> Optional[Member]:
"""Returns the first member found that matches the name provided.
The name can have an optional discriminator argument, e.g. "Jake#0001"
or "Jake" will both do the lookup. However the former will give a more
precise result. Note that the discriminator must have all 4 digits
for this to work.
If a nickname is passed, then it is looked up via the nickname. Note
however, that a nickname + discriminator combo will not lookup the nickname
but rather the username + discriminator combo due to nickname + discriminator
not being unique.
If no member is found, ``None`` is returned.
Parameters
-----------
name: :class:`str`
The name of the member to lookup with an optional discriminator.
Returns
--------
Optional[:class:`Member`]
The member in this guild with the associated name. If not found
then ``None`` is returned.
"""
result = None
members = self.members
if len(name) > 5 and name[-5] == '#':
# The 5 length is checking to see if #0000 is in the string,
# as a#0000 has a length of 6, the minimum for a potential
# discriminator lookup.
potential_discriminator = name[-4:]
# do the actual lookup and return if found
# if it isn't found then we'll do a full name lookup below.
result = utils.get(members, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
def pred(m: Member) -> bool:
return m.nick == name or m.name == name
return utils.find(pred, members)
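    # Example (sketch):
    #   member = guild.get_member_named('Jake#0001')  # exact username#discriminator
    #   member = guild.get_member_named('Jake')       # falls back to username/nick match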
def _create_channel(
self,
name: str,
channel_type: ChannelType,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
category: Optional[Snowflake] = None,
**options: Any,
):
if overwrites is MISSING:
overwrites = {}
elif not isinstance(overwrites, dict):
raise InvalidArgument('overwrites parameter expects a dict.')
perms = []
for target, perm in overwrites.items():
if not isinstance(perm, PermissionOverwrite):
raise InvalidArgument(f'Expected PermissionOverwrite received {perm.__class__.__name__}')
allow, deny = perm.pair()
payload = {'allow': allow.value, 'deny': deny.value, 'id': target.id}
if isinstance(target, Role):
payload['type'] = abc._Overwrites.ROLE
else:
payload['type'] = abc._Overwrites.MEMBER
perms.append(payload)
parent_id = category.id if category else None
return self._state.http.create_channel(
self.id, channel_type.value, name=name, parent_id=parent_id, permission_overwrites=perms, **options
)
async def create_text_channel(
self,
name: str,
*,
reason: Optional[str] = None,
category: Optional[CategoryChannel] = None,
position: int = MISSING,
topic: str = MISSING,
slowmode_delay: int = MISSING,
nsfw: bool = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
) -> TextChannel:
"""|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: :class:`str`
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel, in seconds.
The maximum value possible is `21600`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
options = {}
if position is not MISSING:
options['position'] = position
if topic is not MISSING:
options['topic'] = topic
if slowmode_delay is not MISSING:
options['rate_limit_per_user'] = slowmode_delay
if nsfw is not MISSING:
options['nsfw'] = nsfw
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.text, category=category, reason=reason, **options
)
channel = TextChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_voice_channel(
self,
name: str,
*,
reason: Optional[str] = None,
category: Optional[CategoryChannel] = None,
position: int = MISSING,
bitrate: int = MISSING,
user_limit: int = MISSING,
rtc_region: Optional[VoiceRegion] = MISSING,
video_quality_mode: VideoQualityMode = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
) -> VoiceChannel:
"""|coro|
This is similar to :meth:`create_text_channel` except makes a :class:`VoiceChannel` instead.
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a voice channel.
rtc_region: Optional[:class:`VoiceRegion`]
The region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
video_quality_mode: :class:`VideoQualityMode`
The camera video quality for the voice channel's participants.
.. versionadded:: 2.0
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`VoiceChannel`
The channel that was just created.
"""
options = {}
if position is not MISSING:
options['position'] = position
if bitrate is not MISSING:
options['bitrate'] = bitrate
if user_limit is not MISSING:
options['user_limit'] = user_limit
if rtc_region is not MISSING:
options['rtc_region'] = None if rtc_region is None else str(rtc_region)
if video_quality_mode is not MISSING:
options['video_quality_mode'] = video_quality_mode.value
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.voice, category=category, reason=reason, **options
)
channel = VoiceChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_stage_channel(
self,
name: str,
*,
topic: str,
position: int = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
category: Optional[CategoryChannel] = None,
reason: Optional[str] = None,
) -> StageChannel:
"""|coro|
This is similar to :meth:`create_text_channel` except makes a :class:`StageChannel` instead.
.. versionadded:: 1.7
Parameters
-----------
name: :class:`str`
The channel's name.
topic: :class:`str`
The new channel's topic.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`StageChannel`
The channel that was just created.
"""
options: Dict[str, Any] = {
'topic': topic,
}
if position is not MISSING:
options['position'] = position
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.stage_voice, category=category, reason=reason, **options
)
channel = StageChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_category(
self,
name: str,
*,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
reason: Optional[str] = None,
position: int = MISSING,
) -> CategoryChannel:
"""|coro|
Same as :meth:`create_text_channel` except makes a :class:`CategoryChannel` instead.
.. note::
The ``category`` parameter is not supported in this function since categories
cannot have categories.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`CategoryChannel`
The channel that was just created.
"""
options: Dict[str, Any] = {}
if position is not MISSING:
options['position'] = position
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.category, reason=reason, **options
)
channel = CategoryChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
create_category_channel = create_category
async def leave(self) -> None:
"""|coro|
Leaves the guild.
.. note::
You cannot leave the guild that you own, you must delete it instead
via :meth:`delete`.
Raises
--------
HTTPException
Leaving the guild failed.
"""
await self._state.http.leave_guild(self.id)
async def delete(self) -> None:
"""|coro|
Deletes the guild. You must be the guild owner to delete the
guild.
Raises
--------
HTTPException
Deleting the guild failed.
Forbidden
You do not have permissions to delete the guild.
"""
await self._state.http.delete_guild(self.id)
async def edit(
self,
*,
reason: Optional[str] = MISSING,
name: str = MISSING,
description: Optional[str] = MISSING,
icon: Optional[bytes] = MISSING,
banner: Optional[bytes] = MISSING,
splash: Optional[bytes] = MISSING,
discovery_splash: Optional[bytes] = MISSING,
community: bool = MISSING,
region: Optional[Union[str, VoiceRegion]] = MISSING,
afk_channel: Optional[VoiceChannel] = MISSING,
owner: Snowflake = MISSING,
afk_timeout: int = MISSING,
default_notifications: NotificationLevel = MISSING,
verification_level: VerificationLevel = MISSING,
explicit_content_filter: ContentFilter = MISSING,
vanity_code: str = MISSING,
system_channel: Optional[TextChannel] = MISSING,
system_channel_flags: SystemChannelFlags = MISSING,
preferred_locale: str = MISSING,
rules_channel: Optional[TextChannel] = MISSING,
public_updates_channel: Optional[TextChannel] = MISSING,
premium_progress_bar_enabled: bool = MISSING,
) -> Guild:
r"""|coro|
Edits the guild.
You must have the :attr:`~Permissions.manage_guild` permission
to edit the guild.
.. versionchanged:: 1.4
The `rules_channel` and `public_updates_channel` keyword-only parameters were added.
.. versionchanged:: 2.0
The `discovery_splash` and `community` keyword-only parameters were added.
.. versionchanged:: 2.0
The newly updated guild is returned.
Parameters
----------
name: :class:`str`
The new name of the guild.
description: Optional[:class:`str`]
The new description of the guild. Could be ``None`` for no description.
This is only available to guilds that contain ``PUBLIC`` in :attr:`Guild.features`.
icon: :class:`bytes`
A :term:`py:bytes-like object` representing the icon. Only PNG/JPEG is supported.
GIF is only available to guilds that contain ``ANIMATED_ICON`` in :attr:`Guild.features`.
Could be ``None`` to denote removal of the icon.
banner: :class:`bytes`
A :term:`py:bytes-like object` representing the banner.
Could be ``None`` to denote removal of the banner. This is only available to guilds that contain
``BANNER`` in :attr:`Guild.features`.
splash: :class:`bytes`
A :term:`py:bytes-like object` representing the invite splash.
Only PNG/JPEG supported. Could be ``None`` to denote removing the
splash. This is only available to guilds that contain ``INVITE_SPLASH``
in :attr:`Guild.features`.
discovery_splash: :class:`bytes`
A :term:`py:bytes-like object` representing the discovery splash.
Only PNG/JPEG supported. Could be ``None`` to denote removing the
splash. This is only available to guilds that contain ``DISCOVERABLE``
in :attr:`Guild.features`.
community: :class:`bool`
Whether the guild should be a Community guild. If set to ``True``\, both ``rules_channel``
and ``public_updates_channel`` parameters are required.
region: Union[:class:`str`, :class:`VoiceRegion`]
The new region for the guild's voice communication.
afk_channel: Optional[:class:`VoiceChannel`]
The new channel that is the AFK channel. Could be ``None`` for no AFK channel.
afk_timeout: :class:`int`
The number of seconds until someone is moved to the AFK channel.
owner: :class:`Member`
The new owner of the guild to transfer ownership to. Note that you must
be owner of the guild to do this.
verification_level: :class:`VerificationLevel`
The new verification level for the guild.
default_notifications: :class:`NotificationLevel`
The new default notification level for the guild.
explicit_content_filter: :class:`ContentFilter`
The new explicit content filter for the guild.
vanity_code: :class:`str`
The new vanity code for the guild.
system_channel: Optional[:class:`TextChannel`]
The new channel that is used for the system channel. Could be ``None`` for no system channel.
system_channel_flags: :class:`SystemChannelFlags`
The new system channel settings to use with the new system channel.
preferred_locale: :class:`str`
The new preferred locale for the guild. Used as the primary language in the guild.
If set, this must be an ISO 639 code, e.g. ``en-US`` or ``ja`` or ``zh-CN``.
rules_channel: Optional[:class:`TextChannel`]
The new channel that is used for rules. This is only available to
guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no rules
channel.
public_updates_channel: Optional[:class:`TextChannel`]
The new channel that is used for public updates from Discord. This is only available to
guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no
public updates channel.
premium_progress_bar_enabled: :class:`bool`
Whether the guild should have premium progress bar enabled.
reason: Optional[:class:`str`]
The reason for editing this guild. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to edit the guild.
HTTPException
Editing the guild failed.
InvalidArgument
The image format passed in to ``icon`` is invalid. It must be
PNG or JPG. This is also raised if you are not the owner of the
guild and request an ownership transfer.
Returns
--------
:class:`Guild`
The newly updated guild. Note that this has the same limitations as
mentioned in :meth:`Client.fetch_guild` and may not have full data.
"""
http = self._state.http
if vanity_code is not MISSING:
await http.change_vanity_code(self.id, vanity_code, reason=reason)
fields: Dict[str, Any] = {}
if name is not MISSING:
fields['name'] = name
if description is not MISSING:
fields['description'] = description
if preferred_locale is not MISSING:
fields['preferred_locale'] = preferred_locale
if afk_timeout is not MISSING:
fields['afk_timeout'] = afk_timeout
if icon is not MISSING:
if icon is None:
fields['icon'] = icon
else:
fields['icon'] = utils._bytes_to_base64_data(icon)
if banner is not MISSING:
if banner is None:
fields['banner'] = banner
else:
fields['banner'] = utils._bytes_to_base64_data(banner)
if splash is not MISSING:
if splash is None:
fields['splash'] = splash
else:
fields['splash'] = utils._bytes_to_base64_data(splash)
if discovery_splash is not MISSING:
if discovery_splash is None:
fields['discovery_splash'] = discovery_splash
else:
fields['discovery_splash'] = utils._bytes_to_base64_data(discovery_splash)
if default_notifications is not MISSING:
if not isinstance(default_notifications, NotificationLevel):
raise InvalidArgument('default_notifications field must be of type NotificationLevel')
fields['default_message_notifications'] = default_notifications.value
if afk_channel is not MISSING:
if afk_channel is None:
fields['afk_channel_id'] = afk_channel
else:
fields['afk_channel_id'] = afk_channel.id
if system_channel is not MISSING:
if system_channel is None:
fields['system_channel_id'] = system_channel
else:
fields['system_channel_id'] = system_channel.id
if rules_channel is not MISSING:
if rules_channel is None:
fields['rules_channel_id'] = rules_channel
else:
fields['rules_channel_id'] = rules_channel.id
if public_updates_channel is not MISSING:
if public_updates_channel is None:
fields['public_updates_channel_id'] = public_updates_channel
else:
fields['public_updates_channel_id'] = public_updates_channel.id
if owner is not MISSING:
if self.owner_id != self._state.self_id:
raise InvalidArgument('To transfer ownership you must be the owner of the guild.')
fields['owner_id'] = owner.id
if region is not MISSING:
fields['region'] = str(region)
if verification_level is not MISSING:
if not isinstance(verification_level, VerificationLevel):
raise InvalidArgument('verification_level field must be of type VerificationLevel')
fields['verification_level'] = verification_level.value
if explicit_content_filter is not MISSING:
if not isinstance(explicit_content_filter, ContentFilter):
raise InvalidArgument('explicit_content_filter field must be of type ContentFilter')
fields['explicit_content_filter'] = explicit_content_filter.value
if system_channel_flags is not MISSING:
if not isinstance(system_channel_flags, SystemChannelFlags):
raise InvalidArgument('system_channel_flags field must be of type SystemChannelFlags')
fields['system_channel_flags'] = system_channel_flags.value
if community is not MISSING:
features = []
if community:
if 'rules_channel_id' in fields and 'public_updates_channel_id' in fields:
features.append('COMMUNITY')
else:
raise InvalidArgument(
'community field requires both rules_channel and public_updates_channel fields to be provided'
)
fields['features'] = features
if premium_progress_bar_enabled is not MISSING:
fields['premium_progress_bar_enabled'] = premium_progress_bar_enabled
data = await http.edit_guild(self.id, reason=reason, **fields)
return Guild(data=data, state=self._state)
async def fetch_channels(self) -> Sequence[GuildChannel]:
"""|coro|
Retrieves all :class:`abc.GuildChannel` that the guild has.
.. note::
This method is an API call. For general usage, consider :attr:`channels` instead.
.. versionadded:: 1.2
Raises
-------
InvalidData
An unknown channel type was received from Discord.
HTTPException
Retrieving the channels failed.
Returns
-------
Sequence[:class:`abc.GuildChannel`]
All channels in the guild.
"""
data = await self._state.http.get_all_guild_channels(self.id)
def convert(d):
factory, ch_type = _guild_channel_factory(d['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(d))
channel = factory(guild=self, state=self._state, data=d)
return channel
return [convert(d) for d in data]
async def active_threads(self) -> List[Thread]:
"""|coro|
Returns a list of active :class:`Thread` that the client can access.
This includes both private and public threads.
.. versionadded:: 2.0
Raises
------
HTTPException
The request to get the active threads failed.
Returns
--------
List[:class:`Thread`]
The active threads.
"""
data = await self._state.http.get_active_threads(self.id)
threads = [Thread(guild=self, state=self._state, data=d) for d in data.get('threads', [])]
thread_lookup: Dict[int, Thread] = {thread.id: thread for thread in threads}
for member in data.get('members', []):
thread = thread_lookup.get(int(member['id']))
if thread is not None:
thread._add_member(ThreadMember(parent=thread, data=member))
return threads
# TODO: Remove Optional typing here when async iterators are refactored
def fetch_members(self, *, limit: int = 1000, after: Optional[SnowflakeTime] = None) -> MemberIterator:
"""Retrieves an :class:`.AsyncIterator` that enables receiving the guild's members. In order to use this,
:meth:`Intents.members` must be enabled.
.. note::
This method is an API call. For general usage, consider :attr:`members` instead.
.. versionadded:: 1.3
All parameters are optional.
Parameters
----------
limit: Optional[:class:`int`]
The number of members to retrieve. Defaults to 1000.
Pass ``None`` to fetch all members. Note that this is potentially slow.
after: Optional[Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]]
Retrieve members after this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
Raises
------
ClientException
The members intent is not enabled.
HTTPException
Getting the members failed.
Yields
------
:class:`.Member`
The member with the member data parsed.
Examples
--------
Usage ::
async for member in guild.fetch_members(limit=150):
print(member.name)
Flattening into a list ::
members = await guild.fetch_members(limit=150).flatten()
# members is now a list of Member...
"""
if not self._state._intents.members:
raise ClientException('Intents.members must be enabled to use this.')
return MemberIterator(self, limit=limit, after=after)
async def fetch_member(self, member_id: int, /) -> Member:
"""|coro|
Retrieves a :class:`Member` from a guild ID, and a member ID.
.. note::
This method is an API call. If you have :attr:`Intents.members` and member cache enabled, consider :meth:`get_member` instead.
Parameters
-----------
member_id: :class:`int`
The member's ID to fetch from.
Raises
-------
Forbidden
You do not have access to the guild.
HTTPException
Fetching the member failed.
Returns
--------
:class:`Member`
The member from the member ID.
"""
data = await self._state.http.get_member(self.id, member_id)
return Member(data=data, state=self._state, guild=self)
async def fetch_ban(self, user: Snowflake) -> BanEntry:
"""|coro|
Retrieves the :class:`BanEntry` for a user.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to get ban information from.
Raises
------
Forbidden
You do not have proper permissions to get the information.
NotFound
This user is not banned.
HTTPException
An error occurred while fetching the information.
Returns
-------
:class:`BanEntry`
The :class:`BanEntry` object for the specified user.
"""
data: BanPayload = await self._state.http.get_ban(user.id, self.id)
return BanEntry(user=User(state=self._state, data=data['user']), reason=data['reason'])
async def fetch_channel(self, channel_id: int, /) -> Union[GuildChannel, Thread]:
"""|coro|
Retrieves a :class:`.abc.GuildChannel` or :class:`.Thread` with the specified ID.
.. note::
This method is an API call. For general usage, consider :meth:`get_channel_or_thread` instead.
.. versionadded:: 2.0
Raises
-------
:exc:`.InvalidData`
An unknown channel type was received from Discord
or the guild the channel belongs to is not the same
as the one in this object points to.
:exc:`.HTTPException`
Retrieving the channel failed.
:exc:`.NotFound`
Invalid Channel ID.
:exc:`.Forbidden`
You do not have permission to fetch this channel.
Returns
--------
Union[:class:`.abc.GuildChannel`, :class:`.Thread`]
The channel from the ID.
"""
data = await self._state.http.get_channel(channel_id)
factory, ch_type = _threaded_guild_channel_factory(data['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data))
if ch_type in (ChannelType.group, ChannelType.private):
raise InvalidData('Channel ID resolved to a private channel')
guild_id = int(data['guild_id'])
if self.id != guild_id:
raise InvalidData('Guild ID resolved to a different guild')
channel: GuildChannel = factory(guild=self, state=self._state, data=data) # type: ignore
return channel
async def bans(self) -> List[BanEntry]:
"""|coro|
Retrieves all the users that are banned from the guild as a :class:`list` of :class:`BanEntry`.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
--------
List[:class:`BanEntry`]
A list of :class:`BanEntry` objects.
"""
data: List[BanPayload] = await self._state.http.get_bans(self.id)
return [BanEntry(user=User(state=self._state, data=e['user']), reason=e['reason']) for e in data]
async def prune_members(
self,
*,
days: int,
compute_prune_count: bool = True,
roles: List[Snowflake] = MISSING,
reason: Optional[str] = None,
) -> Optional[int]:
r"""|coro|
Prunes the guild from its inactive members.
The inactive members are denoted if they have not logged on in
``days`` number of days and they have no roles.
You must have the :attr:`~Permissions.kick_members` permission
to use this.
To check how many members you would prune without actually pruning,
see the :meth:`estimate_pruned_members` function.
To prune members that have specific roles see the ``roles`` parameter.
.. versionchanged:: 1.4
The ``roles`` keyword-only parameter was added.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
compute_prune_count: :class:`bool`
Whether to compute the prune count. This defaults to ``True``
which makes it prone to timeouts in very large guilds. In order
to prevent timeouts, you must set this to ``False``. If this is
set to ``False``\, then this function will always return ``None``.
roles: List[:class:`abc.Snowflake`]
A list of :class:`abc.Snowflake` that represent roles to include in the pruning process. If a member
has a role that is not specified, they'll be excluded.
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while pruning members.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
Optional[:class:`int`]
The number of members pruned. If ``compute_prune_count`` is ``False``
then this returns ``None``.
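Example
--------
A minimal sketch (``guild`` is assumed to be an existing :class:`Guild`): ::
    pruned = await guild.prune_members(days=30, reason='Routine cleanup')
    if pruned is not None:
        print(f'Pruned {pruned} members.')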
"""
if not isinstance(days, int):
raise InvalidArgument(f'Expected int for ``days``, received {days.__class__.__name__} instead.')
if roles:
role_ids = [str(role.id) for role in roles]
else:
role_ids = []
data = await self._state.http.prune_members(
self.id, days, compute_prune_count=compute_prune_count, roles=role_ids, reason=reason
)
return data['pruned']
async def templates(self) -> List[Template]:
"""|coro|
Gets the list of templates from this guild.
Requires :attr:`~.Permissions.manage_guild` permissions.
.. versionadded:: 1.7
Raises
-------
Forbidden
You don't have permissions to get the templates.
Returns
--------
List[:class:`Template`]
The templates for this guild.
"""
from .template import Template
data = await self._state.http.guild_templates(self.id)
return [Template(data=d, state=self._state) for d in data]
async def webhooks(self) -> List[Webhook]:
"""|coro|
Gets the list of webhooks from this guild.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
Raises
-------
Forbidden
You don't have permissions to get the webhooks.
Returns
--------
List[:class:`Webhook`]
The webhooks for this guild.
"""
from .webhook import Webhook
data = await self._state.http.guild_webhooks(self.id)
return [Webhook.from_state(d, state=self._state) for d in data]
async def estimate_pruned_members(self, *, days: int, roles: List[Snowflake] = MISSING) -> int:
"""|coro|
Similar to :meth:`prune_members` except instead of actually
pruning members, it returns how many members it would prune
from the guild had it been called.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
roles: List[:class:`abc.Snowflake`]
A list of :class:`abc.Snowflake` that represent roles to include in the estimate. If a member
has a role that is not specified, they'll be excluded.
.. versionadded:: 1.7
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while fetching the prune members estimate.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
:class:`int`
The number of members estimated to be pruned.
"""
if not isinstance(days, int):
raise InvalidArgument(f'Expected int for ``days``, received {days.__class__.__name__} instead.')
if roles:
role_ids = [str(role.id) for role in roles]
else:
role_ids = []
data = await self._state.http.estimate_pruned_members(self.id, days, role_ids)
return data['pruned']
async def invites(self) -> List[Invite]:
"""|coro|
Returns a list of all active instant invites from the guild.
You must have the :attr:`~Permissions.manage_guild` permission to get
this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
-------
List[:class:`Invite`]
The list of invites that are currently active.
"""
data = await self._state.http.invites_from(self.id)
result = []
for invite in data:
channel = self.get_channel(int(invite['channel']['id']))
result.append(Invite(state=self._state, data=invite, guild=self, channel=channel))
return result
async def create_template(self, *, name: str, description: str = MISSING) -> Template:
"""|coro|
Creates a template for the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.7
Parameters
-----------
name: :class:`str`
The name of the template.
description: :class:`str`
The description of the template.
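Example
--------
A minimal sketch (``guild`` is assumed to be an existing :class:`Guild`; the values are placeholders): ::
    template = await guild.create_template(name='Base layout', description='Starting point for new servers')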
"""
from .template import Template
payload = {'name': name}
if description:
payload['description'] = description
data = await self._state.http.create_template(self.id, payload)
return Template(state=self._state, data=data)
async def create_integration(self, *, type: str, id: int) -> None:
"""|coro|
Attaches an integration to the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.4
Parameters
-----------
type: :class:`str`
The integration type (e.g. Twitch).
id: :class:`int`
The integration ID.
Raises
-------
Forbidden
You do not have permission to create the integration.
HTTPException
The account could not be found.
"""
await self._state.http.create_integration(self.id, type, id)
async def integrations(self) -> List[Integration]:
"""|coro|
Returns a list of all integrations attached to the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.4
Raises
-------
Forbidden
You do not have permission to create the integration.
HTTPException
Fetching the integrations failed.
Returns
--------
List[:class:`Integration`]
The list of integrations that are attached to the guild.
"""
data = await self._state.http.get_all_integrations(self.id)
def convert(d):
factory, _ = _integration_factory(d['type'])
if factory is None:
raise InvalidData('Unknown integration type {type!r} for integration ID {id}'.format_map(d))
return factory(guild=self, data=d)
return [convert(d) for d in data]
async def fetch_stickers(self) -> List[GuildSticker]:
r"""|coro|
Retrieves a list of all :class:`Sticker`\s for the guild.
.. versionadded:: 2.0
.. note::
This method is an API call. For general usage, consider :attr:`stickers` instead.
Raises
---------
HTTPException
An error occurred fetching the stickers.
Returns
--------
List[:class:`GuildSticker`]
The retrieved stickers.
"""
data = await self._state.http.get_all_guild_stickers(self.id)
return [GuildSticker(state=self._state, data=d) for d in data]
async def fetch_sticker(self, sticker_id: int, /) -> GuildSticker:
"""|coro|
Retrieves a custom :class:`Sticker` from the guild.
.. versionadded:: 2.0
.. note::
This method is an API call.
For general usage, consider iterating over :attr:`stickers` instead.
Parameters
-------------
sticker_id: :class:`int`
The sticker's ID.
Raises
---------
NotFound
The sticker requested could not be found.
HTTPException
An error occurred fetching the sticker.
Returns
--------
:class:`GuildSticker`
The retrieved sticker.
"""
data = await self._state.http.get_guild_sticker(self.id, sticker_id)
return GuildSticker(state=self._state, data=data)
async def create_sticker(
self,
*,
name: str,
description: Optional[str] = None,
emoji: str,
file: File,
reason: Optional[str] = None,
) -> GuildSticker:
"""|coro|
Creates a :class:`Sticker` for the guild.
You must have :attr:`~Permissions.manage_emojis_and_stickers` permission to
do this.
.. versionadded:: 2.0
Parameters
-----------
name: :class:`str`
The sticker name. Must be at least 2 characters.
description: Optional[:class:`str`]
The sticker's description. Can be ``None``.
emoji: :class:`str`
The name of a unicode emoji that represents the sticker's expression.
file: :class:`File`
The file of the sticker to upload.
reason: :class:`str`
The reason for creating this sticker. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to create stickers.
HTTPException
An error occurred creating a sticker.
Returns
--------
:class:`GuildSticker`
The created sticker.
"""
payload = {
'name': name,
}
if description:
payload['description'] = description
try:
emoji = unicodedata.name(emoji)
except TypeError:
pass
else:
emoji = emoji.replace(' ', '_')
payload['tags'] = emoji
data = await self._state.http.create_guild_sticker(self.id, payload, file, reason)
return self._state.store_sticker(self, data)
async def delete_sticker(self, sticker: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the custom :class:`Sticker` from the guild.
You must have :attr:`~Permissions.manage_emojis_and_stickers` permission to
do this.
.. versionadded:: 2.0
Parameters
-----------
sticker: :class:`abc.Snowflake`
The sticker you are deleting.
reason: Optional[:class:`str`]
The reason for deleting this sticker. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to delete stickers.
HTTPException
An error occurred deleting the sticker.
"""
await self._state.http.delete_guild_sticker(self.id, sticker.id, reason)
async def fetch_emojis(self) -> List[Emoji]:
r"""|coro|
Retrieves all custom :class:`Emoji`\s from the guild.
.. note::
This method is an API call. For general usage, consider :attr:`emojis` instead.
Raises
---------
HTTPException
An error occurred fetching the emojis.
Returns
--------
List[:class:`Emoji`]
The retrieved emojis.
"""
data = await self._state.http.get_all_custom_emojis(self.id)
return [Emoji(guild=self, state=self._state, data=d) for d in data]
async def fetch_emoji(self, emoji_id: int, /) -> Emoji:
"""|coro|
Retrieves a custom :class:`Emoji` from the guild.
.. note::
This method is an API call.
For general usage, consider iterating over :attr:`emojis` instead.
Parameters
-------------
emoji_id: :class:`int`
The emoji's ID.
Raises
---------
NotFound
The emoji requested could not be found.
HTTPException
An error occurred fetching the emoji.
Returns
--------
:class:`Emoji`
The retrieved emoji.
"""
data = await self._state.http.get_custom_emoji(self.id, emoji_id)
return Emoji(guild=self, state=self._state, data=data)
async def create_custom_emoji(
self,
*,
name: str,
image: bytes,
roles: List[Role] = MISSING,
reason: Optional[str] = None,
) -> Emoji:
r"""|coro|
Creates a custom :class:`Emoji` for the guild.
There is currently a limit of 50 static and animated emojis respectively per guild,
unless the guild has the ``MORE_EMOJI`` feature which extends the limit to 200.
You must have the :attr:`~Permissions.manage_emojis` permission to
do this.
Parameters
-----------
name: :class:`str`
The emoji name. Must be at least 2 characters.
image: :class:`bytes`
The :term:`py:bytes-like object` representing the image data to use.
Only JPG, PNG and GIF images are supported.
roles: List[:class:`Role`]
A :class:`list` of :class:`Role`\s that can use this emoji. Leave empty to make it available to everyone.
reason: Optional[:class:`str`]
The reason for creating this emoji. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to create emojis.
HTTPException
An error occurred creating an emoji.
Returns
--------
:class:`Emoji`
The created emoji.
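Example
--------
A minimal sketch (``guild`` is assumed to be an existing :class:`Guild` and ``emoji.png`` a local image file): ::
    with open('emoji.png', 'rb') as fp:
        emoji = await guild.create_custom_emoji(name='thonk', image=fp.read())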
"""
img = utils._bytes_to_base64_data(image)
if roles:
role_ids = [role.id for role in roles]
else:
role_ids = []
data = await self._state.http.create_custom_emoji(self.id, name, img, roles=role_ids, reason=reason)
return self._state.store_emoji(self, data)
async def delete_emoji(self, emoji: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the custom :class:`Emoji` from the guild.
You must have :attr:`~Permissions.manage_emojis` permission to
do this.
Parameters
-----------
emoji: :class:`abc.Snowflake`
The emoji you are deleting.
reason: Optional[:class:`str`]
The reason for deleting this emoji. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to delete emojis.
HTTPException
An error occurred deleting the emoji.
"""
await self._state.http.delete_custom_emoji(self.id, emoji.id, reason=reason)
async def fetch_roles(self) -> List[Role]:
"""|coro|
Retrieves all :class:`Role` that the guild has.
.. note::
This method is an API call. For general usage, consider :attr:`roles` instead.
.. versionadded:: 1.3
Raises
-------
HTTPException
Retrieving the roles failed.
Returns
-------
List[:class:`Role`]
All roles in the guild.
"""
data = await self._state.http.get_roles(self.id)
return [Role(guild=self, state=self._state, data=d) for d in data]
@overload
async def create_role(
self,
*,
reason: Optional[str] = ...,
name: str = ...,
permissions: Permissions = ...,
colour: Union[Colour, int] = ...,
hoist: bool = ...,
mentionable: bool = ...,
) -> Role:
...
@overload
async def create_role(
self,
*,
reason: Optional[str] = ...,
name: str = ...,
permissions: Permissions = ...,
color: Union[Colour, int] = ...,
hoist: bool = ...,
mentionable: bool = ...,
) -> Role:
...
async def create_role(
self,
*,
name: str = MISSING,
permissions: Permissions = MISSING,
color: Union[Colour, int] = MISSING,
colour: Union[Colour, int] = MISSING,
hoist: bool = MISSING,
mentionable: bool = MISSING,
reason: Optional[str] = None,
) -> Role:
"""|coro|
Creates a :class:`Role` for the guild.
All fields are optional.
You must have the :attr:`~Permissions.manage_roles` permission to
do this.
.. versionchanged:: 1.6
Can now pass ``int`` to ``colour`` keyword-only parameter.
Parameters
-----------
name: :class:`str`
The role name. Defaults to 'new role'.
permissions: :class:`Permissions`
The permissions to have. Defaults to no permissions.
colour: Union[:class:`Colour`, :class:`int`]
The colour for the role. Defaults to :meth:`Colour.default`.
This is aliased to ``color`` as well.
hoist: :class:`bool`
Indicates if the role should be shown separately in the member list.
Defaults to ``False``.
mentionable: :class:`bool`
Indicates if the role should be mentionable by others.
Defaults to ``False``.
reason: Optional[:class:`str`]
The reason for creating this role. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to create the role.
HTTPException
Creating the role failed.
InvalidArgument
An invalid keyword argument was given.
Returns
--------
:class:`Role`
The newly created role.
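Example
--------
A minimal sketch (``guild`` is assumed to be an existing :class:`Guild`; the values are placeholders): ::
    role = await guild.create_role(
        name='Moderator',
        permissions=discord.Permissions(kick_members=True),
        colour=discord.Colour.blue(),
        hoist=True,
        reason='Setting up moderation',
    )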
"""
fields: Dict[str, Any] = {}
if permissions is not MISSING:
fields['permissions'] = str(permissions.value)
else:
fields['permissions'] = '0'
actual_colour = colour or color or Colour.default()
if isinstance(actual_colour, int):
fields['color'] = actual_colour
else:
fields['color'] = actual_colour.value
if hoist is not MISSING:
fields['hoist'] = hoist
if mentionable is not MISSING:
fields['mentionable'] = mentionable
if name is not MISSING:
fields['name'] = name
data = await self._state.http.create_role(self.id, reason=reason, **fields)
role = Role(guild=self, data=data, state=self._state)
# TODO: add to cache
return role
async def edit_role_positions(self, positions: Dict[Snowflake, int], *, reason: Optional[str] = None) -> List[Role]:
"""|coro|
Bulk edits a list of :class:`Role` in the guild.
You must have the :attr:`~Permissions.manage_roles` permission to
do this.
.. versionadded:: 1.4
Example:
.. code-block:: python3
positions = {
bots_role: 1, # penultimate role
tester_role: 2,
admin_role: 6
}
await guild.edit_role_positions(positions=positions)
Parameters
-----------
positions
A :class:`dict` of :class:`Role` to :class:`int` to change the positions
of each given role.
reason: Optional[:class:`str`]
The reason for editing the role positions. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to move the roles.
HTTPException
Moving the roles failed.
InvalidArgument
An invalid keyword argument was given.
Returns
--------
List[:class:`Role`]
A list of all the roles in the guild.
"""
if not isinstance(positions, dict):
raise InvalidArgument('positions parameter expects a dict.')
role_positions: List[Dict[str, Any]] = []
for role, position in positions.items():
payload = {'id': role.id, 'position': position}
role_positions.append(payload)
data = await self._state.http.move_role_position(self.id, role_positions, reason=reason)
roles: List[Role] = []
for d in data:
role = Role(guild=self, data=d, state=self._state)
roles.append(role)
self._roles[role.id] = role
return roles
async def kick(self, user: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Kicks a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.kick_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to kick from the guild.
reason: Optional[:class:`str`]
The reason the user got kicked.
Raises
-------
Forbidden
You do not have the proper permissions to kick.
HTTPException
Kicking failed.
"""
await self._state.http.kick(user.id, self.id, reason=reason)
async def ban(
self,
user: Snowflake,
*,
reason: Optional[str] = None,
delete_message_days: Literal[0, 1, 2, 3, 4, 5, 6, 7] = 1,
) -> None:
"""|coro|
Bans a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.ban_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to ban from the guild.
delete_message_days: :class:`int`
The number of days worth of messages to delete from the user
in the guild. The minimum is 0 and the maximum is 7.
reason: Optional[:class:`str`]
The reason the user got banned.
Raises
-------
Forbidden
You do not have the proper permissions to ban.
HTTPException
Banning failed.
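Example
--------
A minimal sketch (``guild`` and ``member`` are assumed to already exist): ::
    await guild.ban(member, reason='Spamming', delete_message_days=7)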
"""
await self._state.http.ban(user.id, self.id, delete_message_days, reason=reason)
async def unban(self, user: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Unbans a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.ban_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to unban.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to unban.
HTTPException
Unbanning failed.
"""
await self._state.http.unban(user.id, self.id, reason=reason)
async def vanity_invite(self) -> Optional[Invite]:
"""|coro|
Returns the guild's special vanity invite.
The guild must have ``VANITY_URL`` in :attr:`~Guild.features`.
You must have the :attr:`~Permissions.manage_guild` permission to use
this as well.
Raises
-------
Forbidden
You do not have the proper permissions to get this.
HTTPException
Retrieving the vanity invite failed.
Returns
--------
Optional[:class:`Invite`]
The special vanity invite. If ``None`` then the guild does not
have a vanity invite set.
"""
# we start with { code: abc }
payload = await self._state.http.get_vanity_code(self.id)
if not payload['code']:
return None
# get the vanity URL channel since default channels aren't
# reliable or a thing anymore
data = await self._state.http.get_invite(payload['code'])
channel = self.get_channel(int(data['channel']['id']))
payload['revoked'] = False
payload['temporary'] = False
payload['max_uses'] = 0
payload['max_age'] = 0
payload['uses'] = payload.get('uses', 0)
return Invite(state=self._state, data=payload, guild=self, channel=channel)
# TODO: use MISSING when async iterators get refactored
def audit_logs(
self,
*,
limit: Optional[int] = 100,
before: Optional[SnowflakeTime] = None,
after: Optional[SnowflakeTime] = None,
oldest_first: Optional[bool] = None,
user: Snowflake = None,
action: AuditLogAction = None,
) -> AuditLogIterator:
"""Returns an :class:`AsyncIterator` that enables receiving the guild's audit logs.
You must have the :attr:`~Permissions.view_audit_log` permission to use this.
Examples
----------
Getting the first 100 entries: ::
async for entry in guild.audit_logs(limit=100):
print(f'{entry.user} did {entry.action} to {entry.target}')
Getting entries for a specific action: ::
async for entry in guild.audit_logs(action=discord.AuditLogAction.ban):
print(f'{entry.user} banned {entry.target}')
Getting entries made by a specific user: ::
entries = await guild.audit_logs(limit=None, user=guild.me).flatten()
await channel.send(f'I made {len(entries)} moderation actions.')
Parameters
-----------
limit: Optional[:class:`int`]
The number of entries to retrieve. If ``None`` retrieve all entries.
before: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
Retrieve entries before this date or entry.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
after: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
Retrieve entries after this date or entry.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
oldest_first: :class:`bool`
If set to ``True``, return entries in oldest->newest order. Defaults to ``True`` if
``after`` is specified, otherwise ``False``.
user: :class:`abc.Snowflake`
The moderator to filter entries from.
action: :class:`AuditLogAction`
The action to filter with.
Raises
-------
Forbidden
You are not allowed to fetch audit logs.
HTTPException
An error occurred while fetching the audit logs.
Yields
--------
:class:`AuditLogEntry`
The audit log entry.
"""
if user is not None:
user_id = user.id
else:
user_id = None
if action:
action = action.value
return AuditLogIterator(
self, before=before, after=after, limit=limit, oldest_first=oldest_first, user_id=user_id, action_type=action
)
async def widget(self) -> Widget:
"""|coro|
Returns the widget of the guild.
.. note::
The guild must have the widget enabled to get this information.
Raises
-------
Forbidden
The widget for this guild is disabled.
HTTPException
Retrieving the widget failed.
Returns
--------
:class:`Widget`
The guild's widget.
"""
data = await self._state.http.get_widget(self.id)
return Widget(state=self._state, data=data)
async def edit_widget(self, *, enabled: bool = MISSING, channel: Optional[Snowflake] = MISSING) -> None:
"""|coro|
Edits the widget of the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
use this.
.. versionadded:: 2.0
Parameters
-----------
enabled: :class:`bool`
Whether to enable the widget for the guild.
channel: Optional[:class:`~discord.abc.Snowflake`]
The new widget channel. ``None`` removes the widget channel.
Raises
-------
Forbidden
You do not have permission to edit the widget.
HTTPException
Editing the widget failed.
"""
payload = {}
if channel is not MISSING:
payload['channel_id'] = None if channel is None else channel.id
if enabled is not MISSING:
payload['enabled'] = enabled
await self._state.http.edit_widget(self.id, payload=payload)
async def chunk(self, *, cache: bool = True) -> None:
"""|coro|
Requests all members that belong to this guild. In order to use this,
:meth:`Intents.members` must be enabled.
This is a websocket operation and can be slow.
.. versionadded:: 1.5
Parameters
-----------
cache: :class:`bool`
Whether to cache the members as well.
Raises
-------
ClientException
The members intent is not enabled.
"""
if not self._state._intents.members:
raise ClientException('Intents.members must be enabled to use this.')
if not self._state.is_guild_evicted(self):
return await self._state.chunk_guild(self, cache=cache)
async def query_members(
self,
query: Optional[str] = None,
*,
limit: int = 5,
user_ids: Optional[List[int]] = None,
presences: bool = False,
cache: bool = True,
) -> List[Member]:
"""|coro|
Request members that belong to this guild whose username starts with
the query given.
This is a websocket operation and can be slow.
.. versionadded:: 1.3
Parameters
-----------
query: Optional[:class:`str`]
The string that the usernames start with.
limit: :class:`int`
The maximum number of members to send back. This must be
a number between 5 and 100.
presences: :class:`bool`
Whether to request for presences to be provided. This defaults
to ``False``.
.. versionadded:: 1.6
cache: :class:`bool`
Whether to cache the members internally. This makes operations
such as :meth:`get_member` work for those that matched.
user_ids: Optional[List[:class:`int`]]
List of user IDs to search for. If the user ID is not in the guild then it won't be returned.
.. versionadded:: 1.4
Raises
-------
asyncio.TimeoutError
The query timed out waiting for the members.
ValueError
Invalid parameters were passed to the function
ClientException
The presences intent is not enabled.
Returns
--------
List[:class:`Member`]
The list of members that have matched the query.
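Example
--------
A minimal sketch (``guild`` is assumed to be an existing :class:`Guild`; the query value is a placeholder): ::
    members = await guild.query_members(query='Dan', limit=10)
    for member in members:
        print(member.display_name)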
"""
if presences and not self._state._intents.presences:
raise ClientException('Intents.presences must be enabled to use this.')
if query == '':
    raise ValueError('Cannot pass empty query string.')
if query is None:
if user_ids is None:
raise ValueError('Must pass either query or user_ids')
if user_ids is not None and query is not None:
raise ValueError('Cannot pass both query and user_ids')
if user_ids is not None and not user_ids:
raise ValueError('user_ids must contain at least 1 value')
limit = min(100, limit or 5)
return await self._state.query_members(
self, query=query, limit=limit, user_ids=user_ids, presences=presences, cache=cache
)
async def change_voice_state(
self, *, channel: Optional[VocalGuildChannel], self_mute: bool = False, self_deaf: bool = False
):
"""|coro|
Changes client's voice state in the guild.
.. versionadded:: 1.4
Parameters
-----------
channel: Optional[:class:`VoiceChannel`]
Channel the client wants to join. Use ``None`` to disconnect.
self_mute: :class:`bool`
Indicates if the client should be self-muted.
self_deaf: :class:`bool`
Indicates if the client should be self-deafened.
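Example
--------
A minimal sketch (``guild`` and ``voice_channel`` are assumed to already exist): ::
    await guild.change_voice_state(channel=voice_channel, self_deaf=True)
    # pass channel=None to disconnect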
"""
ws = self._state._get_websocket(self.id)
channel_id = channel.id if channel else None
await ws.voice_state(self.id, channel_id, self_mute, self_deaf)
async def welcome_screen(self):
"""|coro|
Returns the :class:`WelcomeScreen` of the guild.
The guild must have ``COMMUNITY`` in :attr:`~Guild.features`.
You must have the :attr:`~Permissions.manage_guild` permission in order to get this.
.. versionadded:: 2.0
Raises
-------
Forbidden
You do not have the proper permissions to get this.
HTTPException
Retrieving the welcome screen failed.
NotFound
The guild does not have a welcome screen, or the community feature is disabled.
Returns
--------
:class:`WelcomeScreen`
The welcome screen of the guild.
"""
data = await self._state.http.get_welcome_screen(self.id)
return WelcomeScreen(data=data, guild=self)
@overload
async def edit_welcome_screen(
self,
*,
description: Optional[str] = ...,
welcome_channels: Optional[List[WelcomeScreenChannel]] = ...,
enabled: Optional[bool] = ...,
) -> WelcomeScreen:
...
@overload
async def edit_welcome_screen(self) -> None:
...
async def edit_welcome_screen(self, **options):
"""|coro|
A shorthand for :attr:`WelcomeScreen.edit` without fetching the welcome screen.
You must have the :attr:`~Permissions.manage_guild` permission in the
guild to do this.
The guild must have ``COMMUNITY`` in :attr:`Guild.features`.
Parameters
------------
description: Optional[:class:`str`]
The new description of the welcome screen.
welcome_channels: Optional[List[:class:`WelcomeScreenChannel`]]
The welcome channels. The order of the channels will be the same as the order of the passed list.
enabled: Optional[:class:`bool`]
Whether the welcome screen should be displayed.
reason: Optional[:class:`str`]
The reason that shows up on audit log.
Raises
-------
HTTPException
Editing the welcome screen failed.
Forbidden
You don't have permissions to edit the welcome screen.
NotFound
This welcome screen does not exist.
Returns
--------
:class:`WelcomeScreen`
The edited welcome screen.
"""
reason = options.pop('reason', None)
welcome_channels = options.get('welcome_channels')
if welcome_channels is not None:
    welcome_channels_data = []
    for channel in welcome_channels:
        if not isinstance(channel, WelcomeScreenChannel):
            raise TypeError('welcome_channels parameter must be a list of WelcomeScreenChannel.')
        welcome_channels_data.append(channel.to_dict())
    options['welcome_channels'] = welcome_channels_data
if options:
    new = await self._state.http.edit_welcome_screen(self.id, options, reason=reason)
return WelcomeScreen(data=new, guild=self)
|
the-stack_0_1251 | import sys
import xlrd
import csv
from main.model.model import db_save
template = {'fecha_hora': '',
'vereda': '',
'PM2_5_CC_ICA': -9999.0,
'altitud': -9999.0,
'estado': '',
'online': '',
'longitude': -9999.0,
'barrio': '',
'ciudad': '',
'temperatura': -9999.0,
'humedad_relativa': -9999.0,
'latitude': -9999.0,
'nombre': '',
'PM2_5_last': -9999.0,
'PM2_5_mean': -9999.0,
'codigo': -9999.0}
def load_xlsx(datafile):
workbook = xlrd.open_workbook(datafile)
worksheet = workbook.sheet_by_index(0)
print(">Msg: Reading '"+datafile+"'")
print("- Filas: "+str(worksheet.nrows))
print("- Columnas: "+str(worksheet.ncols))
for fila in range(worksheet.nrows):
# Holds only one measurement (medicion) at a time
medicion = []
for columna in range(worksheet.ncols):
medicion.append(worksheet.cell(fila,columna).value)
print(medicion)
#print("aquí se guardaría el dato")
def load_csv(datafile):
censors = []
with open(datafile,'r') as csvfile:
reader = csv.reader(csvfile)
#cont = 0
row1 = True
for row in reader:
if row1 == True:
censors = row
row1 = False
continue
#cont += 1
index = 0
date = ''
for field in row:
if index == 0:
date = field
index += 1
else:
# censors[index] = sensor identification number
# date = timestamp of the measurement
# field = measured value
#print(censors[index],date,field)
if field != '':
medicion = template
medicion['nombre'] = str(censors[index])
medicion['codigo'] = int(censors[index])
medicion['fecha_hora'] = str(date[:10]) + "T" + str(date[11:])
medicion['PM2_5_last'] = float(field)
save_response = db_save('mediciones', medicion)
if save_response == False:
print("- Hubo un problema almacenando el dato: ")
print(medicion, "\n")
#print(medicion['fecha_hora'])
index += 1
#if cont == 2:
# break
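# A sketch of the CSV layout this loader expects (values are illustrative):
# the header row holds the sensor codes, the first column holds timestamps,
# and empty cells are skipped.
#
#   fecha,101,102,103
#   2020-01-01 00:00:00,12.3,,8.7
#   2020-01-01 01:00:00,11.9,10.2,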
def main():
for datafile in sys.argv[1:]:
ext=""
i=datafile.rfind(".")
if i == -1:
print(">Error: Los ficheros no tienen extención '"+datafile+"'\n")
else:
if datafile[i:] == ".csv":
load_csv(datafile)
else:
if datafile[i:] == ".xlsx" or datafile[i:] == ".xls" or datafile[i:] == ".xlsm":
#load_xlsx(datafile)
pass
else:
print(">Warning: Extención de fichero no soportado '"+datafile+"'\n")
main() |
the-stack_0_1253 | import os
import numpy as np
from simplegrid.abstractcreature import MAX_ENERGY, Action, AbstractCreature
from simplegrid.dqn_agent import DQNAgent
from simplegrid.map_feature import MapFeature
HISTORY_FILE = 'deep_cow_history.jsonl'
WEIGHTS_FILE = 'deep_cow_model_weights.h5'
MODEL_FILE = 'deep_cow_model.json'
class DeepCow(AbstractCreature):
agent = None
COLOR = (240, 240, 20)
IS_PREDATOR = False
def __init__(self, x, y, settings, energy=None):
super().__init__(x, y, settings, energy)
self.prev_state = None
self.prev_reward = None
self.prev_action_idx = None
self.state = None
self.reward = None
self.done = None
self.action_idx = 0
@staticmethod
def to_internal_state(observation):
"""Convert state to an internal representation.
The input state is a (2 x d + 1, 2 x d + 1) matrix with for each
cell either a 1 for food, 0 for nothing or -x for another animal.
The center cell is always "us". Only the largest diamond fitting
the matrix is actually visible.
"""
size = observation.shape[0]
view_distance = size // 2
if view_distance == 1:
diamond = observation.flatten()
diamond = [diamond[3], diamond[7], diamond[5], diamond[1]]
else:
diamond = []
for x in range(size):
for y in range(size):
if 0 < abs(x - size // 2) + abs(y - size // 2) <= view_distance:
diamond.append(observation[x][y])
diamond = np.asarray(diamond)
grass = MapFeature.GRASS.to_feature_vector(diamond)
rock = MapFeature.ROCK.to_feature_vector(diamond)
water = MapFeature.WATER.to_feature_vector(diamond)
wolves = MapFeature.WOLF.to_feature_vector(diamond)
return np.concatenate((grass, rock, water, wolves))
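# A rough usage sketch (illustrative only; assumes MapFeature.*.to_feature_vector
# returns one entry per visible cell):
#   obs = np.zeros((3, 3))                  # 3x3 view -> only the 4 orthogonal neighbours are visible
#   state = DeepCow.to_internal_state(obs)  # concatenation of grass, rock, water and wolf feature vectors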
def step(self, observation):
if self.energy > MAX_ENERGY:
return Action.SPLIT
self.prev_state = self.state
self.prev_reward = self.reward
self.prev_action_idx = self.action_idx
self.state = self.to_internal_state(observation)
if not DeepCow.agent:
DeepCow.agent = DQNAgent.from_dimensions(len(self.state), layers=self.settings.layers, action_size=4)
self.action_idx = DeepCow.agent.act(self.state)
return Action(self.action_idx + 1)
def learn(self, reward, done):
self.reward = reward
if self.prev_state is not None and self.state is not None:
DeepCow.agent.remember(self.prev_state, self.prev_action_idx, self.prev_reward, self.state)
DeepCow.agent.replay()
if done:
DeepCow.agent.remember(self.state, self.action_idx, self.reward, None)
@classmethod
def restore_state(cls, settings):
model_file = settings.get_path(MODEL_FILE)
if model_file and os.path.isfile(model_file):
DeepCow.agent = DQNAgent.from_stored_model(model_file)
weights_file = settings.get_path(WEIGHTS_FILE)
if weights_file and os.path.isfile(weights_file):
DeepCow.agent.load_weights(weights_file)
@classmethod
def save_state(cls, settings):
weights_file = settings.get_path(WEIGHTS_FILE)
if weights_file:
cls.agent.save_weights(weights_file)
cls.agent.save_history(settings.get_path(HISTORY_FILE))
cls.agent.save_model(settings.get_path(MODEL_FILE))
|
the-stack_0_1254 | """Add meta field to Task table
Revision ID: a4a031f74720
Revises: 860c6ff76ea8
Create Date: 2019-06-08 14:12:10.983247
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a4a031f74720'
down_revision = '860c6ff76ea8'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('meta', postgresql.JSON(
astext_type=sa.Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'meta')
# ### end Alembic commands ###
|
the-stack_0_1257 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Partial implementation for AppleDynamicFrameworkInfo configuration."""
load(
"@bazel_skylib//lib:partial.bzl",
"partial",
)
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
# TODO(b/161370390): Remove ctx from the args when ctx is removed from all partials.
def _framework_provider_partial_impl(
*,
ctx,
actions,
bin_root_path,
binary_provider,
bundle_name,
rule_label):
"""Implementation for the framework provider partial."""
binary_file = binary_provider.binary
# Create a directory structure that the linker can use to reference this
# framework. It follows the pattern of
# any_path/MyFramework.framework/MyFramework. The absolute path and files are
# propagated using the AppleDynamicFrameworkInfo provider.
framework_dir = paths.join("frameworks", "%s.framework" % bundle_name)
framework_file = actions.declare_file(
paths.join(framework_dir, bundle_name),
)
actions.symlink(
target_file = binary_file,
output = framework_file,
)
absolute_framework_dir = paths.join(
bin_root_path,
rule_label.package,
framework_dir,
)
# TODO(cparsons): These will no longer be necessary once apple_binary
# uses the values in the dynamic framework provider.
legacy_objc_provider = apple_common.new_objc_provider(
dynamic_framework_file = depset([framework_file]),
providers = [binary_provider.objc],
)
framework_provider = apple_common.new_dynamic_framework_provider(
binary = binary_file,
framework_dirs = depset([absolute_framework_dir]),
framework_files = depset([framework_file]),
objc = legacy_objc_provider,
)
return struct(
providers = [framework_provider],
)
def framework_provider_partial(
*,
actions,
bin_root_path,
binary_provider,
bundle_name,
rule_label):
"""Constructor for the framework provider partial.
This partial propagates the AppleDynamicFrameworkInfo provider required by
the linking step. It contains the necessary files and configuration so that
the framework can be linked against. This is only required for dynamic
framework bundles.
Args:
actions: The actions provider from `ctx.actions`.
bin_root_path: The path to the root `-bin` directory.
binary_provider: The AppleDylibBinary provider containing this target's binary.
bundle_name: The name of the output bundle.
rule_label: The label of the target being analyzed.
Returns:
A partial that returns the AppleDynamicFrameworkInfo provider used to link
this framework into the final binary.
"""
return partial.make(
_framework_provider_partial_impl,
actions = actions,
bin_root_path = bin_root_path,
binary_provider = binary_provider,
bundle_name = bundle_name,
rule_label = rule_label,
)
|
the-stack_0_1258 | # -*- coding: UTF-8 -*-
import olympe
from olympe.messages.ardrone3.Piloting import TakeOff, moveBy, Landing
drone = olympe.Drone("10.202.0.1")
drone.connect()
drone(TakeOff()).wait()
drone(moveBy(10, 0, 0, 0)).wait()
drone(Landing()).wait()
drone.disconnect()
|
the-stack_0_1259 | """This package enables saving and loading of python objects to disk
while also backing to S3 storage. """
import os
import datetime
import ntpath # to extract file name from path, OS-independent
import traceback # for printing full stacktraces of errors
import concurrent.futures # for asynchronous file uploads
import pickle # for pickling files
try: # for automatic caching of return values of functions
from functools import lru_cache
except ImportError:
from functools32 import lru_cache # pylint: disable=E0401
import pandas as pd
import boto3 # to interact with AWS S3
from botocore.exceptions import ClientError
import dateutil # to make local change-time datetime objects time-aware
import yaml # to read the s3bp config
import feather # to read/write pandas dataframes as feather objects
CFG_FILE_NAME = 's3bp_cfg.yml'
DEFAULT_MAX_WORKERS = 5
EXECUTOR = None
# === Reading configuration ===
def _s3bp_cfg_file_path():
return os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
CFG_FILE_NAME))
def _get_s3bp_cfg():
try:
with open(_s3bp_cfg_file_path(), 'r') as cfg_file:
cfg = yaml.safe_load(cfg_file)
if not isinstance(cfg, dict):
cfg = {'base_dir_to_bucket_map': {}}
return cfg
except FileNotFoundError:
with open(_s3bp_cfg_file_path(), 'w') as outfile:
outfile.write(yaml.dump(
{'base_dir_to_bucket_map': {}},
default_flow_style=False
))
return _get_s3bp_cfg()
def _max_workers():
try:
return _get_s3bp_cfg()['max_workers']
except KeyError:
return DEFAULT_MAX_WORKERS
def _default_bucket():
return _get_s3bp_cfg()['default_bucket']
def _base_dir_to_bucket_map():
return _get_s3bp_cfg()['base_dir_to_bucket_map']
def _base_dirs():
return list(_get_s3bp_cfg()['base_dir_to_bucket_map'].keys())
# === Setting configuration ===
def _set_s3bp_cfg(cfg):
with open(_s3bp_cfg_file_path(), 'w') as outfile:
outfile.write(yaml.dump(cfg, default_flow_style=False))
def set_max_workers(max_workers):
"""Sets the maximum number of workers in the thread pool used to
asynchronously upload files. NOTE: Resets the current thread pool!"""
cfg = _get_s3bp_cfg()
cfg['max_workers'] = max_workers
_set_s3bp_cfg(cfg)
_get_executor(reset=True)
def set_default_bucket(bucket_name):
"""Sets the given string as the default bucket name."""
cfg = _get_s3bp_cfg()
cfg['default_bucket'] = bucket_name
_set_s3bp_cfg(cfg)
def unset_default_bucket():
"""Unsets the currently set default bucket, if set."""
cfg = _get_s3bp_cfg()
cfg.pop('default_bucket', None)
_set_s3bp_cfg(cfg)
def _parse_dir_path(dir_path):
if '~' in dir_path:
return os.path.expanduser(dir_path)
return dir_path
def set_default_base_directory(base_directory):
"""Sets the given string as the default base directory name."""
cfg = _get_s3bp_cfg()
cfg['default_base_dir'] = _parse_dir_path(base_directory)
_set_s3bp_cfg(cfg)
def map_base_directory_to_bucket(base_directory, bucket_name):
"""Maps the given directory as a base directory of the given bucket.
Arguments
---------
base_directory : str
The full path, from root, to the desired base directory.
bucket_name : str
The name of the bucket to map the given directory to.
"""
cfg = _get_s3bp_cfg()
parsed_path = _parse_dir_path(base_directory)
if not isinstance(cfg['base_dir_to_bucket_map'], dict):
cfg['base_dir_to_bucket_map'] = {}
cfg['base_dir_to_bucket_map'][parsed_path] = bucket_name
_set_s3bp_cfg(cfg)
def remove_base_directory_mapping(base_directory):
"""Remove the mapping associated with the given directory, if exists."""
cfg = _get_s3bp_cfg()
parsed_path = _parse_dir_path(base_directory)
cfg['base_dir_to_bucket_map'].pop(parsed_path, None)
_set_s3bp_cfg(cfg)
# === Getting parameters ===
def _get_executor(reset=False):
if reset:
_get_executor.executor = concurrent.futures.ThreadPoolExecutor(
_max_workers())
try:
return _get_executor.executor
except AttributeError:
_get_executor.executor = concurrent.futures.ThreadPoolExecutor(
_max_workers())
return _get_executor.executor
@lru_cache(maxsize=32)
def _get_bucket_by_name(bucket_name):
s3_rsc = boto3.resource('s3')
return s3_rsc.Bucket(bucket_name)
@lru_cache(maxsize=32)
def _get_base_dir_by_file_path_and_bucket_name(filepath, bucket_name):
try:
for directory in _base_dirs():
if (directory in filepath) and (
_base_dir_to_bucket_map()[directory] == bucket_name):
return directory
except (KeyError, AttributeError):
return None
return None
def _bucket_name_and_base_dir_by_filepath(filepath):
try:
for directory in _base_dirs():
if directory in filepath:
return _base_dir_to_bucket_map()[directory], directory
except (KeyError, AttributeError):
pass
try:
return _default_bucket(), None
except KeyError:
raise ValueError(
"No bucket name was given, and neither a default was defined "
"nor could one be interpreted from the file path. Please "
"provide one explicitly, or define an appropriate bucket.")
return None, None
def _get_key(filepath, namekey, base_directory):
if namekey or not base_directory:
return ntpath.basename(filepath)
index = filepath.find(base_directory[base_directory.rfind('/'):])
return filepath[index + 1:]
@lru_cache(maxsize=32)
def _get_bucket_and_key(filepath, bucket_name, namekey):
base_directory = None
if bucket_name is None:
bucket_name, base_directory = _bucket_name_and_base_dir_by_filepath(
filepath)
elif not namekey:
base_directory = _get_base_dir_by_file_path_and_bucket_name(
filepath, bucket_name)
os.makedirs(base_directory, exist_ok=True)
bucket = _get_bucket_by_name(bucket_name)
key = _get_key(filepath, namekey, base_directory)
return bucket, key
# === Uploading/Downloading files ===
def _parse_file_path(filepath):
if '~' in filepath:
return os.path.expanduser(filepath)
return filepath
def _file_upload_thread(bucket, filepath, key):
try:
bucket.upload_file(filepath, key)
except BaseException as exc: # pylint: disable=W0703
print(
'File upload failed with following exception:\n{}'.format(exc),
flush=True
)
def upload_file(filepath, bucket_name=None, namekey=None, wait=False):
"""Uploads the given file to S3 storage.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to upload the file to. If not given, it will be
inferred from any defined base directory that is present on the path
(there is no guarentee which base directory will be used if several are
present in the given path). If base directory inferrence fails the
default bukcet will be used, if defined, else the operation will fail.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when uploading
to the bucket. If set, or if no base directory is found in the
filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
wait (optional) : bool
Defaults to False. If set to True, the function will wait on the upload
operation. Otherwise, the upload will be performed asynchronously in a
separate thread.
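Example
-------
A minimal sketch (assumes the file exists locally and 'my-bucket' is an
accessible S3 bucket):
    upload_file('/home/user/data/report.csv', bucket_name='my-bucket', wait=True)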
"""
filepath = _parse_file_path(filepath)
bucket, key = _get_bucket_and_key(filepath, bucket_name, namekey)
if wait:
bucket.upload_file(filepath, key)
else:
_get_executor().submit(_file_upload_thread, bucket, filepath, key)
def _file_time_modified(filepath):
timestamp = os.path.getmtime(filepath)
dt_obj = datetime.datetime.utcfromtimestamp(timestamp)
# this is correct only because the non-time-aware obj is in UTC!
dt_obj = dt_obj.replace(tzinfo=dateutil.tz.tzutc())
return dt_obj
def download_file(filepath, bucket_name=None, namekey=None, verbose=False):
"""Downloads the most recent version of the given file from S3, if needed.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to download the file from. If not given, it
will be inferred from any defined base directory that is present on
the path (there is no guarantee which base directory will be used if
several are present in the given path). If base directory inference
fails, the default bucket will be used, if defined; otherwise the operation
will fail.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when
downloading from the bucket. If set, or if no base directory is found
in the filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
verbose (optional) : bool
Defaults to False. If set to True, some informative messages will be
printed.
"""
filepath = _parse_file_path(filepath)
bucket, key = _get_bucket_and_key(filepath, bucket_name, namekey)
try:
if os.path.isfile(filepath):
if verbose:
print('File %s found on disk.' % key)
# this datetime object has tzinfo=dateutil.tz.utc()
s3_last_modified = bucket.Object(key).get()['LastModified']
if s3_last_modified > _file_time_modified(filepath):
if verbose:
print('But S3 has an updated version. Downloading...')
bucket.download_file(key, filepath)
else:
if verbose:
print('File %s NOT found on disk. Downloading...' % key)
# creating non-existing dirs on the path
if not os.path.exists(filepath):
os.makedirs(filepath[:filepath.rfind('/')])
bucket.download_file(key, filepath)
except ClientError:
if verbose:
print('Downloading the file failed with the following exception:')
print(traceback.format_exc())
raise ValueError('No file found with key %s' % key)
# === Saving/loading Python objects ===
def _pickle_serialiazer(pyobject, filepath):
pickle.dump(pyobject, open(filepath, 'wb'))
def save_object(pyobject, filepath, bucket_name=None,
serializer=_pickle_serialiazer, namekey=None, wait=False):
"""Saves the given object to S3 storage, caching it as the given file.
Arguments
---------
pyobject : object
The python object to save.
filepath : str
The full path, from root, to the desired cache file.
bucket_name (optional) : str
The name of the bucket to upload the file to. If not given, it will be
inferred from any defined base directory that is present on the path
(there is no guarantee which base directory will be used if several are
present in the given path). If base directory inference fails, the
default bucket will be used, if defined; otherwise the operation will fail.
serializer (optional) : callable
A callable that takes two positional arguments, a Python object and a
path to a file, and dumps the object to the given file. Defaults to a
wrapper of pickle.dump.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when uploading
to the bucket. If set, or if no base directory is found in the
filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
wait (optional) : bool
Defaults to False. If set to True, the function will wait on the upload
operation. Otherwise, the upload will be performed asynchronously in a
separate thread.
"""
serializer(pyobject, filepath)
upload_file(filepath, bucket_name, namekey, wait)
def _pickle_deserializer(filepath):
return pickle.load(open(filepath, 'rb'))
def load_object(filepath, bucket_name=None, deserializer=_pickle_deserializer,
namekey=None, verbose=False):
"""Loads the most recent version of the object cached in the given file.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to download the file from. If not given, it
will be inferred from any defined base directory that is present on
        the path (there is no guarantee which base directory will be used if
        several are present in the given path). If base directory inference
        fails, the default bucket will be used, if defined, else the operation
will fail.
deserializer (optional) : callable
        A callable that takes one positional argument, a path to a file, and
returns the object stored in it. Defaults to a wrapper of pickle.load.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when
downloading from the bucket. If set, or if no base directory is found
in the filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
verbose (optional) : bool
Defaults to False. If set to True, some informative messages will be
printed.
"""
download_file(filepath, bucket_name=bucket_name, namekey=namekey,
verbose=verbose)
return deserializer(filepath)
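# Illustrative sketch (assumed path and bucket): a save/load round trip with
# the default pickle (de)serializers.
#
#     save_object({'a': 1}, '/data/cache/params.pkl',
#                 bucket_name='my-example-bucket', wait=True)
#     params = load_object('/data/cache/params.pkl',
#                          bucket_name='my-example-bucket')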
# === Saving/loading dataframes ===
def _pandas_df_csv_serializer(pyobject, filepath):
pyobject.to_csv(filepath)
def _pandas_df_excel_serializer(pyobject, filepath):
pyobject.to_excel(filepath)
def _pandas_df_feather_serializer(pyobject, filepath):
feather.write_dataframe(pyobject, filepath)
def _get_pandas_df_serializer(dformat):
dformat = dformat.lower()
if dformat == 'csv':
return _pandas_df_csv_serializer
if dformat == 'excel':
return _pandas_df_excel_serializer
if dformat == 'feather':
return _pandas_df_feather_serializer
def save_dataframe(df, filepath, bucket_name=None, dformat='csv', namekey=None,
wait=False):
"""Writes the given dataframe as a CSV file to disk and S3 storage.
Arguments
---------
df : pandas.Dataframe
The pandas Dataframe object to save.
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to upload the file to. If not given, it will be
inferred from any defined base directory that is present on the path
        (there is no guarantee which base directory will be used if several are
        present in the given path). If base directory inference fails, the
        default bucket will be used, if defined, else the operation will fail.
dformat (optional) : str
        The storage format for the Dataframe. One of 'csv', 'excel' and
'feather'. Defaults to 'csv'.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when uploading
to the bucket. If set, or if no base directory is found in the
filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
wait (optional) : bool
Defaults to False. If set to True, the function will wait on the upload
operation. Otherwise, the upload will be performed asynchronously in a
separate thread.
"""
save_object(df, filepath, serializer=_get_pandas_df_serializer(dformat),
bucket_name=bucket_name, namekey=namekey, wait=wait)
def _pandas_df_csv_deserializer(filepath):
return pd.read_csv(filepath)
def _pandas_df_excel_deserializer(filepath):
return pd.read_excel(filepath)
def _pandas_df_feather_deserializer(filepath):
return feather.read_dataframe(filepath)
def _get_pandas_df_deserializer(dformat):
dformat = dformat.lower()
if dformat == 'csv':
return _pandas_df_csv_deserializer
if dformat == 'excel':
return _pandas_df_excel_deserializer
if dformat == 'feather':
return _pandas_df_feather_deserializer
def load_dataframe(filepath, bucket_name=None, dformat='csv', namekey=None,
verbose=False):
"""Loads the most updated version of a dataframe from file, fetching it
from S3 storage if necessary.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to download the file from. If not given, it
will be inferred from any defined base directory that is present on
        the path (there is no guarantee which base directory will be used if
        several are present in the given path). If base directory inference
        fails, the default bucket will be used, if defined, else the operation
will fail.
dformat (optional) : str
        The storage format for the Dataframe. One of 'csv', 'excel' and
'feather'. Defaults to 'csv'.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when
downloading from the bucket. If set, or if no base directory is found
in the filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
verbose (optional) : bool
Defaults to False. If set to True, some informative messages will be
printed.
"""
return load_object(
        filepath, deserializer=_get_pandas_df_deserializer(dformat),
bucket_name=bucket_name, namekey=namekey, verbose=verbose)
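# Illustrative sketch (assumed path and bucket): persisting and reloading a
# dataframe through the S3-backed cache using the feather format.
#
#     df = pd.DataFrame({'x': [1, 2, 3]})
#     save_dataframe(df, '/data/cache/frame.feather',
#                    bucket_name='my-example-bucket', dformat='feather')
#     df2 = load_dataframe('/data/cache/frame.feather',
#                          bucket_name='my-example-bucket', dformat='feather')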
|
the-stack_0_1260 | """Dependency injector base providers unit tests."""
import unittest2 as unittest
from dependency_injector import (
containers,
providers,
errors,
)
class ProviderTests(unittest.TestCase):
def setUp(self):
self.provider = providers.Provider()
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.provider))
def test_call(self):
self.assertRaises(NotImplementedError, self.provider.__call__)
def test_delegate(self):
delegate1 = self.provider.delegate()
self.assertIsInstance(delegate1, providers.Delegate)
self.assertIs(delegate1(), self.provider)
delegate2 = self.provider.delegate()
self.assertIsInstance(delegate2, providers.Delegate)
self.assertIs(delegate2(), self.provider)
self.assertIsNot(delegate1, delegate2)
def test_provider(self):
delegate1 = self.provider.provider
self.assertIsInstance(delegate1, providers.Delegate)
self.assertIs(delegate1(), self.provider)
delegate2 = self.provider.provider
self.assertIsInstance(delegate2, providers.Delegate)
self.assertIs(delegate2(), self.provider)
self.assertIsNot(delegate1, delegate2)
def test_override(self):
overriding_provider = providers.Provider()
self.provider.override(overriding_provider)
self.assertTrue(self.provider.overridden)
self.assertIs(self.provider.last_overriding, overriding_provider)
def test_double_override(self):
overriding_provider1 = providers.Object(1)
overriding_provider2 = providers.Object(2)
self.provider.override(overriding_provider1)
overriding_provider1.override(overriding_provider2)
self.assertEqual(self.provider(), overriding_provider2())
def test_overriding_context(self):
overriding_provider = providers.Provider()
with self.provider.override(overriding_provider):
self.assertTrue(self.provider.overridden)
self.assertFalse(self.provider.overridden)
def test_override_with_itself(self):
self.assertRaises(errors.Error, self.provider.override, self.provider)
def test_override_with_not_provider(self):
obj = object()
self.provider.override(obj)
self.assertIs(self.provider(), obj)
def test_reset_last_overriding(self):
overriding_provider1 = providers.Provider()
overriding_provider2 = providers.Provider()
self.provider.override(overriding_provider1)
self.provider.override(overriding_provider2)
self.assertIs(self.provider.overridden[-1], overriding_provider2)
self.assertIs(self.provider.last_overriding, overriding_provider2)
self.provider.reset_last_overriding()
self.assertIs(self.provider.overridden[-1], overriding_provider1)
self.assertIs(self.provider.last_overriding, overriding_provider1)
self.provider.reset_last_overriding()
self.assertFalse(self.provider.overridden)
self.assertIsNone(self.provider.last_overriding)
def test_reset_last_overriding_of_not_overridden_provider(self):
self.assertRaises(errors.Error, self.provider.reset_last_overriding)
def test_reset_override(self):
overriding_provider = providers.Provider()
self.provider.override(overriding_provider)
self.assertTrue(self.provider.overridden)
self.assertEqual(self.provider.overridden, (overriding_provider,))
self.provider.reset_override()
self.assertEqual(self.provider.overridden, tuple())
def test_deepcopy(self):
provider = providers.Provider()
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Provider)
def test_deepcopy_from_memo(self):
provider = providers.Provider()
provider_copy_memo = providers.Provider()
provider_copy = providers.deepcopy(
provider, memo={id(provider): provider_copy_memo})
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_overridden(self):
provider = providers.Provider()
overriding_provider = providers.Provider()
provider.override(overriding_provider)
provider_copy = providers.deepcopy(provider)
overriding_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Provider)
self.assertIsNot(overriding_provider, overriding_provider_copy)
self.assertIsInstance(overriding_provider_copy, providers.Provider)
def test_repr(self):
self.assertEqual(repr(self.provider),
'<dependency_injector.providers.'
'Provider() at {0}>'.format(hex(id(self.provider))))
class ObjectProviderTests(unittest.TestCase):
def test_is_provider(self):
self.assertTrue(providers.is_provider(providers.Object(object())))
def test_provided_instance_provider(self):
provider = providers.Object(object())
self.assertIsInstance(provider.provided, providers.ProvidedInstance)
def test_call_object_provider(self):
obj = object()
self.assertIs(providers.Object(obj)(), obj)
def test_call_overridden_object_provider(self):
obj1 = object()
obj2 = object()
provider = providers.Object(obj1)
provider.override(providers.Object(obj2))
self.assertIs(provider(), obj2)
def test_deepcopy(self):
provider = providers.Object(1)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Object)
def test_deepcopy_from_memo(self):
provider = providers.Object(1)
provider_copy_memo = providers.Provider()
provider_copy = providers.deepcopy(
provider, memo={id(provider): provider_copy_memo})
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_overridden(self):
provider = providers.Object(1)
overriding_provider = providers.Provider()
provider.override(overriding_provider)
provider_copy = providers.deepcopy(provider)
overriding_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Object)
self.assertIsNot(overriding_provider, overriding_provider_copy)
self.assertIsInstance(overriding_provider_copy, providers.Provider)
def test_deepcopy_doesnt_copy_provided_object(self):
# Fixes bug #231
# Details: https://github.com/ets-labs/python-dependency-injector/issues/231
some_object = object()
provider = providers.Object(some_object)
provider_copy = providers.deepcopy(provider)
self.assertIs(provider(), some_object)
self.assertIs(provider_copy(), some_object)
def test_repr(self):
some_object = object()
provider = providers.Object(some_object)
self.assertEqual(repr(provider),
'<dependency_injector.providers.'
'Object({0}) at {1}>'.format(
repr(some_object),
hex(id(provider))))
class DelegateTests(unittest.TestCase):
def setUp(self):
self.delegated = providers.Provider()
self.delegate = providers.Delegate(self.delegated)
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.delegate))
def test_init_with_not_provider(self):
self.assertRaises(errors.Error, providers.Delegate, object())
def test_call(self):
delegated1 = self.delegate()
delegated2 = self.delegate()
self.assertIs(delegated1, self.delegated)
self.assertIs(delegated2, self.delegated)
def test_repr(self):
self.assertEqual(repr(self.delegate),
'<dependency_injector.providers.'
'Delegate({0}) at {1}>'.format(
repr(self.delegated),
hex(id(self.delegate))))
class DependencyTests(unittest.TestCase):
def setUp(self):
self.provider = providers.Dependency(instance_of=list)
def test_init_with_not_class(self):
self.assertRaises(TypeError, providers.Dependency, object())
def test_with_abc(self):
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
provider = providers.Dependency(collections_abc.Mapping)
provider.provided_by(providers.Factory(dict))
self.assertIsInstance(provider(), collections_abc.Mapping)
self.assertIsInstance(provider(), dict)
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.provider))
def test_provided_instance_provider(self):
self.assertIsInstance(self.provider.provided, providers.ProvidedInstance)
def test_call_overridden(self):
self.provider.provided_by(providers.Factory(list))
self.assertIsInstance(self.provider(), list)
def test_call_overridden_but_not_instance_of(self):
self.provider.provided_by(providers.Factory(dict))
self.assertRaises(errors.Error, self.provider)
def test_call_not_overridden(self):
self.assertRaises(errors.Error, self.provider)
def test_deepcopy(self):
provider = providers.Dependency(int)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Dependency)
def test_deepcopy_from_memo(self):
provider = providers.Dependency(int)
provider_copy_memo = providers.Provider()
provider_copy = providers.deepcopy(
provider, memo={id(provider): provider_copy_memo})
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_overridden(self):
provider = providers.Dependency(int)
overriding_provider = providers.Provider()
provider.override(overriding_provider)
provider_copy = providers.deepcopy(provider)
overriding_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Dependency)
self.assertIsNot(overriding_provider, overriding_provider_copy)
self.assertIsInstance(overriding_provider_copy, providers.Provider)
def test_repr(self):
self.assertEqual(repr(self.provider),
'<dependency_injector.providers.'
'Dependency({0}) at {1}>'.format(
repr(list),
hex(id(self.provider))))
class ExternalDependencyTests(unittest.TestCase):
def setUp(self):
self.provider = providers.ExternalDependency(instance_of=list)
def test_is_instance(self):
self.assertIsInstance(self.provider, providers.Dependency)
class DependenciesContainerTests(unittest.TestCase):
class Container(containers.DeclarativeContainer):
dependency = providers.Provider()
def setUp(self):
self.provider = providers.DependenciesContainer()
self.container = self.Container()
def test_getattr(self):
has_dependency = hasattr(self.provider, 'dependency')
dependency = self.provider.dependency
self.assertIsInstance(dependency, providers.Dependency)
self.assertIs(dependency, self.provider.dependency)
self.assertTrue(has_dependency)
self.assertIsNone(dependency.last_overriding)
def test_getattr_with_container(self):
self.provider.override(self.container)
dependency = self.provider.dependency
self.assertTrue(dependency.overridden)
self.assertIs(dependency.last_overriding, self.container.dependency)
def test_providers(self):
dependency1 = self.provider.dependency1
dependency2 = self.provider.dependency2
self.assertEqual(self.provider.providers, {'dependency1': dependency1,
'dependency2': dependency2})
def test_override(self):
dependency = self.provider.dependency
self.provider.override(self.container)
self.assertTrue(dependency.overridden)
self.assertIs(dependency.last_overriding, self.container.dependency)
def test_reset_last_overriding(self):
dependency = self.provider.dependency
self.provider.override(self.container)
self.provider.reset_last_overriding()
self.assertIsNone(dependency.last_overriding)
self.assertIsNone(dependency.last_overriding)
def test_reset_override(self):
dependency = self.provider.dependency
self.provider.override(self.container)
self.provider.reset_override()
self.assertFalse(dependency.overridden)
self.assertFalse(dependency.overridden)
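# Illustrative sketch (not part of the test suite): the override API these
# tests exercise, shown outside of unittest.
#
#     provider = providers.Object('real')
#     with provider.override(providers.Object('fake')):
#         assert provider() == 'fake'
#     assert provider() == 'real'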
|
the-stack_0_1261 | import random
import threading
import time
from statistics import mean
from typing import Optional
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
PANDA_OUTPUT_VOLTAGE = 5.28
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
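# Worked example for the gain above (the update period is an assumption, not a
# measured value): with dt = 0.5 s and tau = 5 s, (0.5/5) / (0.5/5 + 1) = 0.1/1.1 ≈ 0.091.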
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
# Parameters
def get_battery_capacity():
return _read_param("/sys/class/power_supply/battery/capacity", int)
# Helpers
def _read_param(path, parser, default=0):
try:
with open(path) as f:
return parser(f.read())
except Exception:
return default
def panda_current_to_actual_current(panda_current):
# From white/grey panda schematic
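  # Worked example (illustrative raw ADC reading, assumed 12-bit scale): a value of
  # 2048 maps to (3.3 - 2048 * 3.3 / 4096) / 8.25 = 1.65 / 8.25 = 0.2 A.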
return (3.3 - (panda_current * 3.3 / 4096)) / 8.25
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.car_voltage_instant_mV = 12e3 # Last value of pandaState voltage
self.integration_lock = threading.Lock()
self.ts_last_charging_ctrl = None
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (pandaState.pandaState.pandaType in (log.PandaState.PandaType.whitePanda, log.PandaState.PandaType.greyPanda)) and (pandaState.pandaState.current > 1):
# If white/grey panda, use the integrated current measurements if the measurement is not 0
# If the measurement is 0, the current is 400mA or greater, and out of the measurement range of the panda
# This seems to be accurate to about 5%
current_power = (PANDA_OUTPUT_VOLTAGE * panda_current_to_actual_current(pandaState.pandaState.current))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t: float, current_power: float) -> None:
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self) -> int:
return int(self.power_used_uWh)
def get_car_battery_capacity(self) -> int:
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp: Optional[float]) -> bool:
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
def charging_ctrl(self, msg, ts, to_discharge, to_charge ):
if self.ts_last_charging_ctrl is None or (ts - self.ts_last_charging_ctrl) >= 300.:
      battery_charging = HARDWARE.get_battery_charging()
      if self.ts_last_charging_ctrl:
        if msg.deviceState.batteryPercent >= to_discharge and battery_charging:
          HARDWARE.set_battery_charging(False)
        elif msg.deviceState.batteryPercent <= to_charge and not battery_charging:
HARDWARE.set_battery_charging(True)
self.ts_last_charging_ctrl = ts |
the-stack_0_1264 | """
Provides a `scantree` function which recurses a given directory, yielding
(pathname, os.stat(pathname)) pairs.
Attempts to use the more efficient `scandir` function if this is available,
falling back to `os.listdir` otherwise.
"""
import os
import stat
try:
from os import scandir
except ImportError:
try:
from scandir import scandir
except ImportError:
scandir = None
if scandir:
def scantree(root):
for entry in scandir(root):
if entry.is_dir():
for item in scantree(entry.path):
yield item
else:
yield entry.path, entry.stat()
else:
def scantree(root):
for filename in os.listdir(root):
path = os.path.join(root, filename)
stat_result = os.stat(path)
if stat.S_ISDIR(stat_result.st_mode):
for item in scantree(path):
yield item
else:
yield path, stat_result
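# Minimal usage sketch (assumes the module is run as a script): walk a
# directory given on the command line (or the current one) and print sizes.
if __name__ == "__main__":
    import sys

    root_dir = sys.argv[1] if len(sys.argv) > 1 else "."
    for file_path, stat_result in scantree(root_dir):
        print(stat_result.st_size, file_path)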
|
the-stack_0_1265 | # original implementation: https://github.com/odegeasslbc/FastGAN-pytorch/blob/main/models.py
#
# modified by Axel Sauer for "Projected GANs Converge Faster"
#
import torch.nn as nn
from pg_modules.blocks import (InitLayer, UpBlockBig, UpBlockBigCond, UpBlockSmall, UpBlockSmallCond, SEBlock, conv2d)
def normalize_second_moment(x, dim=1, eps=1e-8):
return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
class DummyMapping(nn.Module):
def __init__(self):
super().__init__()
def forward(self, z, c, **kwargs):
return z.unsqueeze(1) # to fit the StyleGAN API
class FastganSynthesis(nn.Module):
def __init__(self, ngf=128, z_dim=256, nc=3, img_resolution=256, lite=False):
super().__init__()
self.img_resolution = img_resolution
self.z_dim = z_dim
# channel multiplier
nfc_multi = {2: 16, 4:16, 8:8, 16:4, 32:2, 64:2, 128:1, 256:0.5,
512:0.25, 1024:0.125}
nfc = {}
for k, v in nfc_multi.items():
nfc[k] = int(v*ngf)
# layers
self.init = InitLayer(z_dim, channel=nfc[2], sz=4)
UpBlock = UpBlockSmall if lite else UpBlockBig
self.feat_8 = UpBlock(nfc[4], nfc[8])
self.feat_16 = UpBlock(nfc[8], nfc[16])
self.feat_32 = UpBlock(nfc[16], nfc[32])
self.feat_64 = UpBlock(nfc[32], nfc[64])
self.feat_128 = UpBlock(nfc[64], nfc[128])
self.feat_256 = UpBlock(nfc[128], nfc[256])
self.se_64 = SEBlock(nfc[4], nfc[64])
self.se_128 = SEBlock(nfc[8], nfc[128])
self.se_256 = SEBlock(nfc[16], nfc[256])
self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)
if img_resolution > 256:
self.feat_512 = UpBlock(nfc[256], nfc[512])
self.se_512 = SEBlock(nfc[32], nfc[512])
if img_resolution > 512:
self.feat_1024 = UpBlock(nfc[512], nfc[1024])
def forward(self, input, c, **kwargs):
# map noise to hypersphere as in "Progressive Growing of GANS"
input = normalize_second_moment(input[:, 0])
feat_4 = self.init(input)
feat_8 = self.feat_8(feat_4)
feat_16 = self.feat_16(feat_8)
feat_32 = self.feat_32(feat_16)
feat_64 = self.se_64(feat_4, self.feat_64(feat_32))
feat_128 = self.se_128(feat_8, self.feat_128(feat_64))
if self.img_resolution >= 128:
feat_last = feat_128
if self.img_resolution >= 256:
feat_last = self.se_256(feat_16, self.feat_256(feat_last))
if self.img_resolution >= 512:
feat_last = self.se_512(feat_32, self.feat_512(feat_last))
if self.img_resolution >= 1024:
feat_last = self.feat_1024(feat_last)
return self.to_big(feat_last)
class FastganSynthesisCond(nn.Module):
def __init__(self, ngf=64, z_dim=256, nc=3, img_resolution=256, num_classes=1000, lite=False):
super().__init__()
self.z_dim = z_dim
nfc_multi = {2: 16, 4:16, 8:8, 16:4, 32:2, 64:2, 128:1, 256:0.5,
512:0.25, 1024:0.125, 2048:0.125}
nfc = {}
for k, v in nfc_multi.items():
nfc[k] = int(v*ngf)
self.img_resolution = img_resolution
self.init = InitLayer(z_dim, channel=nfc[2], sz=4)
UpBlock = UpBlockSmallCond if lite else UpBlockBigCond
self.feat_8 = UpBlock(nfc[4], nfc[8], z_dim)
self.feat_16 = UpBlock(nfc[8], nfc[16], z_dim)
self.feat_32 = UpBlock(nfc[16], nfc[32], z_dim)
self.feat_64 = UpBlock(nfc[32], nfc[64], z_dim)
self.feat_128 = UpBlock(nfc[64], nfc[128], z_dim)
self.feat_256 = UpBlock(nfc[128], nfc[256], z_dim)
self.se_64 = SEBlock(nfc[4], nfc[64])
self.se_128 = SEBlock(nfc[8], nfc[128])
self.se_256 = SEBlock(nfc[16], nfc[256])
self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)
if img_resolution > 256:
self.feat_512 = UpBlock(nfc[256], nfc[512])
self.se_512 = SEBlock(nfc[32], nfc[512])
if img_resolution > 512:
self.feat_1024 = UpBlock(nfc[512], nfc[1024])
self.embed = nn.Embedding(num_classes, z_dim)
def forward(self, input, c, update_emas=False):
c = self.embed(c.argmax(1))
# map noise to hypersphere as in "Progressive Growing of GANS"
input = normalize_second_moment(input[:, 0])
feat_4 = self.init(input)
feat_8 = self.feat_8(feat_4, c)
feat_16 = self.feat_16(feat_8, c)
feat_32 = self.feat_32(feat_16, c)
feat_64 = self.se_64(feat_4, self.feat_64(feat_32, c))
feat_128 = self.se_128(feat_8, self.feat_128(feat_64, c))
if self.img_resolution >= 128:
feat_last = feat_128
if self.img_resolution >= 256:
feat_last = self.se_256(feat_16, self.feat_256(feat_last, c))
if self.img_resolution >= 512:
feat_last = self.se_512(feat_32, self.feat_512(feat_last, c))
if self.img_resolution >= 1024:
feat_last = self.feat_1024(feat_last, c)
return self.to_big(feat_last)
class Generator(nn.Module):
def __init__(
self,
z_dim=256,
c_dim=0,
w_dim=0,
img_resolution=256,
img_channels=3,
ngf=128,
cond=0,
mapping_kwargs={},
synthesis_kwargs={}
):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
self.w_dim = w_dim
self.img_resolution = img_resolution
self.img_channels = img_channels
# Mapping and Synthesis Networks
self.mapping = DummyMapping() # to fit the StyleGAN API
Synthesis = FastganSynthesisCond if cond else FastganSynthesis
self.synthesis = Synthesis(ngf=ngf, z_dim=z_dim, nc=img_channels, img_resolution=img_resolution, **synthesis_kwargs)
def forward(self, z, c, **kwargs):
w = self.mapping(z, c)
img = self.synthesis(w, c)
return img
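# Illustrative sketch (shapes and sizes are assumptions): sampling images from
# the unconditional generator via the StyleGAN-style (z, c) calling convention.
#
#     import torch
#     G = Generator(z_dim=256, img_resolution=256, img_channels=3)
#     z = torch.randn(4, 256)
#     img = G(z, c=None)  # -> tensor of shape (4, 3, 256, 256)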
|
the-stack_0_1266 | import webbrowser
from liquid import Liquid
from pathlib import Path
DATA_DIRECTORY = Path.home() / ".acm_dl_data"
SEARCH_STRING = "https://dl.acm.org/action/doSearch?LimitedContentGroupKey={key}&pageSize=50&startPage={page_id}"
def _ensure_data_directory_exists(sub_dir=None):
"""Makes sure the data directory exists and returns the data directory path"""
if sub_dir:
path = DATA_DIRECTORY / sub_dir
else:
path = DATA_DIRECTORY
if not path.exists():
path.mkdir(parents=True)
return path
def _display_results_html(pattern, search_results):
with open(Path(__file__).parent / "templates/search_result.html") as f:
ret = Liquid(f).render(tempName = f"Results for : {pattern} (found {len(search_results)})", items = search_results)
out_file = _ensure_data_directory_exists("temp") / "search_results.html"
with open(out_file, "w") as f:
f.write(ret)
webbrowser.open("file://" + str(out_file.absolute()))
|
the-stack_0_1268 | """
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import subprocess
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
from pygments.util import get_choice_opt
from pygments.util import get_int_opt
from pygments.util import get_list_opt
# Import this carefully
try:
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager:
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
elif sys.platform.startswith('darwin'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_MAC
self._create_mac()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
stdout=subprocess.PIPE, stderr=None)
stdout, _ = proc.communicate()
if proc.returncode == 0:
lines = stdout.splitlines()
for line in lines:
if line.startswith(b'Fontconfig warning:'):
continue
path = line.decode().strip().strip(':')
if path:
return path
return None
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _get_mac_font_path(self, font_map, name, style):
return font_map.get((name + ' ' + style).strip().lower())
def _create_mac(self):
font_map = {}
for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
'/Library/Fonts/', '/System/Library/Fonts/'):
font_map.update(
(os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
for f in os.listdir(font_dir)
if f.lower().endswith(('ttf', 'ttc')))
for name in STYLES['NORMAL']:
path = self._get_mac_font_path(font_map, self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_mac_font_path(font_map, self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except OSError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
lookuperror = None
keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
for keyname in keynames:
try:
key = _winreg.OpenKey(*keyname)
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
return
except FontNotFound as err:
lookuperror = err
finally:
_winreg.CloseKey(key)
except OSError:
pass
else:
# If we get here, we checked all registry keys and had no luck
# We can be in one of two situations now:
# * All key lookups failed. In this case lookuperror is None and we
# will raise a generic error
# * At least one lookup failed with a FontNotFound error. In this
# case, we will raise that as a more specific error
if lookuperror:
raise lookuperror
raise FontNotFound('Can\'t open Windows font registry key')
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_text_size(self, text):
"""
Get the text size(width, height).
"""
return self.fonts['NORMAL'].getsize(text)
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 0.10
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Courier New" on Windows, "Menlo" on Mac OS, and
"DejaVu Sans Mono" on \\*nix
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 1.2
Default: empty list
`hl_color`
Specify the color for highlighting lines.
.. versionadded:: 1.2
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
self.encoding = 'latin1' # let pygments.format() do the right thing
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, linelength):
"""
Get the X coordinate of a character position.
"""
return linelength + self.image_pad + self.line_number_width
def _get_text_pos(self, linelength, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(linelength), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_text_bg_color(self, style):
"""
Get the correct background color for the token from the style.
"""
if style['bgcolor'] is not None:
bg_color = '#' + style['bgcolor']
else:
bg_color = None
return bg_color
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxlinelength, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxlinelength) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
text_fg=self.line_number_fg,
text_bg=None,
)
def _draw_text(self, pos, text, font, text_fg, text_bg):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, text_fg, text_bg))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
maxlinelength = linelength = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(linelength, lineno),
temp,
font = self._get_style_font(style),
text_fg = self._get_text_color(style),
text_bg = self._get_text_bg_color(style),
)
temp_width, temp_hight = self.fonts.get_text_size(temp)
linelength += temp_width
maxlinelength = max(maxlinelength, linelength)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
linelength = 0
charno = 0
lineno += 1
self.maxlinelength = maxlinelength
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
if self.line_number_separator:
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxlinelength, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, text_fg, text_bg in self.drawables:
if text_bg:
text_size = draw.textsize(text=value, font=font)
draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
draw.text(pos, value, font=font, fill=text_fg)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
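# Illustrative sketch (not part of this module; file names are assumptions):
# rendering highlighted source to a PNG via the standard pygments entry point.
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     with open('example.png', 'wb') as fp:
#         highlight('print("hi")', PythonLexer(),
#                   ImageFormatter(line_numbers=True), fp)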
|
the-stack_0_1270 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import skbio.stats.ordination
import pandas as pd
def pcoa(distance_matrix: skbio.DistanceMatrix,
number_of_dimensions: int = None) -> skbio.OrdinationResults:
if number_of_dimensions is None:
# calculate full decomposition using eigh
return skbio.stats.ordination.pcoa(distance_matrix, method='eigh',
inplace=False)
else:
# calculate the decomposition only for the `number_of_dimensions`
# using fast heuristic eigendecomposition (fsvd)
return skbio.stats.ordination.pcoa(
distance_matrix, method='fsvd',
number_of_dimensions=number_of_dimensions,
inplace=True)
def pcoa_biplot(pcoa: skbio.OrdinationResults,
features: pd.DataFrame) -> skbio.OrdinationResults:
return skbio.stats.ordination.pcoa_biplot(pcoa, features)
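# Illustrative sketch (toy distances, not real data): the two code paths above,
# the exact eigh decomposition and the truncated fsvd approximation.
#
#     dm = skbio.DistanceMatrix([[0, 1, 2], [1, 0, 3], [2, 3, 0]],
#                               ids=['a', 'b', 'c'])
#     full = pcoa(dm)                            # exact eigh decomposition
#     approx = pcoa(dm, number_of_dimensions=2)  # fast fsvd approximation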
|
the-stack_0_1272 | import asyncio
import json
import logging
import os.path
import random
import click
import sounddevice as sd
import soundfile as sf
from chmp.label import write_label, find_unlabeled
from chmp.app.kwdetect.aio import detect as _async_detect
from chmp.app.kwdetect.util import load_optional_model
_logger = logging.getLogger(__name__)
@click.group()
def main():
pass
@main.command()
@click.argument('target')
@click.option('--model')
def detect(target, model):
"""Continuously detect keywords and save extracted samples to disk."""
loop = asyncio.get_event_loop()
# TODO: add better exception handler
loop.set_exception_handler(print)
loop.run_until_complete(_detect(target, model))
async def _detect(target, model):
_logger.info('load model')
model = load_optional_model(model)
_logger.info('enter detection loop')
async for label in _async_detect(model, sample_target=target):
print('detected: ', label)
@main.command()
@click.argument('path')
@click.option('--labels')
def label(path, labels):
"""Generate labels in an interactive fashion."""
with open(labels, 'rt') as fobj:
labels = json.load(fobj)
label_decoding = {int(key): label for label, key in labels.items()}
label_decoding[-1] = '<repeat>'
unlabeled_files = find_unlabeled(os.path.join(path, '*.ogg'))
if not unlabeled_files:
print('No files to label :)')
return
random.shuffle(unlabeled_files)
print(f'Found {len(unlabeled_files)} unlabeled files')
print('Start labelling ...')
while unlabeled_files:
try:
fname = unlabeled_files.pop()
_label_example(fname, label_decoding)
except KeyboardInterrupt:
print('Stop labelling ...')
raise SystemExit(0)
print('No more files to label :)')
def _label_example(fname, label_decoding):
print(f'Processing: {fname}')
sample, _ = sf.read(fname)
while True:
sd.play(sample, blocking=True)
label = _get_label_from_user(label_decoding)
if label == '<skip>':
print('Skip sample')
return
elif label == '<repeat>':
continue
else:
write_label(fname, label=label, file=os.path.basename(fname))
return
def _get_label_from_user(label_decoding):
print('Chose label:', ' '.join(f'{label!r} ({code})' for code, label in label_decoding.items()))
while True:
user_input = input('Label [empty to skip]: > ')
if not user_input.strip():
return '<skip>'
try:
user_input = int(user_input)
except ValueError:
print('Invalid input ...')
else:
if user_input not in label_decoding:
print('Invalid input ...')
continue
return label_decoding[user_input]
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
the-stack_0_1274 | #!/usr/bin/env python
# encoding: utf-8
'''
@author: Jason Lee
@license: (C) Copyright @ Jason Lee
@contact: [email protected]
@file: 334.py
@time: 2019/6/10 19:14
@desc:
'''
from typing import List


class Solution:
def increasingTriplet(self, nums: List[int]) -> bool:
if len(nums) < 3:
return False
# first < second < third
first = second = float('inf')
for i in nums:
if i <= first:
first = i
            elif i <= second:  # the second number is larger than the first
second = i
else:
                return True  # the third number is larger than both of them
return False
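# Quick illustrative trace (assumed inputs): for [2, 1, 5, 0, 4, 6] the tracked
# values evolve as first: 2 -> 1 -> 0 and second: 5 -> 4, so 6 completes an
# increasing triplet (e.g. 1 < 4 < 6) and the call returns True.
if __name__ == '__main__':
    print(Solution().increasingTriplet([2, 1, 5, 0, 4, 6]))  # True
    print(Solution().increasingTriplet([5, 4, 3, 2, 1]))     # False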
|
the-stack_0_1275 | from gi.repository import Gtk
import face_functions
import face_recognizer
import barcode
import string
import os.path
class TableWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="FACE RECOGNIZER")
self.set_size_request(500, 300)
table = Gtk.Table(6, 3, True)
self.add(table)
hbox = Gtk.Box(spacing=6)
self.take_picture_normal = Gtk.Button(label="NORMAL")
hbox.pack_start(self.take_picture_normal, True, True, 0)
self.take_picture_normal.connect("clicked", self.on_normal_clicked)
self.take_picture_happy = Gtk.Button(label="HAPPY")
hbox.pack_start(self.take_picture_happy, True, True, 0)
self.take_picture_happy.connect("clicked", self.on_happy_clicked)
self.take_picture_surprised = Gtk.Button(label="SURPRISED")
hbox.pack_start(self.take_picture_surprised, True, True, 0)
self.take_picture_surprised.connect("clicked", self.on_surprised_clicked)
self.take_picture_wink = Gtk.Button(label="WINK")
hbox.pack_start(self.take_picture_wink, True, True, 0)
self.take_picture_wink.connect("clicked", self.on_wink_clicked)
self.take_picture_sleepy = Gtk.Button(label="SLEEPY")
hbox.pack_start(self.take_picture_sleepy, True, True, 0)
self.take_picture_sleepy.connect("clicked", self.on_sleepy_clicked)
self.take_picture_sad = Gtk.Button(label="SAD")
hbox.pack_start(self.take_picture_sad, True, True, 0)
self.take_picture_sad.connect("clicked", self.on_sad_clicked)
self.Entry_ID = Gtk.Entry()
self.Entry_ID.set_text("Enter your ID")
Detection_Button = Gtk.Button(label="Start Detector")
Detection_Button.connect("clicked", self.on_start_clicked)
Label_Admin = Gtk.Label("Admin Menu")
Label_User = Gtk.Label("User Menu")
table.attach(self.Entry_ID, 0, 1, 1, 2)
table.attach(hbox, 1, 3, 1, 2)
table.attach(Detection_Button, 0, 3, 3, 6)
table.attach(Label_Admin, 0, 3, 0, 1)
table.attach(Label_User, 0, 3, 2, 3)
def on_start_clicked(self, button):
self.recognizer = face_recognizer.train_recognizer("./Database")
img = face_functions.snap()
predicted,conf = face_recognizer.recognize_face(self.recognizer, img)
if(predicted==-1 or conf>50):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,Gtk.ButtonsType.CANCEL, "Face not recognized.")
message.run()
message.destroy()
return
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.CANCEL, "Face recognized!")
message.format_secondary_text("Recognized as subject "+str(predicted)+" with a doubt rating of "+str(conf))
message.run()
message.destroy()
d_barcode = barcode.get_barcode(img)
if (len(d_barcode)>0): d_barcode=self.trim_barcode(d_barcode[0])
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.CANCEL, "Barcode Detection")
print(predicted)
print("Barcode data found in this picture: " + str(d_barcode))
if (len(d_barcode)==0):
message.format_secondary_text("Barcode not detected.")
elif (int(predicted)==int(d_barcode)):
message.format_secondary_text("Barcode detected:" + d_barcode + "\nMatches with face.")
else:
message.format_secondary_text("Barcode detected:" + d_barcode + "\nDoes not match face.")
message.run()
message.destroy()
#print("\"Click me\" button was clicked")
def id_is_valid(self):
text = self.Entry_ID.get_text()
if len(text)!=7:
error_message = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,Gtk.ButtonsType.CANCEL, "ID must be exactly 7 digits long!")
error_message.run()
error_message.destroy()
return False
for ch in text:
if(ch not in string.digits):
error_message = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,Gtk.ButtonsType.CANCEL, "ID must contain numbers only!")
error_message.run()
error_message.destroy()
return False
return True
def trim_barcode(self, barcode):
return barcode[:7]
    def capture_expression(self, expression):
        # Shared handler for the expression buttons: take a picture, save it under the
        # entered ID, and report whether the file was written.
        if(self.id_is_valid()):
            path = "./Database/subject"+self.Entry_ID.get_text()+"."+expression+".png"
            face_functions.take_picture(path)
            if(os.path.isfile(path)):
                message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Picture taken")
            else:
                message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Error saving file - try again!")
            message.run()
            message.destroy()
    def on_normal_clicked(self, button):
        self.capture_expression("normal")
    def on_happy_clicked(self, button):
        self.capture_expression("happy")
    def on_surprised_clicked(self, button):
        self.capture_expression("surprised")
    def on_wink_clicked(self, button):
        self.capture_expression("wink")
    def on_sleepy_clicked(self, button):
        self.capture_expression("sleepy")
    def on_sad_clicked(self, button):
        self.capture_expression("sad")
win = TableWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main() |
the-stack_0_1276 | import numpy as np
import tensorflow as tf
import yolo.config as cfg
import tensorflow.contrib.slim as slim
#slim = tf.contrib.slim
class YOLONet(object):
def __init__(self, is_training=True):
self.classes = cfg.CLASSES
self.num_class = len(self.classes)
self.image_size = cfg.IMAGE_SIZE
self.cell_size = cfg.CELL_SIZE
self.boxes_per_cell = cfg.BOXES_PER_CELL
self.output_size = (self.cell_size * self.cell_size) *\
(self.num_class + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
        # 7*7*20 (class scores) converted into the corresponding matrix form (class vector)
self.boundary1 = self.cell_size * self.cell_size * self.num_class
        # + 7*7*2 converted into the corresponding matrix form (confidence/scale vector)
self.boundary2 = self.boundary1 +\
self.cell_size * self.cell_size * self.boxes_per_cell
self.object_scale = cfg.OBJECT_SCALE
self.noobject_scale = cfg.NOOBJECT_SCALE
self.class_scale = cfg.CLASS_SCALE
self.coord_scale = cfg.COORD_SCALE
self.learning_rate = cfg.LEARNING_RATE
self.batch_size = cfg.BATCH_SIZE
self.alpha = cfg.ALPHA
self.offset = np.transpose(np.reshape(np.array(
[np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell),
(self.boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0)
)
self.images = tf.placeholder(
tf.float32, [None, self.image_size, self.image_size, 3],
name='images'
)
self.logits = self.build_network(
self.images, num_outputs=self.output_size, alpha=self.alpha,
is_training=is_training
)
if is_training:
self.labels = tf.placeholder(
tf.float32,
[None, self.cell_size, self.cell_size, 5 + self.num_class]
)
self.loss_layer(self.logits, self.labels)
self.total_loss = tf.losses.get_total_loss()
tf.summary.scalar('total_loss', self.total_loss)
def build_network(self,
images,
num_outputs,
alpha,
keep_prob=0.5,
is_training=True,
scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha),
weights_regularizer=slim.l2_regularizer(0.0005),
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)
):
# conv2d(input, num_output, filter_size, stride=1, padding='SAME')
# maxpool(input, kernel_size, stride=2, padding='VAILD')
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0, 0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope='conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 256, 1, scope='conv_8')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 256, 1, scope='conv_13')
net = slim.conv2d(net, 512, 3, scope='conv_14')
net = slim.conv2d(net, 256, 1, scope='conv_15')
net = slim.conv2d(net, 512, 3, scope='conv_16')
net = slim.conv2d(net, 256, 1, scope='conv_17')
net = slim.conv2d(net, 512, 3, scope='conv_18')
net = slim.conv2d(net, 512, 1, scope='conv_19')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 512, 1, scope='conv_24')
net = slim.conv2d(net, 1024, 3, scope='conv_25')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(
net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]),
name='pad_27')
net = slim.conv2d(
net, 1024, 3, 2, padding='VALID', scope='conv_28')
net = slim.conv2d(net, 1024, 3, scope='conv_29')
net = slim.conv2d(net, 1024, 3, scope='conv_30')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(
net, keep_prob=keep_prob, is_training=is_training,
scope='dropout_35')
net = slim.fully_connected(
net, num_outputs, activation_fn=None, scope='fc_36')
return net
def calc_iou(self, boxes1, boxes2, scope='iou'):
"""calculate ious
Args:
boxes1: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] => (x_center, y_center, w, h)
boxes2: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] => (x_center, y_center, w, h)
Return:
iou: 4-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        Note: no non-maximum suppression is applied here; this is the raw IoU output.
"""
with tf.variable_scope(scope):
# transform (x_center, y_center, w, h) to (x1, y1, x2, y2)
            # tf.stack turns n-dim tensors into an (n+1)-dim tensor by adding a new last axis and stacking along it
boxes1_t = tf.stack([boxes1[..., 0] - boxes1[..., 2] / 2.0,
boxes1[..., 1] - boxes1[..., 3] / 2.0,
boxes1[..., 0] + boxes1[..., 2] / 2.0,
boxes1[..., 1] + boxes1[..., 3] / 2.0],
axis=-1)
boxes2_t = tf.stack([boxes2[..., 0] - boxes2[..., 2] / 2.0,
boxes2[..., 1] - boxes2[..., 3] / 2.0,
boxes2[..., 0] + boxes2[..., 2] / 2.0,
boxes2[..., 1] + boxes2[..., 3] / 2.0],
axis=-1)
# calculate the left up point & right down point
            # note: what is actually computed here are the two opposite corners of the intersection box
lu = tf.maximum(boxes1_t[..., :2], boxes2_t[..., :2])
rd = tf.minimum(boxes1_t[..., 2:], boxes2_t[..., 2:])
# intersection
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[..., 0] * intersection[..., 1]
# calculate the boxs1 square and boxs2 square
            # w * h from before the coordinate transform
square1 = boxes1[..., 2] * boxes1[..., 3]
square2 = boxes2[..., 2] * boxes2[..., 3]
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
def loss_layer(self, predicts, labels, scope='loss_layer'):
"""
        :param predicts: tensor produced by the network (post-convolution output)
        :param labels: ground-truth annotations to be decoded
:param scope:
:return: loss
"""
with tf.variable_scope(scope):
            # class vector, shape (45, 7, 7, 20)
            # these classes are the probabilities of the 20 categories, i.e. C conditional probabilities P(Class_i | Object)
predict_classes = tf.reshape(
predicts[:, :self.boundary1],
[self.batch_size, self.cell_size, self.cell_size, self.num_class]
)
            # confidence scores, shape (45, 7, 7, 2)
predict_scales = tf.reshape(
predicts[:, self.boundary1:self.boundary2],
[self.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell]
)
            # box location coordinates, shape (45, 7, 7, 2, 4)
predict_boxes = tf.reshape(
predicts[:, self.boundary2:],
[self.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell, 4]
)
            # convert the ground-truth labels into the corresponding matrix form
            # response is a 7*7 matrix: 1 at the cell containing the object center, 0 elsewhere
# response = 1_obj_i
response = tf.reshape(
labels[..., 0],
[self.batch_size, self.cell_size, self.cell_size, 1]
)
            # localization (box coordinates)
boxes = tf.reshape(
labels[..., 1:5],
[self.batch_size, self.cell_size, self.cell_size, 1, 4]
)
            # box location coordinates, shape (45, 7, 7, 2, 4)
boxes = tf.tile(boxes, [1, 1, 1, self.boxes_per_cell, 1]) / self.image_size
            # one-hot encoded class info: 1 for the actual target class, 0 otherwise
classes = labels[..., 5:]
offset = tf.reshape(
tf.constant(self.offset, dtype=tf.float32),
[1, self.cell_size, self.cell_size, self.boxes_per_cell]
)
offset = tf.tile(offset, [self.batch_size, 1, 1, 1])
offset_tran = tf.transpose(offset, (0, 2, 1, 3))
            # shape [batch_size, 7, 7, 2, 4]
            # add the cell offset to the predicted center points
predict_boxes_tran = tf.stack(
[(predict_boxes[..., 0] + offset) / self.cell_size,
(predict_boxes[..., 1] + offset_tran) / self.cell_size,
tf.square(predict_boxes[..., 2]),
tf.square(predict_boxes[..., 3])], axis=-1
)
# shape: batch*7*7*2
iou_predict_truth = self.calc_iou(predict_boxes_tran, boxes)
# calculate I tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
            # 1_obj_ij: whether the j-th bbox of cell i is responsible for an object
            # object_mask refines response from the cell level down to individual bboxes
object_mask = tf.reduce_max(iou_predict_truth, 3, keep_dims=True)
            # response is Pr(object) (a 0/1 matrix of object presence); folding it into object_mask here means only the IoU has to be considered afterwards
object_mask = tf.cast((iou_predict_truth >= object_mask), tf.float32) * response
# calculate no_I tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
            # all-ones matrix minus object_mask: the remaining 1s mark the no-object entries
noobject_mask = tf.ones_like(object_mask, dtype=tf.float32) - object_mask
            # the square root is applied to w and h, for the reasons explained in the paper
            # shape (4, batch_size, 7, 7, 2)
boxes_tran = tf.stack(
[boxes[..., 0] * self.cell_size - offset,
boxes[..., 1] * self.cell_size - offset_tran,
tf.sqrt(boxes[..., 2]),
tf.sqrt(boxes[..., 3])], axis=-1
)
            # classification loss: predict_classes are probabilities, classes is the one-hot label
class_delta = response * (predict_classes - classes)
class_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(class_delta), axis=[1, 2, 3]),
name='class_loss') * self.class_scale
            # confidence loss
# object_loss
object_delta = object_mask * (predict_scales - iou_predict_truth)
object_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(object_delta), axis=[1, 2, 3]),
name='object_loss') * self.object_scale
# noobject_loss
noobject_delta = noobject_mask * predict_scales
noobject_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(noobject_delta), axis=[1, 2, 3]),
name='noobject_loss') * self.noobject_scale
            # coord_loss, which also uses object_mask!
coord_mask = tf.expand_dims(object_mask, 4)
boxes_delta = coord_mask * (predict_boxes - boxes_tran)
coord_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(boxes_delta), axis=[1, 2, 3, 4]),
name='coord_loss') * self.coord_scale
tf.losses.add_loss(class_loss)
tf.losses.add_loss(object_loss)
tf.losses.add_loss(noobject_loss)
tf.losses.add_loss(coord_loss)
tf.summary.scalar('class_loss', class_loss)
tf.summary.scalar('object_loss', object_loss)
tf.summary.scalar('noobject_loss', noobject_loss)
tf.summary.scalar('coord_loss', coord_loss)
tf.summary.histogram('boxes_delta_x', boxes_delta[..., 0])
tf.summary.histogram('boxes_delta_y', boxes_delta[..., 1])
tf.summary.histogram('boxes_delta_w', boxes_delta[..., 2])
tf.summary.histogram('boxes_delta_h', boxes_delta[..., 3])
tf.summary.histogram('iou', iou_predict_truth)
def leaky_relu(alpha):
def op(inputs):
return tf.nn.leaky_relu(inputs, alpha=alpha, name='leaky_relu')
return op
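# Minimal usage sketch (added example, not part of the original file): build the network and
# evaluate the loss on random tensors. Assumes TF 1.x graph mode and that yolo/config.py
# provides the constants referenced above (BATCH_SIZE, IMAGE_SIZE, CELL_SIZE, CLASSES, ...).
if __name__ == '__main__':
    net = YOLONet(is_training=True)
    images = np.random.rand(cfg.BATCH_SIZE, cfg.IMAGE_SIZE, cfg.IMAGE_SIZE, 3).astype(np.float32)
    labels = np.random.rand(cfg.BATCH_SIZE, cfg.CELL_SIZE, cfg.CELL_SIZE, 5 + len(cfg.CLASSES)).astype(np.float32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('total loss:', sess.run(net.total_loss, feed_dict={net.images: images, net.labels: labels}))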
|
the-stack_0_1277 | """
Functions that can are used to modify XBlock fragments for use in the LMS and Studio
"""
import datetime
import hashlib
import json
import logging
import re
import uuid
import markupsafe
import webpack_loader.utils
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from django.utils.html import escape
from edx_django_utils.plugins import pluggable_override
from lxml import etree, html
from opaque_keys.edx.asides import AsideUsageKeyV1, AsideUsageKeyV2
from pytz import UTC
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.exceptions import InvalidScopeError
from xblock.scorable import ScorableXBlockMixin
from common.djangoapps import static_replace
from common.djangoapps.edxmako.shortcuts import render_to_string
from xmodule.seq_module import SequenceBlock
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.vertical_block import VerticalBlock
from xmodule.x_module import (
PREVIEW_VIEWS, STUDENT_VIEW, STUDIO_VIEW,
XModule, XModuleDescriptor, shim_xmodule_js,
)
log = logging.getLogger(__name__)
def wrap_fragment(fragment, new_content):
"""
    Returns a new Fragment that has `new_content` as its content,
    and all of the resources from `fragment`.
"""
wrapper_frag = Fragment(content=new_content)
wrapper_frag.add_fragment_resources(fragment)
return wrapper_frag
def request_token(request):
"""
Return a unique token for the supplied request.
This token will be the same for all calls to `request_token`
made on the same request object.
"""
# pylint: disable=protected-access
if not hasattr(request, '_xblock_token'):
request._xblock_token = uuid.uuid1().hex
return request._xblock_token
def wrap_xblock(
runtime_class,
block,
view,
frag,
context,
usage_id_serializer,
request_token, # pylint: disable=redefined-outer-name
display_name_only=False,
extra_data=None
):
"""
Wraps the results of rendering an XBlock view in a standard <section> with identifying
data so that the appropriate javascript module can be loaded onto it.
:param runtime_class: The name of the javascript runtime class to use to load this block
:param block: An XBlock (that may be an XModule or XModuleDescriptor)
:param view: The name of the view that rendered the fragment being wrapped
:param frag: The :class:`Fragment` to be wrapped
:param context: The context passed to the view being rendered
:param usage_id_serializer: A function to serialize the block's usage_id for use by the
front-end Javascript Runtime.
:param request_token: An identifier that is unique per-request, so that only xblocks
rendered as part of this request will have their javascript initialized.
:param display_name_only: If true, don't render the fragment content at all.
Instead, just render the `display_name` of `block`
:param extra_data: A dictionary with extra data values to be set on the wrapper
"""
if extra_data is None:
extra_data = {}
# If any mixins have been applied, then use the unmixed class
class_name = getattr(block, 'unmixed_class', block.__class__).__name__
data = {}
data.update(extra_data)
if context:
data.update(context.get('wrap_xblock_data', {}))
css_classes = [
'xblock',
f'xblock-{markupsafe.escape(view)}',
'xblock-{}-{}'.format(
markupsafe.escape(view),
markupsafe.escape(block.scope_ids.block_type),
)
]
if view == STUDENT_VIEW and getattr(block, 'HIDDEN', False):
css_classes.append('is-hidden')
if isinstance(block, (XModule, XModuleDescriptor)) or getattr(block, 'uses_xmodule_styles_setup', False):
if view in PREVIEW_VIEWS:
# The block is acting as an XModule
css_classes.append('xmodule_display')
elif view == STUDIO_VIEW:
# The block is acting as an XModuleDescriptor
css_classes.append('xmodule_edit')
css_classes.append('xmodule_' + markupsafe.escape(class_name))
if isinstance(block, (XModule, XModuleDescriptor)):
data['type'] = block.js_module_name
shim_xmodule_js(frag, block.js_module_name)
if frag.js_init_fn:
data['init'] = frag.js_init_fn
data['runtime-class'] = runtime_class
data['runtime-version'] = frag.js_init_version
data['block-type'] = block.scope_ids.block_type
data['usage-id'] = usage_id_serializer(block.scope_ids.usage_id)
data['request-token'] = request_token
data['graded'] = getattr(block, 'graded', False)
data['has-score'] = getattr(block, 'has_score', False)
if block.name:
data['name'] = block.name
template_context = {
'content': block.display_name if display_name_only else frag.content,
'classes': css_classes,
'display_name': block.display_name_with_default_escaped, # xss-lint: disable=python-deprecated-display-name
'data_attributes': ' '.join(f'data-{markupsafe.escape(key)}="{markupsafe.escape(value)}"'
for key, value in data.items()),
}
if hasattr(frag, 'json_init_args') and frag.json_init_args is not None:
template_context['js_init_parameters'] = frag.json_init_args
else:
template_context['js_init_parameters'] = ""
if isinstance(block, (XModule, XModuleDescriptor)):
# Add the webpackified asset tags
add_webpack_to_fragment(frag, class_name)
return wrap_fragment(frag, render_to_string('xblock_wrapper.html', template_context))
def wrap_xblock_aside(
runtime_class,
aside,
view,
frag,
context, # pylint: disable=unused-argument
usage_id_serializer,
request_token, # pylint: disable=redefined-outer-name
extra_data=None,
extra_classes=None
):
"""
Wraps the results of rendering an XBlockAside view in a standard <section> with identifying
data so that the appropriate javascript module can be loaded onto it.
:param runtime_class: The name of the javascript runtime class to use to load this block
:param aside: An XBlockAside
:param view: The name of the view that rendered the fragment being wrapped
:param frag: The :class:`Fragment` to be wrapped
:param context: The context passed to the view being rendered
:param usage_id_serializer: A function to serialize the block's usage_id for use by the
front-end Javascript Runtime.
:param request_token: An identifier that is unique per-request, so that only xblocks
rendered as part of this request will have their javascript initialized.
:param extra_data: A dictionary with extra data values to be set on the wrapper
:param extra_classes: A list with extra classes to be set on the wrapper element
"""
if extra_data is None:
extra_data = {}
data = {}
data.update(extra_data)
css_classes = [
f'xblock-{markupsafe.escape(view)}',
'xblock-{}-{}'.format(
markupsafe.escape(view),
markupsafe.escape(aside.scope_ids.block_type),
),
'xblock_asides-v1'
]
if extra_classes:
css_classes.extend(extra_classes)
if frag.js_init_fn:
data['init'] = frag.js_init_fn
data['runtime-class'] = runtime_class
data['runtime-version'] = frag.js_init_version
data['block-type'] = aside.scope_ids.block_type
data['usage-id'] = usage_id_serializer(aside.scope_ids.usage_id)
data['request-token'] = request_token
template_context = {
'content': frag.content,
'classes': css_classes,
'data_attributes': ' '.join(f'data-{markupsafe.escape(key)}="{markupsafe.escape(value)}"'
for key, value in data.items()),
}
if hasattr(frag, 'json_init_args') and frag.json_init_args is not None:
template_context['js_init_parameters'] = frag.json_init_args
else:
template_context['js_init_parameters'] = ""
return wrap_fragment(frag, render_to_string('xblock_wrapper.html', template_context))
def replace_jump_to_id_urls(course_id, jump_to_id_base_url, block, view, frag, context): # pylint: disable=unused-argument
"""
This will replace a link between courseware in the format
/jump_to_id/<id> with a URL for a page that will correctly redirect
This is similar to replace_course_urls, but much more flexible and
durable for Studio authored courses. See more comments in static_replace.replace_jump_to_urls
course_id: The course_id in which this rewrite happens
jump_to_id_base_url:
A app-tier (e.g. LMS) absolute path to the base of the handler that will perform the
redirect. e.g. /courses/<org>/<course>/<run>/jump_to_id. NOTE the <id> will be appended to
the end of this URL at re-write time
output: a new :class:`~web_fragments.fragment.Fragment` that modifies `frag` with
content that has been update with /jump_to_id links replaced
"""
return wrap_fragment(frag, static_replace.replace_jump_to_id_urls(frag.content, course_id, jump_to_id_base_url))
def replace_course_urls(course_id, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /course/...
with urls that are /courses/<course_id>/...
"""
return wrap_fragment(frag, static_replace.replace_course_urls(frag.content, course_id))
def replace_static_urls(data_dir, block, view, frag, context, course_id=None, static_asset_path=''): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /static/...
with urls that are /static/<prefix>/...
"""
return wrap_fragment(frag, static_replace.replace_static_urls(
frag.content,
data_dir,
course_id,
static_asset_path=static_asset_path
))
def grade_histogram(module_id):
'''
Print out a histogram of grades on a given problem in staff member debug info.
Warning: If a student has just looked at an xmodule and not attempted
it, their grade is None. Since there will always be at least one such student
this function almost always returns [].
'''
from django.db import connection
cursor = connection.cursor()
query = """\
SELECT courseware_studentmodule.grade,
COUNT(courseware_studentmodule.student_id)
FROM courseware_studentmodule
WHERE courseware_studentmodule.module_id=%s
GROUP BY courseware_studentmodule.grade"""
# Passing module_id this way prevents sql-injection.
cursor.execute(query, [str(module_id)])
grades = list(cursor.fetchall())
grades.sort(key=lambda x: x[0]) # Add ORDER BY to sql query?
if len(grades) >= 1 and grades[0][0] is None:
return []
return grades
def sanitize_html_id(html_id):
"""
Template uses element_id in js function names, so can't allow dashes and colons.
"""
sanitized_html_id = re.sub(r'[:-]', '_', html_id)
return sanitized_html_id
def add_staff_markup(user, disable_staff_debug_info, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the output of the old get_html function with additional information
for admin users only, including a histogram of student answers, the
definition of the xmodule, and a link to view the module in Studio
if it is a Studio edited, mongo stored course.
Does nothing if module is a SequenceBlock.
"""
if context and context.get('hide_staff_markup', False):
# If hide_staff_markup is passed, don't add the markup
return frag
# TODO: make this more general, eg use an XModule attribute instead
if isinstance(block, VerticalBlock) and (not context or not context.get('child_of_vertical', False)):
return frag
if isinstance(block, SequenceBlock) or getattr(block, 'HIDDEN', False):
return frag
block_id = block.location
if block.has_score and settings.FEATURES.get('DISPLAY_HISTOGRAMS_TO_STAFF'):
histogram = grade_histogram(block_id)
render_histogram = len(histogram) > 0
else:
histogram = None
render_histogram = False
if settings.FEATURES.get('ENABLE_LMS_MIGRATION') and hasattr(block.runtime, 'filestore'):
[filepath, filename] = getattr(block, 'xml_attributes', {}).get('filename', ['', None])
osfs = block.runtime.filestore
if filename is not None and osfs.exists(filename):
# if original, unmangled filename exists then use it (github
# doesn't like symlinks)
filepath = filename
data_dir = block.static_asset_path or osfs.root_path.rsplit('/')[-1]
giturl = block.giturl or 'https://github.com/MITx'
edit_link = f"{giturl}/{data_dir}/tree/master/{filepath}"
else:
edit_link = False
# Need to define all the variables that are about to be used
giturl = ""
data_dir = ""
source_file = block.source_file # source used to generate the problem XML, eg latex or word
# Useful to indicate to staff if problem has been released or not.
# TODO (ichuang): use _has_access_descriptor.can_load in lms.courseware.access,
# instead of now>mstart comparison here.
now = datetime.datetime.now(UTC)
is_released = "unknown"
mstart = block.start
if mstart is not None:
is_released = "<font color='red'>Yes!</font>" if (now > mstart) else "<font color='green'>Not yet</font>"
field_contents = []
for name, field in block.fields.items():
try:
field_contents.append((name, field.read_from(block)))
except InvalidScopeError:
log.warning("Unable to read field in Staff Debug information", exc_info=True)
field_contents.append((name, "WARNING: Unable to read field"))
staff_context = {
'fields': field_contents,
'xml_attributes': getattr(block, 'xml_attributes', {}),
'tags': block._class_tags, # pylint: disable=protected-access
'location': block.location,
'xqa_key': block.xqa_key,
'source_file': source_file,
'source_url': f'{giturl}/{data_dir}/tree/master/{source_file}',
'category': str(block.__class__.__name__),
'element_id': sanitize_html_id(block.location.html_id()),
'edit_link': edit_link,
'user': user,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
'histogram': json.dumps(histogram),
'render_histogram': render_histogram,
'block_content': frag.content,
'is_released': is_released,
'can_reset_attempts': 'attempts' in block.fields,
'can_rescore_problem': hasattr(block, 'rescore'),
'can_override_problem_score': isinstance(block, ScorableXBlockMixin),
'disable_staff_debug_info': disable_staff_debug_info,
}
if isinstance(block, ScorableXBlockMixin):
staff_context['max_problem_score'] = block.max_score()
return wrap_fragment(frag, render_to_string("staff_problem_info.html", staff_context))
def get_course_update_items(course_updates, provided_index=0):
"""
Returns list of course_updates data dictionaries either from new format if available or
from old. This function don't modify old data to new data (in db), instead returns data
in common old dictionary format.
New Format: {"items" : [{"id": computed_id, "date": date, "content": html-string}],
"data": "<ol>[<li><h2>date</h2>content</li>]</ol>"}
Old Format: {"data": "<ol>[<li><h2>date</h2>content</li>]</ol>"}
"""
def _course_info_content(html_parsed):
"""
Constructs the HTML for the course info update, not including the header.
"""
if len(html_parsed) == 1:
# could enforce that update[0].tag == 'h2'
content = html_parsed[0].tail
else:
content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
content += "\n".join([html.tostring(ele).decode('utf-8') for ele in html_parsed[1:]])
return content
if course_updates and getattr(course_updates, "items", None):
if provided_index and 0 < provided_index <= len(course_updates.items):
return course_updates.items[provided_index - 1]
else:
# return list in reversed order (old format: [4,3,2,1]) for compatibility
return list(reversed(course_updates.items))
course_update_items = []
if course_updates:
# old method to get course updates
# purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.
try:
course_html_parsed = html.fromstring(course_updates.data)
except (etree.XMLSyntaxError, etree.ParserError):
log.error("Cannot parse: " + course_updates.data) # lint-amnesty, pylint: disable=logging-not-lazy
escaped = escape(course_updates.data)
# xss-lint: disable=python-concat-html
course_html_parsed = html.fromstring("<ol><li>" + escaped + "</li></ol>")
# confirm that root is <ol>, iterate over <li>, pull out <h2> subs and then rest of val
if course_html_parsed.tag == 'ol':
# 0 is the newest
for index, update in enumerate(course_html_parsed):
if len(update) > 0:
content = _course_info_content(update)
# make the id on the client be 1..len w/ 1 being the oldest and len being the newest
computed_id = len(course_html_parsed) - index
payload = {
"id": computed_id,
"date": update.findtext("h2"),
"content": content
}
if provided_index == 0:
course_update_items.append(payload)
elif provided_index == computed_id:
return payload
return course_update_items
def xblock_local_resource_url(block, uri):
"""
Returns the URL for an XBlock's local resource.
Note: when running with the full Django pipeline, the file will be accessed
as a static asset which will use a CDN in production.
"""
xblock_class = getattr(block.__class__, 'unmixed_class', block.__class__)
if settings.PIPELINE['PIPELINE_ENABLED'] or not settings.REQUIRE_DEBUG:
return staticfiles_storage.url('xblock/resources/{package_name}/{path}'.format(
package_name=xblock_resource_pkg(xblock_class),
path=uri
))
else:
return reverse('xblock_resource_url', kwargs={
'block_type': block.scope_ids.block_type,
'uri': uri,
})
def xblock_resource_pkg(block):
"""
Return the module name needed to find an XBlock's shared static assets.
This method will return the full module name that is one level higher than
the one the block is in. For instance, problem_builder.answer.AnswerBlock
has a __module__ value of 'problem_builder.answer'. This method will return
'problem_builder' instead. However, for edx-ora2's
openassessment.xblock.openassessmentblock.OpenAssessmentBlock, the value
returned is 'openassessment.xblock'.
XModules are special cased because they're local to this repo and they
actually don't share their resource files when compiled out as part of the
XBlock asset pipeline. This only covers XBlocks and XModules using the
XBlock-style of asset specification. If they use the XModule bundling part
of the asset pipeline (xmodule_assets), their assets are compiled through an
entirely separate mechanism and put into lms-modules.js/css.
"""
# XModules are a special case because they map to different dirs for
# sub-modules.
module_name = block.__module__
if module_name.startswith('xmodule.'):
return module_name
return module_name.rsplit('.', 1)[0]
def is_xblock_aside(usage_key):
"""
Returns True if the given usage key is for an XBlock aside
Args:
usage_key (opaque_keys.edx.keys.UsageKey): A usage key
Returns:
bool: Whether or not the usage key is an aside key type
"""
return isinstance(usage_key, (AsideUsageKeyV1, AsideUsageKeyV2))
def get_aside_from_xblock(xblock, aside_type):
"""
Gets an instance of an XBlock aside from the XBlock that it's decorating. This also
configures the aside instance with the runtime and fields of the given XBlock.
Args:
xblock (xblock.core.XBlock): The XBlock that the desired aside is decorating
aside_type (str): The aside type
Returns:
xblock.core.XBlockAside: Instance of an xblock aside
"""
return xblock.runtime.get_aside_of_type(xblock, aside_type)
def hash_resource(resource):
"""
    Hash a :class:`web_fragments.fragment.FragmentResource`.
Those hash values are used to avoid loading the resources
multiple times.
"""
md5 = hashlib.md5()
for data in resource:
if isinstance(data, bytes):
md5.update(data)
elif isinstance(data, str):
md5.update(data.encode('utf-8'))
else:
md5.update(repr(data).encode('utf-8'))
return md5.hexdigest()
@pluggable_override('OVERRIDE_GET_UNIT_ICON')
def get_icon(block):
"""
A function that returns the CSS class representing an icon to use for this particular
XBlock (in the courseware navigation bar). Mostly used for Vertical/Unit XBlocks.
It can be overridden by setting `GET_UNIT_ICON_IMPL` to an alternative implementation.
"""
return block.get_icon_class()
|
the-stack_0_1278 | import subprocess, os, re
from mycroft import MycroftSkill, intent_handler
class SystemControl(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
self.log.info("System Control Skill loaded")
@intent_handler('ShutDown.intent')
def handle_shut_down_intent(self, message):
self.speak_dialog('shutdown')
@intent_handler('OpenApp.intent')
def handle_open_app_intent(self, message):
app_name = message.data.get('app')
ls = subprocess.run(['ls ~/.local/share/applications/*.desktop'],
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True)
user_apps = ls.stdout.splitlines()
matches = [app for app in user_apps if app_name in app]
#print(user_apps)
#print(sorted(matches, key=len))
if matches:
with open(os.path.join('~/.local/share/applications/', sorted(matches, key=len)[0])) as f:
lines = f.readlines()
for line in lines:
path = re.match(r'^Exec=(.*)', line)
if path:
self.log.info('Executing ' + path.group(1))
launch = subprocess.run('exec ' + path.group(1),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
if not launch.stderr:
self.speak_dialog('open.app', data={'app': app_name})
else:
self.speak(launch.stderr)
else:
self.speak('I did not find ' + app_name)
def create_skill():
return SystemControl()
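# For reference (an assumption about the vocabulary files this skill registers above,
# not part of the original source), the Padatious intent files might look like:
#
#   ShutDown.intent:  shut down the computer
#                     power off
#
#   OpenApp.intent:   open {app}
#                     launch {app}
#
# where the {app} entity populates message.data['app'] in handle_open_app_intent.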
|
the-stack_0_1280 | import pyxel
class Hero:
def __init__(self):
self.x = 0
self.y = 52
self.walk_counter = 0
self.state = 'idle_right'
self.models = {
'idle_right': [
[0, 0, 0, 16, 16, 0]
],
'idle_left': [
[0, 0, 0, -16, 16, 0]
],
'walk_right': [
[0, 0, 0, 16, 16, 0]
],
'walk_left': [
[0, 0, 0, -16, 16, 0]
],
'walk_down': [
[0, 0, 0, 16, 16, 0]
],
'walk_up': [
[0, 0, 0, 16, 16, 0]
]
}
def draw(self):
if self.state[:4] == 'walk':
pyxel.blt(self.x, self.y, *self.models[self.state][self.walk_counter])
else:
pyxel.blt(self.x, self.y, *self.models[self.state][self.walk_counter])
class OneBit:
def __init__(self):
pyxel.image(0).load(0, 0, 'assets.png')
self.models = {
'pine_group_1': [0, 80, 128, 32, 48, 0],
'pine_group_2': [0, 208, 48, 32, 48, 0],
'pine_single': [0, 48, 144, 32, 32, 0],
'pine_single_bare': [0, 112, 144, 32, 32, 0],
'grass_2': [0, 80, 0, 16, 16, 0],
'grass_4': [0, 80, 16, 16, 16, 0],
'tower_cone': [0, 144, 128, 16, 48, 0],
'tower_broken': [0, 208, 96, 32, 64, 0],
'rock_wall_1': [0, 16, 96, 16, 16, 0],
'rock_wall_2': [0, 32, 96, 16, 16, 0]
}
def draw(self):
pyxel.blt(40, 40, *self.models['pine_single'])
pyxel.blt(70, 0, *self.models['pine_single_bare'])
pyxel.blt(10, 176, *self.models['pine_single'])
pyxel.blt(20, 40, *self.models['grass_2'])
pyxel.blt(60, 38, *self.models['grass_2'])
pyxel.blt(20, 60, *self.models['grass_4'])
pyxel.blt(30, 80, *self.models['grass_4'])
|
the-stack_0_1281 | # NOTE: assumes the Matterport Mask R-CNN package is importable; it provides
# the base Config class that this snippet subclasses.
from mrcnn.config import Config
class ROBConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "rob"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
# IMAGES_PER_GPU = 8
# Default is 2
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 4 # background + 4 class labels
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
# IMAGE_MIN_DIM = 128
# IMAGE_MAX_DIM = 128
# Default is 800 x 1024
# Use smaller anchors because our image and objects are small
# RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# DEFAULT: RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Default is 200
# Use a small epoch since the data is simple
# STEPS_PER_EPOCH = 100
# Default is 1000
STEPS_PER_EPOCH = int(5561/(GPU_COUNT*IMAGES_PER_GPU))
# use small validation steps since the epoch is small
# VALIDATION_STEPS = 5
# Max number of final detections
DETECTION_MAX_INSTANCES = 5
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.6
# Run these lines in the co-lab cell where this is imported:
# config = ROBConfig()
# config.display()
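# Usage sketch (added example; assumes the Matterport Mask R-CNN package layout):
#   import mrcnn.model as modellib
#   model = modellib.MaskRCNN(mode="training", config=ROBConfig(), model_dir="./logs")
# The InferenceConfig defined just below is passed the same way for mode="inference".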
class InferenceConfig(ROBConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1 |
the-stack_0_1283 | import time, datetime
print("Importing OpenShift/Kubernetes packages ...")
import kubernetes
import ocp_resources
import openshift
from ocp_resources.node import Node
from ocp_resources.machine import Machine
from ocp_resources.node import Node
from openshift.dynamic import DynamicClient
try:
client_k8s = DynamicClient(client=kubernetes.config.new_client_from_config())
except Exception:
client_k8s = None
print("WARNING: kubernetes not available.")
print("Importing AWS boto3 ...")
import boto3
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html
client_ec2 = boto3.client('ec2')
resource_ec2 = boto3.resource('ec2')
print("Ready.")
def wait_openshift():
first = True
print("Waiting for OpenShift cluster to be ready ...")
import urllib3
while True:
try:
global client_k8s
client_k8s = DynamicClient(client=kubernetes.config.new_client_from_config())
nodes = [m for m in Node.get(dyn_client=client_k8s)]
if len(nodes) != 0:
print(f"Found {len(nodes)} node, OpenShift Cluster is ready!")
break
except urllib3.exceptions.MaxRetryError: pass
except kubernetes.client.exceptions.ApiException: pass
time.sleep(10)
def get_machine_props():
if not client_k8s:
return None, None
machines = [m for m in Machine.get(dyn_client=client_k8s)]
if len(machines) != 1:
raise RuntimeError("Should be only one machine ...")
machine = machines[0]
cluster_name = machine.cluster_name
print(f"Cluster name: {cluster_name}")
instance = resource_ec2.Instance(machine.instance.status.providerStatus.instanceId)
instance.load()
print(f"Instance Id: {instance.id}")
zone = machine.instance.spec.providerSpec.value.placement.availabilityZone
print(f"Availability zone: {zone}")
return cluster_name, instance, zone
def get_instance_root_volume(instance):
volumes = [v for v in instance.volumes.all()]
if len(volumes) > 1:
print("WARNING: more than 1 volume found ...")
return volumes[0]
def get_cluster_snapshot(cluster_name, instance, zone):
resp = client_ec2.describe_snapshots(
Filters=[{
'Name': f'tag:kubernetes.io/cluster/{cluster_name}',
'Values': ['owned']
}])
snapshots = resp["Snapshots"]
if len(snapshots) == 0:
return None
if len(snapshots) > 1:
print("WARNING: more than 1 snapshot found ... taking the first one.")
snapshot = resource_ec2.Snapshot(snapshots[0]['SnapshotId'])
snapshot.load()
return snapshot
def await_snapshot(snapshot):
prev = ""
if snapshot.progress == "100%":
print(f"Snapshot {snapshot.id} is ready.")
while not snapshot.progress == "100%":
if prev == "":
print(f"Awaiting for the completion of snapshot {snapshot.id} ...")
print(snapshot.progress)
prev = snapshot.progress
time.sleep(10)
snapshot.reload()
if prev != snapshot.progress:
prev = snapshot.progress
print(snapshot.progress)
def human_ts():
return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
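# Minimal usage sketch (added example, not part of the original script): wait for the
# cluster, then inspect its machine, root volume and any existing cluster snapshot.
if __name__ == "__main__":
    wait_openshift()
    cluster_name, instance, zone = get_machine_props()
    print(f"Root volume: {get_instance_root_volume(instance).id}")
    snapshot = get_cluster_snapshot(cluster_name, instance, zone)
    if snapshot:
        await_snapshot(snapshot)
    else:
        print("No cluster snapshot found.")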
|
the-stack_0_1288 | from typing import List, Dict, Tuple
from collections import Counter
from datasets import translation
class Node:
def __init__(self, key: str, counter: int, parent_node) -> None:
self.key = key
self.counter = counter
self.parent = parent_node
self.childs: Dict[str, Node] = {}
self.link = None
def increment_counter(self):
pass
def display(self, index: int=0) -> None:
# print("{} [{}: {}]\n".format(" -"*(index), translation.get(self.key, self.key), self.counter))
print("{} [{}: {}]\n".format(" -"*(index), self.key, self.counter))
for child in self.childs.values():
child.display(index+1)
def display_linked(self):
current_node = self
while current_node != None:
print("[Key = {}]".format(current_node.key), end='')
if current_node.link: print(" => ", end='')
current_node = current_node.link
print()
class FPG:
def __init__(self, min_support: int=2) -> None:
self.minimum_support = min_support
self.root_node = None
self.support = None
self.clean_dataset = None
self.header_table: Dict[str, list] = {}
self.conditional_pattern_base = {}
self.fis = None
    def run(self, dataset: List[list]) -> List[list]:
self.initial_dataset = dataset
wset = self.initial_dataset
        wset = [list(set(transaction)) for transaction in wset] # Make sure that items in transaction are unique
ui = self.get_unique_items(wset)
self.support = self.get_support(wset, ui)
self.clean_dataset = self.preprocess_dataset(wset)
return self.clean_dataset
def display_info(self) -> None:
# print("Initial dataset (minimum support = {}):".format(self.minimum_support), *self.initial_dataset, sep='\n')
# print("Support:", *{list(k)[0]:v for k,v in self.support.items()}.items(), sep='\n')
print("Cleaned and sorted dataset:", *self.clean_dataset, sep='\n')
# print("Support table:")
# print(*self.support.items(), sep='\n')
print("\nTree:")
self.print_tree()
if self.header_table != {}:
print("Header Table:")
print(*self.header_table.items(), sep='\n')
# print("Linked nodes:")
# for v in self.header_table.values():
# v['nodes'][0].display_linked()
if self.conditional_pattern_base != {}:
print("Conditional pattern base:")
print(*self.conditional_pattern_base.items(), sep='\n')
if self.fis:
print("Frequent item sets:", len(self.fis))
print(*self.fis, sep='\n')
def print_tree(self) -> None:
try:
self.root_node.display()
except:
print("\tNo root node.\n")
def get_unique_items(self, wset: List[list]) -> List[set]:
unique_items = list(set(sum(wset, [])))
return [frozenset([x]) for x in unique_items]
def get_support(self, dataset: List[list], candidates: List[frozenset]) -> Dict[frozenset, int]:
# support = {}
# for transaction in dataset:
# for item in candidates:
# if item.issubset(transaction):
# sub = frozenset(item)
# if sub in support.keys():
# support[sub] += 1
# else:
# support[sub] = 1
# support = sorted(support.items(), key=lambda x: x[1], reverse=True) # Sorting by value
# support = {k:v for k, v in support if v >= self.minimum_support} # Filtering by minimum support value
support = Counter(item for item in sum(dataset, []))
support = filter(lambda item: item[1]>=self.minimum_support, support.items())
support = sorted(support, key=lambda x:x[0])
support = sorted(support, key=lambda x:x[1], reverse=True)
# support = {frozenset([k]):v for k,v in support}
support = dict(support)
return support
def preprocess_dataset(self, dataset: List[list]) -> List[list]:
# Cleaning and sorting dataset
clean_dataset = []
# mask = [x for x in list(self.support)]
mask = list(self.support.keys())
for transaction in dataset:
clean_dataset.append(list(filter(lambda item: item in mask, transaction)))
clean_dataset[-1].sort(key=lambda i: mask.index(i))
return clean_dataset
def build_tree(self, dataset: List[list]) -> None:
for k in self.support:
self.header_table[k] = {'support': self.support[k], 'nodes': []}
self.root_node = Node('NULL', 0, None)
for transaction in dataset:
self.insert_transaction(transaction, self.root_node)
# Linking nodes
for v in self.header_table.values():
if len(v['nodes']) > 1:
for i in range(len(v['nodes'])-1):
v['nodes'][i].link = v['nodes'][i+1]
def insert_transaction(self, transaction: List[str], node: Node) -> None:
if len(transaction) < 1: return
key = transaction[0]
if key in node.childs.keys():
            node.childs[key].counter += 1  # TODO: when building conditional trees this should be incremented by the path's support
else:
node.childs[key] = Node(key, 1, node)
self.header_table[key]['nodes'].append(node.childs[key])
if len(transaction) > 1:
self.insert_transaction(transaction[1:], node.childs[key])
def get_prefix(self, node: Node):
paths = []
while node:
path = self.traverse_root(node)
if len(path) > 1:
paths.append([path[1:], node.counter])
node = node.link
return paths
def traverse_root(self, node: Node) -> list:
tmp = node
path = []
while tmp is not self.root_node:
path.append(tmp.key)
tmp = tmp.parent
return path
def get_CPB(self, key:str) -> List[list]:
start_node = self.header_table[key]['nodes'][0]
paths = self.get_prefix(start_node)
dataset = []
for item in paths:
dataset.append(item[0])
self.conditional_pattern_base[key] = dataset
return dataset
def mine_fis(self, header_parent, prefix, fis):
reverse_header_keys = list(header_parent.keys())[::-1]
for key in reverse_header_keys:
new_fis = prefix.copy()
new_fis.add(key)
fis.append(new_fis)
CPB = self.get_CPB(key)
# Generate sub-tree
tmp_fpg = FPG(self.minimum_support)
tmp_clean_dataset = tmp_fpg.run(CPB)
tmp_fpg.build_tree(tmp_clean_dataset)
if tmp_fpg.header_table != {}:
self.mine_fis(tmp_fpg.header_table, new_fis, fis)
self.fis = fis
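# Minimal usage sketch (added example, not part of the original class; assumes the
# `datasets` import at the top resolves): mine frequent itemsets from a toy transaction list.
if __name__ == '__main__':
    transactions = [['bread', 'milk'],
                    ['bread', 'diapers', 'beer'],
                    ['milk', 'diapers', 'beer'],
                    ['bread', 'milk', 'diapers', 'beer']]
    fpg = FPG(min_support=2)
    clean = fpg.run(transactions)
    fpg.build_tree(clean)
    fpg.mine_fis(fpg.header_table, set(), [])
    fpg.display_info()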
|
the-stack_0_1289 | import gzip
import math
import numpy as np
import os
from PIL import Image
import random
import torch
import torch.utils.data as data
def load_fixed_set(root, is_train):
# Load the fixed dataset
if is_train==False:
filename = 'testA_100.npy'
elif is_train==True:
filename = 'train.npy'
else:
print('Please choose is_train ture or False')
path = os.path.join(root, filename)
dataset = np.load(path)
return dataset
class Radar(data.Dataset):
def __init__(self, root, is_train, n_frames_input, n_frames_output, num_objects,
transform=None):
'''
param num_objects: a list of number of possible objects.
'''
super(Radar, self).__init__()
self.dataset = load_fixed_set(root, is_train)
self.length = self.dataset.shape[1]
self.is_train = is_train
self.num_objects = num_objects
self.n_frames_input = n_frames_input
self.n_frames_output = n_frames_output
self.n_frames_total = self.n_frames_input + self.n_frames_output
self.transform = transform
def __getitem__(self, idx):
length = self.n_frames_input + self.n_frames_output #20
images = self.dataset[:, idx, ...] # [20,64,64,1] #(14,100,100,1)
images = images[:,:,:,0] #(14,100,100)
images=images[:,np.newaxis,:,:] #(14,1,100,100)
input = images[:self.n_frames_input] #10,1,64,64
output = images[self.n_frames_input:length]
frozen = input[-1]
# add a wall to input data
# pad = np.zeros_like(input[:, 0])
# pad[:, 0] = 1
# pad[:, pad.shape[1] - 1] = 1
# pad[:, :, 0] = 1
# pad[:, :, pad.shape[2] - 1] = 1
#
# input = np.concatenate((input, np.expand_dims(pad, 1)), 1)
        output = torch.from_numpy(output / 255.0).contiguous().float()  # divide by 255 to normalize into [0, 1]
input = torch.from_numpy(input / 255.0).contiguous().float()
# print()
# print(input.size())
# print(output.size())
out = [idx, output, input, frozen, np.zeros(1)]
return out
def __len__(self):
return self.length
if __name__ == "__main__":
trainFolder = Radar(is_train=False,
root='data/',
n_frames_input=7,
n_frames_output=7,
num_objects=[2])
trainLoader = torch.utils.data.DataLoader(trainFolder,
batch_size=4,
shuffle=False)
# #S B OUTPUT INPUT FORZEN 0
for i, (idx, targetVar, inputVar, _, _) in enumerate(trainLoader):
inputs = inputVar # B,S,1,64,64
print("runing")
break
print("inputs.shape",inputs.shape)
print("inputs[0].shape",inputs[0].shape) # S,1,H,W Aim: 3S,1,H,W
print("inputs[0,0].shape",inputs[0,0].shape) |
the-stack_0_1290 | #!/usr/bin/env python
#
# Electrum - lightweight Avian client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses code from TLSLlite
# TLSLite Author: Trevor Perrin)
import binascii
from .x509 import ASN1_Node, bytestr_to_int, decode_OID
def a2b_base64(s):
try:
b = bytearray(binascii.a2b_base64(s))
except Exception as e:
raise SyntaxError("base64 error: %s" % e)
return b
def b2a_base64(b):
return binascii.b2a_base64(b)
def dePem(s, name):
"""Decode a PEM string into a bytearray of its payload.
The input must contain an appropriate PEM prefix and postfix
based on the input name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
The first such PEM block in the input will be found, and its
payload will be base64 decoded and returned.
"""
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
start = s.find(prefix)
if start == -1:
raise SyntaxError("Missing PEM prefix")
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s = s[start+len("-----BEGIN %s-----" % name) : end]
retBytes = a2b_base64(s) # May raise SyntaxError
return retBytes
def dePemList(s, name):
"""Decode a sequence of PEM blocks into a list of bytearrays.
The input must contain any number of PEM blocks, each with the appropriate
PEM prefix and postfix based on the input name string, e.g. for
name="TACK BREAK SIG". Arbitrary text can appear between and before and
after the PEM blocks. For example:
" Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:10Z -----BEGIN TACK
BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6Ap0Fgd9SSTOECeAKOUAym8zcYaXUwpk0+WuPYa7Zmm
SkbOlK4ywqt+amhWbg9txSGUwFO5tWUHT3QrnRlE/e3PeNFXLx5Bckg= -----END TACK
BREAK SIG----- Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:11Z
-----BEGIN TACK BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6BVCWfcjN36lx6JwxmZQncS6sww7DecFO/qjSePCxwM
+kdDqX/9/183nmjx6bf0ewhPXkA0nVXsDYZaydN8rJU1GaMlnjcIYxY= -----END TACK
BREAK SIG----- "
All such PEM blocks will be found, decoded, and return in an ordered list
of bytearrays, which may have zero elements if not PEM blocks are found.
"""
bList = []
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
while 1:
start = s.find(prefix)
if start == -1:
return bList
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s2 = s[start+len(prefix) : end]
retBytes = a2b_base64(s2) # May raise SyntaxError
bList.append(retBytes)
s = s[end+len(postfix) : ]
def pem(b, name):
"""Encode a payload bytearray into a PEM string.
The input will be base64 encoded, then wrapped in a PEM prefix/postfix
based on the name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
"""
s1 = b2a_base64(b)[:-1] # remove terminating \n
s2 = b""
while s1:
s2 += s1[:64] + b"\n"
s1 = s1[64:]
s = ("-----BEGIN %s-----\n" % name).encode('ascii') + s2 + \
("-----END %s-----\n" % name).encode('ascii')
return s
def pemSniff(inStr, name):
searchStr = "-----BEGIN %s-----" % name
return searchStr in inStr
def parse_private_key(s):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return _parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return _parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
def _parsePKCS8(_bytes):
s = ASN1_Node(_bytes)
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID_node = s.next_node(version_node)
ii = s.first_child(rsaOID_node)
rsaOID = decode_OID(s.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
if rsaOID != '1.2.840.113549.1.1.1':
raise SyntaxError("Unrecognized AlgorithmIdentifier")
privkey_node = s.next_node(rsaOID_node)
value = s.get_value_of_type(privkey_node, 'OCTET STRING')
return _parseASN1PrivateKey(value)
def _parseSSLeay(bytes):
return _parseASN1PrivateKey(ASN1_Node(bytes))
def bytesToNumber(s):
return int(binascii.hexlify(s), 16)
def _parseASN1PrivateKey(s):
s = ASN1_Node(s)
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = s.next_node(version_node)
e = s.next_node(n)
d = s.next_node(e)
p = s.next_node(d)
q = s.next_node(p)
dP = s.next_node(q)
dQ = s.next_node(dP)
qInv = s.next_node(dQ)
return list(map(lambda x: bytesToNumber(s.get_value_of_type(x, 'INTEGER')), [n, e, d, p, q, dP, dQ, qInv]))
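# Usage sketch (added example, not part of the original module): recover the RSA integer
# components from an unencrypted PEM-encoded key; 'key.pem' is a placeholder path.
#
#   with open('key.pem') as f:
#       n, e, d, p, q, dP, dQ, qInv = parse_private_key(f.read())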
|
the-stack_0_1292 | import argparse
import lbann
import lbann.models
import lbann.models.resnet
import lbann.contrib.args
import lbann.contrib.models.wide_resnet
import lbann.contrib.launcher
import data.imagenet
# Command-line arguments
desc = ('Construct and run ResNet on ImageNet-1K data. '
'Running the experiment is only supported on LC systems.')
parser = argparse.ArgumentParser(description=desc)
lbann.contrib.args.add_scheduler_arguments(parser)
parser.add_argument(
'--job-name', action='store', default='lbann_resnet', type=str,
help='scheduler job name (default: lbann_resnet)')
parser.add_argument(
'--resnet', action='store', default=50, type=int,
choices=(18, 34, 50, 101, 152),
help='ResNet variant (default: 50)')
parser.add_argument(
'--width', action='store', default=1, type=float,
help='Wide ResNet width factor (default: 1)')
parser.add_argument(
'--block-type', action='store', default=None, type=str,
choices=('basic', 'bottleneck'),
help='ResNet block type')
parser.add_argument(
'--blocks', action='store', default=None, type=str,
help='ResNet block counts (comma-separated list)')
parser.add_argument(
'--block-channels', action='store', default=None, type=str,
help='Internal channels in each ResNet block (comma-separated list)')
parser.add_argument(
'--bn-statistics-group-size', action='store', default=1, type=int,
help=('Group size for aggregating batch normalization statistics '
'(default: 1)'))
parser.add_argument(
'--warmup', action='store_true', help='use a linear warmup')
parser.add_argument(
'--mini-batch-size', action='store', default=256, type=int,
help='mini-batch size (default: 256)', metavar='NUM')
parser.add_argument(
'--num-epochs', action='store', default=90, type=int,
help='number of epochs (default: 90)', metavar='NUM')
parser.add_argument(
'--num-classes', action='store', default=1000, type=int,
help='number of ImageNet classes (default: 1000)', metavar='NUM')
parser.add_argument(
'--random-seed', action='store', default=0, type=int,
help='random seed for LBANN RNGs', metavar='NUM')
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()
# Due to a data reader limitation, the actual model realization must be
# hardcoded to 1000 labels for ImageNet.
imagenet_labels = 1000
# Choose ResNet variant
resnet_variant_dict = {18: lbann.models.ResNet18,
34: lbann.models.ResNet34,
50: lbann.models.ResNet50,
101: lbann.models.ResNet101,
152: lbann.models.ResNet152}
wide_resnet_variant_dict = {50: lbann.contrib.models.wide_resnet.WideResNet50_2}
block_variant_dict = {
'basic': lbann.models.resnet.BasicBlock,
'bottleneck': lbann.models.resnet.BottleneckBlock
}
if (any([args.block_type, args.blocks, args.block_channels])
and not all([args.block_type, args.blocks, args.block_channels])):
raise RuntimeError('Must specify all of --block-type, --blocks, --block-channels')
if args.block_type and args.blocks and args.block_channels:
# Build custom ResNet.
resnet = lbann.models.ResNet(
block_variant_dict[args.block_type],
imagenet_labels,
list(map(int, args.blocks.split(','))),
list(map(int, args.block_channels.split(','))),
zero_init_residual=True,
bn_statistics_group_size=args.bn_statistics_group_size,
name='custom_resnet',
width=args.width)
elif args.width == 1:
# Vanilla ResNet.
resnet = resnet_variant_dict[args.resnet](
imagenet_labels,
bn_statistics_group_size=args.bn_statistics_group_size)
elif args.width == 2 and args.resnet == 50:
# Use pre-defined WRN-50-2.
resnet = wide_resnet_variant_dict[args.resnet](
imagenet_labels,
bn_statistics_group_size=args.bn_statistics_group_size)
else:
# Some other Wide ResNet.
resnet = resnet_variant_dict[args.resnet](
imagenet_labels,
bn_statistics_group_size=args.bn_statistics_group_size,
width=args.width)
# Construct layer graph
input_ = lbann.Input()
images = lbann.Identity(input_)
labels = lbann.Identity(input_)
preds = resnet(images)
probs = lbann.Softmax(preds)
cross_entropy = lbann.CrossEntropy(probs, labels)
top1 = lbann.CategoricalAccuracy(probs, labels)
top5 = lbann.TopKCategoricalAccuracy(probs, labels, k=5)
layers = list(lbann.traverse_layer_graph(input_))
# Setup objective function
l2_reg_weights = set()
for l in layers:
if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
l2_reg_weights.update(l.weights)
l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=1e-4)
obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])
# Setup model
metrics = [lbann.Metric(top1, name='top-1 accuracy', unit='%'),
lbann.Metric(top5, name='top-5 accuracy', unit='%')]
callbacks = [lbann.CallbackPrint(),
lbann.CallbackTimer(),
lbann.CallbackDropFixedLearningRate(
drop_epoch=[30, 60, 80], amt=0.1)]
if args.warmup:
callbacks.append(
lbann.CallbackLinearGrowthLearningRate(
target=0.1 * args.mini_batch_size / 256, num_epochs=5))
model = lbann.Model(args.mini_batch_size,
args.num_epochs,
layers=layers,
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
# Setup optimizer
opt = lbann.contrib.args.create_optimizer(args)
# Setup data reader
data_reader = data.imagenet.make_data_reader(num_classes=args.num_classes)
# Setup trainer
trainer = lbann.Trainer(random_seed=args.random_seed)
# Run experiment
kwargs = lbann.contrib.args.get_scheduler_kwargs(args)
lbann.contrib.launcher.run(trainer, model, data_reader, opt,
job_name=args.job_name,
**kwargs)
|
the-stack_0_1295 | import argparse
from configparser import ConfigParser
import shlex
parser = argparse.ArgumentParser(description='Short sample app')
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
config = ConfigParser()
config.read('argparse_with_shlex.ini')
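# The INI file is expected to hold a [cli] section whose "options" value is an ordinary
# command line; the contents below are assumed for illustration only:
#
#   [cli]
#   options = -a -b two -c 3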
config_value = config.get('cli', 'options')
print('Config :', config_value)
argument_list = shlex.split(config_value)
print('Arg List:', argument_list)
print('Results :', parser.parse_args(argument_list)) |
the-stack_0_1299 | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from abc import ABC, abstractmethod
from azure.core.exceptions import HttpResponseError
from knack.util import CLIError
from knack.log import get_logger
from azext_iot.common.shared import AuthenticationTypeDataplane
from typing import Any, Dict, List
from types import SimpleNamespace
logger = get_logger(__name__)
POLICY_ERROR_TEMPLATE = (
    "Unable to discover a privileged policy for {0}: {1}, in subscription {2}. "
"When interfacing with an {0}, the IoT extension requires any single policy with "
"{3} rights."
)
def _format_policy_set(inputs: set) -> str:
inputs = list(f"'{x}'" for x in inputs)
if len(inputs) == 1:
return inputs[0]
elif len(inputs) == 2:
return inputs[0] + " and " + inputs[1]
inputs[-1] = "and " + inputs[-1]
return ", ".join(inputs)
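# For illustration (element order follows set iteration, so it may vary):
#   _format_policy_set({"RegistryRead"})                    -> "'RegistryRead'"
#   _format_policy_set({"RegistryRead", "RegistryWrite"})   -> "'RegistryRead' and 'RegistryWrite'"
#   _format_policy_set({"RegistryRead", "RegistryWrite", "ServiceConfig"})
#       -> "'RegistryRead', 'RegistryWrite', and 'ServiceConfig'"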
# Abstract base class
class BaseDiscovery(ABC):
"""BaseDiscovery to support resource and policy auto discovery.
Eliminates the need to provide the resource group and policy name to
find a specific target resource.
:ivar cmd: The cmd object
:vartype cmd:
:ivar client: The client object
:vartype client:
:ivar sub_id: Subscription id
:vartype sub_id: str
:ivar track2: Whether the client uses track2.
:vartype track2: bool
:ivar resource_type: Type of the resources the client fetches. Used to abstract
error messages.
:vartype resource_type: DiscoveryResourceType
    :ivar necessary_rights_set: Set of policy names needed for the IoT extension to run
commands against the DPS instance.
:vartype necessary_rights_set: Set[str]
"""
def __init__(self, cmd, necessary_rights_set: set = None, resource_type: str = None):
self.cmd = cmd
self.client = None
self.sub_id = "unknown"
self.resource_type = resource_type
self.track2 = False
self.necessary_rights_set = necessary_rights_set
@abstractmethod
def _initialize_client(self):
"""Creates the client if not created already."""
pass
@abstractmethod
def _make_kwargs(self, **kwargs) -> Dict[str, Any]:
"""Returns the correct kwargs for the client operations."""
pass
def get_resources(self, rg: str = None) -> List:
"""
Returns a list of all raw resources that are present within the subscription (and
resource group if provided).
The resources are the raw data returned by the client and will be used to build
target objects.
:param rg: Resource Group
:type rg: str
:return: List of resources
:rtype: List
"""
self._initialize_client()
resource_list = []
if not rg:
resource_pager = self.client.list_by_subscription()
else:
resource_pager = self.client.list_by_resource_group(resource_group_name=rg)
if self.track2:
for resource in resource_pager.by_page():
resource_list.extend(resource)
else:
try:
while True:
resource_list.extend(resource_pager.advance_page())
except StopIteration:
pass
return resource_list
def get_policies(self, resource_name: str, rg: str) -> List:
"""
Returns a list of all policies for a given resource in a given resource group.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:return: List of policies
:rtype: List
"""
self._initialize_client()
policy_pager = self.client.list_keys(
**self._make_kwargs(resource_name=resource_name, resource_group_name=rg)
)
policy_list = []
if self.track2:
for policy in policy_pager.by_page():
policy_list.extend(policy)
else:
try:
while True:
policy_list.extend(policy_pager.advance_page())
except StopIteration:
pass
return policy_list
def find_resource(self, resource_name: str, rg: str = None):
"""
Returns the resource with the given resource_name.
If the resource group is not provided, will look through all resources within the
subscription and return first match. This functionality will only work for
resource types that require unique names within the subscription.
Raises CLIError if no resource is found.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:return: Resource
:rtype: dict representing self.resource_type
"""
self._initialize_client()
if rg:
try:
return self.client.get(
**self._make_kwargs(
resource_name=resource_name, resource_group_name=rg
)
)
except: # pylint: disable=broad-except
raise CLIError(
"Unable to find {}: {} in resource group: {}".format(
self.resource_type, resource_name, rg
)
)
resource_list = self.get_resources()
if resource_list:
target = next(
(resource for resource in resource_list if resource_name.lower() == resource.name.lower()),
None
)
if target:
return target
raise CLIError(
"Unable to find {}: {} in current subscription {}.".format(
self.resource_type, resource_name, self.sub_id
)
)
def find_policy(self, resource_name: str, rg: str, policy_name: str = "auto"):
"""
Returns the policy with the policy_name for the given resource.
If the policy name is not provided, will look through all policies for the given
resource and return the first usable policy (the first policy that the IoT
extension can use).
Raises CLIError if no usable policy is found.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:param policy_name: Policy Name
:type policy_name: str
:return: Policy
:rtype: policy
"""
self._initialize_client()
if policy_name.lower() != "auto":
return self.client.get_keys_for_key_name(
**self._make_kwargs(
resource_name=resource_name,
resource_group_name=rg,
key_name=policy_name
)
)
policy_list = self.get_policies(resource_name=resource_name, rg=rg)
for policy in policy_list:
rights_set = set(policy.rights.split(", "))
if self.necessary_rights_set.issubset(rights_set):
logger.info(
"Using policy '%s' for %s interaction.", policy.key_name, self.resource_type
)
return policy
raise CLIError(
POLICY_ERROR_TEMPLATE.format(
self.resource_type,
resource_name,
self.sub_id,
_format_policy_set(self.necessary_rights_set)
)
)
@classmethod
@abstractmethod
    def get_target_by_cstring(cls, connection_string):
        """Returns target information needed from a connection string."""
pass
def get_target(
self, resource_name: str, resource_group_name: str = None, **kwargs
) -> Dict[str, str]:
"""
Returns a dictionary of the given resource's connection string parts to be used
by the extension.
This function finds the target resource and builds up a dictionary of connection
        string parts needed for IoT extension operation. In a future iteration we will
        return a 'Target' object rather than a dict, but that change is better served by
        aligning with the vNext pattern for IoT Hub/DPS.
If the resource group is not provided, will look through all resources within the
subscription and return first match. This functionality will only work for
resource types that require unique names within the subscription.
If the policy name is not provided, will look through all policies for the given
resource and return the first usable policy (the first policy that the IoT
extension can use).
Raises CLIError if no resource is found.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:keyword str login: Connection string for the target resource
:keyword str key_type: Key type to use in connection string construction
:keyword auth_type: Authentication Type for the Dataplane
:paramtype auth_type: AuthenticationTypeDataplane
:keyword str policy_name: Policy name to use
:return: Resource
:rtype: dict representing self.resource_type
"""
cstring = kwargs.get("login")
if cstring:
return self.get_target_by_cstring(connection_string=cstring)
resource_group_name = resource_group_name or kwargs.get("rg")
resource = self.find_resource(resource_name=resource_name, rg=resource_group_name)
key_type = kwargs.get("key_type", "primary")
# Azure AD auth path
auth_type = kwargs.get("auth_type", AuthenticationTypeDataplane.key.value)
if auth_type == AuthenticationTypeDataplane.login.value:
logger.info("Using AAD access token for %s interaction.", self.resource_type)
policy = SimpleNamespace()
policy.key_name = AuthenticationTypeDataplane.login.value
policy.primary_key = AuthenticationTypeDataplane.login.value
policy.secondary_key = AuthenticationTypeDataplane.login.value
return self._build_target(
resource=resource,
policy=policy,
key_type="primary",
**kwargs
)
policy_name = kwargs.get("policy_name", "auto")
rg = resource.additional_properties.get("resourcegroup")
resource_policy = self.find_policy(
resource_name=resource.name, rg=rg, policy_name=policy_name,
)
return self._build_target(
resource=resource,
policy=resource_policy,
key_type=key_type,
**kwargs
)
def get_targets(self, resource_group_name: str = None, **kwargs) -> List[Dict[str, str]]:
"""
Returns a list of targets (dicts representing a resource's connection string parts)
that are usable by the extension within the subscription (and resource group if
provided).
:param rg: Resource Group
:type rg: str
:return: Resources
:rtype: list[dict]
"""
targets = []
resources = self.get_resources(rg=resource_group_name)
if resources:
for resource in resources:
try:
targets.append(
self.get_target(
resource_name=resource.name,
resource_group_name=resource.additional_properties.get("resourcegroup"),
**kwargs
)
)
except HttpResponseError as e:
logger.warning("Could not access %s. %s", resource, e)
return targets
@abstractmethod
def _build_target(self, resource, policy, key_type=None, **kwargs):
"""Returns a dictionary representing the resource connection string parts to
be used by the IoT extension."""
pass
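# A rough sketch of what a concrete discovery class could look like; every name below is
# illustrative and not part of this module:
#
#     class HubDiscovery(BaseDiscovery):
#         def _initialize_client(self):
#             if not self.client:
#                 self.client = make_hub_resource_client(self.cmd)      # hypothetical helper
#                 self.sub_id = get_subscription_id(self.cmd.cli_ctx)   # hypothetical helper
#
#         def _make_kwargs(self, **kwargs):
#             # map the generic names onto the client's expected parameter names
#             return kwargs
#
#         @classmethod
#         def get_target_by_cstring(cls, connection_string):
#             return parse_hub_connection_string(connection_string)     # hypothetical helper
#
#         def _build_target(self, resource, policy, key_type=None, **kwargs):
#             return {"entity": resource.properties.host_name, "policy": policy.key_name}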
|
the-stack_0_1300 | """Sensor platform for Brottsplatskartan information."""
from __future__ import annotations
from collections import defaultdict
from datetime import timedelta
import logging
import uuid
import brottsplatskartan
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
CONF_AREA = "area"
DEFAULT_NAME = "Brottsplatskartan"
SCAN_INTERVAL = timedelta(minutes=30)
AREAS = [
"Blekinge län",
"Dalarnas län",
"Gotlands län",
"Gävleborgs län",
"Hallands län",
"Jämtlands län",
"Jönköpings län",
"Kalmar län",
"Kronobergs län",
"Norrbottens län",
"Skåne län",
"Stockholms län",
"Södermanlands län",
"Uppsala län",
"Värmlands län",
"Västerbottens län",
"Västernorrlands län",
"Västmanlands län",
"Västra Götalands län",
"Örebro län",
"Östergötlands län",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_AREA, default=[]): vol.All(cv.ensure_list, [vol.In(AREAS)]),
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Brottsplatskartan platform."""
area = config.get(CONF_AREA)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
name = config[CONF_NAME]
    # Every Home Assistant instance should have its own unique
# app parameter: https://brottsplatskartan.se/sida/api
app = f"ha-{uuid.getnode()}"
bpk = brottsplatskartan.BrottsplatsKartan(
app=app, area=area, latitude=latitude, longitude=longitude
)
add_entities([BrottsplatskartanSensor(bpk, name)], True)
class BrottsplatskartanSensor(SensorEntity):
"""Representation of a Brottsplatskartan Sensor."""
def __init__(self, bpk, name):
"""Initialize the Brottsplatskartan sensor."""
self._brottsplatskartan = bpk
self._attr_name = name
def update(self):
"""Update device state."""
incident_counts = defaultdict(int)
incidents = self._brottsplatskartan.get_incidents()
if incidents is False:
_LOGGER.debug("Problems fetching incidents")
return
for incident in incidents:
incident_type = incident.get("title_type")
incident_counts[incident_type] += 1
self._attr_extra_state_attributes = {
ATTR_ATTRIBUTION: brottsplatskartan.ATTRIBUTION
}
self._attr_extra_state_attributes.update(incident_counts)
self._attr_native_value = len(incidents)
|
the-stack_0_1301 | import torch
def images_to_levels(target, num_levels):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_levels:
end = start + n
# level_targets.append(target[:, start:end].squeeze(0))
level_targets.append(target[:, start:end])
start = end
return level_targets
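# For illustration (hypothetical shapes): with a list of per-image targets each of length 6
# and num_levels = [4, 2], the result is two tensors of shape (num_imgs, 4) and (num_imgs, 2).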
def anchor_inside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
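# For illustration: with img_shape = (100, 100) and allowed_border = 0, an anchor such as
# [-2, 10, 50, 60] is flagged invalid because its x1 lies outside the image boundary.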
def calc_region(bbox, ratio, featmap_size=None):
"""Calculate a proportional bbox region.
    The bbox center is kept fixed; each side is moved inward by `ratio` of the original
    width/height, so the new w' and h' are (1 - 2 * ratio) * w and (1 - 2 * ratio) * h.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
ratio (float): Ratio of the output region.
featmap_size (tuple): Feature map size used for clipping the boundary.
Returns:
tuple: x1, y1, x2, y2
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
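# For illustration: calc_region(torch.tensor([0., 0., 100., 100.]), ratio=0.25) keeps the box
# centre and returns (25, 25, 75, 75), i.e. the central half of the box in each dimension.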
|
the-stack_0_1302 | import argparse
import json
import os
import sys
from datetime import datetime
from pathlib import Path
from shlex import quote
import fuckit
import mutagen
import pandas as pd
from joblib import Parallel, delayed
from rich import inspect, print
from tinytag import TinyTag
from db import fetchall_dict, sqlite_con
from subtitle import get_subtitle
from utils import chunks, cmd, get_video_files, log
def parse_mutagen_tags(m, tiny_tags):
    def c(l):
        # Normalize a tag value: split comma/semicolon-separated entries, drop empty or
        # "unknown" items, and join the rest with semicolons; None/empty input yields None.
        if isinstance(l, str):
l = [l]
if l is None or len(l) == 0:
return None
no_comma = sum([s.split(",") for s in l], [])
no_semicol = sum([s.split(";") for s in no_comma], [])
no_unknown = [x for x in no_semicol if x.lower() not in ["unknown", ""]]
return ";".join(no_unknown)
    def ss(idx, l):
        # Safe subscript: return l[idx], or None when l is missing or too short.
        if l is None:
return None
try:
return l[idx]
except IndexError:
return None
return {
"albumgenre": c(m.tags.get("albumgenre")),
"albumgrouping": c(m.tags.get("albumgrouping")),
"mood": c(
list(
set(
(m.tags.get("albummood") or [])
+ (m.tags.get("MusicMatch_Situation") or [])
+ (m.tags.get("Songs-DB_Occasion") or [])
)
)
),
"genre": c(list(set((m.tags.get("genre") or []) + list(filter(None, [tiny_tags["genre"]]))))),
"year": ss(
0,
ss(
0,
list(
filter(
None,
[
m.tags.get("originalyear"),
m.tags.get("TDOR"),
m.tags.get("TORY"),
m.tags.get("date"),
m.tags.get("TDRC"),
m.tags.get("TDRL"),
],
)
),
),
),
"bpm": ss(
0,
ss(
0,
list(
filter(
None,
[m.tags.get("fBPM"), m.tags.get("bpm_accuracy")],
)
),
),
),
"key": ss(
0,
ss(
0,
list(
filter(
None,
[
m.tags.get("TIT1"),
m.tags.get("key_accuracy"),
m.tags.get("TKEY"),
],
)
),
),
),
"gain": ss(0, m.tags.get("replaygain_track_gain")),
"time": c(ss(0, m.tags.get("time_signature"))),
"decade": ss(0, m.tags.get("Songs-DB_Custom1")),
"categories": ss(0, m.tags.get("Songs-DB_Custom2")),
"city": ss(0, m.tags.get("Songs-DB_Custom3")),
"country": c(
ss(
0,
list(
filter(
None,
[
m.tags.get("Songs-DB_Custom4"),
m.tags.get("MusicBrainz Album Release Country"),
],
)
),
)
),
}
def extract_metadata(args, f):
try:
ffprobe = json.loads(
cmd(
f"ffprobe -loglevel quiet -print_format json=compact=1 -show_entries format {quote(f)}", quiet=True
).stdout
)
except:
try:
cmd(f"trash-put {quote(f)}")
print(f"Failed reading {f}", file=sys.stderr)
except:
pass
return
    if "format" not in ffprobe:
print(f"Failed reading format {f}", file=sys.stderr)
print(ffprobe)
return
stat = os.stat(f)
blocks_allocated = stat.st_blocks * 512
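    # st_blocks is counted in 512-byte units, so a ratio of apparent size to allocated
    # bytes greater than 1 indicates a sparse file (holes not backed by disk blocks).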
if "tags" in ffprobe["format"]:
del ffprobe["format"]["tags"]
if "size" in ffprobe["format"]:
ffprobe["format"]["size"] = int(ffprobe["format"]["size"])
if blocks_allocated == 0:
sparseness = 0
else:
sparseness = ffprobe["format"]["size"] / blocks_allocated
media = dict(
**ffprobe["format"],
# streams=ffprobe["streams"],
sparseness=sparseness,
time_created=datetime.fromtimestamp(stat.st_ctime),
time_modified=datetime.fromtimestamp(stat.st_mtime),
)
if args.audio:
media = {**media, "listen_count": 0}
try:
tiny_tags = TinyTag.get(f).as_dict()
mutagen_tags = mutagen.File(f)
assert mutagen_tags.tags
if "extra" in tiny_tags:
del tiny_tags["extra"]
except:
return media
mutagen_tags_p = parse_mutagen_tags(mutagen_tags, tiny_tags)
audio = {
**media,
**tiny_tags,
**mutagen_tags_p,
}
# print(audio)
@fuckit
def get_rid_of_known_tags():
del mutagen_tags.tags["encoder"]
del mutagen_tags.tags["TMED"]
del mutagen_tags.tags["TSO2"]
del mutagen_tags.tags["artist-sort"]
del mutagen_tags.tags["ASIN"]
del mutagen_tags.tags["Acoustid Id"]
del mutagen_tags.tags["Artists"]
del mutagen_tags.tags["BARCODE"]
del mutagen_tags.tags["CATALOGNUMBER"]
del mutagen_tags.tags["MusicBrainz Album Artist Id"]
del mutagen_tags.tags["MusicBrainz Album Id"]
del mutagen_tags.tags["MusicBrainz Album Release Country"]
del mutagen_tags.tags["MusicBrainz Album Status"]
del mutagen_tags.tags["MusicBrainz Album Type"]
del mutagen_tags.tags["MusicBrainz Artist Id"]
del mutagen_tags.tags["MusicBrainz Release Group Id"]
del mutagen_tags.tags["MusicBrainz Release Track Id"]
del mutagen_tags.tags["SCRIPT"]
del mutagen_tags.tags["originalyear"]
del mutagen_tags.tags["artist"]
del mutagen_tags.tags["album"]
del mutagen_tags.tags["ALBUMARTIST"]
del mutagen_tags.tags["title"]
del mutagen_tags.tags["TORY"]
del mutagen_tags.tags["TDOR"]
del mutagen_tags.tags["publisher"]
del mutagen_tags.tags["TRACKNUMBER"]
del mutagen_tags.tags["DISCNUMBER"]
del mutagen_tags.tags["replaygain_track_peak"]
del mutagen_tags.tags["replaygain_track_gain"]
del mutagen_tags.tags["date"]
return mutagen_tags.tags
new_tags = get_rid_of_known_tags()
if new_tags is not None:
print(new_tags)
return audio
return media
def main():
parser = argparse.ArgumentParser()
parser.add_argument("db")
parser.add_argument("paths", nargs="*")
parser.add_argument("-a", "--audio", action="store_true")
parser.add_argument("-s", "--subtitle", action="store_true")
parser.add_argument("-yt", "--youtube-only", action="store_true")
parser.add_argument("-sl", "--subliminal-only", action="store_true")
parser.add_argument("-f", "--force-rescan", action="store_true")
parser.add_argument("-v", "--verbose", action="count", default=0)
args = parser.parse_args()
if args.force_rescan:
Path(args.db).unlink(missing_ok=True)
if Path(args.db).exists():
cmd(f"sqlite-utils optimize {args.db}")
columns = cmd(f"sqlite-utils tables {args.db} --columns | jq -r '.[0].columns[]' ", quiet=True).stdout.splitlines()
for column in columns:
cmd(f"sqlite-utils create-index --if-not-exists --analyze {args.db} media {column}")
con = sqlite_con(args.db)
for path in args.paths:
path = Path(path).resolve()
print(f"{path} : Scanning...")
video_files = get_video_files(path)
new_files = set(video_files)
try:
existing = set(
map(
lambda x: x["filename"],
fetchall_dict(con, f"select filename from media where filename like '{path}%'"),
)
)
except:
video_files = list(new_files)
else:
video_files = list(new_files - existing)
deleted_files = list(existing - new_files)
if len(deleted_files) > 0:
print(f"Removing {len(deleted_files)} orphaned metadata")
df_chunked = chunks(deleted_files, 32765) # sqlite_param_limit
for l in df_chunked:
con.execute(
"delete from media where filename in (" + ",".join(["?"] * len(l)) + ")",
(*l,),
)
con.commit()
if len(video_files) > 0:
print(f"Adding {len(video_files)} new media")
log.info(video_files)
metadata = (
Parallel(n_jobs=-1 if args.verbose == 0 else 1, backend="threading")(
delayed(extract_metadata)(args, file) for file in video_files
)
or []
)
DF = pd.DataFrame(list(filter(None, metadata)))
if args.audio:
if DF.get(["year"]) is not None:
DF.year = DF.year.astype(str)
DF.apply(pd.to_numeric, errors="ignore").convert_dtypes().to_sql( # type: ignore
"media",
con=con,
if_exists="append",
index=False,
chunksize=70,
method="multi",
)
if args.subtitle:
Parallel(n_jobs=5)(delayed(get_subtitle)(args, file) for file in video_files)
if __name__ == "__main__":
main()
|
the-stack_0_1304 | import math
import csv
import numpy as np
from math import sin, cos
from numpy.random.mtrand import seed
import kalman
import matplotlib.pyplot as plt
import particle
magic_coeff = 0.047
wheel_radius = 2.7
wheel_base_half = 7.5
sonar_zero_distance = 13.8
init_x = 0.0
init_y = 0.0
init_angle = 0.0
x_cam_noise = (0.0, 49.0)
y_cam_noise = (0.0, 49.0)
gyro_noise = (0.0, math.radians(16.0))
sonar_normal_noise = (0.0, 4.0)
sonar_invalid_noise = (0.0, 1e+6)
def print_plot(plots=None, coords=None, bounded=True, title=None):
if plots is not None:
(t_plot, x_plot, y_plot) = plots
else:
t_plot = []
x_plot = []
y_plot = []
for tuple in coords:
t_plot.append(tuple[0])
x_plot.append(tuple[2])
y_plot.append(tuple[1])
def print_p(xlabel, t_plot, y_axis, boundary=None):
plt.ylabel(xlabel)
plt.xlabel("y(t)")
plt.plot(t_plot, y_axis)
if title is not None:
plt.title(title)
if boundary is not None:
plt.axis(boundary)
plt.show()
# print_p("x(t)", t_plot, x_plot, [1509976324.240, 1509976340.20860, 0, 140] if bounded else None)
# print_p("y(t)", t_plot, y_plot, [1509976324.240, 1509976340.20860, -10, 40] if bounded else None)
print_p("x(t)", y_plot, x_plot, [-10, 40, 0, 140] if bounded else None)
def follow_by_wheels():
coords = []
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
angle = init_angle
t_prev = 0
is_init = False
for row in spamreader:
try:
t = float(row[0])
if not is_init:
t_prev = t
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
is_init = True
dt = t - t_prev
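                # Differential-drive dead reckoning: near-equal wheel speeds mean straight-line
                # motion; otherwise the robot turns about the Instantaneous Center of Curvature
                # at signed radius R = L * (vl + vr) / (vr - vl) through angle w * dt, where
                # w = (vr - vl) / (2 * L) and L is the half wheel base.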
if abs(vr - vl) < 0.0001:
x_next = x + vl * dt * cos(angle)
y_next = y + vl * dt * sin(angle)
angle_next = angle
else:
R = wheel_base_half * (vl + vr) / (vr - vl)
wt = (vr - vl) / (wheel_base_half * 2) * dt
ICCx = x - R * sin(angle)
ICCy = y + R * cos(angle)
x_next = cos(wt) * (x - ICCx) - sin(wt) * (y - ICCy) + ICCx
y_next = sin(wt) * (x - ICCx) + cos(wt) * (y - ICCy) + ICCy
angle_next = angle + wt
x = x_next
y = y_next
angle = angle_next
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
t_prev = t
coords.append((t, -y, x))
except ValueError:
pass
print_plot(coords=coords, title="By wheels")
def follow_by_gyro():
coords = []
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
# angle = init_angle
t_prev = 0
is_init = False
for row in spamreader:
try:
t = float(row[0])
angle = float(row[2]) * math.pi / 180
if not is_init:
t_prev = t
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
is_init = True
# print(t, d, a, vl, vr, sep=', ')
dt = t - t_prev
avg_speed = (vr + vl) / 2
x_next = x + avg_speed * dt * sin(angle)
y_next = y + avg_speed * dt * cos(angle)
x = x_next
y = y_next
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
t_prev = t
coords.append((t, x, y))
except ValueError:
pass
print_plot(coords=coords, title="By gyro")
def print_log_camera():
t_plot = []
x_plot = []
y_plot = []
with open('log_camera_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
k = False
for row in spamreader:
if not k:
k = True
continue
t_plot.append(float(row[0]))
x_plot.append(float(row[1]))
y_plot.append(float(row[2]))
print_plot(plots=(t_plot, x_plot, y_plot), title="From camera")
def print_log_camera_kalman():
t_plot = []
x_plot = []
y_plot = []
with open('log_camera_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
k = False
for row in spamreader:
if not k:
k = True
continue
t_plot.append(float(row[0]))
x_plot.append(float(row[1]))
y_plot.append(float(row[2]))
Q = 1
x_plot = kalman.apply_filter(x_plot, Q, x_cam_noise[1])
y_plot = kalman.apply_filter(y_plot, Q, y_cam_noise[1])
print_plot(plots=(t_plot, x_plot, y_plot), title="From camera with Kalman Q=" + str(Q))
def follow_by_gyro_kalman():
coords = []
v = []
t = []
angle = [0]
Q = 0.05
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
is_init = False
for row in spamreader:
try:
t.append(float(row[0]))
if not is_init:
t.append(float(row[0]))
v.append((float(row[4]) + float(row[3])) * magic_coeff / 2)
is_init = True
angle.append(float(row[2]) * math.pi / 180)
v.append((float(row[4]) + float(row[3])) * magic_coeff / 2)
except ValueError:
pass
angle = kalman.apply_filter(angle, Q=Q, R=gyro_noise[1])
for i in range(1, len(t)):
dt = t[i] - t[i - 1]
x_next = x + v[i - 1] * dt * sin(angle[i])
y_next = y + v[i - 1] * dt * cos(angle[i])
x = x_next
y = y_next
coords.append((t[i], x, y))
print_plot(coords=coords, title="By gyro with Kalman, Q=" + str(Q))
def sensor_fusion():
coords_gyro = []
coords_wheels = []
vl = []
vr = []
t = []
angle = [0]
Q = 0.1
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
is_init = False
for row in spamreader:
try:
t.append(float(row[0]))
if not is_init:
t.append(float(row[0]))
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
is_init = True
angle.append(float(row[2]) * math.pi / 180)
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
except ValueError:
pass
for i in range(1, len(t)):
dt = t[i] - t[i - 1]
avg_speed = (vl[i - 1] + vr[i - 1]) / 2
x_next = x + avg_speed * dt * sin(angle[i])
y_next = y + avg_speed * dt * cos(angle[i])
x = x_next
y = y_next
coords_gyro.append((t[i], x, y))
a = init_angle
x = init_x
y = init_y
for i in range(1, len(t)):
dt = t[i] - t[i - 1]
if abs(vr[i - 1] - vl[i - 1]) < 0.0001:
x_next = x + vl[i - 1] * dt * cos(a)
y_next = y + vl[i - 1] * dt * sin(a)
angle_next = a
else:
R = wheel_base_half * (vl[i - 1] + vr[i - 1]) / (vr[i - 1] - vl[i - 1])
wt = (vr[i - 1] - vl[i - 1]) / (wheel_base_half * 2) * dt
ICCx = x - R * sin(a)
ICCy = y + R * cos(a)
x_next = cos(wt) * (x - ICCx) - sin(wt) * (y - ICCy) + ICCx
y_next = sin(wt) * (x - ICCx) + cos(wt) * (y - ICCy) + ICCy
angle_next = a + wt
x = x_next
y = y_next
a = angle_next
coords_wheels.append((t[i], -y, x))
x_w = [0]
x_g = [0]
for i in range(0, len(coords_gyro)):
x_w.append(coords_wheels[i][1])
x_g.append(coords_gyro[i][1])
x_matrix = np.matrix([x_w, x_g]).transpose()
Q = 0.5
R = np.matrix([[100, 0], [0, 100]]).transpose()
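    # Each Kalman measurement is the pair (x from wheels, x from gyro); R assumes a variance
    # of 100 for either estimate and Q = 0.5 is the assumed process noise of the fused state.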
y_w = [0]
y_g = [0]
for i in range(0, len(coords_gyro)):
y_w.append(coords_wheels[i][2])
y_g.append(coords_gyro[i][2])
y_matrix = np.matrix([y_w, y_g]).transpose()
x_kalman = kalman.apply_filter_x(x_matrix, Q, R, (len(x_w),)).tolist()
y_kalman = kalman.apply_filter_x(y_matrix, Q, R, (len(y_w),)).tolist()
print_plot(plots=(t, y_kalman, x_kalman), title="Kalman with 2 sensors")
def particle_filter():
vl = []
vr = []
t = []
dist = [sonar_zero_distance]
angle = [0]
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
is_init = False
for row in spamreader:
try:
t.append(float(row[0]))
if not is_init:
t.append(float(row[0]))
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
is_init = True
dist.append(float(row[1]))
angle.append(float(row[2]) * math.pi / 180)
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
except ValueError:
pass
particle.run_pf1(N=5000, plot_particles=True, vl=vl, vr=vr, t=t, angle=angle, dist=dist,
initial_x=(10, 10, np.pi / 4))
if __name__ == '__main__':
follow_by_wheels()
follow_by_gyro()
follow_by_gyro_kalman()
print_log_camera()
print_log_camera_kalman()
sensor_fusion()
# seed(2)
# particle_filter()
|
the-stack_0_1307 | #! /usr/local/bin/python3
# Consume and display messages from a Kafka topic
import argparse
from kafka import KafkaConsumer
def parse():
"""Parse command line"""
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--brokers', default='kafka:9092', help='Kafka bootstrap brokers')
parser.add_argument('-t', '--topic', default='test-topic', help='Name of topic to consume from')
return parser.parse_args()
if __name__ == '__main__':
args = parse()
# Create Kafka consumer client
consumer = KafkaConsumer(bootstrap_servers=args.brokers)
# Subscribe to topic
print('Subscribing to topic {}'.format(args.topic))
consumer.subscribe(args.topic)
try:
# Poll the topic for new messages
for msg in consumer:
# Decode the value for display
decoded_val = msg.value.decode('utf-8')
# Display the value of the message that was consumed
print('Consumed message from {}: "{}"'.format(args.topic, decoded_val))
except KeyboardInterrupt:
consumer.close()
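# Example invocation (script name and broker address assumed for illustration):
#   python consumer.py --brokers localhost:9092 --topic test-topic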
|
the-stack_0_1308 | import json
from sqlalchemy import Column, Integer, SmallInteger, String, ForeignKey, Text, JSON, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from ...policy import Policy, ALLOW_ACCESS, DENY_ACCESS, TYPE_STRING_BASED
from ...rules.base import Rule
from ...parser import compile_regex
Base = declarative_base()
class PolicySubjectModel(Base):
"""Storage model for policy subjects"""
__tablename__ = 'vakt_policy_subjects'
id = Column(Integer, primary_key=True)
uid = Column(String(255), ForeignKey('vakt_policies.uid', ondelete='CASCADE'))
subject = Column(JSON(), comment='JSON value for rule-based policies')
subject_string = Column(String(255), index=True, comment='Initial string value for string-based policies')
subject_regex = Column(String(520),
index=True,
comment='Regexp from initial string value for string-based policies')
class PolicyResourceModel(Base):
"""Storage model for policy resources"""
__tablename__ = 'vakt_policy_resources'
id = Column(Integer, primary_key=True)
uid = Column(String(255), ForeignKey('vakt_policies.uid', ondelete='CASCADE'))
resource = Column(JSON(), comment='JSON value for rule-based policies')
resource_string = Column(String(255), index=True, comment='Initial string value for string-based policies')
resource_regex = Column(String(520),
index=True,
comment='Regexp from initial string value for string-based policies')
class PolicyActionModel(Base):
"""Storage model for policy actions"""
__tablename__ = 'vakt_policy_actions'
id = Column(Integer, primary_key=True)
uid = Column(String(255), ForeignKey('vakt_policies.uid', ondelete='CASCADE'))
action = Column(JSON(), comment='JSON value for rule-based policies')
action_string = Column(String(255), index=True, comment='Initial string value for string-based policies')
action_regex = Column(String(520),
index=True,
comment='Regexp from initial string value for string-based policies')
class PolicyModel(Base):
"""Storage model for policy"""
__tablename__ = 'vakt_policies'
uid = Column(String(255), primary_key=True)
type = Column(SmallInteger)
description = Column(Text())
effect = Column(Boolean())
context = Column(JSON())
subjects = relationship(PolicySubjectModel, passive_deletes=True, lazy='joined')
resources = relationship(PolicyResourceModel, passive_deletes=True, lazy='joined')
actions = relationship(PolicyActionModel, passive_deletes=True, lazy='joined')
@classmethod
def from_policy(cls, policy):
"""
Instantiate from policy object
:param policy: object of type policy
"""
rvalue = cls()
return cls._save(policy, model=rvalue)
def update(self, policy):
"""
Update object attributes to match given policy
:param policy: object of type policy
"""
self._save(policy, model=self)
def to_policy(self):
"""
Create a policy object
:return: object of type `Policy`
"""
return Policy(uid=self.uid,
effect=ALLOW_ACCESS if self.effect else DENY_ACCESS,
description=self.description,
context=Rule.from_json(self.context),
subjects=[
self._policy_element_from_db(self.type, x.subject, x.subject_string)
for x in self.subjects
],
resources=[
self._policy_element_from_db(self.type, x.resource, x.resource_string)
for x in self.resources
],
actions=[
self._policy_element_from_db(self.type, x.action, x.action_string)
for x in self.actions
])
@classmethod
def _save(cls, policy, model):
"""
Helper to create PolicyModel from Policy object for add and update operations.
:param policy: object of type Policy
:param model: object of type PolicyModel
"""
policy_json = policy.to_json()
policy_dict = json.loads(policy_json)
model.uid = policy_dict['uid']
model.type = policy_dict['type']
model.effect = policy_dict['effect'] == ALLOW_ACCESS
model.description = policy_dict['description']
model.context = json.dumps(policy_dict['context'])
model.subjects = [
PolicySubjectModel(subject=x, subject_string=string, subject_regex=compiled)
for y in policy_dict['subjects']
for (x, string, compiled) in cls._policy_element_to_db(policy, y)
]
model.resources = [
PolicyResourceModel(resource=x, resource_string=string, resource_regex=compiled)
for y in policy_dict['resources']
for (x, string, compiled) in cls._policy_element_to_db(policy, y)
]
model.actions = [
PolicyActionModel(action=x, action_string=string, action_regex=compiled)
for y in policy_dict['actions']
for (x, string, compiled) in cls._policy_element_to_db(policy, y)
]
return model
@classmethod
def _policy_element_to_db(cls, policy, el):
json_value, string_value, compiled = None, None, None
if policy.type == TYPE_STRING_BASED:
string_value = el
if policy.start_tag in el and policy.end_tag in el:
compiled = compile_regex(el, policy.start_tag, policy.end_tag).pattern
else: # it's a rule-based policy and it's value is a json
json_value = json.dumps(el)
yield (json_value, string_value, compiled)
@classmethod
def _policy_element_from_db(cls, policy_type, element_json, element_string):
if policy_type == TYPE_STRING_BASED:
return element_string
return Rule.from_json(element_json)
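# Round-trip sketch (the Policy constructor signature is assumed from the vakt package):
#
#     p = Policy('1', effect=ALLOW_ACCESS, subjects=['admin'], resources=['book'], actions=['get'])
#     row = PolicyModel.from_policy(p)      # ORM object, e.g. passed to session.add(row)
#     assert row.to_policy().uid == p.uid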
|
the-stack_0_1309 | import collections
import contextlib
import inspect
import json
import jsonschema
import numpy as np
import pandas as pd
# If DEBUG_MODE is True, then schema objects are converted to dict and
# validated at creation time. This slows things down, particularly for
# larger specs, but leads to much more useful tracebacks for the user.
# Individual schema classes can override this by setting the
# class-level _class_is_valid_at_instantiation attribute to False
DEBUG_MODE = True
def enable_debug_mode():
global DEBUG_MODE
DEBUG_MODE = True
def disable_debug_mode():
global DEBUG_MODE
    DEBUG_MODE = False
@contextlib.contextmanager
def debug_mode(arg):
global DEBUG_MODE
original = DEBUG_MODE
DEBUG_MODE = arg
try:
yield
finally:
DEBUG_MODE = original
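# Usage sketch: temporarily disable per-instantiation validation, e.g.
#   with debug_mode(False):
#       obj = SomeSchemaSubclass(**large_spec)   # hypothetical subclass of SchemaBase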
def _subclasses(cls):
"""Breadth-first sequence of all classes which inherit from cls."""
seen = set()
current_set = {cls}
while current_set:
seen |= current_set
current_set = set.union(*(set(cls.__subclasses__()) for cls in current_set))
for cls in current_set - seen:
yield cls
def _todict(obj, validate, context):
"""Convert an object to a dict representation."""
if isinstance(obj, SchemaBase):
return obj.to_dict(validate=validate, context=context)
elif isinstance(obj, (list, tuple, np.ndarray)):
return [_todict(v, validate, context) for v in obj]
elif isinstance(obj, dict):
return {
k: _todict(v, validate, context)
for k, v in obj.items()
if v is not Undefined
}
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif isinstance(obj, np.number):
return float(obj)
elif isinstance(obj, (pd.Timestamp, np.datetime64)):
return pd.Timestamp(obj).isoformat()
else:
return obj
def _resolve_references(schema, root=None):
"""Resolve schema references."""
resolver = jsonschema.RefResolver.from_schema(root or schema)
while "$ref" in schema:
with resolver.resolving(schema["$ref"]) as resolved:
schema = resolved
return schema
class SchemaValidationError(jsonschema.ValidationError):
"""A wrapper for jsonschema.ValidationError with friendlier traceback"""
def __init__(self, obj, err):
super(SchemaValidationError, self).__init__(**self._get_contents(err))
self.obj = obj
@staticmethod
def _get_contents(err):
"""Get a dictionary with the contents of a ValidationError"""
try:
# works in jsonschema 2.3 or later
contents = err._contents()
except AttributeError:
try:
# works in Python >=3.4
spec = inspect.getfullargspec(err.__init__)
except AttributeError:
# works in Python <3.4
spec = inspect.getargspec(err.__init__)
contents = {key: getattr(err, key) for key in spec.args[1:]}
return contents
def __str__(self):
cls = self.obj.__class__
schema_path = ["{}.{}".format(cls.__module__, cls.__name__)]
schema_path.extend(self.schema_path)
schema_path = "->".join(
str(val)
for val in schema_path[:-1]
if val not in ("properties", "additionalProperties", "patternProperties")
)
return """Invalid specification
{}, validating {!r}
{}
""".format(
schema_path, self.validator, self.message
)
class UndefinedType(object):
"""A singleton object for marking undefined attributes"""
__instance = None
def __new__(cls, *args, **kwargs):
if not isinstance(cls.__instance, cls):
cls.__instance = object.__new__(cls, *args, **kwargs)
return cls.__instance
def __repr__(self):
return "Undefined"
Undefined = UndefinedType()
class SchemaBase(object):
"""Base class for schema wrappers.
Each derived class should set the _schema class attribute (and optionally
the _rootschema class attribute) which is used for validation.
"""
_schema = None
_rootschema = None
_class_is_valid_at_instantiation = True
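    # A minimal subclass sketch (hypothetical schema, shown for illustration only):
    #
    #     class Note(SchemaBase):
    #         _schema = {'type': 'object', 'properties': {'text': {'type': 'string'}}}
    #
    #     Note(text='hi').to_dict()   # -> {'text': 'hi'}
    #     Note(text=1)                # raises SchemaValidationError while DEBUG_MODE is on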
def __init__(self, *args, **kwds):
# Two valid options for initialization, which should be handled by
# derived classes:
# - a single arg with no kwds, for, e.g. {'type': 'string'}
# - zero args with zero or more kwds for {'type': 'object'}
if self._schema is None:
raise ValueError(
"Cannot instantiate object of type {}: "
"_schema class attribute is not defined."
"".format(self.__class__)
)
if kwds:
assert len(args) == 0
else:
assert len(args) in [0, 1]
# use object.__setattr__ because we override setattr below.
object.__setattr__(self, "_args", args)
object.__setattr__(self, "_kwds", kwds)
if DEBUG_MODE and self._class_is_valid_at_instantiation:
self.to_dict(validate=True)
def copy(self, deep=True, ignore=()):
"""Return a copy of the object
Parameters
----------
deep : boolean or list, optional
If True (default) then return a deep copy of all dict, list, and
SchemaBase objects within the object structure.
If False, then only copy the top object.
If a list or iterable, then only copy the listed attributes.
ignore : list, optional
A list of keys for which the contents should not be copied, but
only stored by reference.
"""
def _shallow_copy(obj):
if isinstance(obj, SchemaBase):
return obj.copy(deep=False)
elif isinstance(obj, list):
return obj[:]
elif isinstance(obj, dict):
return obj.copy()
else:
return obj
def _deep_copy(obj, ignore=()):
if isinstance(obj, SchemaBase):
args = tuple(_deep_copy(arg) for arg in obj._args)
kwds = {
k: (_deep_copy(v, ignore=ignore) if k not in ignore else v)
for k, v in obj._kwds.items()
}
with debug_mode(False):
return obj.__class__(*args, **kwds)
elif isinstance(obj, list):
return [_deep_copy(v, ignore=ignore) for v in obj]
elif isinstance(obj, dict):
return {
k: (_deep_copy(v, ignore=ignore) if k not in ignore else v)
for k, v in obj.items()
}
else:
return obj
try:
deep = list(deep)
except TypeError:
deep_is_list = False
else:
deep_is_list = True
if deep and not deep_is_list:
return _deep_copy(self, ignore=ignore)
with debug_mode(False):
copy = self.__class__(*self._args, **self._kwds)
if deep_is_list:
for attr in deep:
copy[attr] = _shallow_copy(copy._get(attr))
return copy
def _get(self, attr, default=Undefined):
"""Get an attribute, returning default if not present."""
attr = self._kwds.get(attr, Undefined)
if attr is Undefined:
attr = default
return attr
def __getattr__(self, attr):
# reminder: getattr is called after the normal lookups
if attr == "_kwds":
raise AttributeError()
if attr in self._kwds:
return self._kwds[attr]
else:
try:
_getattr = super(SchemaBase, self).__getattr__
except AttributeError:
_getattr = super(SchemaBase, self).__getattribute__
return _getattr(attr)
def __setattr__(self, item, val):
self._kwds[item] = val
def __getitem__(self, item):
return self._kwds[item]
def __setitem__(self, item, val):
self._kwds[item] = val
def __repr__(self):
if self._kwds:
args = (
"{}: {!r}".format(key, val)
for key, val in sorted(self._kwds.items())
if val is not Undefined
)
args = "\n" + ",\n".join(args)
return "{0}({{{1}\n}})".format(
self.__class__.__name__, args.replace("\n", "\n ")
)
else:
return "{}({!r})".format(self.__class__.__name__, self._args[0])
def __eq__(self, other):
return (
type(self) is type(other)
and self._args == other._args
and self._kwds == other._kwds
)
def to_dict(self, validate=True, ignore=None, context=None):
"""Return a dictionary representation of the object
Parameters
----------
validate : boolean or string
If True (default), then validate the output dictionary
against the schema. If "deep" then recursively validate
all objects in the spec. This takes much more time, but
it results in friendlier tracebacks for large objects.
ignore : list
            A list of keys to ignore. This will *not* be passed to child to_dict
function calls.
context : dict (optional)
A context dictionary that will be passed to all child to_dict
function calls
Returns
-------
dct : dictionary
The dictionary representation of this object
Raises
------
jsonschema.ValidationError :
if validate=True and the dict does not conform to the schema
"""
if context is None:
context = {}
if ignore is None:
ignore = []
sub_validate = "deep" if validate == "deep" else False
if self._args and not self._kwds:
result = _todict(self._args[0], validate=sub_validate, context=context)
elif not self._args:
result = _todict(
{k: v for k, v in self._kwds.items() if k not in ignore},
validate=sub_validate,
context=context,
)
else:
raise ValueError(
"{} instance has both a value and properties : "
"cannot serialize to dict".format(self.__class__)
)
if validate:
try:
self.validate(result)
except jsonschema.ValidationError as err:
raise SchemaValidationError(self, err)
return result
def to_json(
self, validate=True, ignore=[], context={}, indent=2, sort_keys=True, **kwargs
):
"""Emit the JSON representation for this object as a string.
Parameters
----------
validate : boolean or string
If True (default), then validate the output dictionary
against the schema. If "deep" then recursively validate
all objects in the spec. This takes much more time, but
it results in friendlier tracebacks for large objects.
ignore : list
            A list of keys to ignore. This will *not* be passed to child to_dict
function calls.
context : dict (optional)
A context dictionary that will be passed to all child to_dict
function calls
indent : integer, default 2
the number of spaces of indentation to use
sort_keys : boolean, default True
if True, sort keys in the output
**kwargs
Additional keyword arguments are passed to ``json.dumps()``
Returns
-------
spec : string
The JSON specification of the chart object.
"""
dct = self.to_dict(validate=validate, ignore=ignore, context=context)
return json.dumps(dct, indent=indent, sort_keys=sort_keys, **kwargs)
@classmethod
def _default_wrapper_classes(cls):
"""Return the set of classes used within cls.from_dict()"""
return _subclasses(SchemaBase)
@classmethod
def from_dict(cls, dct, validate=True, _wrapper_classes=None):
"""Construct class from a dictionary representation
Parameters
----------
dct : dictionary
The dict from which to construct the class
validate : boolean
If True (default), then validate the input against the schema.
_wrapper_classes : list (optional)
The set of SchemaBase classes to use when constructing wrappers
of the dict inputs. If not specified, the result of
cls._default_wrapper_classes will be used.
Returns
-------
obj : Schema object
The wrapped schema
Raises
------
jsonschema.ValidationError :
if validate=True and dct does not conform to the schema
"""
if validate:
cls.validate(dct)
if _wrapper_classes is None:
_wrapper_classes = cls._default_wrapper_classes()
converter = _FromDict(_wrapper_classes)
return converter.from_dict(dct, cls)
@classmethod
def from_json(cls, json_string, validate=True, **kwargs):
"""Instantiate the object from a valid JSON string
Parameters
----------
json_string : string
The string containing a valid JSON chart specification.
validate : boolean
If True (default), then validate the input against the schema.
**kwargs :
Additional keyword arguments are passed to json.loads
Returns
-------
chart : Chart object
The altair Chart object built from the specification.
"""
dct = json.loads(json_string, **kwargs)
return cls.from_dict(dct, validate=validate)
@classmethod
def validate(cls, instance, schema=None):
"""
Validate the instance against the class schema in the context of the
rootschema.
"""
if schema is None:
schema = cls._schema
resolver = jsonschema.RefResolver.from_schema(cls._rootschema or cls._schema)
return jsonschema.validate(instance, schema, resolver=resolver)
@classmethod
def resolve_references(cls, schema=None):
"""Resolve references in the context of this object's schema or root schema."""
return _resolve_references(
schema=(schema or cls._schema),
root=(cls._rootschema or cls._schema or schema),
)
@classmethod
def validate_property(cls, name, value, schema=None):
"""
Validate a property against property schema in the context of the
rootschema
"""
value = _todict(value, validate=False, context={})
props = cls.resolve_references(schema or cls._schema).get("properties", {})
resolver = jsonschema.RefResolver.from_schema(cls._rootschema or cls._schema)
return jsonschema.validate(value, props.get(name, {}), resolver=resolver)
def __dir__(self):
return list(self._kwds.keys())
class _FromDict(object):
"""Class used to construct SchemaBase class hierarchies from a dict
The primary purpose of using this class is to be able to build a hash table
that maps schemas to their wrapper classes. The candidate classes are
specified in the ``class_list`` argument to the constructor.
"""
_hash_exclude_keys = ("definitions", "title", "description", "$schema", "id")
def __init__(self, class_list):
# Create a mapping of a schema hash to a list of matching classes
# This lets us quickly determine the correct class to construct
self.class_dict = collections.defaultdict(list)
for cls in class_list:
if cls._schema is not None:
self.class_dict[self.hash_schema(cls._schema)].append(cls)
@classmethod
def hash_schema(cls, schema, use_json=True):
"""
Compute a python hash for a nested dictionary which
properly handles dicts, lists, sets, and tuples.
At the top level, the function excludes from the hashed schema all keys
listed in `exclude_keys`.
This implements two methods: one based on conversion to JSON, and one based
on recursive conversions of unhashable to hashable types; the former seems
to be slightly faster in several benchmarks.
"""
if cls._hash_exclude_keys and isinstance(schema, dict):
schema = {
key: val
for key, val in schema.items()
if key not in cls._hash_exclude_keys
}
if use_json:
s = json.dumps(schema, sort_keys=True)
return hash(s)
else:
def _freeze(val):
if isinstance(val, dict):
return frozenset((k, _freeze(v)) for k, v in val.items())
elif isinstance(val, set):
return frozenset(map(_freeze, val))
elif isinstance(val, list) or isinstance(val, tuple):
return tuple(map(_freeze, val))
else:
return val
return hash(_freeze(schema))
def from_dict(self, dct, cls=None, schema=None, rootschema=None):
"""Construct an object from a dict representation"""
if (schema is None) == (cls is None):
raise ValueError("Must provide either cls or schema, but not both.")
if schema is None:
schema = schema or cls._schema
rootschema = rootschema or cls._rootschema
rootschema = rootschema or schema
def _passthrough(*args, **kwds):
return args[0] if args else kwds
if isinstance(dct, SchemaBase):
return dct
if cls is None:
# If there are multiple matches, we use the first one in the dict.
# Our class dict is constructed breadth-first from top to bottom,
# so the first class that matches is the most general match.
matches = self.class_dict[self.hash_schema(schema)]
cls = matches[0] if matches else _passthrough
schema = _resolve_references(schema, rootschema)
if "anyOf" in schema or "oneOf" in schema:
schemas = schema.get("anyOf", []) + schema.get("oneOf", [])
for possible_schema in schemas:
resolver = jsonschema.RefResolver.from_schema(rootschema)
try:
jsonschema.validate(dct, possible_schema, resolver=resolver)
except jsonschema.ValidationError:
continue
else:
return self.from_dict(
dct,
schema=possible_schema,
rootschema=rootschema,
)
if isinstance(dct, dict):
# TODO: handle schemas for additionalProperties/patternProperties
props = schema.get("properties", {})
kwds = {}
for key, val in dct.items():
if key in props:
val = self.from_dict(val, schema=props[key], rootschema=rootschema)
kwds[key] = val
return cls(**kwds)
elif isinstance(dct, list):
item_schema = schema.get("items", {})
dct = [
self.from_dict(val, schema=item_schema, rootschema=rootschema)
for val in dct
]
return cls(dct)
else:
return cls(dct)
|
the-stack_0_1310 | #!/usr/bin/python
# -_- encoding: utf8 -_-
import sys
import time
sys.path.append('./t')
from http_utils import *
VERBOSE = False
BASE_URL = 'http://0.0.0.0:8081'
# =============
#
print('[+] Test status codes')
http_codes = [
200,
201,
202,
# NO-Content !!!
# 204,
206,
# Moved !!! [[
# 300,
# 301,
# 302,
# ]]
# See others !!! [[
# 303,
# ]]
# Not modified [[
# 304,
# ]]
# Temorary redirected [[
# 307,
# ]]
400,
401,
403,
404,
405,
408,
409,
411,
412,
413,
414,
415,
416,
421,
500,
501,
502,
503,
504,
507
]
def do_post(url, code, headers):
return post_2(url, {'params':[1, 2]}, headers)
def do_get(url, code, headers):
    # Python's urllib2 does not support these codes! [[
if code > 200:
return (True, [])
# ]]
return get_2(url, [], headers)
methods = [
[do_post, 'POST'],
[do_get, 'GET']
]
prev_result = None
for method in methods:
for code in http_codes:
curl = BASE_URL + '/lua?status_code=' + str(code)
(rcode, result) = method[0](curl, code, {'X-From': 'eval_basic'})
# Python does not work if server returns some codes!
if rcode == True:
continue;
assert(code == rcode)
print('[+] OK')
|
the-stack_0_1311 | import nltk
from nltk.model import build_vocabulary, count_ngrams, LaplaceNgramModel, LidstoneNgramModel
'''
lincoln_address_file = open('files/FirstInauguralAddress.txt')
raw_lincoln_address = lincoln_address_file.read().lower()
# lb_train_1 = raw_lincoln_address.lower().split()
lb_train_1_sents = nltk.sent_tokenize(raw_lincoln_address, language="english")
lb_train_1_words = nltk.word_tokenize(raw_lincoln_address, language='english')
'''
'''
gettysburg_address_file = open('files/Gettysburg.txt')
raw_gettysburg_address = gettysburg_address_file.read().lower()
# lb_train_2 = raw_gettysburg_address.lower().split()
lb_train_2_sents = nltk.sent_tokenize(raw_gettysburg_address, language="english")
lb_train_2_words = nltk.word_tokenize(raw_gettysburg_address, language='english')
'''
lb_train_file = open('files/LB-Train.txt')
raw_lb_train_file = lb_train_file.read().lower()
lb_train_words = nltk.word_tokenize(raw_lb_train_file, language='english')
lb_vocab = build_vocabulary(2, lb_train_words)
# lb_vocab = build_vocabulary(1, lb_train_1_words, lb_train_2_words)
# print(lb_vocab)
lb_train = []
lb_train.append(lb_train_words)
'''
lb_train.append(lb_train_1_words)
lb_train.append(lb_train_2_words)
'''
# print(lb_train)
lb_bigram_counts = count_ngrams(2, lb_vocab, lb_train)
# print(lb_bigram_counts.ngrams[2])
# print(sorted(lb_bigram_counts.ngrams[2].conditions()))
lb = LidstoneNgramModel(0.2, lb_bigram_counts)
# print("lincoln score ", lb.score("never", ["had"]))
lincoln_address_file_2 = open('files/SecondInauguralAddress.txt')
lb_test = lincoln_address_file_2.read().lower()
lb_test_words = nltk.word_tokenize(lb_test)
print("Perplexity of LB on LB-Test = ", lb.perplexity(lb_test_words))
'''
for ngram in lb_bigram_counts.to_ngrams(lb_test_words):
print(ngram)
'''
'''
nelson_address_file = open('files/IamPreparedToDie.txt')
raw_nelson_address = nelson_address_file.read().lower()
# mb_train_1 = raw_nelson_address.lower().split()
mb_train_1_sents = nltk.sent_tokenize(raw_nelson_address, language="english")
mb_train_1_words = nltk.word_tokenize(raw_nelson_address, language="english")
freedom_award_file = open('files/InternationalFreedomAward.txt')
raw_freedom_award = freedom_award_file.read().lower()
# mb_train_2 = raw_freedom_award.lower().split()
mb_train_2_sents = nltk.sent_tokenize(raw_freedom_award, language='english')
mb_train_2_words = nltk.word_tokenize(raw_freedom_award, language='english')
'''
mb_train_file = open('files/MB-Train.txt')
raw_mb_train_file = mb_train_file.read().lower()
mb_train_words = nltk.word_tokenize(raw_mb_train_file, language='english')
mb_vocab = build_vocabulary(2, mb_train_words)
# mb_vocab = build_vocabulary(1, mb_train_1_words, mb_train_2_words)
mb_train = []
mb_train.append(mb_train_words)
'''
mb_train.append(lb_train_1_words)
mb_train.append(lb_train_2_words)
'''
mb_bigram_counts = count_ngrams(2, mb_vocab, mb_train)
mb = LidstoneNgramModel(0.2, mb_bigram_counts)
# print("mandela score ", mb.score("the", ["and"]))
nelson_address_file_2 = open('files/AfricanNationalCongress.txt')
mb_test = nelson_address_file_2.read()
mb_test_words = nltk.word_tokenize(mb_test)
print("Perplexity of MB on MB-Test = ", mb.perplexity(mb_test_words))
# print("Perplexity of MB on LB-Test = ", mb.perplexity(lb_test_words))
# print("Perplexity of LB on MB-Test = ", lb.perplexity(mb_test_words))
print("Perplexity of LB on LB-Train = ", lb.perplexity(lb_train_words))
print("Perplexity of MB on MB-Train = ", mb.perplexity(mb_train_words))
print("Perplexity of MB on LB-Train = ", mb.perplexity(lb_train_words))
print("Perplexity of LB on MB-Train = ", lb.perplexity(mb_train_words))
|
the-stack_0_1313 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
autodoc_mock_imports = ['numpy', 'tifffile']
# -- Project information -----------------------------------------------------
project = 'pyCUDAdecon'
copyright = '2019, Talley Lambert'
author = 'Talley Lambert'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon'
]
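# sphinx.ext.napoleon lets autodoc parse Google- and NumPy-style docstrings in
# addition to plain reStructuredText field lists.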
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyCUDAdecondoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyCUDAdecon.tex', 'pyCUDAdecon Documentation',
'Talley Lambert', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycudadecon', 'pyCUDAdecon Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyCUDAdecon', 'pyCUDAdecon Documentation',
author, 'pyCUDAdecon', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
the-stack_0_1314 | # Copyright The IETF Trust 2013-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import io
import os
import debug # pyflakes:ignore
from pyquery import PyQuery
from io import StringIO
from textwrap import wrap
from django.conf import settings
from django.urls import reverse as urlreverse
from ietf.doc.factories import DocumentFactory, IndividualRfcFactory, WgRfcFactory
from ietf.doc.models import ( Document, DocAlias, State, DocEvent,
BallotPositionDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent )
from ietf.doc.utils import create_ballot_if_not_open
from ietf.doc.views_status_change import default_approval_text
from ietf.group.models import Person
from ietf.iesg.models import TelechatDate
from ietf.utils.test_utils import TestCase
from ietf.utils.mail import outbox, empty_outbox, get_payload_text
from ietf.utils.test_utils import login_testing_unauthorized
class StatusChangeTests(TestCase):
def test_start_review(self):
url = urlreverse('ietf.doc.views_status_change.start_rfc_status_change')
login_testing_unauthorized(self, "secretary", url)
# normal get should succeed and get a reasonable form
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form select[name=create_in_state]')),1)
ad_strpk = str(Person.objects.get(name='Areað Irector').pk)
state_strpk = str(State.objects.get(slug='adrev',type__slug='statchg').pk)
# faulty posts
## Must set a responsible AD
r = self.client.post(url,dict(document_name="bogus",title="Bogus Title",ad="",create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
## Must set a name
r = self.client.post(url,dict(document_name="",title="Bogus Title",ad=ad_strpk,create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
## Must not choose a document name that already exists
r = self.client.post(url,dict(document_name="imaginary-mid-review",title="Bogus Title",ad=ad_strpk,create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
## Must set a title
r = self.client.post(url,dict(document_name="bogus",title="",ad=ad_strpk,create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
# successful status change start
r = self.client.post(url,dict(document_name="imaginary-new",title="A new imaginary status change",ad=ad_strpk,
create_in_state=state_strpk,notify='[email protected]',new_relation_row_blah="rfc9999",
statchg_relation_row_blah="tois"))
self.assertEqual(r.status_code, 302)
status_change = Document.objects.get(name='status-change-imaginary-new')
self.assertEqual(status_change.get_state('statchg').slug,'adrev')
self.assertEqual(status_change.rev,'00')
self.assertEqual(status_change.ad.name,'Areað Irector')
self.assertEqual(status_change.notify,'[email protected]')
self.assertTrue(status_change.relateddocument_set.filter(relationship__slug='tois',target__docs__name='draft-ietf-random-thing'))
def test_change_state(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form select[name=new_state]')),1)
# faulty post
r = self.client.post(url,dict(new_state=""))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
# successful change to AD Review
adrev_pk = str(State.objects.get(slug='adrev',type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=adrev_pk,comment='RDNK84ZD'))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state('statchg').slug,'adrev')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('RDNK84ZD'))
self.assertFalse(doc.active_ballot())
# successful change to Last Call Requested
messages_before = len(outbox)
doc.ad = Person.objects.get(user__username='ad')
doc.save_with_history([DocEvent.objects.create(doc=doc, rev=doc.rev, type="changed_document", by=Person.objects.get(user__username="secretary"), desc="Test")])
lc_req_pk = str(State.objects.get(slug='lc-req',type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=lc_req_pk))
self.assertEqual(r.status_code, 200)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state('statchg').slug,'lc-req')
self.assertEqual(len(outbox), messages_before + 1)
self.assertTrue('Last Call:' in outbox[-1]['Subject'])
# successful change to IESG Evaluation
iesgeval_pk = str(State.objects.get(slug='iesgeval',type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=iesgeval_pk,comment='TGmZtEjt'))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state('statchg').slug,'iesgeval')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('TGmZtEjt'))
self.assertTrue(doc.active_ballot())
self.assertEqual(doc.latest_event(BallotPositionDocEvent, type="changed_ballot_position").pos_id,'yes')
def test_edit_notices(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_doc.edit_notify;status-change',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form input[name=notify]')),1)
self.assertEqual(doc.notify,q('form input[name=notify]')[0].value)
# change notice list
newlist = '"Foo Bar" <[email protected]>'
r = self.client.post(url,dict(notify=newlist,save_addresses="1"))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.notify,newlist)
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Notification list changed'))
# Some additional setup so there's something to put in a generated notify list
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
# Ask the form to regenerate the list
r = self.client.post(url,dict(regenerate_addresses="1"))
self.assertEqual(r.status_code,200)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
# Regenerate does not save!
self.assertEqual(doc.notify,newlist)
q = PyQuery(r.content)
formlist = q('form input[name=notify]')[0].value
self.assertEqual(None,formlist)
def test_edit_title(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.edit_title',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('input[name=title]')),1)
# change title
r = self.client.post(url,dict(title='New title'))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.title,'New title')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Title changed'))
def test_edit_ad(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.edit_ad',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('select[name=ad]')),1)
# change ads
ad2 = Person.objects.get(name='Ad No2')
r = self.client.post(url,dict(ad=str(ad2.pk)))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.ad,ad2)
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Shepherding AD changed'))
def test_edit_telechat_date(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_doc.telechat_date;status-change',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('select[name=telechat_date]')),1)
# set a date
self.assertFalse(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat"))
telechat_date = TelechatDate.objects.active().order_by('date')[0].date
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat").telechat_date,telechat_date)
# move it forward a telechat (this should NOT set the returning item bit)
telechat_date = TelechatDate.objects.active().order_by('date')[1].date
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertFalse(doc.returning_item())
# set the returning item bit without changing the date
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat(),returning_item="on"))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertTrue(doc.returning_item())
# clear the returning item bit
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertFalse(doc.returning_item())
# Take the doc back off any telechat
r = self.client.post(url,dict(telechat_date=""))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat").telechat_date,None)
def test_edit_lc(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.last_call',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
doc.ad = Person.objects.get(name='Ad No2')
doc.save_with_history([DocEvent.objects.create(doc=doc, rev=doc.rev, type="changed_document", by=Person.objects.get(user__username="secretary"), desc="Test")])
# get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form.edit-last-call-text')),1)
self.assertContains(r, 'RFC9999 from Proposed Standard to Internet Standard')
self.assertContains(r, 'RFC9998 from Informational to Historic')
# save
r = self.client.post(url,dict(last_call_text="Bogus last call text",save_last_call_text="1"))
self.assertEqual(r.status_code, 200)
last_call_event = doc.latest_event(WriteupDocEvent, type="changed_last_call_text")
self.assertEqual(last_call_event.text,"Bogus last call text")
# reset
r = self.client.post(url,dict(regenerate_last_call_text="1"))
self.assertEqual(r.status_code,200)
self.assertContains(r, 'RFC9999 from Proposed Standard to Internet Standard')
self.assertContains(r, 'RFC9998 from Informational to Historic')
# request last call
messages_before = len(outbox)
r = self.client.post(url,dict(last_call_text='stuff',send_last_call_request='Save+and+Request+Last+Call'))
self.assertEqual(r.status_code,200)
self.assertContains(r, 'Last call requested')
self.assertEqual(len(outbox), messages_before + 1)
self.assertTrue('Last Call:' in outbox[-1]['Subject'])
self.assertTrue('Last Call Request has been submitted' in ''.join(wrap(outbox[-1].as_string(), width=2**16)))
def test_approve(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.approve',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
create_ballot_if_not_open(None, doc, Person.objects.get(user__username="secretary"), "statchg")
doc.set_state(State.objects.get(slug='appr-pend',type='statchg'))
# get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('[type=submit]:contains("Send announcement")')), 1)
# There should be two messages to edit
self.assertEqual(q('input#id_form-TOTAL_FORMS').val(),'2')
self.assertContains(r, '(rfc9999) to Internet Standard')
self.assertContains(r, '(rfc9998) to Historic')
# submit
messages_before = len(outbox)
msg0=default_approval_text(doc,doc.relateddocument_set.all()[0])
msg1=default_approval_text(doc,doc.relateddocument_set.all()[1])
r = self.client.post(url,{'form-0-announcement_text':msg0,'form-1-announcement_text':msg1,'form-TOTAL_FORMS':'2','form-INITIAL_FORMS':'2','form-MAX_NUM_FORMS':''})
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state_slug(),'appr-sent')
self.assertFalse(doc.ballot_open("statchg"))
self.assertEqual(len(outbox), messages_before + 2)
self.assertTrue('Action:' in outbox[-1]['Subject'])
self.assertTrue('ietf-announce' in outbox[-1]['To'])
self.assertTrue('rfc-editor' in outbox[-1]['Cc'])
self.assertTrue('(rfc9998) to Historic' in ''.join(wrap(outbox[-1].as_string()+outbox[-2].as_string(), 2**16)))
self.assertTrue('(rfc9999) to Internet Standard' in ''.join(wrap(outbox[-1].as_string()+outbox[-2].as_string(),2**16)))
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('The following approval message was sent'))
def approval_pend_notice_test_helper(self, role):
"""Test notification email when review state changed to the appr-pend state"""
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))
# Add some status change related documents
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
# And a non-status change related document
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc14'),relationship_id='updates')
login_testing_unauthorized(self, role, url)
empty_outbox()
# Issue the request
appr_pend_pk = str(State.objects.get(used=True,
slug='appr-pend',
type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=appr_pend_pk,comment='some comment or other'))
# Check the results
self.assertEqual(r.status_code, 302)
if role == 'ad':
self.assertEqual(len(outbox), 1)
notification = outbox[0]
self.assertIn(doc.title, notification['Subject'])
self.assertIn('[email protected]', notification['To'])
self.assertTrue(notification['Subject'].startswith('Approved:'))
notification_text = get_payload_text(notification)
self.assertIn('The AD has approved changing the status', notification_text)
self.assertIn(DocAlias.objects.get(name='rfc9999').document.canonical_name(), notification_text)
self.assertIn(DocAlias.objects.get(name='rfc9998').document.canonical_name(), notification_text)
self.assertNotIn(DocAlias.objects.get(name='rfc14').document.canonical_name(), notification_text)
self.assertNotIn('No value found for', notification_text) # make sure all interpolation values were set
else:
self.assertEqual(len(outbox), 0)
def test_approval_pend_notice_ad(self):
"""Test that an approval notice is sent to secretariat when AD approves status change"""
self.approval_pend_notice_test_helper('ad')
def test_no_approval_pend_notice_secr(self):
"""Test that no approval notice is sent when secretariat approves status change"""
self.approval_pend_notice_test_helper('secretariat')
def test_edit_relations(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.edit_relations',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
# get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#content [type=submit]:contains("Save")')),1)
# There should be three rows on the form
self.assertEqual(len(q('#content .input-group')),3)
# Try to add a relation to an RFC that doesn't exist
r = self.client.post(url,dict(new_relation_row_blah="rfc9997",
statchg_relation_row_blah="tois"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form ul.errorlist')) > 0)
# Try to add a relation leaving the relation type blank
r = self.client.post(url,dict(new_relation_row_blah="rfc9999",
statchg_relation_row_blah=""))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form ul.errorlist')) > 0)
# Try to add a relation with an unknown relationship type
r = self.client.post(url,dict(new_relation_row_blah="rfc9999",
statchg_relation_row_blah="badslug"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form ul.errorlist')) > 0)
# Successful change of relations
r = self.client.post(url,dict(new_relation_row_blah="rfc9999",
statchg_relation_row_blah="toexp",
new_relation_row_foo="rfc9998",
statchg_relation_row_foo="tobcp",
new_relation_row_nob="rfc14",
statchg_relation_row_nob="tohist"))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.relateddocument_set.count(),3)
def verify_relations(doc,target_name,status):
target_doc=doc.relateddocument_set.filter(target__name=target_name)
self.assertTrue(target_doc)
self.assertEqual(target_doc.count(),1)
self.assertEqual(target_doc[0].relationship.slug,status)
verify_relations(doc,'rfc9999','toexp' )
verify_relations(doc,'rfc9998','tobcp' )
verify_relations(doc,'rfc14' ,'tohist')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Affected RFC list changed.'))
def setUp(self):
super().setUp()
IndividualRfcFactory(alias2__name='rfc14',name='draft-was-never-issued',std_level_id='unkn')
WgRfcFactory(alias2__name='rfc9999',name='draft-ietf-random-thing',std_level_id='ps')
WgRfcFactory(alias2__name='rfc9998',name='draft-ietf-random-other-thing',std_level_id='inf')
DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='[email protected]')
class StatusChangeSubmitTests(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['STATUS_CHANGE_PATH']
def test_initial_submission(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.submit',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code,200)
q = PyQuery(r.content)
self.assertTrue(q('textarea')[0].text.strip().startswith("Provide a description"))
# Faulty posts using textbox
# Right now, nothing to test - we let people put whatever the web browser will let them put into that textbox
# sane post using textbox
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
self.assertEqual(doc.rev,'00')
self.assertFalse(os.path.exists(path))
r = self.client.post(url,dict(content="Some initial review text\n",submit_response="1"))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.rev,'00')
with io.open(path) as f:
self.assertEqual(f.read(),"Some initial review text\n")
self.assertTrue( "mid-review-00" in doc.latest_event(NewRevisionDocEvent).desc)
def test_subsequent_submission(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.submit',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# A little additional setup
# doc.rev is u'00' per the test setup - double-checking that here - if it fails, the breakage is in setUp
self.assertEqual(doc.rev,'00')
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
with io.open(path,'w') as f:
f.write('This is the old proposal.')
# Put the old proposal into IESG review (exercises ballot tab when looking at an older revision below)
state_change_url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))
iesgeval_pk = str(State.objects.get(slug='iesgeval',type__slug='statchg').pk)
r = self.client.post(state_change_url,dict(new_state=iesgeval_pk))
self.assertEqual(r.status_code, 302)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code,200)
q = PyQuery(r.content)
self.assertTrue(q('textarea')[0].text.strip().startswith("This is the old proposal."))
# faulty posts trying to use file upload
# Copied from wgtracker tests - is this really testing the server code, or is it testing
# how client.post populates Content-Type?
test_file = StringIO("\x10\x11\x12") # post binary file
test_file.name = "unnamed"
r = self.client.post(url, dict(txt=test_file,submit_response="1"))
self.assertEqual(r.status_code, 200)
self.assertContains(r, "does not appear to be a text file")
# sane post uploading a file
test_file = StringIO("This is a new proposal.")
test_file.name = "unnamed"
r = self.client.post(url,dict(txt=test_file,submit_response="1"))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.rev,'01')
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
with io.open(path) as f:
self.assertEqual(f.read(),"This is a new proposal.")
self.assertTrue( "mid-review-01" in doc.latest_event(NewRevisionDocEvent).desc)
# verify reset text button works
r = self.client.post(url,dict(reset_text="1"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(q('textarea')[0].text.strip().startswith("Provide a description"))
# make sure we can see the old revision
url = urlreverse('ietf.doc.views_doc.document_main',kwargs=dict(name=doc.name,rev='00'))
r = self.client.get(url)
self.assertEqual(r.status_code,200)
self.assertContains(r, "This is the old proposal.")
def setUp(self):
super().setUp()
DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='[email protected]') |
the-stack_0_1317 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Author : Viacheslav Zamaraev
# email : [email protected]
# Script Name : 02_csv2xlsx.py
# Created : 25th September 2019
# Last Modified : 25th September 2019
# Version : 1.0
# PIP : pip install pandas openpyxl
# RESULT : Excel File
# Modifications : 1.1 -
# : 1.2 -
#
# Description : This script will conver csv file to Excel file
import os.path
from datetime import datetime
from sys import platform as _platform
import os.path
try:
import pandas as pd
except ImportError:
print("we need pandas. try: pip install pandas")
raise
#some global configurations
import cfg
def get_output_directory():
dir_out = str(os.getcwd())
# Linux platform
if _platform == "linux" or _platform == "linux2" or _platform == "darwin":
dir_out = cfg.folder_out_linux
if os.path.exists(dir_out) and os.path.isdir(dir_out):
print('Using Output directory: ' + dir_out)
return dir_out
if _platform == "win32" or _platform == "win64": # Windows or Windows 64-bit
dir_out = cfg.folder_out_win
if os.path.exists(dir_out) and os.path.isdir(dir_out):
print('Using Output directory: ' + dir_out)
return dir_out
else:
dir_out = str(os.getcwd())
print('Output directories from config wrong: ' + cfg.folder_out_win + ' or ' + cfg.folder_out_linux + ' Using current directory: ' + dir_out)
print('Using Output directory: ' + dir_out)
return dir_out
def csv2xls(filename=''):
if (os.path.exists(filename) and os.path.isfile(filename)):
file_excel = filename.split('.')[0] + '.xlsx'
df_new = pd.read_csv(filename, sep=cfg.csv_delimiter)
writer = pd.ExcelWriter(file_excel)
df_new.to_excel(writer, index=False)
writer.save()
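# Newer pandas releases drop ExcelWriter.save() in favour of close()/context managers;
# a roughly equivalent form of the two lines above would be:
# with pd.ExcelWriter(file_excel) as writer:
#     df_new.to_excel(writer, index=False)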
else:
print('ERROR! can\'t read a file OR file does not exist. File: ' + filename)
# ---------------- do main --------------------------------
def main():
time1 = datetime.now()
print('Starting at :' + str(time1))
file_csv = str(os.path.join(get_output_directory(), cfg.file_csv))
csv2xls(file_csv)
time2 = datetime.now()
print('Finishing at :' + str(time2))
print('Total time : ' + str(time2 - time1))
print('DONE !!!!')
if __name__ == '__main__':
main() |
the-stack_0_1318 | # %% [markdown]
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import Dense, GRU, Embedding
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
import re
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import models
from tensorflow.keras.datasets import imdb
(X_train, y_train), (X_test, y_test) = imdb.load_data()
print(X_train[0])
print(y_train[0])
# %% [markdown]
word_to_index = imdb.get_word_index()
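# The keras imdb dataset reserves indices 0-2 for the padding, sequence-start and
# unknown tokens, so raw get_word_index() values are shifted by 3 when mapping
# indices back to words (and vice versa below).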
index_to_word = {}
for key, value in word_to_index.items():
index_to_word[value + 3] = key
# %%
vocab_size = 10000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocab_size)
max_len = 500
X_train = pad_sequences(sequences=X_train, maxlen=max_len)
X_test = pad_sequences(sequences=X_test, maxlen=max_len)
# %% [markdown]
model = Sequential()
model.add(Embedding(input_dim=vocab_size, output_dim=100))
model.add(GRU(units=128))
model.add(Dense(units=1, activation="sigmoid"))
es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=4)
mc = ModelCheckpoint(
"GRU_model.h5", monitor="val_acc", mode="max", verbose=1, save_best_only=True
)
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["acc"])
history = model.fit(
X_train, y_train, epochs=15, callbacks=[es, mc], batch_size=60, validation_split=0.2
)
# %%
loaded_model = load_model("GRU_model.h5")
print("\n 테스트 정확도: %.4f" % (loaded_model.evaluate(X_test, y_test)[1]))
# %%
def sentiment_predict(new_sentence):
# Strip everything except letters, digits and spaces, and lowercase the text
new_sentence = re.sub("[^0-9a-zA-Z ]", "", new_sentence).lower()
# Integer-encode the words
encoded = []
for word in new_sentence.split():
# Keep only indices that fit the model's vocabulary (vocab_size) after the +3 offset.
try:
if word_to_index[word] + 3 < vocab_size:
encoded.append(word_to_index[word] + 3)
else:
# Out-of-vocabulary indices are mapped to the <unk> token.
encoded.append(2)
# Words missing from the index are also mapped to the <unk> token.
except KeyError:
encoded.append(2)
pad_new = pad_sequences([encoded], maxlen=max_len) # padding
score = float(loaded_model.predict(pad_new)) # prediction
if score > 0.5:
print("This is a positive review with {:.2f}% probability.".format(score * 100))
else:
print("This is a negative review with {:.2f}% probability.".format((1 - score) * 100))
# %%
a = "This movie was just way too overrated. The fighting was not professional and in slow motion. I was expecting more from a 200 million budget movie. The little sister of T.Challa was just trying too hard to be funny. The story was really dumb as well. Don't watch this movie if you are going because others say its great unless you are a Black Panther fan or Marvels fan."
sentiment_predict(a)
|
the-stack_0_1320 | from pocketsphinx import *
import pyaudio
from time import sleep
import requests
import json
from configobj import ConfigObj
import os.path
import wave
# pyaudio is needed to open the audio stream that feeds the speech decoder
pyAudio = pyaudio.PyAudio()
headers = {'Content-Type':'application/json'}
config = None
commandAudioFile = None
class CommandListener:
def __init__(self):
#config
self.hmm = config['pocketsphinx']['hmm']
self.lm = config['pocketsphinx']['lm']
self.dict = config['pocketsphinx']['dict']
self.log = config['pocketsphinx']['log']
self.bitsize = int(config['audio']['bitsize'])
self.bufferSize = int(config['audio']['buffersize'])
self.channels = int(config['audio']['channels'])
self.sampleRate = int(config['audio']['samplerate'])
#set decoder configuration
self.config = Decoder.default_config()
self.config.set_string('-hmm',self.hmm)
self.config.set_string('-lm',self.lm)
self.config.set_string('-dict',self.dict)
self.config.set_string('-logfn', self.log)
self.config.set_boolean("-allphone_ci", True)
self.decoder = Decoder(self.config)
def fromAudio(self):
config = {
'hmm':self.hmm,
'lm': self.lm,
'dict':self.dict
}
ps = Pocketsphinx(**config)
ps.decode(
audio_file= commandAudioFile,
buffer_size= self.bufferSize,
no_search= False,
full_utt= False,
)
return ps.hypothesis()
def listen(self):
# get stream from pyAudio
# open stream
self.stream = pyAudio.open(format=pyaudio.paInt16, channels=self.channels, rate=self.sampleRate, input=True, frames_per_buffer=self.bitsize)
utterance = False
#start utterance
self.decoder.start_utt()
print("Listening...")
# now we are starting to listen
while True:
#check if an external command is used by the user
if os.path.isfile(commandAudioFile):
#stop the utterance
self.decoder.end_utt()
print("external command detected - Processing...")
#get the command from the audio file
commandFromAudio = self.fromAudio()
#check if a command is detected
if not commandFromAudio:
commandFromAudio = ""
print("No command found in file!")
#audio file not needed anymore
os.remove(commandAudioFile)
print("external command processed - Deleting...")
self.stream.stop_stream()
self.stream.close()
#return the command from the audio
return commandFromAudio
try:
soundBite = self.stream.read(self.bitsize)
except Exception:
soundBite = None
if soundBite:
self.decoder.process_raw(soundBite, False, False)
inSpeech = self.decoder.get_in_speech()
if inSpeech and not utterance:
utterance = True
if utterance:
#end utterance
self.decoder.end_utt()
utterance = False
#get hypothesis of from the decoder
hypothesis = self.decoder.hyp()
if hypothesis is not None:
bestGuess = hypothesis.hypstr
#check for empty command
if not bestGuess.strip():
#restart utterance
sleep(0.5)
self.decoder.start_utt()
else:
#stop the stream
self.stream.stop_stream()
self.stream.close()
#return the bestGuess of the decoder
return bestGuess
if __name__ == "__main__":
config = ConfigObj('pihome.conf')
#get path to audio file
commandAudioFile = config['audio']['audiofile']
#get backend url
backendUrl = config['backend']['url']
#Listener for the commands the user is speaking out
listener = CommandListener()
while True:
#listen for the next command of the user
command = listener.listen()
print("command:" + command)
#let the backend know what the user said
try:
res = requests.post(backendUrl, data=json.dumps({'command':command}), headers=headers);
except Exception as ex:
print(ex)
pass
|
the-stack_0_1321 | """Reader is a module to read the url list and return shards"""
import pandas as pd
import math
import fsspec
class Reader:
"""
The reader class reads a url list and returns shards
It provides an iter method
It provides attributes:
- column_list: the list of columns to read
- input_format: the format of the input file
- url_col: the column name of the url
- caption_col: the column name of the caption
- save_additional_columns: the list of additional columns to save
- number_sample_per_shard: the number of samples per shard
- start_shard_id: the id of the first shard
"""
def __init__(
self,
url_list,
input_format,
url_col,
caption_col,
save_additional_columns,
number_sample_per_shard,
start_shard_id,
tmp_path,
) -> None:
self.input_format = input_format
self.url_col = url_col
self.caption_col = caption_col
self.save_additional_columns = save_additional_columns
self.number_sample_per_shard = number_sample_per_shard
self.start_shard_id = start_shard_id
fs, url_path = fsspec.core.url_to_fs(url_list)
self.fs = fs
self.tmp_path = tmp_path
if fs.isdir(url_path):
self.input_files = sorted(fs.glob(url_path + "/*." + input_format))
else:
self.input_files = [url_path]
if self.input_format == "txt":
self.column_list = ["url"]
elif self.input_format in ["json", "csv", "tsv", "tsv.gz", "parquet"]:
self.column_list = self.save_additional_columns if self.save_additional_columns is not None else []
if self.caption_col is not None:
self.column_list = self.column_list + ["caption", "url"]
else:
self.column_list = self.column_list + ["url"]
def _save_to_arrow(self, input_file):
"""Read the input file and save to arrow files in a temporary directory"""
if self.input_format in ["txt", "json", "csv", "tsv"]:
with self.fs.open(input_file, encoding="utf-8", mode="r") as file:
if self.input_format == "txt":
df = pd.DataFrame([(url.rstrip(),) for url in file.readlines()], columns=self.column_list)
elif self.input_format == "json":
df = pd.read_json(file)
elif self.input_format == "csv":
df = pd.read_csv(file)
elif self.input_format == "tsv":
df = pd.read_table(file)
elif self.input_format in ["tsv", "tsv.gz", "parquet"]:
with self.fs.open(input_file, mode="rb") as file:
if self.input_format == "tsv.gz":
df = pd.read_table(file, compression="gzip")
elif self.input_format == "parquet":
columns_to_read = [self.url_col]
if self.caption_col is not None:
columns_to_read += [self.caption_col]
if self.save_additional_columns is not None:
columns_to_read += self.save_additional_columns
df = pd.read_parquet(file, columns=columns_to_read)
else:
assert False, f"Unexpected input format ({self.input_format})."
df = df.rename(columns={self.caption_col: "caption", self.url_col: "url"})
df = df.where(pd.notnull(df), None)
number_samples = len(df)
number_shards = math.ceil(len(df) / self.number_sample_per_shard)
shards = []
for shard_id in range(number_shards):
begin_shard = shard_id * self.number_sample_per_shard
end_shard = min(number_samples, (1 + shard_id) * self.number_sample_per_shard)
df_shard = df[begin_shard:end_shard][self.column_list]
df_shard = df_shard.reset_index(drop=True)
tmp_file = self.tmp_path + f"/{shard_id + self.start_shard_id}.feather"
fs, tmp_path = fsspec.core.url_to_fs(tmp_file)
with fs.open(tmp_path, "wb") as file:
df_shard.to_feather(file)
shards.append((shard_id, tmp_file))
del df
return shards
def __iter__(self):
"""
Iterate over shards, yield shards of size number_sample_per_shard or less for the last one
Each shard is a tuple (shard_id, shard)
shard is a tuple (sample id, sample)
sample is a tuple of the columns
"""
for i, input_file in enumerate(self.input_files):
print(
"Downloading file number " + str(i + 1) + " of " + str(len(self.input_files)) + " called " + input_file
)
shards = self._save_to_arrow(input_file)
num_shard = 0
for num_shard, arrow_file in shards:
yield (
num_shard + self.start_shard_id,
arrow_file,
)
num_shard += 1
self.start_shard_id += num_shard
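# A minimal usage sketch (the path, column names and tmp dir below are placeholders):
#
#   reader = Reader("urls.parquet", "parquet", "url", "caption", None, 10000, 0, "/tmp")
#   for shard_id, feather_file in reader:
#       df = pd.read_feather(feather_file)  # one shard of at most number_sample_per_shard rows
#
# Each yielded tuple pairs the absolute shard id with the temporary .feather file
# holding that shard's url/caption columns.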
|
the-stack_0_1323 | """
Demonstrates the use of the ProgressMeter class.
Author: Tucker Beck
Last Tested: 3/2/2009
Verified with: Python 2.6, Tkinter 8.4
"""
from __future__ import division
from Tkinter import *
from random import randint
from time import sleep
class ProgressMeter( Frame ):
"""
The ProgressMeter is a Frame widget that provides a progress bar and
accompanying information to a user regarding a long, computationally
intensive process. A ProgressMeter can control any generator function
that returns a string message or None after each iteration. Furthermore,
the ProgressMeter can interrupt the process at any time.
"""
def __init__( self, parent, height=30 ):
"""
Initializes this ProgressMeter
Arguments:
parent: The master widget for this ProgressMeter
height: The desired height of the progress bar
"""
self.parent = parent
Frame.__init__( self, parent )
self.columnconfigure( 0, weight=1 ) # Forces the canv object to resize any time this widget is resized
self.rowconfigure( 0, weight=1 )
self.statusMessage = 'Normal'
self.w = 0
self.h = 0
self.canv = Canvas( self, height=height) # This canvas will display the progress bar and accompanying percentage text
self.canv.grid( row=1, column=0, sticky=N+S+E+W )
self.canv.bind( '<Configure>', lambda e:
self.resize( e.width, e.height ) ) # When the canvas is resized the progress bar should be redrawn.
self.killVar = IntVar() # The killBtn can cancel execution
self.killVar.set( 0 )
self.killBtn = Button( self, text='Cancel',
command=lambda: self.killVar.set(1) )
self.killBtn.configure( state=DISABLED )
self.killBtn.grid( row=1, column=1 )
self.targetGen = None # Placeholder for the generator function that will be metered
self.targetArgs = [] # Argument list for the generator function
self.targetKwds = {} # Keyword dictionary for the generator function
self.targetIdx = 0 # Keeps track of which step of the iteration is currently being executed
self.targetLen = 0 # Total number of steps in execution
def resize( self, w, h ):
"""
Handles resize events for the canv widget. Adjusts the height and width
of the canvas for the progress bar calculations.
Arguments:
w: The new width
h: The new height
"""
self.w = w
self.h = h
self.canv.delete( 'frame' )
self.canv.create_rectangle( 1, 1, self.w, self.h, outline='black',
fill='gray75', tag='frame' )
def reset( self ):
"""
Resets the control values for the generator function and also clears the
progress bar
"""
self.canv.delete( 'bar' )
self.canv.delete( 'text' )
self.killBtn.configure( state=DISABLED )
self.targetGen = None
self.targetArgs = []
self.targetKwds = {}
self.killVar.set( 0 )
self.targetIdx = 0
self.targetLen = 0
def clearStatus( self ):
""""
Clears the statusMessage member. Might be used by parent GUI that
reports child status.
"""
self.statusMessage = 'Normal'
def drawBar( self ):
"""
Updates the status bar for the percentage of completion.
"""
pct = self.targetIdx / self.targetLen # The percentage of completion
x0 = 2 # The bar is inset by 2 pixels
x1 = pct * ( self.w - 3 ) + 2
y0 = 2
y1 = self.h
self.canv.delete( 'bar' )
self.canv.create_rectangle( x0, y0, x1, y1, fill='SteelBlue3',
outline='', tag='bar' )
self.canv.delete( 'text' )
pctTxt = '%02.2f%%' % ( pct*100, )
self.canv.create_text( self.w/2, self.h/2, text=pctTxt,
anchor=CENTER, tag='text' )
def startGen( self, targetGen, targetLen, targetArgs=[], targetKwds={} ):
"""
Initializes the target generator function with supplied arguments and
keyword. Requests Tk to call iterGen after all idle events have been
handled.
Arguments:
targetGen: The target generator function
targetLen: The number of iterations in the target generator
targetArgs: The arguments for the generator function
targetKwds: The keyword arguments for the generator function
Note:
Having iterGen called by Tk ensures that redraws and other sorts of
normal Tkinter events can be processed. Results in the status bar
updating real-time with execution while allowing the GUI to function
normally.
"""
self.targetGen = targetGen( *targetArgs, **targetKwds )
self.targetLen = targetLen
self.killBtn.configure( state=NORMAL )
self.after_idle( self.iterGen )
def iterGen( self ):
"""
Iterates through the target generator using delayed self-referencing
function calls to allow GUI updates between iterations
"""
try:
msg = self.targetGen.next() # Execute the next iteration of the generator
except StopIteration:
self.reset() # When the generator is finished, a StopIteration exception is raised. This signals a normal finish in the generator
self.statusMessage = 'Completed'
self.event_generate( '<<Finished>>' ) # A <<Finished>> virtual event signals the GUI that the progress meter is finished
return
self.targetIdx += 1
self.drawBar()
if msg == None:
pass
elif msg.startswith( 'AbortIteration' ): # The target generator can signal that something irrevocable has happened by yielding a value of 'AbortIteration'
self.reset()
self.statusMessage = msg
self.event_generate( '<<Finished>>' )
return
else:
self.statusMessage = msg # If the generator yields a value other than None or 'AbortIteration', this message will be sent out to the controlling gui
self.event_generate( '<<StatusRequest>>' )
if self.killVar.get() == 1: # Occurs if the user clicks the killBtn
self.reset()
self.statusMessage = 'Canceled'
self.event_generate( '<<Finished>>' )
return
self.update_idletasks()
self.after_idle( self.iterGen )
def dummy_gen( alices, bobs ):
"""
A simple, stupid example of a ProgressMeter iterable generator function
"""
for alice in alices:
for bob in bobs:
if bob==alice:
yield 'Match: %s==%s' % ( str(alice), str(bob) )
else:
yield 'No Match: %s!=%s' % ( str(alice), str(bob) )
def main():
root = Tk()
root.title( 'ProgressMeter Demo' )
pgress = ProgressMeter( root ) # Initialize the ProgressMeter with default arguments
pgress.grid( row=1 )
alices = range( 53 )
bobs = [ randint( 0,53 ) for i in range( 53 ) ]
btn = Button( root, text="Go!", command=lambda:
pgress.startGen( dummy_gen, len(alices) * len(bobs), [alices, bobs] ) )# Starts the ProgressMeter going when the button is clicked
btn.grid( row=0 )
statusVar = StringVar( root, 'None' )
status = Label( root, textvariable=statusVar )
status.grid( row=2 ) # This label will be used to display status messages from the ProgressMeter
root.bind( '<<StatusRequest>>', lambda event:
statusVar.set(pgress.statusMessage) )
root.bind( '<<Finished>>', lambda event:
statusVar.set( pgress.statusMessage ) )
root.mainloop()
if __name__=='__main__':
main()
|
the-stack_0_1324 | from bs4 import BeautifulSoup
import requests
def get_articles_from_page_data(page, depth = 1):
base_url = page.url.replace("http://","").replace("www.","")
print(base_url)
soup = BeautifulSoup(page.content(), 'html.parser')
url_strings = [link.get('href') for link in soup.find_all('a') if link.get('href')] # skip anchors without an href
internal_url_strings = [link for link in url_strings if possible_article_link(link, base_url)]
return internal_url_strings
def possible_article_link(url, base_url):
is_part_of_site = base_url in url or './' in url
ends_as_webpage = ".htm" in url
not_an_index_page = "index.html" not in url
return is_part_of_site and ends_as_webpage and not_an_index_page
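# Illustrative call: the page argument is assumed to be a fetched-page wrapper exposing
# .url and .content(), as used in get_articles_from_page_data above.
#
#   article_links = get_articles_from_page_data(page)
#   print(article_links)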
|
the-stack_0_1325 | """
Test for management command generating exchange rates
"""
from unittest.mock import patch
from django.test import TestCase
from django.test.utils import override_settings
from financialaid.constants import get_currency_exchange_rate_api_request_url
from financialaid.management.commands import update_exchange_rates
from financialaid.models import CurrencyExchangeRate
@patch('financialaid.tasks.requests.get')
class GenerateExchangeRatesTest(TestCase):
"""
Tests for generate_exchange_rates management command
"""
@classmethod
def setUpTestData(cls):
cls.command = update_exchange_rates.Command()
def setUp(self):
super(GenerateExchangeRatesTest, self).setUp()
self.data = {
"extraneous information": "blah blah blah",
"rates": {
"CBA": "3.5",
"FED": "1.9",
"RQP": "0.5"
}
}
@override_settings(OPEN_EXCHANGE_RATES_APP_ID='foo_id', OPEN_EXCHANGE_RATES_URL='http://foo.bar.com')
def test_currency_exchange_rate_command(self, mocked_request):
"""
Assert currency exchange rates are created using management command
"""
mocked_request.return_value.json.return_value = self.data
mocked_request.return_value.status_code = 200
assert CurrencyExchangeRate.objects.count() == 0
self.command.handle("generate_exchange_rates")
called_args, _ = mocked_request.call_args
assert called_args[0] == get_currency_exchange_rate_api_request_url()
assert CurrencyExchangeRate.objects.count() == 3
currency_cba = CurrencyExchangeRate.objects.get(currency_code="CBA")
assert currency_cba.exchange_rate == 3.5
currency_fed = CurrencyExchangeRate.objects.get(currency_code="FED")
assert currency_fed.exchange_rate == 1.9
currency_rqp = CurrencyExchangeRate.objects.get(currency_code="RQP")
assert currency_rqp.exchange_rate == 0.5
|
the-stack_0_1327 | import os
from setuptools import setup, find_packages
from relationships import VERSION
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
readme = f.read()
f.close()
setup(
name='django-relationships',
version=".".join(map(str, VERSION)),
description='descriptive relationships between auth.User',
long_description=readme,
author='Charles Leifer',
author_email='[email protected]',
url='http://github.com/coleifer/django-relationships/tree/master',
packages=find_packages(),
package_data={
'relationships': [
'fixtures/*.json',
'templates/*.html',
'templates/*/*.html',
'locale/*/LC_MESSAGES/*',
'relationships_tests/fixtures/*.json',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
test_suite='runtests.runtests',
)
|
the-stack_0_1328 | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "praxis.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
praxisd and praxis-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run praxisd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "praxisd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "praxis-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in praxis.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
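# e.g. _rpchost_to_args("127.0.0.1:12345") -> ['-rpcconnect=127.0.0.1', '-rpcport=12345']
# and _rpchost_to_args(None) -> []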
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a praxisd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "praxisd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "praxis-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple praxisds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
the-stack_0_1331 | import json
from kubernetes import client
from django.conf import settings
from libs.paths.data_paths import get_data_paths
from libs.paths.jobs import get_job_logs_path, get_job_outputs_path
from libs.utils import get_list
from polyaxon_k8s import constants as k8s_constants
from polyaxon_schemas.exceptions import PolyaxonConfigurationError
from polyaxon_schemas.utils import to_list
from scheduler.spawners.templates import constants
from scheduler.spawners.templates.env_vars import (
get_env_var,
get_job_env_vars,
get_resources_env_vars
)
from scheduler.spawners.templates.gpu_volumes import get_gpu_volumes_def
from scheduler.spawners.templates.init_containers import InitCommands, get_output_args
from scheduler.spawners.templates.node_selectors import get_node_selector
from scheduler.spawners.templates.resources import get_resources
from scheduler.spawners.templates.sidecars import get_sidecar_args, get_sidecar_container
from scheduler.spawners.templates.volumes import get_pod_outputs_volume
class PodManager(object):
def __init__(self,
namespace,
name,
project_name,
project_uuid,
job_name,
job_uuid,
job_docker_image,
job_container_name=None,
sidecar_container_name=None,
sidecar_docker_image=None,
init_container_name=None,
init_docker_image=None,
role_label=None,
type_label=None,
ports=None,
use_sidecar=False,
sidecar_config=None,
log_level=None):
self.namespace = namespace
self.name = name
self.project_name = project_name
self.project_uuid = project_uuid
self.job_name = job_name
self.job_uuid = job_uuid
self.job_container_name = job_container_name or settings.CONTAINER_NAME_JOB
self.job_docker_image = job_docker_image
self.sidecar_container_name = sidecar_container_name or settings.CONTAINER_NAME_SIDECAR
self.sidecar_docker_image = sidecar_docker_image or settings.JOB_SIDECAR_DOCKER_IMAGE
self.init_container_name = init_container_name or settings.CONTAINER_NAME_INIT
self.init_docker_image = init_docker_image or settings.JOB_INIT_DOCKER_IMAGE
self.role_label = role_label or settings.ROLE_LABELS_WORKER
self.type_label = type_label or settings.TYPE_LABELS_EXPERIMENT
self.app_label = settings.APP_LABELS_JOB
self.labels = self.get_labels()
self.k8s_job_name = self.get_k8s_job_name()
self.ports = to_list(ports) if ports else []
self.use_sidecar = use_sidecar
if use_sidecar and not sidecar_config:
raise PolyaxonConfigurationError(
                'In order to use a sidecar, a `sidecar_config` is required. '
'The `sidecar_config` must correspond to the sidecar docker image used.')
self.sidecar_config = sidecar_config
self.log_level = log_level
def get_k8s_job_name(self):
return constants.JOB_NAME.format(name=self.name, job_uuid=self.job_uuid)
def get_labels(self):
labels = {
'project_name': self.project_name,
'project_uuid': self.project_uuid,
'job_name': self.job_name,
'job_uuid': self.job_uuid,
'role': self.role_label,
'type': self.type_label,
'app': self.app_label
}
return labels
def get_pod_container(self,
volume_mounts,
persistence_outputs,
persistence_data,
outputs_refs_jobs=None,
outputs_refs_experiments=None,
env_vars=None,
command=None,
args=None,
resources=None):
"""Pod job container for task."""
env_vars = get_list(env_vars)
env_vars += get_job_env_vars(
log_level=self.log_level,
outputs_path=get_job_outputs_path(persistence_outputs=persistence_outputs,
job_name=self.job_name),
data_paths=get_data_paths(persistence_data),
logs_path=get_job_logs_path(job_name=self.job_name),
outputs_refs_jobs=outputs_refs_jobs,
outputs_refs_experiments=outputs_refs_experiments
)
env_vars += [
get_env_var(name=constants.CONFIG_MAP_JOB_INFO_KEY_NAME, value=json.dumps(self.labels)),
]
env_vars += get_resources_env_vars(resources=resources)
ports = [client.V1ContainerPort(container_port=port) for port in self.ports]
return client.V1Container(name=self.job_container_name,
image=self.job_docker_image,
command=command,
args=args,
ports=ports or None,
env=env_vars,
resources=get_resources(resources),
volume_mounts=volume_mounts)
def get_sidecar_container(self):
"""Pod sidecar container for job logs."""
return get_sidecar_container(
job_name=self.k8s_job_name,
job_container_name=self.job_container_name,
sidecar_container_name=self.sidecar_container_name,
sidecar_docker_image=self.sidecar_docker_image,
namespace=self.namespace,
app_label=self.app_label,
sidecar_config=self.sidecar_config,
sidecar_args=get_sidecar_args(pod_id=self.k8s_job_name))
def get_init_container(self, persistence_outputs):
"""Pod init container for setting outputs path."""
outputs_path = get_job_outputs_path(persistence_outputs=persistence_outputs,
job_name=self.job_name)
_, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs)
return client.V1Container(
name=self.init_container_name,
image=self.init_docker_image,
command=["/bin/sh", "-c"],
args=to_list(get_output_args(command=InitCommands.CREATE,
outputs_path=outputs_path)),
volume_mounts=outputs_volume_mount)
def get_task_pod_spec(self,
volume_mounts,
volumes,
persistence_outputs=None,
persistence_data=None,
outputs_refs_jobs=None,
outputs_refs_experiments=None,
env_vars=None,
command=None,
args=None,
resources=None,
node_selector=None,
restart_policy='OnFailure'):
"""Pod spec to be used to create pods for tasks: master, worker, ps."""
volume_mounts = get_list(volume_mounts)
volumes = get_list(volumes)
gpu_volume_mounts, gpu_volumes = get_gpu_volumes_def(resources)
volume_mounts += gpu_volume_mounts
volumes += gpu_volumes
pod_container = self.get_pod_container(volume_mounts=volume_mounts,
persistence_outputs=persistence_outputs,
persistence_data=persistence_data,
outputs_refs_jobs=outputs_refs_jobs,
outputs_refs_experiments=outputs_refs_experiments,
env_vars=env_vars,
command=command,
args=args,
resources=resources)
containers = [pod_container]
if self.use_sidecar:
sidecar_container = self.get_sidecar_container()
containers.append(sidecar_container)
node_selector = get_node_selector(
node_selector=node_selector,
default_node_selector=settings.NODE_SELECTORS_JOBS)
service_account_name = None
if settings.K8S_RBAC_ENABLED:
service_account_name = settings.K8S_SERVICE_ACCOUNT_NAME
return client.V1PodSpec(
restart_policy=restart_policy,
service_account_name=service_account_name,
init_containers=to_list(self.get_init_container(persistence_outputs)),
containers=containers,
volumes=volumes,
node_selector=node_selector)
def get_pod(self,
volume_mounts,
volumes,
persistence_outputs=None,
persistence_data=None,
outputs_refs_jobs=None,
outputs_refs_experiments=None,
env_vars=None,
command=None,
args=None,
resources=None,
node_selector=None,
restart_policy=None):
metadata = client.V1ObjectMeta(name=self.k8s_job_name,
labels=self.labels,
namespace=self.namespace)
pod_spec = self.get_task_pod_spec(
volume_mounts=volume_mounts,
volumes=volumes,
persistence_outputs=persistence_outputs,
persistence_data=persistence_data,
outputs_refs_jobs=outputs_refs_jobs,
outputs_refs_experiments=outputs_refs_experiments,
env_vars=env_vars,
command=command,
args=args,
resources=resources,
node_selector=node_selector,
restart_policy=restart_policy)
return client.V1Pod(api_version=k8s_constants.K8S_API_VERSION_V1,
kind=k8s_constants.K8S_POD_KIND,
metadata=metadata,
spec=pod_spec)
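# Hedged usage sketch (not part of the original module). All literal values
# below are illustrative assumptions; a configured Django settings module and
# real volume/volume-mount objects are required before the returned pod can be
# submitted to a cluster.
def _example_build_pod(volumes, volume_mounts):
    pod_manager = PodManager(namespace='polyaxon',
                             name='plxjob',
                             project_name='demo-project',
                             project_uuid='project-uuid',
                             job_name='demo-job',
                             job_uuid='job-uuid',
                             job_docker_image='busybox:latest',
                             use_sidecar=False)
    return pod_manager.get_pod(volume_mounts=volume_mounts,
                               volumes=volumes,
                               command=["/bin/sh", "-c"],
                               args=["echo hello"],
                               restart_policy='Never')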
|
the-stack_0_1333 | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.1, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_1 import models
class AdminApiTokenGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[AdminApiToken]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.AdminApiToken]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[AdminApiToken]): A list of administrator API tokens.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminApiTokenGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AdminApiTokenGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdminApiTokenGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
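# Hedged usage sketch (not part of the generated file): building a response
# object by hand and round-tripping it through to_dict(). Field values are
# illustrative.
def _example_response():
    resp = AdminApiTokenGetResponse(continuation_token='token-123',
                                    total_item_count=0,
                                    items=[])
    assert resp.to_dict() == {'continuation_token': 'token-123',
                              'total_item_count': 0,
                              'items': []}
    return resp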
|
the-stack_0_1335 | """
This file offers the methods to automatically retrieve the graph Sodalis glossinidius.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def SodalisGlossinidius(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Sodalis glossinidius graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Sodalis glossinidius graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="SodalisGlossinidius",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
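# Hedged usage sketch (not part of the generated file): retrieving the graph
# with the default settings described above. Downloading and preprocessing
# require network access and local disk space.
def _example_retrieve():
    graph = SodalisGlossinidius(directed=False, version="links.v11.5")
    return graph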
|
the-stack_0_1336 | # (c) 2019-2020 Mikhail Paulyshka
# SPDX-License-Identifier: MIT
import logging
import json
import os
import random
import string
import sys
import pprint
import threading
from urllib.parse import parse_qs
from typing import Dict, List
import common.mglx_http
from .gw2_constants import GW2AuthorizationResult
class GW2API(object):
API_DOMAIN = 'https://api.guildwars2.com'
API_URL_ACHIEVEMENTS = '/v2/achievements'
API_URL_ACCOUNT = '/v2/account'
API_URL_ACCOUNT_ACHIVEMENTS = '/v2/account/achievements'
LOCALSERVER_HOST = '127.0.0.1'
LOCALSERVER_PORT = 13338
RETRIES_COUNT = 5
def __init__(self, plugin_version):
self.__http = common.mglx_http.MglxHttp(user_agent='gog_gw2/%s' % plugin_version, verify_ssl=False)
self.__logger = logging.getLogger('gw2_api')
self._api_key = None
self._account_info = None
async def shutdown(self):
await self.__http.shutdown()
#
# Getters
#
def get_api_key(self) -> str:
return self._api_key
def get_account_id(self) -> str:
if self._account_info is None:
self.__logger.error('get_account_id: account info is None', exc_info=True)
return None
return self._account_info['id']
def get_account_name(self) -> str:
if self._account_info is None:
self.__logger.error('get_account_name: account info is None', exc_info=True)
return None
return self._account_info['name']
def get_owned_games(self) -> List[str]:
if self._account_info is None:
self.__logger.error('get_owned_games: account info is None', exc_info=True)
return list()
return self._account_info['access']
def get_account_age(self) -> int:
if self._account_info is None:
self.__logger.error('get_account_age: account info is None', exc_info=True)
return None
if 'age' not in self._account_info:
return 0
return self._account_info['age']
async def get_account_achievements(self) -> List[int]:
result = list()
if not self._api_key:
self.__logger.error('get_account_achievements: api_key is None', exc_info=True)
return result
(status, achievements_account) = await self.__api_get_response(self._api_key, self.API_URL_ACCOUNT_ACHIVEMENTS)
if status != 200:
self.__logger.warn('get_account_achievements: failed to get achievements %s' % status)
return result
for achievement in achievements_account:
if achievement['done'] == True:
result.append(achievement['id'])
return result
#
# Authorization server
#
async def do_auth_apikey(self, api_key : str) -> GW2AuthorizationResult:
self._api_key = None
self._account_info = None
if not api_key:
            self.__logger.warn('do_auth_apikey: api_key is None')
return GW2AuthorizationResult.FAILED
(status_code, account_info) = await self.__api_get_response(api_key, self.API_URL_ACCOUNT)
if status_code != 200:
if (account_info is not None) and ('text' in account_info):
if account_info['text'] == 'Invalid access token':
return GW2AuthorizationResult.FAILED_INVALID_TOKEN
elif account_info['text'] == 'invalid key':
return GW2AuthorizationResult.FAILED_INVALID_KEY
elif account_info['text'] == 'no game account':
return GW2AuthorizationResult.FAILED_NO_ACCOUNT
elif account_info['text'] == 'ErrBadData':
return GW2AuthorizationResult.FAILED_BAD_DATA
elif account_info['text'] == 'ErrTimeout':
return GW2AuthorizationResult.FAILED_TIMEOUT
else:
self.__logger.error('do_auth_apikey: unknown error description %s, %s' % (status_code, account_info))
self.__logger.warn('do_auth_apikey: %s, %s' % (status_code, account_info))
return GW2AuthorizationResult.FAILED
if account_info is None:
self.__logger.warn('do_auth_apikey: account info is None')
return GW2AuthorizationResult.FAILED
self._api_key = api_key
self._account_info = account_info
return GW2AuthorizationResult.FINISHED
async def __api_get_response(self, api_key, url, parameters = None):
result = None
#update authorization cookie
self.__http.update_headers({'Authorization': 'Bearer ' + api_key})
#make request
retries = self.RETRIES_COUNT
while retries > 0:
#decrement remaining retries counter
retries = retries - 1
#send request
resp = None
try:
resp = await self.__http.request_get(self.API_DOMAIN+url, params=parameters)
except Exception:
self.__logger.exception('__api_get_response: failed to perform GET request for url %s' % url)
return (0, None)
#log response status
if resp.status == 400:
self.__logger.warning('__api_get_response: TIMEOUT for url %s' % url)
elif resp.status == 404:
self.__logger.error('__api_get_response: NOT FOUND for url %s' % url)
elif resp.status == 502:
self.__logger.warning('__api_get_response: BAD GATEWAY for url %s' % url)
elif resp.status == 504:
self.__logger.warning('__api_get_response: GATEWAY TIMEOUT for url %s' % url)
            elif (resp.status == 200) and (resp.text is not None):
                try:
                    result = json.loads(resp.text)
                    #got a valid response, stop retrying
                    break
                except Exception:
                    self.__logger.exception('__api_get_response: failed to parse response, url=%s, status=%s, text=%s' % (url, resp.status, resp.text))
else:
self.__logger.error('__api_get_response: unknown error, url=%s, status=%s, text=%s' % (url, resp.status, resp.text))
return (resp.status, result)
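# Hedged usage sketch (not part of the original module): authorizing with an
# API key and reading account data. The key string is a placeholder and the
# plugin version is an illustrative assumption.
async def _example_usage():
    api = GW2API(plugin_version='0.0.0')
    result = await api.do_auth_apikey('XXXXXXXX-PLACEHOLDER-API-KEY')
    if result == GW2AuthorizationResult.FINISHED:
        print(api.get_account_name(), api.get_owned_games())
        achievements = await api.get_account_achievements()
        print(len(achievements))
    await api.shutdown()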
|
the-stack_0_1341 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 09:56:48 2019
@author: adamreidsmith
"""
'''
Create datafiles of the 1D solution to the Van der Pol equation: x'' - a*(1 - x^2)*x' + b*x = f(t).
Datafiles include the computed solution, its fast Fourier transform, a histogram
of x(t) vs t mod T where T is the first period of f, the phase(s) of f, and the
parameters a and b.
These datafiles are created for use in 'nn_hist.py', 'nn_ft.py', and 'nn_wavelet.py'.
'''
import numpy as np
from scipy.integrate import odeint
from os import path, mkdir
###############################################################################
'''
Inputs:
tmax: The upper bound of the interval [0,tmax] on which to solve
the Van der Pol equation.
initial_cond: Initial condition. Can be set to 'random' or a list of length 2.
n_points: The number of time steps to include in each solution.
num_ab_pairs: The number of times to solve the equation, i.e. the number
of data points.
n_periods: Number of periodic terms in the forcing function.
include_phase: Include or exclude a random phase in the forcing terms.
C: Coefficients of the terms in the forcing function. Must be
a list of length 'n_periods'.
T: Periods of the terms in the forcing function. Must be a list
of length 'n_periods'.
file_name: Name of the datafile.
'''
###############################################################################
def generate_data(tmax=500,
initial_cond='random',
n_points=2**10,
num_ab_pairs=800,
n_periods=3,
include_phase=True,
C=[1, np.sqrt(2), np.pi/3],
T=[5, 10*np.sqrt(2)-2, 30*np.sqrt(3)],
file_name=None):
twopi = 2*np.pi
    #Create a directory to store datafiles if it doesn't already exist
if not path.exists('./datafiles'):
mkdir('./datafiles')
assert type(C) == type(T) == list and n_periods == len(C) == len(T), \
'C and T must be lists of length \'n_periods\'.'
#RHS
def f(t, phi, C, T):
val = 0
if include_phase:
for i in range(n_periods):
val += C[i] * np.cos(twopi/T[i]*t + phi[i])
return val
else:
for i in range(n_periods):
val += C[i] * np.cos(twopi/T[i]*t)
return val
data = []
for i in range(num_ab_pairs):
a = np.random.rand() #Random number in [0,1)
b = np.random.rand() #Random number in [0,1)
if initial_cond == 'random':
ic = [2*np.random.rand() - 1, 2*np.random.rand() - 1]
else:
ic = initial_cond
phi = []
if include_phase:
for _ in range(n_periods):
phi.append(twopi*np.random.rand())
#Van der Pol oscillator equation
def vanderpol(ic,t):
x = ic[0]
y = ic[1]
yd = f(t, phi, C, T) + a*(1 - x**2)*y - b*x
xd = y
return [xd,yd]
#Solve the ivp numerically
npoints = 10*n_points
tfull = np.linspace(0,tmax,npoints)
sol = odeint(vanderpol, ic, tfull)
#Keep every tenth data point
indices = [i for i in range(npoints) if i % 10 == 0]
t = np.array([tfull[i] for i in indices])
tmodT1 = t % T[0]
x = [sol[i][0] for i in indices]
n_bins = 100
soln = np.array([[t[i],x[i]] for i in range(len(t))])
fftdata = np.fft.fft(x)
FT = np.array([[t[i],fftdata[i]] for i in range(len(t))])
data.append(soln)
data.append(FT)
data.append(np.histogram2d(tmodT1, x, bins=n_bins)[0])
data.append(phi)
data.append([a,b])
if i % 10 == 0 and __name__ == '__main__':
print('Iteration:', i, 'of', num_ab_pairs)
if file_name is None:
file_name = 'vdp_data_' + str(num_ab_pairs) + 'pts_[soln,FT,hist,phase(' + \
str(include_phase) + '),param]'
file_path = './datafiles/' + file_name
print('Writing datafile to', file_path + '.npy')
np.save(file_path, data)
print('Done')
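#Hedged example (not part of the original script): a smaller, phase-free run.
#All argument values below are illustrative assumptions.
def example_small_run():
    generate_data(tmax=200,
                  n_points=2**9,
                  num_ab_pairs=100,
                  n_periods=1,
                  include_phase=False,
                  C=[1.0],
                  T=[5.0],
                  file_name='vdp_small_example')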
if __name__ == '__main__':
generate_data() |
the-stack_0_1342 | from collections import deque
import numpy as np
import time
import os
import json
from isaac.utilities import *
# set the current working directory to the deployed package folder. This is required by isaac.
os.chdir("/home/davis/deploy/davis/rm_isaac_bridge-pkg")
from engine.pyalice import Codelet
from engine.pyalice.gui.composite_widget import CompositeWidget
class IsaacEffector:
def __init__(self, config):
self.config = config
self._timeout = config['timeout']
self._widgets = self.config['widgets']
self._stream_articulations = self.config['stream_articulations']
# Since isaac does not support closed kinematics (4-bar linkage), there are 4 dof, where
# left_finger == left_finger_upper and right_finger == right_finger_upper
self.joint_names = self._load_kinematics(config['effector_type'])
self.joints = CompositeArray(self.joint_names, 'position', [0]*len(self.joint_names))
if self._widgets:
self.finger_widget = CompositeWidget(self.joint_names, 'position', [[-np.pi/2, np.pi/2]]*6)
self._command_queue = deque()
def _load_kinematics(self, effector_type):
valid_kinematics = ['smarthand']
if effector_type not in valid_kinematics:
raise ValueError('No valid kinematic file found for '+effector_type+'. Valid kinematics exist for '+', '.join(valid_kinematics))
self._kinematic_file = "apps/assets/kinematic_trees/{}.kinematic.json".format(effector_type)
joint_names = []
with open(self._kinematic_file,'r') as fd:
kt = json.load(fd)
for link in kt['links']:
if 'motor' in link and link['motor']['type'] != 'constant':
joint_names.append(link['name'])
return joint_names
def command(self, action, payload=None):
if action not in ['get_articulation_angles', 'set_articulation_angles']:
raise ValueError(action+' is not a valid action type')
if type(payload) in [list, np.ndarray]:
if len(list(payload)) == 2:
payload = [payload[0], payload[0], payload[0], payload[1], payload[1], payload[1]]
payload = CompositeArray(self.joint_names, 'position', payload)
command = Command(action, payload)
self._command_queue.append(command)
elapsed = 0
while command.response is None and elapsed < self._timeout:
elapsed += 0.01
time.sleep(0.01)
return command.response
def enable_articulation_stream(self):
self._stream_articulations = True
def disable_articulation_stream(self):
self._stream_articulations = False
def enable_all_streams(self):
self._stream_articulations = True
def disable_all_streams(self):
self._stream_articulations = False
def _JointReciever(self):
parent = self
class JointReciever(Codelet):
def start(self):
self.rx = self.isaac_proto_rx("CompositeProto", "state")
self.tick_on_message(self.rx)
def tick(self):
if len(parent._command_queue) > 0 and parent._command_queue[0].action == 'get_articulation_angles':
msg = self.rx.message
parent.joints.composite = msg
command = parent._command_queue.popleft()
values = parent.joints.values
command.response = values
elif parent._stream_articulations:
msg = self.rx.message
parent.joints.composite = msg
else:
return
if parent._widgets:
parent.finger_widget.composite = msg
return JointReciever
def _JointTransmitter(self):
parent = self
class JointTransmitter(Codelet):
def start(self):
self.tx = self.isaac_proto_tx("CompositeProto", "command")
self.tick_periodically(0.03)
def tick(self):
if len(parent._command_queue) > 0 and parent._command_queue[0].action == 'set_articulation_angles':
self.tx._msg = parent._command_queue[0].payload.composite
self.tx.publish()
command = parent._command_queue.popleft()
command.response = True
elif parent._widgets and parent._stream_articulations:
self.tx._msg = parent.finger_widget.composite
self.tx.publish()
return JointTransmitter
def connect_app(self, app):
# load dependency subgraphs
app.load(filename="packages/planner/apps/multi_joint_lqr_control.subgraph.json", prefix="lqr_gripper")
simulation_interface = app.nodes["simulation.interface"]
lqr_interface = app.nodes["lqr_gripper.subgraph"]["interface"]
# configs
app.nodes["lqr_gripper.kinematic_tree"]["KinematicTree"].config.kinematic_file = self._kinematic_file
lqr_planner = app.nodes["lqr_gripper.local_plan"]["MultiJointLqrPlanner"]
lqr_planner.config.speed_min = [-self.config['joint_speed']] * len(self.joint_names)
lqr_planner.config.speed_max = [self.config['joint_speed']] * len(self.joint_names)
lqr_planner.config.acceleration_min = [-self.config['joint_accel']] * len(self.joint_names)
lqr_planner.config.acceleration_max = [self.config['joint_accel']] * len(self.joint_names)
# create nodes
joints_in_node = app.add("joints_input")
joints_in_node.add(self._JointReciever(), 'articulation_reciever')
joints_out_node = app.add("joints_output")
joints_out_node.add(self._JointTransmitter(), 'articulation_transmitter')
# connect edges
app.connect(simulation_interface["output"], "joint_state", lqr_interface, "joint_state")
app.connect(simulation_interface["output"], "joint_state", joints_in_node['articulation_reciever'], "state")
app.connect(joints_out_node['articulation_transmitter'], "command", lqr_interface, "joint_target")
app.connect(lqr_interface, "joint_command", simulation_interface["input"], "joint_position")
        return app
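# Hedged usage sketch (not part of the original module): the config keys below
# mirror what __init__ and connect_app() read, and their values are
# illustrative assumptions. A deployed Isaac SDK application (wired up through
# connect_app) is required before command() can return real joint data.
def _example_effector():
    config = {
        'timeout': 5.0,
        'widgets': False,
        'stream_articulations': True,
        'effector_type': 'smarthand',
        'joint_speed': 1.0,
        'joint_accel': 2.0,
    }
    effector = IsaacEffector(config)
    # Read the current finger angles, then command all joints to zero
    angles = effector.command('get_articulation_angles')
    effector.command('set_articulation_angles', [0.0, 0.0])
    return effector, angles
|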
the-stack_0_1344 | # created April 2017
# by TEASER Development Team
from teaser.logic.archetypebuildings.tabula.de.singlefamilyhouse import \
SingleFamilyHouse
class ApartmentBlock(SingleFamilyHouse):
"""Archetype for TABULA Apartment Block
Archetype according to TABULA building typology
(http://webtool.building-typology.eu/#bm).
Description of:
- estimation factors
- always 4 walls, 1 roof, 1 floor, 4 windows, one door (default
orientation?)
- how we calculate facade and window area
- calculate u-values
- zones (one zone)
- differences between TABULA und our approach (net floor area, height
and number of storeys)
- how to proceed with rooftops (keep them as flat roofs or pitched
roofs? what orientation?)
Parameters
----------
parent: Project()
The parent class of this object, the Project the Building belongs to.
Allows for better control of hierarchical structures. If not None it
adds this Building instance to Project.buildings.
(default: None)
name : str
Individual name
year_of_construction : int
Year of first construction
height_of_floors : float [m]
Average height of the buildings' floors
number_of_floors : int
Number of building's floors above ground
net_leased_area : float [m2]
Total net leased area of building. This is area is NOT the footprint
of a building
with_ahu : Boolean
If set to True, an empty instance of BuildingAHU is instantiated and
assigned to attribute central_ahu. This instance holds information for
central Air Handling units. Default is False.
internal_gains_mode: int [1, 2, 3]
mode for the internal gains calculation by persons:
1: Temperature and activity degree dependent calculation. The
calculation is based on SIA 2024 (default)
2: Temperature and activity degree independent calculation, the max.
heatflowrate is prescribed by the parameter
fixed_heat_flow_rate_persons.
3: Temperature and activity degree dependent calculation with
consideration of moisture. The calculation is based on SIA 2024
construction_type : str
Construction type of used wall constructions default is "existing
state"
existing state:
construction of walls according to existing state in TABULA
usual refurbishment:
construction of walls according to usual refurbishment in
TABULA
advanced refurbishment:
construction of walls according to advanced refurbishment in
TABULA
"""
def __init__(
self,
parent,
name=None,
year_of_construction=None,
number_of_floors=None,
height_of_floors=None,
net_leased_area=None,
with_ahu=False,
internal_gains_mode=1,
construction_type=None):
super(ApartmentBlock, self).__init__(
parent,
name,
year_of_construction,
number_of_floors,
height_of_floors,
net_leased_area,
with_ahu,
internal_gains_mode,
construction_type)
self.construction_type = construction_type
self.number_of_floors = number_of_floors
self.height_of_floors = height_of_floors
self._construction_type_1 = self.construction_type + '_1_AB'
self._construction_type_2 = self.construction_type + '_2_AB'
self.zone_area_factors = {"SingleDwelling": [1, "Living"]}
self._outer_wall_names_1 = {
"ExteriorFacadeNorth_1": [90.0, 0.0],
"ExteriorFacadeEast_1": [90.0, 90.0],
"ExteriorFacadeSouth_1": [90.0, 180.0],
"ExteriorFacadeWest_1": [90.0, 270.0]}
self._outer_wall_names_2 = {
"ExteriorFacadeNorth_2": [90.0, 0.0],
"ExteriorFacadeEast_2": [90.0, 90.0],
"ExteriorFacadeSouth_2": [90.0, 180.0],
"ExteriorFacadeWest_2": [90.0, 270.0]}
self.roof_names_1 = {"Rooftop_1": [0, -1]} # [0, -1]
self.roof_names_2 = {"Rooftop_2": [0, -1]}
self.ground_floor_names_1 = {
"GroundFloor_1": [0, -2]} # [0, -2]
self.ground_floor_names_2 = {
"GroundFloor_2": [0, -2]}
self.door_names = {"Door": [90.0, 270]}
self.window_names_1 = {
"WindowFacadeNorth_1": [90.0, 0.0],
"WindowFacadeEast_1": [90.0, 90.0],
"WindowFacadeSouth_1": [90.0, 180.0],
"WindowFacadeWest_1": [90.0, 270.0]}
self.window_names_2 = {
"WindowFacadeNorth_2": [90.0, 0.0],
"WindowFacadeEast_2": [90.0, 90.0],
"WindowFacadeSouth_2": [90.0, 180.0],
"WindowFacadeWest_2": [90.0, 270.0]}
# [tilt, orientation]
self.inner_wall_names = {"InnerWall": [90.0, 0.0]}
self.ceiling_names = {"Ceiling": [0.0, -1]}
self.floor_names = {"Floor": [0.0, -2]}
# Rooftop1, Rooftop2, Wall1, Wall2, GroundFloor1, GroundFloor2,
# Window1, Window2, Door
# Area/ReferenceFloorArea
self.facade_estimation_factors = {
(1860, 1918): {
'rt1': 0.27961,
'rt2': 0.0,
'ow1': 0.36840,
'ow2': 0.0,
'gf1': 0.19747,
'gf2': 0.0,
'win1': 0.16429,
'win2': 0.0,
'door': 0.00241},
(1919, 1948): {
'rt1': 0.25889,
'rt2': 0.0,
'ow1': 0.83827,
'ow2': 0.0,
'gf1': 0.26658,
'gf2': 0.0,
'win1': 0.18767,
'win2': 0.0,
'door': 0.00135},
(1949, 1957): {
'rt1': 0.22052,
'rt2': 0.0,
'ow1': 0.85839,
'ow2': 0.0,
'gf1': 0.22052,
'gf2': 0.0,
'win1': 0.18397,
'win2': 0.0,
'door': 0.00125},
(1958, 1968): {
'rt1': 0.12339,
'rt2': 0.0,
'ow1': 0.83555,
'ow2': 0.0,
'gf1': 0.11814,
'gf2': 0.0,
'win1': 0.17674,
'win2': 0.0,
'door': 0.00051},
(1969, 1978): {
'rt1': 0.16255,
'rt2': 0.0,
'ow1': 0.64118,
'ow2': 0.0,
'gf1': 0.16255,
'gf2': 0.0,
'win1': 0.16406,
'win2': 0.0,
'door': 0.0006}}
self.building_age_group = None
if self.with_ahu is True:
self.central_ahu.temperature_profile = (
7 * [293.15] +
12 * [295.15] +
6 * [293.15])
self.central_ahu.min_relative_humidity_profile = (25 * [0.45])
self.central_ahu.max_relative_humidity_profile = (25 * [0.55])
self.central_ahu.v_flow_profile = (
7 * [0.0] + 12 * [1.0] + 6 * [0.0])
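# Hedged usage sketch (not part of the original module): instantiating the
# archetype on its own. parent=None keeps the building outside of a Project,
# the construction_type value follows the options listed in the class
# docstring above, and all numeric values are illustrative assumptions.
def _example_apartment_block():
    bldg = ApartmentBlock(parent=None,
                          name='ApartmentBlock1925',
                          year_of_construction=1925,
                          number_of_floors=4,
                          height_of_floors=2.8,
                          net_leased_area=800.0,
                          construction_type='existing state')
    return bldg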
|
the-stack_0_1347 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import os
from numpy.testing import assert_allclose
import pytest
from jax import jit, random
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro.distributions.transforms import AffineTransform
from numpyro.infer import MCMC, NUTS
from numpyro.infer.reparam import TransformReparam
def test_dist_pytree():
from tensorflow_probability.substrates.jax import distributions as tfd
from numpyro.contrib.tfp.distributions import TFPDistribution
@jit
def f(x):
with numpyro.handlers.seed(rng_seed=0), numpyro.handlers.trace() as tr:
numpyro.sample("x", tfd.Normal(x, 1))
return tr["x"]["fn"]
res = f(0.0)
assert isinstance(res, TFPDistribution)
assert res.loc == 0
assert res.scale == 1
@pytest.mark.filterwarnings("ignore:can't resolve package")
def test_transformed_distributions():
from tensorflow_probability.substrates.jax import (
bijectors as tfb,
distributions as tfd,
)
d = dist.TransformedDistribution(dist.Normal(0, 1), dist.transforms.ExpTransform())
d1 = tfd.TransformedDistribution(tfd.Normal(0, 1), tfb.Exp())
x = random.normal(random.PRNGKey(0), (1000,))
d_x = d.log_prob(x).sum()
d1_x = d1.log_prob(x).sum()
assert_allclose(d_x, d1_x)
@pytest.mark.filterwarnings("ignore:can't resolve package")
def test_logistic_regression():
from tensorflow_probability.substrates.jax import distributions as tfd
N, dim = 3000, 3
num_warmup, num_samples = (1000, 1000)
data = random.normal(random.PRNGKey(0), (N, dim))
true_coefs = jnp.arange(1.0, dim + 1.0)
logits = jnp.sum(true_coefs * data, axis=-1)
labels = tfd.Bernoulli(logits=logits).sample(seed=random.PRNGKey(1))
def model(labels):
coefs = numpyro.sample("coefs", tfd.Normal(jnp.zeros(dim), jnp.ones(dim)))
logits = numpyro.deterministic("logits", jnp.sum(coefs * data, axis=-1))
return numpyro.sample("obs", tfd.Bernoulli(logits=logits), obs=labels)
kernel = NUTS(model)
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.run(random.PRNGKey(2), labels)
mcmc.print_summary()
samples = mcmc.get_samples()
assert samples["logits"].shape == (num_samples, N)
expected_coefs = jnp.array([0.97, 2.05, 3.18])
assert_allclose(jnp.mean(samples["coefs"], 0), expected_coefs, atol=0.22)
@pytest.mark.filterwarnings("ignore:can't resolve package")
# TODO: remove after https://github.com/tensorflow/probability/issues/1072 is resolved
@pytest.mark.filterwarnings("ignore:Explicitly requested dtype")
def test_beta_bernoulli():
from tensorflow_probability.substrates.jax import distributions as tfd
num_warmup, num_samples = (500, 2000)
def model(data):
alpha = jnp.array([1.1, 1.1])
beta = jnp.array([1.1, 1.1])
p_latent = numpyro.sample("p_latent", tfd.Beta(alpha, beta))
numpyro.sample("obs", tfd.Bernoulli(p_latent), obs=data)
return p_latent
true_probs = jnp.array([0.9, 0.1])
data = tfd.Bernoulli(true_probs).sample(
seed=random.PRNGKey(1), sample_shape=(1000, 2)
)
kernel = NUTS(model=model, trajectory_length=0.1)
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.run(random.PRNGKey(2), data)
mcmc.print_summary()
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["p_latent"], 0), true_probs, atol=0.05)
def make_kernel_fn(target_log_prob_fn):
import tensorflow_probability.substrates.jax as tfp
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.5 / jnp.sqrt(0.5 ** jnp.arange(4)[..., None]),
num_leapfrog_steps=5,
)
@pytest.mark.parametrize(
"kernel, kwargs",
[
("HamiltonianMonteCarlo", dict(step_size=0.05, num_leapfrog_steps=10)),
("NoUTurnSampler", dict(step_size=0.05)),
("RandomWalkMetropolis", dict()),
("SliceSampler", dict(step_size=1.0, max_doublings=5)),
(
"UncalibratedHamiltonianMonteCarlo",
dict(step_size=0.05, num_leapfrog_steps=10),
),
("UncalibratedRandomWalk", dict()),
],
)
@pytest.mark.filterwarnings("ignore:can't resolve package")
# TODO: remove after https://github.com/tensorflow/probability/issues/1072 is resolved
@pytest.mark.filterwarnings("ignore:Explicitly requested dtype")
def test_mcmc_kernels(kernel, kwargs):
from numpyro.contrib.tfp import mcmc
kernel_class = getattr(mcmc, kernel)
true_coef = 0.9
num_warmup, num_samples = 1000, 1000
def model(data):
alpha = numpyro.sample("alpha", dist.Uniform(0, 1))
with numpyro.handlers.reparam(config={"loc": TransformReparam()}):
loc = numpyro.sample(
"loc",
dist.TransformedDistribution(
dist.Uniform(0, 1), AffineTransform(0, alpha)
),
)
numpyro.sample("obs", dist.Normal(loc, 0.1), obs=data)
data = true_coef + random.normal(random.PRNGKey(0), (1000,))
tfp_kernel = kernel_class(model=model, **kwargs)
mcmc = MCMC(tfp_kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.warmup(random.PRNGKey(2), data, collect_warmup=True)
warmup_samples = mcmc.get_samples()
mcmc.run(random.PRNGKey(3), data)
samples = mcmc.get_samples()
assert len(warmup_samples["loc"]) == num_warmup
assert len(samples["loc"]) == num_samples
assert_allclose(jnp.mean(samples["loc"], 0), true_coef, atol=0.05)
@pytest.mark.parametrize(
"kernel, kwargs",
[
("MetropolisAdjustedLangevinAlgorithm", dict(step_size=1.0)),
("RandomWalkMetropolis", dict()),
("SliceSampler", dict(step_size=1.0, max_doublings=5)),
("UncalibratedLangevin", dict(step_size=0.1)),
(
"ReplicaExchangeMC",
dict(
inverse_temperatures=0.5 ** jnp.arange(4), make_kernel_fn=make_kernel_fn
),
),
],
)
@pytest.mark.parametrize("num_chains", [1, 2])
@pytest.mark.skipif(
"XLA_FLAGS" not in os.environ,
reason="without this mark, we have duplicated tests in Travis",
)
@pytest.mark.filterwarnings("ignore:There are not enough devices:UserWarning")
@pytest.mark.filterwarnings("ignore:can't resolve package")
# TODO: remove after https://github.com/tensorflow/probability/issues/1072 is resolved
@pytest.mark.filterwarnings("ignore:Explicitly requested dtype")
def test_unnormalized_normal_chain(kernel, kwargs, num_chains):
from numpyro.contrib.tfp import mcmc
# TODO: remove when this issue is fixed upstream
# https://github.com/tensorflow/probability/pull/1087
if num_chains == 2 and kernel == "ReplicaExchangeMC":
pytest.xfail("ReplicaExchangeMC is not fully compatible with omnistaging yet.")
kernel_class = getattr(mcmc, kernel)
true_mean, true_std = 1.0, 0.5
num_warmup, num_samples = (1000, 8000)
def potential_fn(z):
return 0.5 * ((z - true_mean) / true_std) ** 2
init_params = jnp.array(0.0) if num_chains == 1 else jnp.array([0.0, 2.0])
tfp_kernel = kernel_class(potential_fn=potential_fn, **kwargs)
mcmc = MCMC(
tfp_kernel,
num_warmup=num_warmup,
num_samples=num_samples,
num_chains=num_chains,
progress_bar=False,
)
mcmc.run(random.PRNGKey(0), init_params=init_params)
mcmc.print_summary()
hmc_states = mcmc.get_samples()
assert_allclose(jnp.mean(hmc_states), true_mean, rtol=0.07)
assert_allclose(jnp.std(hmc_states), true_std, rtol=0.07)
# test if sampling from tfp distributions works as expected using
# numpyro sample function: numpyro.sample("name", dist) (bug)
@pytest.mark.filterwarnings("ignore:can't resolve package")
@pytest.mark.filterwarnings("ignore:Importing distributions")
def test_sample_tfp_distributions():
from tensorflow_probability.substrates.jax import distributions as tfd
from numpyro.contrib.tfp.distributions import TFPDistribution
# test no error raised
d = TFPDistribution[tfd.Normal](0, 1)
with numpyro.handlers.seed(rng_seed=random.PRNGKey(0)):
numpyro.sample("normal", d)
# test intermediates are []
value, intermediates = d(sample_intermediates=True, rng_key=random.PRNGKey(0))
assert intermediates == []
# test that sampling from unwrapped tensorflow_probability distributions works as
# expected using numpyro.sample primitive
@pytest.mark.parametrize(
"dist,args",
[
["Bernoulli", (0,)],
["Beta", (1, 1)],
["Binomial", (10, 0)],
["Categorical", ([0, 1, -1],)],
["Cauchy", (0, 1)],
["Dirichlet", ([1, 2, 0.5],)],
["Exponential", (1,)],
["InverseGamma", (1, 1)],
["Normal", (0, 1)],
["OrderedLogistic", ([0, 1], 0.5)],
["Pareto", (1,)],
],
)
def test_sample_unwrapped_tfp_distributions(dist, args):
from tensorflow_probability.substrates.jax import distributions as tfd
# test no error is raised
with numpyro.handlers.seed(rng_seed=random.PRNGKey(0)):
# since we import tfd inside the test, distributions have to be parametrized as
# strings, which is why we use getattr here
numpyro.sample("sample", getattr(tfd, dist)(*args))
# test mixture distributions
def test_sample_unwrapped_mixture_same_family():
from tensorflow_probability.substrates.jax import distributions as tfd
# test no error is raised
with numpyro.handlers.seed(rng_seed=random.PRNGKey(0)):
numpyro.sample(
"sample",
tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=[0.3, 0.7]),
components_distribution=tfd.Normal(
loc=[-1.0, 1], scale=[0.1, 0.5] # One for each component.
),
),
)
# test that MCMC works with unwrapped tensorflow_probability distributions
def test_mcmc_unwrapped_tfp_distributions():
from tensorflow_probability.substrates.jax import distributions as tfd
def model(y):
theta = numpyro.sample("p", tfd.Beta(1, 1))
with numpyro.plate("plate", y.size):
numpyro.sample("y", tfd.Bernoulli(probs=theta), obs=y)
mcmc = MCMC(NUTS(model), num_warmup=1000, num_samples=1000)
mcmc.run(random.PRNGKey(0), jnp.array([0, 0, 1, 1, 1]))
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["p"]), 4 / 7, atol=0.05)
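# Hedged usage sketch (not part of the test module): the pattern exercised by
# the tests above, reduced to a stand-alone model; data values are
# illustrative.
def _example_tfp_model_run():
    from tensorflow_probability.substrates.jax import distributions as tfd
    def model(y):
        loc = numpyro.sample("loc", tfd.Normal(0.0, 10.0))
        with numpyro.plate("data", y.size):
            numpyro.sample("y", tfd.Normal(loc, 1.0), obs=y)
    y = jnp.array([0.1, -0.3, 0.2, 0.4])
    mcmc = MCMC(NUTS(model), num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(0), y)
    return mcmc.get_samples()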
|