# Deep-learning stack: core torch, vision datasets/transforms, and timm backbones.
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torchvision
from torchvision import transforms
from torchvision.datasets import ImageFolder
import timm

import matplotlib.pyplot as plt  # For data viz
import pandas as pd
import numpy as np
import sys
from tqdm.notebook import tqdm

# Echo the environment so runs are reproducible/debuggable from the log alone.
print('System Version:', sys.version)
print('PyTorch version', torch.__version__)
print('Torchvision version', torchvision.__version__)
print('Numpy version', np.__version__)
print('Pandas version', pd.__version__)
# Reproducibility: seed every RNG the training stack draws from.
import random  # stdlib RNG — used below but never imported at the top of the file

seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)  # safe no-op when CUDA is unavailable
# Force deterministic cuDNN kernels and disable the autotuner; this is a
# hit on performance but required for run-to-run reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
int8 is a plain integer type; it can be used for any operation that needs integers.
qint8 is a quantized tensor type that represents a compressed floating-point tensor; it has an underlying int8 data layer, a scale, a zero_point, and a qscheme.
# ❌ creates a copy: torch.tensor() always allocates fresh storage
tensor = torch.tensor(array, device=device)

# ✅ avoids copying when possible: as_tensor() reuses the numpy buffer when
# dtype and device already match, and copies only when it must (e.g. to GPU)
tensor = torch.as_tensor(array, device=device)

# from_numpy() takes only the ndarray — it has no device kwarg (passing one
# raises TypeError) and always shares memory with the source array;
# however, changing array will also affect tensor
tensor = torch.from_numpy(array)