Weights & Biases (wandb)/fr: Difference between revisions

Jump to navigation Jump to search
Updating to match new version of source page
No edit summary
(Updating to match new version of source page)
Line 44: Line 44:




<div class="mw-translate-fuzzy">
module load python/3.8
module load python/3.8
virtualenv --no-download $SLURM_TMPDIR/env
virtualenv --no-download $SLURM_TMPDIR/env
source $SLURM_TMPDIR/env/bin/activate
source $SLURM_TMPDIR/env/bin/activate
pip install torchvision wandb --no-index
pip install torchvision wandb --no-index
</div>


### Save your wandb API key in your .bash_profile or replace $API_KEY with your actual API key. Uncomment the line below and comment out 'wandb offline' if running on Cedar. ###
### Save your wandb API key in your .bash_profile or replace $API_KEY with your actual API key. Uncomment the line below and comment out 'wandb offline' if running on Cedar. ###
Line 58: Line 60:
}}
}}


<div class="mw-translate-fuzzy">
Le script wandb-test.py utilise la méthode <tt>watch()</tt> pour journaliser les métriques. Voir [https://docs.wandb.ai la documentation complète].
Le script wandb-test.py utilise la méthode <tt>watch()</tt> pour journaliser les métriques. Voir [https://docs.wandb.ai la documentation complète].
</div>


<div class="mw-translate-fuzzy">
{{File
{{File
   |name=wandb-test.py
   |name=wandb-test.py
Line 69: Line 74:
import torch.optim as optim
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.backends.cudnn as cudnn
</div>


import torchvision
wandb.init(project="wandb-pytorch-test")
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader


import argparse
for my_metric in range(10):
    wandb.log({'my_metric': my_metric})


import wandb
}}
 
 
# Command-line hyperparameters for the CIFAR-10 wandb test script.
parser = argparse.ArgumentParser(description='cifar10 classification models, wandb test')
# type=float so a value given on the command line is parsed as a number;
# without it, e.g. `--lr 0.01` arrives as the string '0.01' and breaks optim.SGD.
parser.add_argument('--lr', type=float, default=0.1, help='')
parser.add_argument('--batch_size', type=int, default=768, help='')
parser.add_argument('--max_epochs', type=int, default=4, help='')
parser.add_argument('--num_workers', type=int, default=0, help='')
 
def main():
    """Train a small CNN on CIFAR-10 for a few epochs, logging to wandb.

    Parses the module-level argparse ``parser``, initialises a wandb run
    (hyperparameters are recorded via ``config=args``), trains with plain SGD,
    and relies on ``wandb.watch`` for gradient/parameter logging.
    """
    args = parser.parse_args()

    print("Starting Wandb...")

    # config=args records all command-line hyperparameters with the run.
    wandb.init(project="wandb-pytorch-test", config=args)

    class Net(nn.Module):
        # Minimal LeNet-style CNN for 32x32 RGB CIFAR-10 images (10 classes).

        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 6, 5)
            self.pool = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(6, 16, 5)
            self.fc1 = nn.Linear(16 * 5 * 5, 120)
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)

        def forward(self, x):
            x = self.pool(F.relu(self.conv1(x)))
            x = self.pool(F.relu(self.conv2(x)))
            x = x.view(-1, 16 * 5 * 5)  # flatten for the fully-connected head
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x

    net = Net()

    transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # download=False: the dataset must already be staged under ./data
    # (cluster compute nodes typically have no internet access).
    dataset_train = CIFAR10(root='./data', train=True, download=False, transform=transform_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, num_workers=args.num_workers)

    criterion = nn.CrossEntropyLoss()
    # float() guards against --lr being parsed as a string (no type= on the
    # argparse option in some revisions of this script).
    optimizer = optim.SGD(net.parameters(), lr=float(args.lr))

    # Hook wandb into the model so gradients and parameters are logged.
    wandb.watch(net)

    # NOTE(review): the scraped source had stray wiki text embedded here,
    # preserved verbatim: "After a training run in offline mode, there will be
    # a new folder ./wandb/offline-run*. You can send the metrics to the server
    # using the command wandb sync ./wandb/offline-run*. Note that using * will
    # sync all runs."
    for epoch in range(args.max_epochs):
        train(epoch, net, criterion, optimizer, train_loader)
 
 
def train(epoch, net, criterion, optimizer, train_loader):
    """Run one epoch of optimisation over train_loader, updating net in place.

    epoch is accepted for signature compatibility with the caller but is not
    used inside the loop.
    """
    for inputs, targets in train_loader:
        # Forward pass.
        predictions = net(inputs)
        batch_loss = criterion(predictions, targets)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
 
# Script entry point: run training only when executed directly (not imported).
if __name__=='__main__':
  main()
 
}}
38,757

edits

Navigation menu