Here is the code with an example that runs:
import torch
from torch import nn, Tensor


def lp_norm(mdl: nn.Module, p: int = 2) -> Tensor:
    # sums the p-norm of each parameter tensor (not the norm of one flattened vector)
    lp_norms = [w.norm(p) for w in mdl.parameters()]
    return sum(lp_norms)


def reset_all_weights(model: nn.Module) -> None:
    """
    Resets all parameters of the model in place.

    refs:
    - https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/6
    - https://mcmap.net/q/1419521/-reset-parameters-of-a-neural-network-in-pytorch
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """

    @torch.no_grad()
    def weight_reset(m: nn.Module):
        # - check if the current module has reset_parameters & if it's callable, call it on m
        reset_parameters = getattr(m, "reset_parameters", None)
        if callable(reset_parameters):
            m.reset_parameters()

    # Applies fn recursively to every submodule, see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    model.apply(fn=weight_reset)


def reset_all_linear_layer_weights(model: nn.Module) -> nn.Module:
    """
    Resets the weights of all linear layers recursively (fills them with 1.0).

    ref:
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """

    @torch.no_grad()
    def init_weights(m: nn.Module):
        if isinstance(m, nn.Linear):
            m.weight.fill_(1.0)

    # Applies fn recursively to every submodule, see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    model.apply(init_weights)
    return model


def reset_all_weights_with_specific_layer_type(model: nn.Module, modules_type2reset: type) -> nn.Module:
    """
    Resets all weights recursively, but only for layers of the given type (e.g. nn.BatchNorm2d).

    ref:
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """

    @torch.no_grad()
    def init_weights(m: nn.Module):
        if isinstance(m, modules_type2reset):
            # e.g. for torch.nn.BatchNorm2d one could instead do m.weight.fill_(1.0)
            m.reset_parameters()

    # Applies fn recursively to every submodule, see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    model.apply(init_weights)
    return model
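
For example, to reset only the batch-norm layers of a network, the call might look like this (a minimal usage sketch; it assumes torchvision is installed and uses resnet18 purely as an illustration):

import torchvision.models as models

net = models.resnet18(pretrained=False)
# only nn.BatchNorm2d submodules get re-initialized, everything else is untouched
net = reset_all_weights_with_specific_layer_type(net, nn.BatchNorm2d)
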
# -- tests
def reset_params_test():
    import torchvision.models as models

    resnet18 = models.resnet18(pretrained=True)
    resnet18_random = models.resnet18(pretrained=False)

    # the norm of the pre-trained net should be identical across calls, then change after reset
    print(f'{lp_norm(resnet18)=}')
    print(f'{lp_norm(resnet18_random)=}')
    print(f'{lp_norm(resnet18)=}')
    reset_all_weights(resnet18)
    print(f'{lp_norm(resnet18)=}')


if __name__ == '__main__':
    reset_params_test()
    print('Done! \a\n')

output:
lp_norm(resnet18)=tensor(517.5472, grad_fn=<AddBackward0>)
lp_norm(resnet18_random)=tensor(668.3687, grad_fn=<AddBackward0>)
lp_norm(resnet18)=tensor(517.5472, grad_fn=<AddBackward0>)
lp_norm(resnet18)=tensor(476.0836, grad_fn=<AddBackward0>)
Done!
I am fairly confident this works: I computed the norm of the pre-trained net twice before calling reset and got the same value both times, and the value changed after the reset. I admit I was a bit surprised the post-reset norm wasn't closer to the norm of the randomly initialized net, but I think this is good enough.
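
If you want a stronger check than comparing norms, you can clone a specific parameter before the reset and assert that it changed; a minimal sketch (conv1 is specific to torchvision's resnet18, any other parameter would do):

import torch
import torchvision.models as models

net = models.resnet18(pretrained=True)
w_before = net.conv1.weight.clone()
reset_all_weights(net)
# reset_parameters re-initializes in place, so the tensor values should differ now
assert not torch.equal(w_before, net.conv1.weight)
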
The same approach is suggested here: https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/11