Differences

These are the differences between the selected revision and the current version of the page.
Previous revision: calcoloscientifico:cluster:softwareapplicativo:conda [14/06/2024 09:22] – federico.prost
Current version: calcoloscientifico:cluster:softwareapplicativo:conda [05/07/2024 10:04] – federico.prost
Line 88:

  #SBATCH --mem=512M
  #SBATCH --partition=vrt
+ #SBATCH --qos=vrt
  #SBATCH --time=0-00:
  ##SBATCH --account=<account>
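The change above adds a ''--qos'' line matching the partition. As a hedged illustration, not part of the original page (the script filename below is hypothetical), a job for the ''vrt'' partition is submitted and checked in the usual way:

<code bash>
# Submit the job script (hypothetical filename) and check its state in the queue
sbatch slurm-example-vrt.sh
squeue -u $USER

# List the QOS definitions known to the scheduler
sacctmgr show qos format=Name,MaxWall,MaxTRESPerUser
</code>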
Line 102 → 103:

  </code>
- Script ''slurm-statistical-genetics-bdw.sh'':
+ Script ''slurm-statistical-genetics-cpu.sh'':
- <code bash slurm-statistical-genetics-bdw.sh>
+ <code bash slurm-statistical-genetics-cpu.sh>
  #!/bin/bash
  #SBATCH --job-name=loop
Line 112 → 113:

  #SBATCH --ntasks-per-node=4
  #SBATCH --mem=512M
- #SBATCH --partition=bdw
+ #SBATCH --partition=cpu
+ #SBATCH --qos=cpu
  #SBATCH --time=0-00:
  ##SBATCH --account=<account>
Line 250 → 252:

  #SBATCH --nodes=1
  #SBATCH --ntasks-per-node=1
- #SBATCH --gres=gpu:tesla:1
+ #SBATCH --gres=gpu:p100:1
  #SBATCH --partition=gpu
+ #SBATCH --qos=gpu
  #SBATCH --mem=16G
  #SBATCH --time=0-00:
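The GPU request now names the ''p100'' type explicitly and adds ''--qos=gpu''. As a hedged sketch, not part of the original page (it assumes interactive ''srun'' jobs are allowed on this partition), the same resources can be requested on the command line to verify that a GPU is visible:

<code bash>
# Ask for one P100 GPU on the gpu partition/QOS and print what the node sees
srun --partition=gpu --qos=gpu --gres=gpu:p100:1 --mem=4G --time=0-00:10:00 nvidia-smi
</code>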
Line 314 → 317:

  #SBATCH --mem=1G
  #SBATCH --partition=vrt
+ #SBATCH --qos=vrt
  #SBATCH --time=0-01:
  ##SBATCH --account=<account>
Line 331 → 335:

  </code>
- Script ''slurm-cryolo-bdw.sh'':
+ Script ''slurm-cryolo-cpu.sh'':
- <code bash slurm-cryolo-bdw.sh>
+ <code bash slurm-cryolo-cpu.sh>
  #!/bin/bash
  #SBATCH --job-name=cryolo
Line 341 → 345:

  #SBATCH --ntasks-per-node=4
  #SBATCH --mem=4G
- #SBATCH --partition=bdw
+ #SBATCH --partition=cpu
+ #SBATCH --qos=cpu
  #SBATCH --time=0-01:
  ##SBATCH --account=<account>
Line 488 → 493:

  </code>

Added in this revision — script ''Dihedral_analysis-MDA.py'' (long lines are cut off in the diff view and are left truncated here):

<code python Dihedral_analysis-MDA.py>
#

import numpy as np
import MDAnalysis as MDA
from matplotlib import pyplot as plt
#from MDAnalysis import analysis as MDAa
from MDAnalysis.analysis.dihedrals import Dihedral as MDAaD
import MDAnalysis.lib.distances as MDAld


## EDIT HERE the resname and traj-file-name (no dcd)
resn='
trajfn='
# number of frames to skip
nskip=500

# loading trajectory
u=MDA.Universe(resn+'
#

nmols=512
nframes=len(u.trajectory)

#
#for i, ttr in enumerate(u.trajectory[::
#    print(i,

if resn=='
    dh_atoms=[["
elif resn=='
    dh_atoms=[["
elif resn=='
    dh_atoms=[["
# add PCZ case here

ndhs=len(dh_atoms)
print(ndhs)

fig, ax = plt.subplots(ndhs,

print('
print(len(u.trajectory[::

dhs=np.zeros( (len(u.trajectory[::

bindh=np.linspace(-180,


for kkd, a4dh in enumerate(dh_atoms):

    dA1=[res.atoms.select_atoms("
    dA2=[res.atoms.select_atoms("
    dA3=[res.atoms.select_atoms("
    dA4=[res.atoms.select_atoms("

    for i1, ts in enumerate(u.trajectory[::
        #for d11 in d1:
        #
        #
        #
        #
        for jj in range(nmols):
            x1=MDAld.calc_dihedrals(dA1[jj],
            dhs[i1,
    #

    #
    ax[kkd].hist(dhs[:,:,
    ax[kkd].legend()




np.savetxt('

print('
print(dhs)
print()
print('
print(dhs.reshape(-1,
dhsnew = []
dhsresh = dhs.reshape(-1,
print(len(dhsresh[:,
for i in range(0,
    if dhsresh[i,
        dhsnew = np.append(dhsnew,

dhsnew = np.reshape(dhsnew,
#

fig.suptitle(resn,
fig.savefig('

</code>
  ==== Virtualenv "

Line 547 → 652:

  conda deactivate
+ </code>

The whole "pennylane" section below is new in this revision:
==== Virtualenv "pennylane" ====

Available versions of the "pennylane" virtualenv:

^ condaenv       ^
| pennylane      |
| pennylane-gpu  |

Enable the default version of the "pennylane" virtualenv:

<code>
module load miniconda3
source "
conda activate pennylane
</code>

Enable the gpu version of the "pennylane" virtualenv:

<code>
module load miniconda3
source "
conda activate pennylane-gpu
</code>
| + | |||
| + | Packages in the " | ||
| + | |||
| + | < | ||
| + | conda list | ||
| + | </ | ||
| + | |||
| + | Disable the " | ||
| + | |||
| + | < | ||
| + | conda deactivate | ||
| + | </ | ||
| + | |||
| + | Script '' | ||
| + | |||
| + | <code bash slurm-pennylane-gpu.sh> | ||
| + | #!/bin/bash | ||
| + | #SBATCH --job-name=plExample | ||
| + | #SBATCH --output=%x.o%j | ||
| + | #SBATCH --error=%x.e%j | ||
| + | #SBATCH --nodes=1 | ||
| + | #SBATCH --partition=gpu | ||
| + | #SBATCH --qos=gpu | ||
| + | #SBATCH --gres=gpu: | ||
| + | #SBATCH --mem=8G | ||
| + | #SBATCH --time=0-10: | ||
| + | #SBATCH --ntasks-per-node 4 | ||
| + | #SBATCH --account=< | ||
| + | |||
| + | module load miniconda3 | ||
| + | source " | ||
| + | conda activate pennylane-gpu | ||
| + | |||
| + | PYTHON_VERSION=$(python -V | awk '{ print $2 }') | ||
| + | echo $PYTHON_VERSION | ||
| + | |||
| + | python -u ./ | ||
| + | |||
| + | conda deactivate | ||
| + | </ | ||
| + | |||
| + | |||
| + | Script '' | ||
| + | |||
| + | <code pyton plExample.py> | ||
| + | # | ||
| + | |||
| + | # Simple example where we train a parameterized quantum circuit | ||
| + | # to minimize the expectation value of a Pauli-Z operator. | ||
| + | import pennylane as qml | ||
| + | from pennylane import numpy as np | ||
| + | |||
| + | # Define a quantum device with one qubit | ||
| + | dev = qml.device(" | ||
| + | |||
| + | # Define a variational quantum circuit | ||
| + | @qml.qnode(dev) | ||
| + | def circuit(params): | ||
| + | qml.RX(params[0], | ||
| + | qml.RY(params[1], | ||
| + | return qml.expval(qml.PauliZ(0)) | ||
| + | |||
| + | # Define the cost function to minimize | ||
| + | def cost(params): | ||
| + | return circuit(params) | ||
| + | |||
| + | # Initialize parameters | ||
| + | init_params = np.array([0.01, | ||
| + | |||
| + | # Set up the optimizer | ||
| + | opt = qml.GradientDescentOptimizer(stepsize=0.1) | ||
| + | |||
| + | # Number of optimization steps | ||
| + | steps = 100 | ||
| + | |||
| + | # Optimize the circuit parameters | ||
| + | params = init_params | ||
| + | for i in range(steps): | ||
| + | params = opt.step(cost, | ||
| + | if (i + 1) % 10 == 0: | ||
| + | print(f" | ||
| + | |||
| + | print(f" | ||
| + | print(f" | ||
| + | |||
| </ | </ | ||