47 changes: 12 additions & 35 deletions .github/workflows/CPU_inferencce_validation.yml
@@ -8,53 +8,30 @@ on:
- master
jobs:
build_validation:
runs-on: [self-hosted, linux, x64]
runs-on: [self-hosted, linux, x64, 4060Ti]
if: github.repository_owner == 'deepmodeling'
container:
image: ubuntu:20.04
options: --shm-size=5g # shared memory size = 5GB
steps:
- name: Checkout
uses: actions/checkout@v3
- name: install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update
apt-get install -y sudo wget unzip git software-properties-common make cmake g++ mpich openmpi-bin libopenmpi-dev libscalapack-mpi-dev vim git-core
sudo sh -c "wget -O - https://dl.openfoam.org/gpg.key | apt-key add -"
sudo add-apt-repository http://dl.openfoam.org/ubuntu
sudo apt-get update
export DEBIAN_FRONTEND=noninteractive
apt-get -y install openfoam7
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
bash Miniconda3-latest-Linux-x86_64.sh -b
. ~/miniconda3/etc/profile.d/conda.sh
conda create -n libcantera python=3.8
conda activate libcantera
conda install -c cantera libcantera-devel=2.6
conda install -c cantera cantera
conda install pytorch pybind11
conda install pkg-config
conda install --channel https://conda.anaconda.org/zhaofeng-shu33 easydict
git clone https://github.com/deepmodeling/deepflame-dev.git
cd deepflame-dev


- name: build and validation with CPU inference
env:
OMPI_ALLOW_RUN_AS_ROOT: 1
OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1
OMPI_MCA_btl_vader_single_copy_mechanism: none
run:
/bin/bash -c " echo $PWD && ls $PWD
&& wget --content-disposition https://aisquare.oss-us-east-1.aliyuncs.com/data/datasets/14b50df5-dbe9-4f1c-bf58-032b8bc40a20
&& unzip flare_CH4_SandiaD_4D.zip
/bin/bash -c " echo $PWD
&& ls $PWD
&& cp -r flare_CH4_SandiaD_4D.tbl examples/dfLowMachFoam/fgm/twoD_SandiaD_flareFGM
&& source ~/miniconda3/etc/profile.d/conda.sh && conda activate libcantera && source /opt/openfoam7/etc/bashrc
&& . configure.sh --use_pytorch&& source ./bashrc && . install.sh
&& cd test && ./Allrun && conda deactivate "
&& cp -r /root/actions-runner/data/flare_CH4_SandiaD_4D.tbl examples/dfLowMachFoam/fgm/twoD_SandiaD_flareFGM
&& source ~/miniconda3/etc/profile.d/conda.sh
&& conda activate libcantera
&& source /opt/openfoam7/etc/bashrc
&& . configure.sh --use_pytorch
&& source ./bashrc
&& . install.sh
&& cd test
&& ./Allrun
&& conda deactivate "

- name: test
run: |
2 changes: 1 addition & 1 deletion examples/dfLowMachFoam/fgm/twoD_SandiaD_flareFGM/Allrun
@@ -1,7 +1,7 @@
#!/bin/sh
cd ${0%/*} || exit 1 # Run from this directory

if [ -e flare_CH4_SandiaD_4D.tbl]
if [ -e flare_CH4_SandiaD_4D.tbl ]
then
echo "flare_CH4_SandiaD_4D.tbl exists. Make sure correct table has been used!"
else
Binary file modified test/Tu500K-Phi1/DNN_model.pt
100755 → 100644
Binary file not shown.
Empty file modified test/Tu500K-Phi1/constant/CanteraTorchProperties
100755 → 100644
Empty file.
34 changes: 28 additions & 6 deletions test/Tu500K-Phi1/inference.py
100755 → 100644
@@ -5,7 +5,7 @@
import cantera as ct

device_main = "cuda:0"
device_list = range(torch.cuda.device_count())
device_list = [0] #range(torch.cuda.device_count())

torch.set_printoptions(precision=10)

@@ -95,16 +95,18 @@ def forward(self, x):
Ymu2 = Ymu0
Ystd2 = Ystd0


"""
#load model
layers = [n_species +2, 1600, 800, 400, 1]

model0list = []
for i in range(n_species-1):
model0list.append(NN_MLP(layers))

for i in range(n_species-1):
model0list[i].load_state_dict(state_dict[f'net{i}'])


for i in range(n_species-1):
model0list[i].eval()
@@ -113,7 +115,23 @@ def forward(self, x):
if len(device_ids) > 1:
for i in range(n_species-1):
model0list[i] = torch.nn.DataParallel(model0list[i], device_ids=device_ids)
"""

#load model
layers = [2+n_species]+[400]*4+[ n_species-1]
# layers = [2+n_species]+[800,400,200,100]+[n_species-1]


model = NN_MLP(layers)

model.load_state_dict(state_dict['net'])

model.eval()
model.to(device=device)

if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids)

except Exception as e:
print(e.args)
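For reference, a minimal sketch of the architecture implied by layers = [2+n_species]+[400]*4+[n_species-1]: a single MLP taking temperature, pressure and the n_species mass-fraction features and emitting n_species-1 outputs in one pass. The NN_MLP class is defined earlier in inference.py and not shown in this diff, so the layer wiring and the activation choice below are assumptions.

import torch

class NN_MLP(torch.nn.Module):
    # Maps [T, P, Y_1..Y_n_species] (2 + n_species inputs) through four hidden
    # layers of width 400 to n_species - 1 outputs.
    def __init__(self, layer_info):
        super().__init__()
        blocks = []
        for i in range(len(layer_info) - 2):
            blocks.append(torch.nn.Linear(layer_info[i], layer_info[i + 1]))
            blocks.append(torch.nn.GELU())  # activation function is an assumption
        blocks.append(torch.nn.Linear(layer_info[-2], layer_info[-1]))
        self.net = torch.nn.Sequential(*blocks)

    def forward(self, x):
        return self.net(x)

With device_list = [0] set near the top of the file, len(device_ids) is expected to be 1, so the DataParallel wrap above is skipped and the single model runs on cuda:0 only.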

@@ -126,6 +144,8 @@ def inference(vec0):
'''
vec0 = np.abs(np.reshape(vec0, (-1, 3+n_species))) # T, P, Yi(7), Rho
vec0[:,1] *= 101325
# vec0[:,1] *= 0
# vec0[:,1] += 101325
mask = vec0[:,0] > frozenTemperature
vec0_input = vec0[mask, :]
print(f'real inference points number: {vec0_input.shape[0]}')
@@ -148,9 +168,11 @@ def inference(vec0):
#inference

output0_normalized = []
for i in range(n_species-1):
output0_normalized.append(model0list[i](input0_normalized))
output0_normalized = torch.cat(output0_normalized, dim=1)

#for i in range(n_species-1):
# output0_normalized.append(model0list[i](input0_normalized))
#output0_normalized = torch.cat(output0_normalized, dim=1)
output0_normalized = model(input0_normalized)

# post_processing
output0_bct = output0_normalized * Ystd0 + Ymu0 + input0_bct[:, 2:-1]
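To make the replacement concrete: the old path concatenated one single-output network per species, while the new path obtains all n_species - 1 outputs from one forward pass of the single model. A small sketch of that call plus the de-normalization step above, under the assumption that the network predicts z-score-normalized increments of the Box-Cox-transformed mass fractions, with Ymu0/Ystd0 the training-time mean and standard deviation.

import torch

def predict_bct(model, input0_normalized, input0_bct, Ymu0, Ystd0):
    # One forward pass replaces the old per-species loop:
    #   torch.cat([m(input0_normalized) for m in model0list], dim=1)
    with torch.no_grad():
        output0_normalized = model(input0_normalized)  # shape (N, n_species - 1)
    # Undo the target normalization, then add back the current BCT state
    # (columns 2:-1 of input0_bct hold the transformed mass fractions).
    return output0_normalized * Ystd0 + Ymu0 + input0_bct[:, 2:-1]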