tpfalinearizer.hh
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
28#ifndef TPFA_LINEARIZER_HH
29#define TPFA_LINEARIZER_HH
30
31#include <dune/common/version.hh>
32#include <dune/common/fvector.hh>
33#include <dune/common/fmatrix.hh>
34
35#include <opm/common/Exceptions.hpp>
36#include <opm/common/TimingMacros.hpp>
37
38#include <opm/grid/utility/SparseTable.hpp>
39
40#include <opm/input/eclipse/EclipseState/Grid/FaceDir.hpp>
41#include <opm/input/eclipse/Schedule/BCProp.hpp>
42
46
47#include <exception> // current_exception, rethrow_exception
48#include <iostream>
49#include <numeric>
50#include <set>
51#include <type_traits>
52#include <vector>
53
54namespace Opm::Parameters {
55
56struct SeparateSparseSourceTerms { static constexpr bool value = false; };
57
58} // namespace Opm::Parameters
59
60namespace Opm {
61
62// forward declarations
63template<class TypeTag>
64class EcfvDiscretization;
65
75template<class TypeTag>
76class TpfaLinearizer
77{
85
94
95 using Element = typename GridView::template Codim<0>::Entity;
96 using ElementIterator = typename GridView::template Codim<0>::Iterator;
97
98 using Vector = GlobalEqVector;
99
102 enum { dimWorld = GridView::dimensionworld };
103
104 using MatrixBlock = typename SparseMatrixAdapter::MatrixBlock;
105 using VectorBlock = Dune::FieldVector<Scalar, numEq>;
107
109 static const bool enableEnergy = getPropValue<TypeTag, Properties::EnableEnergy>();
110 static const bool enableDiffusion = getPropValue<TypeTag, Properties::EnableDiffusion>();
111 static const bool enableMICP = getPropValue<TypeTag, Properties::EnableMICP>();
112
113 // copying the linearizer is not a good idea
114 TpfaLinearizer(const TpfaLinearizer&) = delete;
116
117public:
118 TpfaLinearizer()
119 : jacobian_()
120 {
121 simulatorPtr_ = 0;
122 separateSparseSourceTerms_ = Parameters::Get<Parameters::SeparateSparseSourceTerms>();
123 }
124
125 ~TpfaLinearizer()
126 {
127 }
128
132 static void registerParameters()
133 {
134 Parameters::Register<Parameters::SeparateSparseSourceTerms>
135 ("Treat well source terms all in one go, instead of on a cell by cell basis.");
136 }
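    // Note (added comment, not part of the original header): with OPM's parameter
    // system, a registered struct such as Parameters::SeparateSparseSourceTerms is
    // normally exposed as a command-line switch derived from its name (presumably
    // --separate-sparse-source-terms=true) and read back via Parameters::Get, as the
    // constructor above does. The exact flag spelling is an assumption and depends on
    // the parameter-system version in use.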
137
147 void init(Simulator& simulator)
148 {
149 simulatorPtr_ = &simulator;
150 eraseMatrix();
151 }
152
160 void eraseMatrix()
161 {
162 jacobian_.reset();
163 }
164
174 void linearize()
175 {
176 linearizeDomain();
177 linearizeAuxiliaryEquations();
178 }
179
190 void linearizeDomain()
191 {
192 int succeeded;
193 try {
194 linearizeDomain(fullDomain_);
195 succeeded = 1;
196 }
197 catch (const std::exception& e)
198 {
199 std::cout << "rank " << simulator_().gridView().comm().rank()
200 << " caught an exception while linearizing:" << e.what()
201 << "\n" << std::flush;
202 succeeded = 0;
203 }
204 catch (...)
205 {
206 std::cout << "rank " << simulator_().gridView().comm().rank()
207 << " caught an exception while linearizing"
208 << "\n" << std::flush;
209 succeeded = 0;
210 }
211 succeeded = simulator_().gridView().comm().min(succeeded);
212
213 if (!succeeded)
214 throw NumericalProblem("A process did not succeed in linearizing the system");
215 }
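    // Note on the error handling above (added comment): comm().min(succeeded) is a
    // collective operation, so every rank must reach it. Exceptions thrown during
    // assembly are therefore caught and converted into a local flag first; only after
    // the reduction is a NumericalProblem raised on all ranks, which avoids one rank
    // throwing while the others wait in the reduction.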
216
227 template <class SubDomainType>
228 void linearizeDomain(const SubDomainType& domain)
229 {
231 // we defer the initialization of the Jacobian matrix until here because the
232 // auxiliary modules usually assume the problem, model and grid to be fully
233 // initialized...
234 if (!jacobian_)
235 initFirstIteration_();
236
237 // Called here because it is no longer called from linearize_().
238 if (domain.cells.size() == model_().numTotalDof()) {
239 // We are on the full domain.
240 resetSystem_();
241 } else {
242 resetSystem_(domain);
243 }
244
245 linearize_(domain);
246 }
247
248 void finalize()
249 { jacobian_->finalize(); }
250
255 void linearizeAuxiliaryEquations()
256 {
258 // flush possible local caches into matrix structure
259 jacobian_->commit();
260
261 auto& model = model_();
262 const auto& comm = simulator_().gridView().comm();
263 for (unsigned auxModIdx = 0; auxModIdx < model.numAuxiliaryModules(); ++auxModIdx) {
264 bool succeeded = true;
265 try {
266 model.auxiliaryModule(auxModIdx)->linearize(*jacobian_, residual_);
267 }
268 catch (const std::exception& e) {
269 succeeded = false;
270
271 std::cout << "rank " << simulator_().gridView().comm().rank()
272 << " caught an exception while linearizing:" << e.what()
273 << "\n" << std::flush;
274 }
275
276 succeeded = comm.min(succeeded);
277
278 if (!succeeded)
279 throw NumericalProblem("linearization of an auxiliary equation failed");
280 }
281 }
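    // Illustrative call sequence (a sketch, not code from this header): a Newton-type
    // driver would typically do something like
    //
    //   linearizer.init(simulator);               // once
    //   linearizer.linearizeDomain();             // assemble cell contributions
    //   linearizer.finalize();                    // finalize the sparse matrix
    //   linearizer.linearizeAuxiliaryEquations(); // wells, aquifers, etc.
    //   solve(linearizer.jacobian(), linearizer.residual());
    //
    // where solve() stands for whatever linear solver the caller uses.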
282
286 const SparseMatrixAdapter& jacobian() const
287 { return *jacobian_; }
288
289 SparseMatrixAdapter& jacobian()
290 { return *jacobian_; }
291
295 const GlobalEqVector& residual() const
296 { return residual_; }
297
298 GlobalEqVector& residual()
299 { return residual_; }
300
301 void setLinearizationType(LinearizationType linearizationType){
302 linearizationType_ = linearizationType;
303 };
304
305 const LinearizationType& getLinearizationType() const{
306 return linearizationType_;
307 };
308
314 const auto& getFlowsInfo() const{
315
316 return flowsInfo_;
317 }
318
324 const auto& getFloresInfo() const{
325
326 return floresInfo_;
327 }
328
334 const auto& getVelocityInfo() const{
335
336 return velocityInfo_;
337 }
338
339 void updateDiscretizationParameters()
340 {
341 updateStoredTransmissibilities();
342 }
343
344 void updateBoundaryConditionData() {
345 for (auto& bdyInfo : boundaryInfo_) {
346 const auto [type, massrateAD] = problem_().boundaryCondition(bdyInfo.cell, bdyInfo.dir);
347
348 // Strip the unnecessary (and zero anyway) derivatives off massrate.
349 VectorBlock massrate(0.0);
350 for (size_t ii = 0; ii < massrate.size(); ++ii) {
351 massrate[ii] = massrateAD[ii].value();
352 }
353 if (type != BCType::NONE) {
354 const auto& exFluidState = problem_().boundaryFluidState(bdyInfo.cell, bdyInfo.dir);
355 bdyInfo.bcdata.type = type;
356 bdyInfo.bcdata.massRate = massrate;
357 bdyInfo.bcdata.exFluidState = exFluidState;
358 }
359 }
360 }
361
367 const std::map<unsigned, Constraints> constraintsMap() const
368 { return {}; }
369
370 template <class SubDomainType>
371 void resetSystem_(const SubDomainType& domain)
372 {
373 if (!jacobian_) {
374 initFirstIteration_();
375 }
376 for (int globI : domain.cells) {
377 residual_[globI] = 0.0;
378 jacobian_->clearRow(globI, 0.0);
379 }
380 }
381
382private:
383 Simulator& simulator_()
384 { return *simulatorPtr_; }
385 const Simulator& simulator_() const
386 { return *simulatorPtr_; }
387
388 Problem& problem_()
389 { return simulator_().problem(); }
390 const Problem& problem_() const
391 { return simulator_().problem(); }
392
393 Model& model_()
394 { return simulator_().model(); }
395 const Model& model_() const
396 { return simulator_().model(); }
397
398 const GridView& gridView_() const
399 { return problem_().gridView(); }
400
401 void initFirstIteration_()
402 {
403 // initialize the BCRS matrix for the Jacobian of the residual function
404 createMatrix_();
405
406 // initialize the Jacobian matrix and the vector for the residual function
407 residual_.resize(model_().numTotalDof());
408 resetSystem_();
409
410 // initialize the sparse tables for Flows and Flores
411 createFlows_();
412 }
413
414 // Construct the BCRS matrix for the Jacobian of the residual function
415 void createMatrix_()
416 {
418 if (!neighborInfo_.empty()) {
419 // It is ok to call this function multiple times, but it
420 // should not do anything if already called.
421 return;
422 }
423 const auto& model = model_();
424 Stencil stencil(gridView_(), model_().dofMapper());
425
426 // for the main model, find out the global indices of the neighboring degrees of
427 // freedom of each primary degree of freedom
428 using NeighborSet = std::set< unsigned >;
429 std::vector<NeighborSet> sparsityPattern(model.numTotalDof());
430 const Scalar gravity = problem_().gravity()[dimWorld - 1];
431 unsigned numCells = model.numTotalDof();
432 neighborInfo_.reserve(numCells, 6 * numCells);
433 std::vector<NeighborInfo> loc_nbinfo;
434 for (const auto& elem : elements(gridView_())) {
435 stencil.update(elem);
436
437 for (unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
438 unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);
439 loc_nbinfo.resize(stencil.numDof() - 1); // Do not include the primary dof in neighborInfo_
440
441 for (unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
442 unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
443 sparsityPattern[myIdx].insert(neighborIdx);
444 if (dofIdx > 0) {
445 const Scalar trans = problem_().transmissibility(myIdx, neighborIdx);
446 const auto scvfIdx = dofIdx - 1;
447 const auto& scvf = stencil.interiorFace(scvfIdx);
448 const Scalar area = scvf.area();
449 const Scalar Vin = problem_().model().dofTotalVolume(myIdx);
450 const Scalar Vex = problem_().model().dofTotalVolume(neighborIdx);
451 const Scalar zIn = problem_().dofCenterDepth(myIdx);
452 const Scalar zEx = problem_().dofCenterDepth(neighborIdx);
453 const Scalar dZg = (zIn - zEx)*gravity;
454 const Scalar thpres = problem_().thresholdPressure(myIdx, neighborIdx);
455 Scalar inAlpha {0.};
456 Scalar outAlpha {0.};
457 Scalar diffusivity {0.};
458 Scalar dispersivity {0.};
459 if constexpr(enableEnergy){
460 inAlpha = problem_().thermalHalfTransmissibility(myIdx, neighborIdx);
461 outAlpha = problem_().thermalHalfTransmissibility(neighborIdx, myIdx);
462 }
463 if constexpr(enableDiffusion){
464 diffusivity = problem_().diffusivity(myIdx, neighborIdx);
465 }
466 if (simulator_().vanguard().eclState().getSimulationConfig().rock_config().dispersion()) {
467 dispersivity = problem_().dispersivity(myIdx, neighborIdx);
468 }
469 const auto dirId = scvf.dirId();
470 auto faceDir = dirId < 0 ? FaceDir::DirEnum::Unknown
471 : FaceDir::FromIntersectionIndex(dirId);
472 loc_nbinfo[dofIdx - 1] = NeighborInfo{neighborIdx, {trans, area, thpres, dZg, faceDir, Vin, Vex, inAlpha, outAlpha, diffusivity, dispersivity}, nullptr};
473
474 }
475 }
476 neighborInfo_.appendRow(loc_nbinfo.begin(), loc_nbinfo.end());
477 if (problem_().nonTrivialBoundaryConditions()) {
478 for (unsigned bfIndex = 0; bfIndex < stencil.numBoundaryFaces(); ++bfIndex) {
479 const auto& bf = stencil.boundaryFace(bfIndex);
480 const int dir_id = bf.dirId();
481 // not for NNCs
482 if (dir_id < 0)
483 continue;
484 const auto [type, massrateAD] = problem_().boundaryCondition(myIdx, dir_id);
485 // Strip the unnecessary (and zero anyway) derivatives off massrate.
486 VectorBlock massrate(0.0);
487 for (size_t ii = 0; ii < massrate.size(); ++ii) {
488 massrate[ii] = massrateAD[ii].value();
489 }
490 const auto& exFluidState = problem_().boundaryFluidState(myIdx, dir_id);
491 BoundaryConditionData bcdata{type,
492 massrate,
493 exFluidState.pvtRegionIndex(),
494 bfIndex,
495 bf.area(),
496 bf.integrationPos()[dimWorld - 1],
497 exFluidState};
498 boundaryInfo_.push_back({myIdx, dir_id, bfIndex, bcdata});
499 }
500 }
501 }
502 }
503
504 // add the additional neighbors and degrees of freedom caused by the auxiliary
505 // equations
506 size_t numAuxMod = model.numAuxiliaryModules();
507 for (unsigned auxModIdx = 0; auxModIdx < numAuxMod; ++auxModIdx)
508 model.auxiliaryModule(auxModIdx)->addNeighbors(sparsityPattern);
509
510 // allocate raw matrix
511 jacobian_.reset(new SparseMatrixAdapter(simulator_()));
512 diagMatAddress_.resize(numCells);
513 // create matrix structure based on sparsity pattern
514 jacobian_->reserve(sparsityPattern);
515 for (unsigned globI = 0; globI < numCells; globI++) {
516 const auto& nbInfos = neighborInfo_[globI];
517 diagMatAddress_[globI] = jacobian_->blockAddress(globI, globI);
518 for (auto& nbInfo : nbInfos) {
519 nbInfo.matBlockAddress = jacobian_->blockAddress(nbInfo.neighbor, globI);
520 }
521 }
522
523 // Create dummy full domain.
524 fullDomain_.cells.resize(numCells);
525 std::iota(fullDomain_.cells.begin(), fullDomain_.cells.end(), 0);
526 }
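    // Summary of what createMatrix_() sets up (descriptive comment, added for
    // clarity): for every cell I, neighborInfo_[I] holds one entry per interior face
    // I-J with the static data needed by the TPFA flux (transmissibility, face area,
    // threshold pressure, gravity term dZg, face direction, the two pore volumes and,
    // when enabled, thermal/diffusive/dispersive coefficients). diagMatAddress_[I]
    // caches a pointer to the Jacobian block (I,I), and each entry's matBlockAddress
    // points to block (J,I), so linearize_() can accumulate derivatives without any
    // matrix lookups.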
527
528 // reset the global linear system of equations.
529 void resetSystem_()
530 {
531 residual_ = 0.0;
532 // zero all matrix entries
533 jacobian_->clear();
534 }
535
536 // Initialize the flows, flores, and velocity sparse tables
537 void createFlows_()
538 {
540 // If FLOWS/FLORES is set in any RPTRST in the schedule, then we initialize the sparse tables.
541 // For now, do the same also if any block flows are requested (TODO: only save requested cells...).
542 // If DISPERC is in the deck, we initialize the sparse table here as well.
543 const bool anyFlows = simulator_().problem().eclWriter().outputModule().getFlows().anyFlows() ||
544 simulator_().problem().eclWriter().outputModule().getFlows().hasBlockFlows();
545 const bool anyFlores = simulator_().problem().eclWriter().outputModule().getFlows().anyFlores();
546 const bool enableDispersion = simulator_().vanguard().eclState().getSimulationConfig().rock_config().dispersion();
547 if (((!anyFlows || !flowsInfo_.empty()) && (!anyFlores || !floresInfo_.empty())) && (!enableDispersion && !enableMICP)) {
548 return;
549 }
550 const auto& model = model_();
551 const auto& nncOutput = simulator_().problem().eclWriter().getOutputNnc();
552 Stencil stencil(gridView_(), model_().dofMapper());
553 unsigned numCells = model.numTotalDof();
554 std::unordered_multimap<int, std::pair<int, int>> nncIndices;
555 std::vector<FlowInfo> loc_flinfo;
556 std::vector<VelocityInfo> loc_vlinfo;
557 unsigned int nncId = 0;
558 VectorBlock flow(0.0);
559
560 // Create an NNC structure for fast lookup
561 for (unsigned int nncIdx = 0; nncIdx < nncOutput.size(); ++nncIdx) {
562 const int ci1 = nncOutput[nncIdx].cell1;
563 const int ci2 = nncOutput[nncIdx].cell2;
564 nncIndices.emplace(ci1, std::make_pair(ci2, nncIdx));
565 }
566
567 if (anyFlows) {
568 flowsInfo_.reserve(numCells, 6 * numCells);
569 }
570 if (anyFlores) {
571 floresInfo_.reserve(numCells, 6 * numCells);
572 }
573 if (enableDispersion || enableMICP) {
574 velocityInfo_.reserve(numCells, 6 * numCells);
575 }
576
577 for (const auto& elem : elements(gridView_())) {
578 stencil.update(elem);
579 for (unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
580 unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);
581 int numFaces = stencil.numBoundaryFaces() + stencil.numInteriorFaces();
582 loc_flinfo.resize(numFaces);
583 loc_vlinfo.resize(stencil.numDof() - 1);
584
585 for (unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
586 unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
587 if (dofIdx > 0) {
588 const auto scvfIdx = dofIdx - 1;
589 const auto& scvf = stencil.interiorFace(scvfIdx);
590 int faceId = scvf.dirId();
591 const int cartMyIdx = simulator_().vanguard().cartesianIndex(myIdx);
592 const int cartNeighborIdx = simulator_().vanguard().cartesianIndex(neighborIdx);
593 const auto& range = nncIndices.equal_range(cartMyIdx);
594 for (auto it = range.first; it != range.second; ++it) {
595 if (it->second.first == cartNeighborIdx){
596 // -1 gives problems since it is used for the NNC input from the deck
597 faceId = -2;
598 // the index is stored to be used for writing the outputs
599 nncId = it->second.second;
600 }
601 }
602 loc_flinfo[dofIdx - 1] = FlowInfo{faceId, flow, nncId};
603 loc_vlinfo[dofIdx - 1] = VelocityInfo{flow};
604 }
605 }
606
607 for (unsigned bdfIdx = 0; bdfIdx < stencil.numBoundaryFaces(); ++bdfIdx) {
608 const auto& scvf = stencil.boundaryFace(bdfIdx);
609 int faceId = scvf.dirId();
610 loc_flinfo[stencil.numInteriorFaces() + bdfIdx] = FlowInfo{faceId, flow, nncId};
611 }
612
613
614 if (anyFlows) {
615 flowsInfo_.appendRow(loc_flinfo.begin(), loc_flinfo.end());
616 }
617 if (anyFlores) {
618 floresInfo_.appendRow(loc_flinfo.begin(), loc_flinfo.end());
619 }
620 if (enableDispersion || enableMICP) {
621 velocityInfo_.appendRow(loc_vlinfo.begin(), loc_vlinfo.end());
622 }
623 }
624 }
625 }
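    // Layout of the tables filled above (descriptive comment, added for clarity):
    // each row of flowsInfo_/floresInfo_ has one entry per interior face of the cell
    // followed by one entry per boundary face, while velocityInfo_ only holds the
    // interior-face entries. NNC connections are marked with faceId == -2 and carry
    // the nncId used when writing output.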
626
627public:
628 void setResAndJacobi(VectorBlock& res, MatrixBlock& bMat, const ADVectorBlock& resid) const
629 {
630 for (unsigned eqIdx = 0; eqIdx < numEq; eqIdx++)
631 res[eqIdx] = resid[eqIdx].value();
632
633 for (unsigned eqIdx = 0; eqIdx < numEq; eqIdx++) {
634 for (unsigned pvIdx = 0; pvIdx < numEq; pvIdx++) {
635 // A[dofIdx][focusDofIdx][eqIdx][pvIdx] is the partial derivative of
636 // the residual function 'eqIdx' for the degree of freedom 'dofIdx'
637 // with regard to the focus variable 'pvIdx' of the degree of freedom
638 // 'focusDofIdx'
639 bMat[eqIdx][pvIdx] = resid[eqIdx].derivative(pvIdx);
640 }
641 }
642 }
643
644 void updateFlowsInfo() {
646 const bool enableFlows = simulator_().problem().eclWriter().outputModule().getFlows().hasFlows() ||
647 simulator_().problem().eclWriter().outputModule().getFlows().hasBlockFlows();
648 const bool enableFlores = simulator_().problem().eclWriter().outputModule().getFlows().hasFlores();
649 if (!enableFlows && !enableFlores) {
650 return;
651 }
652 const unsigned int numCells = model_().numTotalDof();
653#ifdef _OPENMP
654#pragma omp parallel for
655#endif
656 for (unsigned globI = 0; globI < numCells; ++globI) {
658 const auto& nbInfos = neighborInfo_[globI];
659 ADVectorBlock adres(0.0);
660 ADVectorBlock darcyFlux(0.0);
661 const IntensiveQuantities& intQuantsIn = model_().intensiveQuantities(globI, /*timeIdx*/ 0);
662 // Flux term.
663 {
665 short loc = 0;
666 for (const auto& nbInfo : nbInfos) {
668 unsigned globJ = nbInfo.neighbor;
669 assert(globJ != globI);
670 adres = 0.0;
671 darcyFlux = 0.0;
672 const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, /*timeIdx*/ 0);
673 LocalResidual::computeFlux(adres,darcyFlux, globI, globJ, intQuantsIn, intQuantsEx, nbInfo.res_nbinfo, problem_().moduleParams());
674 adres *= nbInfo.res_nbinfo.faceArea;
675 if (enableFlows) {
676 for (unsigned eqIdx = 0; eqIdx < numEq; ++ eqIdx) {
677 flowsInfo_[globI][loc].flow[eqIdx] = adres[eqIdx].value();
678 }
679 }
680 if (enableFlores) {
681 for (unsigned eqIdx = 0; eqIdx < numEq; ++ eqIdx) {
682 floresInfo_[globI][loc].flow[eqIdx] = darcyFlux[eqIdx].value();
683 }
684 }
685 ++loc;
686 }
687 }
688 }
689
690 // Boundary terms. Only looping over cells with nontrivial bcs.
691 for (const auto& bdyInfo : boundaryInfo_) {
692 if (bdyInfo.bcdata.type == BCType::NONE)
693 continue;
694
695 ADVectorBlock adres(0.0);
696 const unsigned globI = bdyInfo.cell;
697 const auto& nbInfos = neighborInfo_[globI];
698 const IntensiveQuantities& insideIntQuants = model_().intensiveQuantities(globI, /*timeIdx*/ 0);
699 LocalResidual::computeBoundaryFlux(adres, problem_(), bdyInfo.bcdata, insideIntQuants, globI);
700 adres *= bdyInfo.bcdata.faceArea;
701 const unsigned bfIndex = bdyInfo.bfIndex;
702 if (enableFlows) {
703 for (unsigned eqIdx = 0; eqIdx < numEq; ++ eqIdx) {
704 flowsInfo_[globI][nbInfos.size() + bfIndex].flow[eqIdx] = adres[eqIdx].value();
705 }
706 }
707 // TODO also store Flores?
708 }
709 }
710
711private:
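    // Descriptive comment (added for clarity) on the assembly in linearize_() below:
    // for every cell I in the domain the residual is accumulated as
    //   residual_[I] = sum over interior faces of (area * F_IJ)
    //                + (V_I/dt) * (S_I(x^{n+1}) - S_I(x^n))
    //                - V_I * q_I,
    // with F_IJ the TPFA face flux, S_I the storage term and q_I the source term
    // (boundary fluxes are added in a separate loop at the end). Derivatives with
    // respect to cell I's unknowns go to the diagonal block (I,I); for each face the
    // negated flux derivative is added to the off-diagonal block (J,I), accounting
    // for the dependence of neighbor J's residual on cell I's unknowns.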
712 template <class SubDomainType>
713 void linearize_(const SubDomainType& domain)
714 {
715 // This check should be removed once this is addressed by,
716 // for example, storing the previous timesteps' values for
717 // rsmax (for DRSDT) and similar.
718 if (!problem_().recycleFirstIterationStorage()) {
719 if (!model_().storeIntensiveQuantities() && !model_().enableStorageCache()) {
720 OPM_THROW(std::runtime_error, "Must have cached either IQs or storage when we cannot recycle.");
721 }
722 }
723
725
726 // We do not call resetSystem_() here, since that will set
727 // the full system to zero, not just our part.
728 // Instead, that must be called before starting the linearization.
729 const bool enableDispersion = simulator_().vanguard().eclState().getSimulationConfig().rock_config().dispersion();
730 const unsigned int numCells = domain.cells.size();
731 const bool on_full_domain = (numCells == model_().numTotalDof());
732
733#ifdef _OPENMP
734#pragma omp parallel for
735#endif
736 for (unsigned ii = 0; ii < numCells; ++ii) {
738 const unsigned globI = domain.cells[ii];
739 const auto& nbInfos = neighborInfo_[globI];
740 VectorBlock res(0.0);
741 MatrixBlock bMat(0.0);
742 ADVectorBlock adres(0.0);
743 ADVectorBlock darcyFlux(0.0);
744 const IntensiveQuantities& intQuantsIn = model_().intensiveQuantities(globI, /*timeIdx*/ 0);
745
746 // Flux term.
747 {
749 short loc = 0;
750 for (const auto& nbInfo : nbInfos) {
752 unsigned globJ = nbInfo.neighbor;
753 assert(globJ != globI);
754 res = 0.0;
755 bMat = 0.0;
756 adres = 0.0;
757 darcyFlux = 0.0;
758 const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, /*timeIdx*/ 0);
759 LocalResidual::computeFlux(adres,darcyFlux, globI, globJ, intQuantsIn, intQuantsEx, nbInfo.res_nbinfo, problem_().moduleParams());
760 adres *= nbInfo.res_nbinfo.faceArea;
761 if (enableDispersion || enableMICP) {
762 for (unsigned phaseIdx = 0; phaseIdx < numEq; ++ phaseIdx) {
763 velocityInfo_[globI][loc].velocity[phaseIdx] = darcyFlux[phaseIdx].value() / nbInfo.res_nbinfo.faceArea;
764 }
765 }
766 setResAndJacobi(res, bMat, adres);
767 residual_[globI] += res;
768 //SparseAdapter syntax: jacobian_->addToBlock(globI, globI, bMat);
769 *diagMatAddress_[globI] += bMat;
770 bMat *= -1.0;
771 //SparseAdapter syntax: jacobian_->addToBlock(globJ, globI, bMat);
772 *nbInfo.matBlockAddress += bMat;
773 ++loc;
774 }
775 }
776
777 // Accumulation term.
778 double dt = simulator_().timeStepSize();
779 double volume = model_().dofTotalVolume(globI);
780 Scalar storefac = volume / dt;
781 adres = 0.0;
782 {
783 OPM_TIMEBLOCK_LOCAL(computeStorage);
784 LocalResidual::computeStorage(adres, intQuantsIn);
785 }
786 setResAndJacobi(res, bMat, adres);
787 // Either use cached storage term, or compute it on the fly.
788 if (model_().enableStorageCache()) {
789 // The cached storage for timeIdx 0 (current time) is not
790 // used, but after the storage cache is shifted at the end of the
791 // timestep, it will become the cached storage for timeIdx 1.
792 model_().updateCachedStorage(globI, /*timeIdx=*/0, res);
793 if (model_().newtonMethod().numIterations() == 0) {
794 // Need to update the storage cache.
795 if (problem_().recycleFirstIterationStorage()) {
796 // Assumes nothing has changed in the system which
797 // affects masses calculated from primary variables.
798 if (on_full_domain) {
799 // This is to avoid resetting the start-of-step storage
800 // to incorrect numbers when we do local solves, where the iteration
801 // number will start from 0, but the starting state may not be identical
802 // to the start-of-step state.
803 // Note that a full assembly must be done before local solves,
804 // otherwise this will be left un-updated.
805 model_().updateCachedStorage(globI, /*timeIdx=*/1, res);
806 }
807 } else {
808 Dune::FieldVector<Scalar, numEq> tmp;
809 IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
810 LocalResidual::computeStorage(tmp, intQuantOld);
811 model_().updateCachedStorage(globI, /*timeIdx=*/1, tmp);
812 }
813 }
814 res -= model_().cachedStorage(globI, 1);
815 } else {
817 Dune::FieldVector<Scalar, numEq> tmp;
818 IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
819 LocalResidual::computeStorage(tmp, intQuantOld);
820 // assume the volume does not change
821 res -= tmp;
822 }
823 res *= storefac;
824 bMat *= storefac;
825 residual_[globI] += res;
826 //SparseAdapter syntax: jacobian_->addToBlock(globI, globI, bMat);
827 *diagMatAddress_[globI] += bMat;
828
829 // Cell-wise source terms.
830 // This will include well sources if SeparateSparseSourceTerms is false.
831 res = 0.0;
832 bMat = 0.0;
833 adres = 0.0;
834 if (separateSparseSourceTerms_) {
835 LocalResidual::computeSourceDense(adres, problem_(), intQuantsIn, globI, 0);
836 } else {
837 LocalResidual::computeSource(adres, problem_(), intQuantsIn, globI, 0);
838 }
839 adres *= -volume;
840 setResAndJacobi(res, bMat, adres);
841 residual_[globI] += res;
842 //SparseAdapter syntax: jacobian_->addToBlock(globI, globI, bMat);
843 *diagMatAddress_[globI] += bMat;
844 } // end of loop for cell globI.
845
846 // Add sparse source terms. For now only wells.
847 if (separateSparseSourceTerms_) {
848 problem_().wellModel().addReservoirSourceTerms(residual_, diagMatAddress_);
849 }
850
851 // Boundary terms. Only looping over cells with nontrivial bcs.
852 for (const auto& bdyInfo : boundaryInfo_) {
853 if (bdyInfo.bcdata.type == BCType::NONE)
854 continue;
855
856 VectorBlock res(0.0);
857 MatrixBlock bMat(0.0);
858 ADVectorBlock adres(0.0);
859 const unsigned globI = bdyInfo.cell;
860 const IntensiveQuantities& insideIntQuants = model_().intensiveQuantities(globI, /*timeIdx*/ 0);
861 LocalResidual::computeBoundaryFlux(adres, problem_(), bdyInfo.bcdata, insideIntQuants, globI);
862 adres *= bdyInfo.bcdata.faceArea;
863 setResAndJacobi(res, bMat, adres);
864 residual_[globI] += res;
866 *diagMatAddress_[globI] += bMat;
867 }
868 }
869
870 void updateStoredTransmissibilities()
871 {
872 if (neighborInfo_.empty()) {
873 // This function was called before createMatrix_() was called.
874 // We call initFirstIteration_(), not createMatrix_(), because
875 // that will also initialize the residual consistently.
876 initFirstIteration_();
877 }
878 unsigned numCells = model_().numTotalDof();
879#ifdef _OPENMP
880#pragma omp parallel for
881#endif
882 for (unsigned globI = 0; globI < numCells; globI++) {
883 auto nbInfos = neighborInfo_[globI]; // nbInfos will be a SparseTable<...>::mutable_iterator_range.
884 for (auto& nbInfo : nbInfos) {
885 unsigned globJ = nbInfo.neighbor;
886 nbInfo.res_nbinfo.trans = problem_().transmissibility(globI, globJ);
887 }
888 }
889 }
890
891
892 Simulator *simulatorPtr_;
893
894 // the jacobian matrix
895 std::unique_ptr<SparseMatrixAdapter> jacobian_;
896
897 // the right-hand side
898 GlobalEqVector residual_;
899
900 LinearizationType linearizationType_;
901
902 using ResidualNBInfo = typename LocalResidual::ResidualNBInfo;
903 struct NeighborInfo
904 {
905 unsigned int neighbor;
906 ResidualNBInfo res_nbinfo;
907 MatrixBlock* matBlockAddress;
908 };
909 SparseTable<NeighborInfo> neighborInfo_;
910 std::vector<MatrixBlock*> diagMatAddress_;
911
912 struct FlowInfo
913 {
914 int faceId;
915 VectorBlock flow;
916 unsigned int nncId;
917 };
918 SparseTable<FlowInfo> flowsInfo_;
919 SparseTable<FlowInfo> floresInfo_;
920
921 struct VelocityInfo
922 {
923 VectorBlock velocity;
924 };
925 SparseTable<VelocityInfo> velocityInfo_;
926
927 using ScalarFluidState = typename IntensiveQuantities::ScalarFluidState;
928 struct BoundaryConditionData
929 {
930 BCType type;
931 VectorBlock massRate;
932 unsigned pvtRegionIdx;
933 unsigned boundaryFaceIndex;
934 double faceArea;
935 double faceZCoord;
936 ScalarFluidState exFluidState;
937 };
938 struct BoundaryInfo
939 {
940 unsigned int cell;
941 int dir;
942 unsigned int bfIndex;
943 BoundaryConditionData bcdata;
944 };
945 std::vector<BoundaryInfo> boundaryInfo_;
946 bool separateSparseSourceTerms_ = false;
947 struct FullDomain
948 {
949 std::vector<int> cells;
950 std::vector<bool> interior;
951 };
952 FullDomain fullDomain_;
953};
954
955} // namespace Opm
956
957#endif // TPFA_LINEARIZER_HH