# This is an auto-generated file, DO NOT EDIT!
# Run ant to generate it.

# This file is part of CPAchecker,
# a tool for configurable software verification:
# https://cpachecker.sosy-lab.org
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

# Possible log levels in descending order 
# (lower levels include higher ones):
# OFF:      no logs published
# SEVERE:   error messages
# WARNING:  warnings
# INFO:     messages
# FINE:     logs on main application level
# FINER:    logs on central CPA algorithm level
# FINEST:   logs published by specific CPAs
# ALL:      debugging information
# Care must be taken with levels of FINER or lower, as output files may
# become quite large and memory usage might become an issue.

# single levels to be excluded from being logged
log.consoleExclude = []

# log level of console output
log.consoleLevel = Level.INFO

# name of the log file
log.file = "CPALog.txt"

# single levels to be excluded from being logged
log.fileExclude = []

# log level of file output
log.level = Level.OFF

# Maximum size of log output strings before they will be truncated. Note that
# truncation is not precise and truncation to small values has no effect. Use
# 0 for disabling truncation completely.
log.truncateSize = 10000

# use colors for log messages on console
log.useColors = true

# disable all default output files
# (any explicitly given file will still be written)
output.disable = false

# directory to put all output files in
output.path = "output/"

# base directory for all paths in default values
rootDirectory = "."

# SPDX-FileCopyrightText: 2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

# Further options for Boolector in addition to the default options. Format:
# "Optionname=value" with ',' to separate options. Optionname and value can
# be found in BtorOption or the Boolector C API. Example:
# "BTOR_OPT_MODEL_GEN=2,BTOR_OPT_INCREMENTAL=1".
solver.boolector.furtherOptions = ""

# The SAT solver used by Boolector.
solver.boolector.satSolver = CADICAL
  enum:     [LINGELING, PICOSAT, MINISAT, CMS, CADICAL]

# Counts all operations and interactions towards the SMT solver.
solver.collectStatistics = false

# Default rounding mode for floating point operations.
solver.floatingPointRoundingMode = NEAREST_TIES_TO_EVEN
  enum:     [NEAREST_TIES_TO_EVEN, NEAREST_TIES_AWAY, TOWARD_POSITIVE, TOWARD_NEGATIVE,
             TOWARD_ZERO]

# Export solver queries in SmtLib format into a file.
solver.logAllQueries = false
solver.logfile = no default value

# Further options that will be passed to Mathsat in addition to the default
# options. Format is 'key1=value1,key2=value2'
solver.mathsat5.furtherOptions = ""

# Load less stable optimizing version of mathsat5 solver.
solver.mathsat5.loadOptimathsat5 = false

# Use non-linear arithmetic of the solver if supported and throw exception
# otherwise, approximate non-linear arithmetic with UFs if unsupported, or
# always approximate non-linear arithmetic. This affects only the theories of
# integer and rational arithmetic.
solver.nonLinearArithmetic = USE
  enum:     [USE, APPROXIMATE_FALLBACK, APPROXIMATE_ALWAYS]

# Enable additional assertion checks within Princess. The main usage is
# debugging. This option can cause a performance overhead.
solver.princess.enableAssertions = false

# log all queries as Princess-specific Scala code
solver.princess.logAllQueriesAsScala = false

# file for Princess-specific dump of queries as Scala code
solver.princess.logAllQueriesAsScalaFile = "princess-query-%03d-"

# The number of atoms a term has to have before it gets abbreviated if there
# are more identical terms.
solver.princess.minAtomsForAbbreviation = 100

# Random seed for SMT solver.
solver.randomSeed = 42

# If logging from the same application, avoid conflicting logfile names.
solver.renameLogfileToAvoidConflicts = true

# Double check generated results like interpolants and models whether they
# are correct
solver.smtinterpol.checkResults = false

# Further options that will be set to true for SMTInterpol in addition to the
# default options. Format is 'option1,option2,option3'
solver.smtinterpol.furtherOptions = []

# Which SMT solver to use.
solver.solver = SMTINTERPOL
  enum:     [MATHSAT5, SMTINTERPOL, Z3, PRINCESS, BOOLECTOR, CVC4, CVC5, YICES2]

# Sequentialize all solver actions to allow concurrent access!
solver.synchronize = false

# Use provers from a separate context to solve queries. This allows more
# parallelism when solving larger queries.
solver.synchronized.useSeperateProvers = false

# Log solver actions, this may be slow!
solver.useLogger = false

# Activate replayable logging in Z3. The log can be given as an input to the
# solver and replayed.
solver.z3.log = no default value

# Ordering for objectives in the optimization context
solver.z3.objectivePrioritizationMode = "box"
  allowed values: [lex, pareto, box]

# Engine to use for the optimization
solver.z3.optimizationEngine = "basic"
  allowed values: [basic, farkas, symba]

# Require proofs from SMT solver
solver.z3.requireProofs = false

# Whether to use PhantomReferences for discarding Z3 AST
solver.z3.usePhantomReferences = false

# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
# SPDX-FileCopyrightText: 2007-2021 Dirk Beyer <https://www.sosy-lab.org>
# SPDX-FileCopyrightText: 2007-2022 Dirk Beyer <https://www.sosy-lab.org>
# SPDX-FileCopyrightText: 2014-2017 Université Grenoble Alpes
# SPDX-FileCopyrightText: 2020 Dirk Beyer <https://www.sosy-lab.org>
# SPDX-FileCopyrightText: 2021 Dirk Beyer <https://www.sosy-lab.org>
# SPDX-FileCopyrightText: 2022 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

# Refiner that SlicingDelegatingRefiner should delegate to
SlicingDelegatingRefiner.refiner = no default value

# maximum number of condition adjustments (-1 for infinite)
adjustableconditions.adjustmentLimit = -1

# number of threads, positive values match exactly, with -1 we use the number
# of available cores of the machine automatically.
algorithm.parallelBam.numberOfThreads = -1

# export number of running RSE instances as CSV
algorithm.parallelBam.runningRSESeriesFile = "RSESeries.csv"

# use a BMC like algorithm that checks for satisfiability after the analysis
# has finished, works only with PredicateCPA
analysis.algorithm.BMC = false

# use CEGAR algorithm for lazy counter-example guided analysis
# You need to specify a refiner with the cegar.refiner option.
# Currently all refiner require the use of the ARGCPA.
analysis.algorithm.CEGAR = false

# use McMillan's interpolation-based model checking algorithm, works only
# with PredicateCPA and large-block encoding
analysis.algorithm.IMC = false

# Use MPI for running analyses in new subprocesses. The resulting reachedset
# is the one of the first analysis returning in time. All other mpi-processes
# will get aborted.
analysis.algorithm.MPI = false

# use MPV algorithm for checking multiple properties
analysis.algorithm.MPV = false

# use an analysis which proves if the program satisfies a specified property
# with the help of an enabler CPA to separate different program paths
analysis.algorithm.analysisWithEnabler = false

# use adjustable conditions algorithm
analysis.algorithm.conditionAdjustment = false

# Distribute predicate analysis to multiple workers
analysis.algorithm.configurableComponents = false

# for found property violation, perform fault localization with coverage
analysis.algorithm.faultLocalization.by_coverage = false

# Use fault localization with distance metrics
analysis.algorithm.faultLocalization.by_distance = false

# for found property violation, perform fault localization with trace
# formulas
analysis.algorithm.faultLocalization.by_traceformula = false

# Use McMillan's Impact algorithm for lazy interpolation
analysis.algorithm.impact = false

# use nontermination witness validator to check a violation witness for
# termination
analysis.algorithm.nonterminationWitnessCheck = false

# use PDR algorithm
analysis.algorithm.pdr = false

# use a proof check algorithm to validate a previously generated proof
analysis.algorithm.proofCheck = false

# use a proof check algorithm to validate a previously generated proof and
# extract requirements on a (reconfigurable) HW from the proof
analysis.algorithm.proofCheckAndGetHWRequirements = false

# use a proof check algorithm to validate a previously generated proof and
# read the configuration for checking from the proof
analysis.algorithm.proofCheckReadConfig = false

# use a proof check algorithm that uses pcc.strategy=arg.ARG_CMCStrategy to
# validate a previously generated proof
analysis.algorithm.proofCheckWithARGCMCStrategy = false

# do analysis and then check if reached set fulfills property specified by
# ConfigurableProgramAnalysisWithPropertyChecker
analysis.algorithm.propertyCheck = false

# Use termination algorithm to prove (non-)termination. This needs the
# TerminationCPA as root CPA and an automaton CPA with
# termination_as_reach.spc in the tree of CPAs.
analysis.algorithm.termination = false

# collect undefined functions
analysis.algorithm.undefinedFunctionCollector = false

# run the parallel BAM algorithm.
analysis.algorithm.useParallelBAM = false

# If not already done by the analysis, store a found counterexample in the
# ARG for later re-use. Does nothing if no ARGCPA is used
analysis.alwaysStoreCounterexamples = false

# Construct a residual program from condition and verify residual program
analysis.asConditionalVerifier = false

# use a second model checking run (e.g., with CBMC or a different CPAchecker
# configuration) to double-check counter-examples
analysis.checkCounterexamples = false

# use counterexample check and the BDDCPA Restriction option
analysis.checkCounterexamplesWithBDDCPARestriction = false

# do analysis and then check analysis result
analysis.checkProof = false

# use assumption collecting algorithm
analysis.collectAssumptions = false

# Construct the program slice for the given configuration.
analysis.constructProgramSlice = false

# Solely construct the residual program for a given condition/assumption.
analysis.constructResidualProgram = false

# continue analysis after unsupported code was found on one path
analysis.continueAfterUnsupportedCode = false

# Maximum number of counterexamples to be created.
analysis.counterexampleLimit = 0

# stop CPAchecker after startup (internal option, not intended for users)
analysis.disable = false

# entry function
analysis.entryFunction = "main"

# do analysis and then extract pre- and post conditions for custom
# instruction from analysis result
analysis.extractRequirements.customInstruction = false

# create all potential function pointer call edges
analysis.functionPointerCalls = true

# Create edge for skipping a function pointer call if its value is unknown.
analysis.functionPointerEdgesForUnknownPointer = true

# potential targets for call edges created for function pointer parameter
# calls
analysis.functionPointerParameterTargets = {
          FunctionSet.USED_IN_CODE, FunctionSet.RETURN_VALUE, FunctionSet.EQ_PARAM_TYPES}

# potential targets for call edges created for function pointer calls
analysis.functionPointerTargets = {
          FunctionSet.USED_IN_CODE,
          FunctionSet.RETURN_VALUE,
          FunctionSet.EQ_PARAM_TYPES,
          FunctionSet.EQ_PARAM_SIZES,
          FunctionSet.EQ_PARAM_COUNT}

# What CFA nodes should be the starting point of the analysis?
analysis.initialStatesFor = Sets.newHashSet(InitialStatesFor.ENTRY)

# run interprocedural analysis
analysis.interprocedural = true

# Runs an algorithm that produces and exports invariants
analysis.invariantExport = false

# the machine model, which determines the sizes of types like int
analysis.machineModel = LINUX32
  enum:     [LINUX32, LINUX64, ARM, ARM64]

# Use as targets for call edges only those which are assigned to the
# particular expression (structure field).
analysis.matchAssignedFunctionPointers = false

# If no target function was assigned to a function pointer, use the origin
# heuristic instead of replacing with empty calls
analysis.matchAssignedFunctionPointers.ignoreUnknownAssignments = false

# memorize previously used (incomplete) reached sets after a restart of the
# analysis
analysis.memorizeReachedAfterRestart = false

# Name of the used analysis, defaults to the name of the used configuration
analysis.name = no default value

# Partition the initial states based on the type of location they were
# created for (see 'initialStatesFor')
analysis.partitionInitialStates = false

# A String, denoting the programs to be analyzed
analysis.programNames = []

# which reached set implementation to use?
# NORMAL: just a simple set
# LOCATIONMAPPED: a different set per location (faster, states with different
# locations cannot be merged)
# PARTITIONED: partitioning depending on CPAs (e.g Location, Callstack etc.)
# PSEUDOPARTITIONED: based on PARTITIONED, uses additional info about the
# states' lattice (maybe faster for some special analyses which use merge_sep
# and stop_sep)
analysis.reachedSet = PARTITIONED
  enum:     [NORMAL, LOCATIONMAPPED, PARTITIONED, PSEUDOPARTITIONED, USAGE]

# track more statistics about the reachedset
analysis.reachedSet.withStatistics = false

# Use if you are going to change a function with a function pointer parameter
analysis.replaceFunctionWithParameterPointer = false

# Functions with function pointer parameter which will be instrumented
analysis.replacedFunctionsWithParameters = {"pthread_create"}

# restart the analysis using a different configuration after unknown result
analysis.restartAfterUnknown = false

# Use heuristics to select the analysis
analysis.selectAnalysisHeuristically = false

# if this option is used, the CFA will be loaded from the given file instead
# of parsed from sourcefile.
analysis.serializedCfaFile = no default value

# Split program in subprograms which can be analyzed separately afterwards
analysis.split.program = false

# stop after the first error has been found
analysis.stopAfterError = true

# create summary call statement edges
analysis.summaryEdges = false

# Enable converting test goals to conditions.
analysis.testGoalConverter = no default value

# Replace thread creation operations with a special function call so that any
# analysis can go through the function
analysis.threadOperationsTransform = false

# Patterns for detecting block starts (ldv_ like functions)
analysis.traversal.blockFunctionPatterns = {"ldv_%_instance_%"}

# resource limit for the block
analysis.traversal.blockResourceLimit = 1000

# save resources for the block if it is empty
analysis.traversal.blockSaveResources = true

# traverse in the order defined by the values of an automaton variable
analysis.traversal.byAutomatonVariable = no default value

# resource limit for the entry block
analysis.traversal.entryResourceLimit = 100000

# which strategy to adopt for visiting states?
analysis.traversal.order = DFS
  enum:     [DFS, BFS, RAND, RANDOM_PATH, ROUND_ROBIN]

# Exponent of random function. This value influences the probability
# distribution over the waitlist elements when choosing the next element. Has
# to be a double in the range [0, INF)
analysis.traversal.random.exponent = 1

# Seed for random values.
analysis.traversal.random.seed = 0

# handle abstract states with more automaton matches first? (only if
# AutomatonCPA enabled)
analysis.traversal.useAutomatonInformation = false

# use blocks and set resource limits for its traversal, blocks are handled in
# DFS order
analysis.traversal.useBlocks = false

# handle states with a deeper callstack first
# This needs the CallstackCPA instance to have any effect.
analysis.traversal.useCallstack = false

# handle more abstract states (with less information) first? (only for
# ExplicitCPA)
analysis.traversal.useExplicitInformation = false

# handle states with more loop iterations first.
analysis.traversal.useLoopIterationCount = false

# handle states with a deeper loopstack first.
analysis.traversal.useLoopstack = false

# handle abstract states with fewer heap objects first? (needs SMGCPA)
analysis.traversal.useNumberOfHeapObjects = false

# handle abstract states with fewer running threads first? (needs
# ThreadingCPA)
analysis.traversal.useNumberOfThreads = false

# Use an implementation of postorder strategy that allows to select a
# secondary strategy that is used if there are two states with the same
# postorder id. The secondary strategy is selected with
# 'analysis.traversal.order'.
analysis.traversal.usePostorder = false

# handle states with fewer loop iterations first.
analysis.traversal.useReverseLoopIterationCount = false

# handle states with a more shallow loopstack first.
analysis.traversal.useReverseLoopstack = false

# Use an implementation of reverse postorder strategy that allows to select a
# secondary strategy that is used if there are two states with the same
# reverse postorder id. The secondary strategy is selected with
# 'analysis.traversal.order'.
analysis.traversal.useReversePostorder = false

# perform a weighted random selection based on the branching depth
analysis.traversal.weightedBranches = false

# perform a weighted random selection based on the depth in the ARG
analysis.traversal.weightedDepth = false

# After an incomplete analysis constructs a residual program which contains
# all program paths which are not fully explored
analysis.unexploredPathsAsProgram = false

# Do not report unknown if analysis terminated, report true (UNSOUND!).
analysis.unknownAsTrue = false

# stop the analysis with the result unknown if the program does not satisfy
# certain restrictions.
analysis.unknownIfUnrestrictedProgram = false

# Use array abstraction by program transformation.
analysis.useArrayAbstraction = false

# select an analysis from a set of analyses after unknown result
analysis.useCompositionAnalysis = false

# add declarations for global variables before entry function
analysis.useGlobalVars = true

# add loop-structure information to CFA.
analysis.useLoopStructure = true

# Use analyses in parallel. The resulting reached set is the one of the first
# analysis finishing in time. All other analyses are terminated.
analysis.useParallelAnalyses = false

# generate random test cases
analysis.useRandomTestCaseGeneratorAlgorithm = false

# generate test cases for covered test targets
analysis.useTestCaseGeneratorAlgorithm = false

# converts a witness to an ACSL annotated program
analysis.useWitnessToACSLAlgorithm = false

# converts a graphml witness to invariant witness
analysis.witnessToInvariant = false

# Whether to allow imprecise array abstraction that may lead to false alarms.
arrayAbstraction.allowImprecision = false

# Whether to export the CFA with abstracted arrays as C source file.
arrayAbstraction.cfa.c.export = true

# C source file path for CFA with abstracted arrays.
arrayAbstraction.cfa.c.file = "abstracted-arrays.c"

# Whether to export the CFA with abstracted arrays as DOT file.
arrayAbstraction.cfa.dot.export = true

# DOT file path for CFA with abstracted arrays.
arrayAbstraction.cfa.dot.file = "cfa-abstracted-arrays.dot"

# Use a second delegate analysis run to check counterexamples on the original
# program that contains (non-abstracted) arrays for imprecise array
# abstractions.
arrayAbstraction.checkCounterexamples = false

# Configuration file path of the delegate analysis running on the transformed
# program.
arrayAbstraction.delegateAnalysis = no default value

# Add a threshold to the automaton, after so many branches on a path the
# automaton will be ignored (0 to disable)
assumptions.automatonBranchingThreshold = 0

# write collected assumptions as automaton to file
assumptions.automatonFile = "AssumptionAutomaton.txt"

# If it is enabled, the automaton does not add an assumption which is
# considered to continue the path with the corresponding edge.
assumptions.automatonIgnoreAssumptions = false

# If it is enabled, automaton adds transitions to later ARG states first
assumptions.automatonOrderedTransitions = false

# compress the produced assumption automaton using GZIP compression.
assumptions.compressAutomaton = false

# export assumptions as automaton to dot file
assumptions.dotExport = false

# write collected assumptions as automaton to dot file
assumptions.dotFile = "AssumptionAutomaton.dot"

# write collected assumptions to file
assumptions.export = true

# export assumptions collected per location
assumptions.export.location = true

# write collected assumptions to file
assumptions.file = "assumptions.txt"

# If it is enabled, check if a state that should lead to false state indeed
# has successors.
assumptions.removeNonExploredWithoutSuccessors = false

# comma-separated list of files with specifications that should be used 
# in a backwards analysis; used if the analysis starts at the target states!
# (see config/specification/ for examples)
backwardSpecification = []

# Count accesses for the BDD library. Counting works for concurrent accesses.
bdd.countLibraryAccess = false

# Size of the BDD cache in relation to the node table size (set to 0 to use
# fixed BDD cache size).
bdd.javabdd.cacheRatio = 0.1

# Initial size of the BDD cache, use 0 for cacheRatio*initTableSize.
bdd.javabdd.cacheSize = 0

# Initial size of the BDD node table in percentage of available Java heap
# memory (only used if initTableSize is 0).
bdd.javabdd.initTableRatio = 0.001

# Initial size of the BDD node table, use 0 for size based on initTableRatio.
bdd.javabdd.initTableSize = 0

# Measure the time spent in the BDD library. The behaviour in case of
# concurrent accesses is undefined!
bdd.measureLibraryAccess = false

# Which BDD package should be used?
# - java:   JavaBDD (default, no dependencies, many features)
# - sylvan: Sylvan (only 64bit Linux, uses multiple threads)
# - cudd:   CUDD (native library required, reordering not supported)
# - micro:  MicroFactory (maximum number of BDD variables is 1024, slow, but
# less memory consumption)
# - buddy:  Buddy (native library required)
# - cal:    CAL (native library required)
# - jdd:    JDD
# - pjbdd:  A java native parallel bdd framework
bdd.package = "JAVA"
  allowed values: [JAVA, SYLVAN, CUDD, MICRO, BUDDY, CAL, JDD, PJBDD]

# Size of the BDD cache in relation to the node table size (set to 0 to use
# fixed BDD cache size).
bdd.pjbdd.cacheRatio = 0.1

# size of the BDD cache.
bdd.pjbdd.cacheSize = 0

# Disable thread safe bdd operations.
bdd.pjbdd.disableThreadSafety = false

# increase factor for resizing tables
bdd.pjbdd.increaseFactor = 1

# Initial size of the BDD node table in percentage of available Java heap
# memory (only used if initTableSize is 0).
bdd.pjbdd.initTableRatio = 0.001

# Initial size of the BDD node table, use 0 for size based on initTableRatio.
bdd.pjbdd.initTableSize = 0

# unique table's concurrency factor
bdd.pjbdd.tableParallelism = 10000

# Number of worker threads, Runtime.getRuntime().availableProcessors()
# default
bdd.pjbdd.threads = Runtime.getRuntime().availableProcessors()

# Use bdd chaining.
bdd.pjbdd.useChainedBDD = false

# Use internal a int based bdd representation.
bdd.pjbdd.useInts = false

# initial variable count
bdd.pjbdd.varCount = 100

# Granularity of the Sylvan BDD operations cache (recommended values 4-8).
bdd.sylvan.cacheGranularity = 4

# Log2 size of the BDD cache.
bdd.sylvan.cacheSize = 24

# Log2 size of the BDD node table.
bdd.sylvan.tableSize = 26

# Number of worker threads, 0 for automatic.
bdd.sylvan.threads = 0

# sequentialize all accesses to the BDD library.
bdd.synchronizeLibraryAccess = false

# output file for visualizing the block graph
blockCFAFile = "block_analysis/blocks.json"

# Allow reduction of function entries; calculate abstractions always at
# function entries?
blockreducer.allowReduceFunctionEntries = true

# Allow reduction of function exits; calculate abstractions always at
# function exits?
blockreducer.allowReduceFunctionExits = true

# Allow reduction of loop heads; calculate abstractions always at loop heads?
blockreducer.allowReduceLoopHeads = false

# write the reduced cfa to the specified file.
blockreducer.reducedCfaFile = "ReducedCfa.rsf"

# Do at most n summarizations on a node.
blockreducer.reductionThreshold = 100

# If BMC did not find a bug, check whether the bounding did actually remove
# parts of the state space (this is similar to CBMC's unwinding assertions).
bmc.boundingAssertions = true

# If BMC did not find a bug, check which parts of the boundary are actually
# reachable and prevent them from being unrolled any further.
bmc.boundingAssertionsSlicing = false

# Check reachability of target states after analysis (classical BMC). The
# alternative is to check the reachability as soon as the target states are
# discovered, which is done if cpa.predicate.targetStateSatCheck=true.
bmc.checkTargetStates = true

# try using induction to verify programs with loops
bmc.induction = false

# Strategy for generating auxiliary invariants
bmc.invariantGenerationStrategy = REACHED_SET
  enum:     [INDUCTION, REACHED_SET, DO_NOTHING, INVARIANT_STORE]

# k-induction configuration to be used as an invariant generator for
# k-induction (ki-ki(-ai)).
bmc.invariantGeneratorConfig = no default value

# Controls how long the invariant generator is allowed to run before the
# k-induction procedure starts.
bmc.invariantGeneratorHeadStartStrategy = NONE
  enum:     [NONE, AWAIT_TERMINATION, WAIT_UNTIL_EXPENSIVE_ADJUSTMENT]

# Export auxiliary invariants used for induction.
bmc.invariantsExport = no default value

# Propagates the interrupts of the invariant generator.
bmc.propagateInvGenInterrupts = false

# Try to simplify the structure of formulas for the sat check of BMC. The
# improvement depends on the underlying SMT solver.
bmc.simplifyBooleanFormula = false

# Use generalized counterexamples to induction as candidate invariants.
bmc.usePropertyDirection = false

# File name where to put the path program that is generated as input for
# CBMC. A temporary file is used if this is unspecified. If specified, the
# file name should end with '.i' because otherwise CBMC runs the
# pre-processor on the file.
cbmc.dumpCBMCfile = no default value

# maximum time limit for CBMC (use milliseconds or specify a unit; 0 for
# infinite)
cbmc.timelimit = 0ms

# continue analysis after a failed refinement (e.g. due to interpolation)
# other paths may still contain errors that could be found
cegar.continueAfterFailedRefinement = false

# if this score is exceeded by the first analysis, the auxiliary analysis
# will be refined
cegar.domainScoreThreshold = 1024

# Whether to do refinement immediately after finding an error state, or
# globally after the ARG has been unrolled completely.
# whether or not global refinement is performed
cegar.globalRefinement = false

# Max number of refinement iterations, -1 for no limit
cegar.maxIterations = -1

# Which refinement algorithm to use? (give class name, required for CEGAR) If
# the package name starts with 'org.sosy_lab.cpachecker.', this prefix can be
# omitted.
cegar.refiner = no default value

# whether or not to use refinement selection to decide which domain to refine
cegar.useRefinementSelection = false

# Which functions should be interpreted as encoding assumptions
cfa.assumeFunctions = {"__VERIFIER_assume"}

# dump a simple call graph
cfa.callgraph.export = true

# file name for call graph as .dot file
cfa.callgraph.file = "functionCalls.dot"
cfa.callgraph.fileUsed = "functionCallsUsed.dot"

# how often do we clone a function?
cfa.cfaCloner.numberOfCopies = 5

# while this option is activated, before each use of a PointerExpression or
# a dereferenced field access, the expression is checked for being 0
cfa.checkNullPointers = false

# Whether to have a single target node per function for all invalid null
# pointer dereferences or to have separate nodes for each dereference
cfa.checkNullPointers.singleTargetPerFunction = true

# When a function pointer array element is written with a variable as index,
# create a series of if-else edges with explicit indices instead.
cfa.expandFunctionPointerArrayAssignments = false

# export CFA as .dot file
cfa.export = true

# export individual CFAs for function as .dot files
cfa.exportPerFunction = true

# export CFA as C file
cfa.exportToC = false
cfa.exportToC.file = "cfa.c"

# produce C programs more similar to the input program
# (only possible for a single input file)
cfa.exportToC.stayCloserToInput = false

# export CFA as .dot file
cfa.file = "cfa.dot"

# By enabling this option the variables that are live are computed for each
# edge of the cfa. Live means that their value is read later on.
cfa.findLiveVariables = false

# how often can a function appear in the callstack as a clone of the original
# function?
cfa.functionCalls.recursionDepth = 5

# Also initialize local variables with default values, or leave them
# uninitialized.
cfa.initializeAllVariables = false

# With this option, all declarations in each function will be moved to the
# beginning of each function. Only use this option if you are not able to
# handle initializer lists and designated initializers (like they can be used
# for arrays and structs) in your analysis anyway. This option will otherwise
# create C code which is not the same as the original one
cfa.moveDeclarationsToFunctionStart = false

# Which functions should be interpreted as never returning to their call site
cfa.nonReturningFunctions = {"abort", "exit"}

# Export CFA as pixel graphic to the given file name. The suffix is added
# corresponding to the value of option pixelgraphic.export.format. If set to
# 'null', no pixel graphic is exported.
cfa.pixelGraphicFile = "cfaPixel"

# export CFA as .ser file (dump Java objects)
cfa.serialize = false
cfa.serializeFile = "cfa.ser.gz"

# Show messages when dead code is encountered during parsing.
cfa.showDeadCode = true

# Remove all edges which don't have any effect on the program
cfa.simplifyCfa = true

# simplify simple const expressions like 1+2
cfa.simplifyConstExpressions = true

# simplify pointer expressions like s->f to (*s).f with this option the cfa
# is simplified until at maximum one pointer is allowed for left- and
# rightHandSide
cfa.simplifyPointerExpressions = false

# A name of thread_create function
cfa.threads.threadCreate = "pthread_create"

# A name of thread_join function
cfa.threads.threadJoin = "pthread_join"

# A name of thread_create_N function
cfa.threads.threadSelfCreate = "pthread_create_N"

# A name of thread_join_N function
cfa.threads.threadSelfJoin = "pthread_join_N"

# clone functions of the CFA, such that there are several identical CFAs for
# each function, only with different names.
cfa.useCFACloningForMultiThreadedPrograms = false

# unwind recursive functioncalls (bounded to max call stack size)
cfa.useFunctionCallUnwinding = false

# Dump domain type statistics to a CSV file.
cfa.variableClassification.domainTypeStatisticsFile = no default value

# Dump variable classification to a file.
cfa.variableClassification.logfile = "VariableClassification.log"

# Print some information about the variable classification.
cfa.variableClassification.printStatsOnStartup = false

# Dump variable type mapping to a file.
cfa.variableClassification.typeMapFile = "VariableTypeMapping.txt"

# Output an input file, with invariants embedded as assume constraints.
cinvariants.export = false

# File name for exporting invariants. Only supported if invariant export for
# specified lines is enabled.
cinvariants.external.file = no default value

# Specify lines for which an invariant should be written. Lines are specified
# as comma separated list of individual lines x and line ranges x-y.
cinvariants.forLines = ""

# If enabled only export invariants for specified lines.
cinvariants.onlyForSpecifiedLines = false

# Prefix to add to an output file, which would contain assumed invariants.
cinvariants.prefix = no default value

# Attempt to simplify the invariant before exporting [may be very expensive].
cinvariants.simplify = false

# If adaptTimeLimits is set and all configurations support progress reports,
# in each cycle the time limits per configuration are newly calculated based
# on the progress
compositionAlgorithm.circular.adaptTimeLimits = false

# where to store initial condition, when generated
compositionAlgorithm.condition.file = "AssumptionAutomaton.txt"

# list of files with configurations to use, which are optionally suffixed
# according to one of the following schemes: either ::MODE or ::MODE_LIMIT,
# where MODE and LIMIT are placeholders. MODE may take one of the following
# values: continue (i.e., continue analysis with same CPA and reached set),
# reuse-precision (i.e., reuse the aggregation of the precisions from the
# previous analysis run), noreuse (i.e., start from scratch). LIMIT is a
# positive integer number specifying the time limit of the analysis in each
# round. If no (correct) limit is given a default limit is used.
compositionAlgorithm.configFiles = no default value

# Whether or not to create an initial condition, that excludes no paths,
# before first analysis is run. Required when first analysis uses condition
# from conditional model checking
compositionAlgorithm.initCondition = false

# print the statistics of each component of the composition algorithm
# directly after the component's computation is finished
compositionAlgorithm.intermediateStatistics = NONE
  enum:     [EXECUTE, NONE, PRINT]

# Enable when composition algorithm is used to check a specification
compositionAlgorithm.propertyChecked = true

# Qualified name for class which implements strategy that decides how to
# compose given analyses
compositionAlgorithm.strategy = no default value

# let each analysis part of the composition algorithm write output files and
# not only the last one that is executed
compositionAlgorithm.writeIntermediateOutputFiles = true

# configuration of the residual program generator
conditional.verifier.generatorConfig = no default value

# configuration for the verification of the residual program which is
# constructed from another verifier's condition
conditional.verifier.verifierConfig = no default value

# The input file with all goals that were previously reached
conditional_testing.inputfile = no default value

# The strategy to use
conditional_testing.strategy = no default value
  enum:     [NAIVE, PROPAGATION]

# Dump the complete configuration to a file.
configuration.dumpFile = "UsedConfiguration.properties"

# True if the path to the error state can not always be uniquely determined
# from the ARG.
# This is the case e.g. for Slicing Abstractions, where the abstraction
# states in the ARG
# do not form a tree!
counterexample.ambigiousARG = false

# Which model checker to use for verifying counterexamples as a second check.
# Currently CBMC or CPAchecker with a different config or the concrete
# execution 
# checker can be used.
counterexample.checker = CBMC
  enum:     [CBMC, CPACHECKER, CONCRETE_EXECUTION]

# counterexample information should provide more precise information from
# counterexample check, if available
counterexample.checker.changeCEXInfo = false

# configuration file for counterexample checks with CPAchecker
counterexample.checker.config = no default value

# counterexample check should fully replace existing counterexamples with own
# ones, if available
counterexample.checker.forceCEXChange = false

# File name where to put the path specification that is generated as input
# for the counterexample check. A temporary file is used if this is
# unspecified.
counterexample.checker.path.file = no default value

# The file in which the generated C code is saved.
counterexample.concrete.dumpFile = no default value

# Path to the compiler. Can be absolute or only the name of the program if it
# is in the PATH
counterexample.concrete.pathToCompiler = "/usr/bin/gcc"

# Maximum time limit for the concrete execution checker.
# This limit is used for compilation as well as execution so overall, twice
# the time of this limit may be consumed.
# (use milliseconds or specify a unit; 0 for infinite)
counterexample.concrete.timelimit = 0ms

# continue analysis after an counterexample was found that was denied by the
# second check
counterexample.continueAfterInfeasibleError = true

# An imprecise counterexample of the Predicate CPA is usually a bug, but
# expected in some configurations. Should it be treated as a bug or accepted?
counterexample.export.allowImpreciseCounterexamples = false

# Always use imprecise counterexamples of the predicate analysis. If this
# option is set to true, counterexamples generated by the predicate analysis
# will be exported as-is. This means that no information like variable
# assignments will be added and imprecise or potentially wrong program paths
# will be exported as counterexample.
counterexample.export.alwaysUseImpreciseCounterexamples = false

# If the option assumeLinearArithmetics is set, this option can be used to
# allow division and modulo by constants.
counterexample.export.assumptions.allowDivisionAndModuloByConstants = false

# If the option assumeLinearArithmetics is set, this option can be used to
# allow multiplication between operands with at least one constant.
counterexample.export.assumptions.allowMultiplicationWithConstants = false

# Try to avoid using operations that exceed the capabilities of linear
# arithmetics when extracting assumptions from the model. This option aims to
# prevent witnesses that are inconsistent with models that are, due to an
# analysis limited to linear arithmetics, actually incorrect.
#  This option does not magically produce a correct witness from an incorrect
# model, and since the difference between an incorrect witness consistent
# with the model and an incorrect witness that is inconsistent with the model
# is academic, you usually want this option to be off.
counterexample.export.assumptions.assumeLinearArithmetics = false

# export counterexample as automaton
counterexample.export.automaton = "Counterexample.%d.spc"

# exports either CBMC format or a concrete path program
counterexample.export.codeStyle = CBMC
  enum:     [CBMC, CONCRETE_EXECUTION]

# compress the produced error-witness automata using GZIP compression.
counterexample.export.compressWitness = true

# export counterexample core as text file
counterexample.export.core = "Counterexample.%d.core.txt"

# export counterexample to file, if one is found
counterexample.export.enabled = true

# export error paths to files immediately after they were found, including
# spurious error-paths before executing a refinement. Note that we do not
# track already exported error-paths and export them at every refinement as
# long as they are not removed from the reached-set. Most helpful for
# debugging refinements.
counterexample.export.exportAllFoundErrorPaths = false

# export counterexample as source file
counterexample.export.exportAsSource = true

# export coverage information for every witness: requires using an Assumption
# Automaton as part of the specification. Lines are considered to be covered
# only when the path reaching the statement does not reach the __FALSE state
# in the Assumption Automaton.
counterexample.export.exportCounterexampleCoverage = false

# Export extended witness in addition to regular witness
counterexample.export.exportExtendedWitness = false

# export test harness
counterexample.export.exportHarness = false

# export error paths to files immediately after they were found
counterexample.export.exportImmediately = false

# export test case that represents the counterexample. Further options can be
# set with options 'testcase.*'
counterexample.export.exportTestCase = false

# export counterexample as witness/graphml file
counterexample.export.exportWitness = true

# Extended witness with specific analysis information file
counterexample.export.extendedWitnessFile = "extendedWitness.%d.graphml"

# export counterexample as text file
counterexample.export.file = "Counterexample.%d.txt"

# Filter for irrelevant counterexamples to reduce the number of similar
# counterexamples reported. Only relevant with analysis.stopAfterError=false
# and counterexample.export.exportImmediately=true. Put the weakest and
# cheapest filter first, e.g., PathEqualityCounterexampleFilter.
counterexample.export.filters = no default value

# where to dump the counterexample formula in case a specification violation
# is found
counterexample.export.formula = "Counterexample.%d.smt2"

# export counterexample as Dot/Graphviz visualization
counterexample.export.graph = "Counterexample.%d.dot"

# export counterexample witness as GraphML automaton
counterexample.export.graphml = "Counterexample.%d.graphml"

# export test harness to file as code
counterexample.export.harness = "Counterexample.%d.harness.c"

# where to dump the counterexample model in case a specification violation is
# found
counterexample.export.model = "Counterexample.%d.assignment.txt"

# export counterexample coverage information, considering only spec prefix as
# covered (up until reaching __FALSE state in Assumption Automaton).
counterexample.export.prefixCoverageFile = "Counterexample.%d.aa-prefix.coverage-info"

# The files where the BDDCPARestrictionAlgorithm should write the presence
# conditions for the counterexamples to.
counterexample.export.presenceCondition = "Counterexample.%d.presenceCondition.txt"

# File name for analysis report in case a counterexample was found.
counterexample.export.report = "Counterexample.%d.html"

# export counterexample as source file
counterexample.export.source = "Counterexample.%d.c"

# export counterexample witness as Dot/Graphviz visualization
counterexample.export.witnessGraph = "Counterexample.%d.witness.dot"

# If continueAfterInfeasibleError is true, remove the error state that is
# proven to be unreachable before continuing. Set this to false if
# analysis.collectAssumptions=true is also set.
counterexample.removeInfeasibleErrorState = true

# If continueAfterInfeasibleError is true, attempt to remove the whole path
# of the infeasible counterexample before continuing. Setting this to false
# may prevent a lot of similar infeasible counterexamples from being
# discovered, but is unsound
counterexample.removeInfeasibleErrors = false

# If true, the counterexample checker will not assume a counterexample as
# infeasible because of unsupported code. But will try different paths
# anyway.
counterexample.skipCounterexampleForUnsupportedCode = false

# Compute and export information about the verification coverage?
coverage.enabled = true

# print coverage info to file
coverage.file = "coverage.info"

# CPA to use (see doc/Configuration.md for more documentation on this)
cpa = CompositeCPA.class.getCanonicalName()

# Where to perform abstraction
cpa.abe.abstractionLocations = LOOPHEAD
  enum:     [ALL, LOOPHEAD, MERGE]

# Check target states reachability
cpa.abe.checkTargetStates = true

# Cache formulas produced by path formula manager
cpa.abe.useCachingPathFormulaManager = true

# only store pure C expressions without ACSL-specific constructs
cpa.acsl.usePureExpressionsOnly = true

# Use this to change the underlying abstract domain in the APRON library
cpa.apron.domain = OCTAGON
  enum:     [BOX, OCTAGON, POLKA, POLKA_STRICT, POLKA_EQ]

# get an initial precision from file
cpa.apron.initialPrecisionFile = no default value

# this option determines which initial precision should be used
cpa.apron.initialPrecisionType = "STATIC_FULL"
  allowed values: [STATIC_FULL, REFINEABLE_EMPTY]

# with this option enabled the states are only merged at loop heads
cpa.apron.mergeop.onlyMergeAtLoopHeads = false

# of which type should the merge be?
cpa.apron.mergeop.type = "SEP"
  allowed values: [SEP, JOIN, WIDENING]

# target file to hold the exported precision
cpa.apron.precisionFile = no default value

# Timelimit for the backup feasibility check with the apron analysis. (use
# seconds or specify a unit; 0 for infinite)
cpa.apron.refiner.timeForApronFeasibilityCheck = 0ns

# split disequalities considering integer operands into two states or use
# disequality provided by apron library 
cpa.apron.splitDisequalities = true

# translate final ARG into this C file
cpa.arg.CTranslation.file = "ARG.c"

# minimum ratio of branch compared to whole program to be exported
cpa.arg.automaton.branchRatio = 0.5

# what data should be exported from the ARG nodes? A different strategy might
# result in a smaller automaton.
cpa.arg.automaton.dataStrategy = LOCATION
  enum:     [LOCATION, CALLSTACK]

# translate final ARG into an automaton
cpa.arg.automaton.export = false

# export as zip-files, depends on 'automaton.export=true'
cpa.arg.automaton.exportCompressed = true

# translate final ARG into an automaton, depends on 'automaton.export=true'
cpa.arg.automaton.exportDotFile = "ARG_parts/ARG.%06d.spc.dot"
cpa.arg.automaton.exportSpcFile = "ARG_parts/ARG.%06d.spc"
cpa.arg.automaton.exportSpcZipFile = "ARG_parts.zip"

# export all automata into one zip-file, depends on 'automaton.export=true'
cpa.arg.automaton.exportZipped = true

# after determining branches, which one of them should be exported?
cpa.arg.automaton.selectionStrategy = LEAVES
  enum:     [NONE, ALL, LEAVES, WEIGHTED, FIRST_BFS]

# minimum ratio of siblings such that one of them will be exported
cpa.arg.automaton.siblingRatio = 0.4

# when using FIRST_BFS, how many nodes should be skipped? ZERO will only
# export the root itself, MAX_INT will export only LEAFS.
cpa.arg.automaton.skipFirstNum = 10

# which coarse strategy should be applied when analyzing the ARG?
cpa.arg.automaton.splitStrategy = TARGETS
  enum:     [NONE, GLOBAL_CONDITIONS, LEAVES, TARGETS]

# compress the produced correctness-witness automata using GZIP compression.
cpa.arg.compressWitness = true

# prevent the stop-operator from aborting the stop-check early when it
# crosses a target state
cpa.arg.coverTargetStates = false

# inform merge operator in CPA enabled analysis that it should delete the
# subgraph of the merged node which is required to get at most one successor
# per CFA edge.
cpa.arg.deleteInCPAEnabledAnalysis = false

# Dump all ARG related statistics files after each iteration of the CPA
# algorithm? (for debugging and demonstration)
cpa.arg.dumpAfterIteration = false

# Enable reduction for nested abstract states when entering or leaving a
# block abstraction for BAM. The reduction can lead to a higher
# cache-hit-rate for BAM and a faster sub-analysis for blocks.
cpa.arg.enableStateReduction = true

# export final ARG as .dot file
cpa.arg.export = true

# Enable the integration of __VERIFIER_assume statements for non-true
# assumption in states. Disable if you want to create residual programs.
cpa.arg.export.code.addAssumptions = true

# Only enable CLOSEFUNCTIONBLOCK if you are sure that the ARG merges
# different flows through a function at the end of the function.
cpa.arg.export.code.blockAtFunctionEnd = KEEPBLOCK
  enum:     [CLOSEFUNCTIONBLOCK, ADDNEWBLOCK, KEEPBLOCK]

# How to deal with target states during code generation
cpa.arg.export.code.handleTargetStates = NONE
  enum:     [NONE, RUNTIMEVERIFICATION, ASSERTFALSE, FRAMACPRAGMA, VERIFIERERROR,
             REACHASMEMSAFETY, REACHASOVERFLOW, REACHASTERMINATION]

# write include directives
cpa.arg.export.code.header = true

# If specified, metadata about the produced C program will be exported to
# this file
cpa.arg.export.code.metadataOutput = no default value

# export final ARG as .dot file
cpa.arg.file = "ARG.dot"

# inform ARG CPA if it is run in an analysis with enabler CPA because then it
# must behave differently during merge.
cpa.arg.inCPAEnabledAnalysis = false

# whether to keep covered states in the reached set as addition to keeping
# them in the ARG
cpa.arg.keepCoveredStatesInReached = false

# What do to on a late merge, i.e., if the second parameter of the merge
# already has child states (cf. issue #991):
# - ALLOW: Just merge as usual.
# - ALLOW_WARN: Log a warning the first time this happens, then ALLOW.
# - PREVENT: Do not merge, i.e., enforce merge-sep for such situations.
# - PREVENT_WARN: Log a warning the first time this happens, then PREVENT.
# - CRASH: Crash CPAchecker as soon as this happens
#   (useful for cases where a late merge should never happen).
cpa.arg.lateMerge = ALLOW
  enum:     [ALLOW, ALLOW_WARN, PREVENT, PREVENT_WARN, CRASH]

# write the ARG at various stages during execution into dot files whose name
# is specified by this option. Only works if 'cpa.arg.logARGs=true'
cpa.arg.log.fileTemplate = "ARG_log/ARG_%04d.dot"

# Enable logging of ARGs at various positions
cpa.arg.logARGs = false

# If this option is enabled, ARG states will also be merged if the first
# wrapped state is subsumed by the second wrapped state (and the parents are
# not yet subsumed).
cpa.arg.mergeOnWrappedSubsumption = false

# Export final ARG as pixel graphic to the given file name. The suffix is
# added corresponding to the value of option pixelgraphic.export.format. If
# set to 'null', no pixel graphic is exported.
cpa.arg.pixelGraphicFile = "ARG"

# export a proof as .graphml file
cpa.arg.proofWitness = no default value

# export a proof as dot/graphviz file
cpa.arg.proofWitness.dot = no default value

# export simplified ARG that shows all refinements to .dot file
cpa.arg.refinements.file = "ARGRefinements.dot"

# export final ARG as .dot file, showing only loop heads and function
# entries/exits
cpa.arg.simplifiedARG.file = "ARGSimplified.dot"

# translate final ARG into C program
cpa.arg.translateToC = false

# Verification witness: Include the considered case of an assume?
cpa.arg.witness.exportAssumeCaseInfo = true

# Verification witness: Include assumptions (C statements)?
cpa.arg.witness.exportAssumptions = true

# Verification witness: Include function calls and function returns?
cpa.arg.witness.exportFunctionCallsAndReturns = true

# Verification witness: Include the (starting) line numbers of the operations
# on the transitions?
cpa.arg.witness.exportLineNumbers = true

# Verification witness: Export labels for nodes in GraphML for easier visual
# representation?
cpa.arg.witness.exportNodeLabel = false

# Verification witness: Include the offset within the file?
cpa.arg.witness.exportOffset = true

# Always export source file name, even default
cpa.arg.witness.exportSourceFileName = false

# Verification witness: Include the sourcecode of the operations?
cpa.arg.witness.exportSourcecode = false

# Verification witness: Include a thread identifier within the file?
cpa.arg.witness.exportThreadId = false

# Verification witness: Include (not necessarily globally unique) thread
# names for concurrent tasks for debugging?
cpa.arg.witness.exportThreadName = false

# Produce an invariant witness instead of a correctness witness. Constructing
# an invariant witness makes use of a different merge for quasi-invariants:
# Instead of computing the disjunction of two invariants present when merging
# nodes, 'true' is ignored when constructing the disjunction. This may be
# unsound in some situations, so be careful when using this option.
cpa.arg.witness.produceInvariantWitnesses = false

# Some redundant transitions will be removed
cpa.arg.witness.removeInsufficientEdges = true

# Verification witness: Revert escaping/renaming of functions for threads?
cpa.arg.witness.revertThreadFunctionRenaming = false

# signal the analysis to break in case the given number of error state is
# reached. Use -1 to disable this limit.
cpa.automaton.breakOnTargetState = 1

# export automaton to file
cpa.automaton.dotExport = false

# file for saving the automaton in DOT format (%s will be replaced with
# automaton name)
cpa.automaton.dotExportFile = "%s.dot"

# the maximum number of iterations performed after the initial error is
# found, despite the limit given as cpa.automaton.breakOnTargetState is not
# yet reached. Use -1 to disable this limit.
cpa.automaton.extraIterationsLimit = -1

# file with automaton specification for ObserverAutomatonCPA and
# ControlAutomatonCPA
cpa.automaton.inputFile = no default value

# Merge two automata states if one of them is TOP.
cpa.automaton.mergeOnTop = false

# An implicit precision: consider states with a self-loop and no other
# outgoing edges as TOP.
cpa.automaton.prec.topOnFinalSelfLoopingState = false

# file for saving the automaton in spc format (%s will be replaced with
# automaton name)
cpa.automaton.spcExportFile = "%s.spc"

# Whether to treat automaton states with an internal error state as targets.
# This should be the standard use case.
cpa.automaton.treatErrorsAsTargets = true

# If enabled, cache queries also consider blocks with non-matching precision
# for reuse.
cpa.bam.aggressiveCaching = true

# export blocked ARG as .dot file
cpa.bam.argFile = "BlockedARG.dot"

# Type of partitioning (FunctionAndLoopPartitioning or
# DelayedFunctionAndLoopPartitioning)
# or any class that implements a PartitioningHeuristic
cpa.bam.blockHeuristic = no default value

# only consider functions with a matching name, i.e., select only some
# functions directly.
cpa.bam.blockHeuristic.functionPartitioning.matchFunctions = no default value

# only consider function with a minimum number of calls. This approach is
# similar to 'inlining' functions used only a few times. Info: If a function
# is called several times in a loop, we only count 'one' call.
cpa.bam.blockHeuristic.functionPartitioning.minFunctionCalls = 0

# only consider function with a minimum number of CFA nodes. This approach is
# similar to 'inlining' small functions, when using BAM.
cpa.bam.blockHeuristic.functionPartitioning.minFunctionSize = 0

# file for exporting detailed statistics about blocks
cpa.bam.blockStatisticsFile = "block_statistics.txt"

# abort current analysis when finding a missing block abstraction
cpa.bam.breakForMissingBlock = true

# This flag determines which precisions should be updated during refinement.
# We can choose between the minimum number of states and all states that are
# necessary to re-explore the program along the error-path.
cpa.bam.doPrecisionRefinementForAllStates = false

# Heuristic: This flag determines which precisions should be updated during
# refinement. This flag also updates the precision of the most inner block.
cpa.bam.doPrecisionRefinementForMostInnerBlock = true

# export blocks
cpa.bam.exportBlocksPath = "block_cfa.dot"

# If enabled, the reached set cache is analysed for each cache miss to find
# the cause of the miss.
cpa.bam.gatherCacheMissStatistics = false

# BAM allows to analyse recursive procedures. This strongly depends on the
# underlying CPA. The current support includes only ValueAnalysis and
# PredicateAnalysis (with tree interpolation enabled).
cpa.bam.handleRecursiveProcedures = false

# export single blocked ARG as .dot files, should contain '%d'
cpa.bam.indexedArgFile = "ARGs/ARG_%d.dot"

# if we cannot determine a repeating/covering call-state, we will run into
# CallStackOverflowException. Thus we bound the stack size (unsound!). This
# option only limits non-covered recursion, but not a recursion where we find
# a coverage and re-use the cached block several times. The value '-1'
# disables this option.
cpa.bam.maximalDepthForExplicitRecursion = -1

# By default, the CPA algorithm terminates when finding the first target
# state, which makes it easy to identify this last state. For special
# analyses, we need to search for more target states in the reached-set, when
# reaching a block-exit. This flag is needed if the option
# 'cpa.automaton.breakOnTargetState' is unequal to 1.
cpa.bam.searchTargetStatesOnExit = false

# export used parts of blocked ARG as .dot file
cpa.bam.simplifiedArgFile = "BlockedARGSimplified.dot"

# Should the nested CPA-algorithm be wrapped with CEGAR within BAM?
cpa.bam.useCEGAR = false

# This flag determines which refinement procedure we should use. We can
# choose between an in-place refinement and a copy-on-write refinement.
cpa.bam.useCopyOnWriteRefinement = false

# In some cases BAM cache can not be easily applied. If the option is enabled
# CPAs can inform BAM that the result states should not be used even if there
# will a cache hit.
cpa.bam.useDynamicAdjustment = false

# max bitsize for values and vars, initial value
cpa.bdd.bitsize = 64

# use a smaller bitsize for all vars, that have only intEqual values
cpa.bdd.compressIntEqual = true

# add some additional variables (with prefix) for each variable that can be
# used for more complex BDD operations later. In the ordering, we declare
# them as narrow as possible to the original variable, such that the overhead
# for using them stays small. A value 0 disables this feature.
cpa.bdd.initAdditionalVariables = 0

# declare the bits of a var from 0 to N or from N to 0
cpa.bdd.initBitsIncreasing = true

# declare first bit of all vars, then second bit,...
cpa.bdd.initBitwise = true

# declare vars partitionwise
cpa.bdd.initPartitions = true

# declare partitions ordered
cpa.bdd.initPartitionsOrdered = true

# Dump tracked variables to a file.
cpa.bdd.logfile = "BDDCPA_tracked_variables.log"

# mergeType
cpa.bdd.merge = "join"
  allowed values: [sep, join]

# reduce and expand BDD states for BAM, otherwise use plain identity
cpa.bdd.useBlockAbstraction = false

# Dump tracked variables to a file.
cpa.bdd.variablesFile = "BDDCPA_ordered_variables.txt"

# depth of recursion bound
cpa.callstack.depth = 0

# which abstract domain to use for callstack cpa, typically FLAT which is
# faster since it uses only object equivalence
cpa.callstack.domain = "FLAT"
  allowed values: [FLAT, FLATPCC]

# Skip recursion if it happens only by going via a function pointer (this is
# unsound). Imprecise function pointer tracking often leads to false
# recursions.
cpa.callstack.skipFunctionPointerRecursion = false

# Skip recursion (this is unsound). Treat function call as a statement (the
# same as for functions without bodies)
cpa.callstack.skipRecursion = false

# Skip recursion if it happens only by going via a void function (this is
# unsound).
cpa.callstack.skipVoidRecursion = false

# analyse the CFA backwards
cpa.callstack.traverseBackwards = false

# Blacklist of extern functions that will make the analysis abort if called
cpa.callstack.unsupportedFunctions = {
          "pthread_create",
          "pthread_key_create",
          "longjmp",
          "siglongjmp",
          "__builtin_va_arg",
          "atexit"}

# firing relation to be used in the precision adjustment operator
cpa.chc.firingRelation = "Always"
  allowed values: [Always, Maxcoeff, Sumcoeff, Homeocoeff]

# generalization operator to be used in the precision adjustment operator
cpa.chc.generalizationOperator = "Widen"
  allowed values: [Top, Widen, WidenMax, WidenSum]

# By enabling this option the CompositeTransferRelation will compute abstract
# successors for as many edges as possible in one call. For any chain of
# edges in the CFA which does not have more than one outgoing or leaving edge
# the components of the CompositeCPA are called for each of the edges in this
# chain. Strengthening is still computed after every edge. The main
# difference is that while this option is enabled not every ARGState may have
# a single edge connecting to the child/parent ARGState but it may instead be
# a list.
cpa.composite.aggregateBasicBlocks = false

# inform Composite CPA if it is run in a CPA enabled analysis because then it
# must behave differently during merge.
cpa.composite.inCPAEnabledAnalysis = false

# which composite merge operator to use (plain or agree)
# Both delegate to the component cpas, but agree only allows merging if all
# cpas agree on this. This is probably what you want.
cpa.composite.merge = "AGREE"
  allowed values: [PLAIN, AGREE]

# Limit for Java heap memory used by CPAchecker (in MB, not MiB!; -1 for
# infinite)
cpa.conditions.global.memory.heap = -1

# Limit for process memory used by CPAchecker (in MB, not MiB!; -1 for
# infinite)
cpa.conditions.global.memory.process = -1

# Limit for size of reached set (-1 for infinite)
cpa.conditions.global.reached.size = -1

# Limit for cpu time used by CPAchecker (use milliseconds or specify a unit;
# -1 for infinite)
cpa.conditions.global.time.cpu = -1

# Hard limit for cpu time used by CPAchecker (use milliseconds or specify a
# unit; -1 for infinite)
# When using adjustable conditions, analysis will end after this threshold
cpa.conditions.global.time.cpu.hardlimit = -1

# Limit for wall time used by CPAchecker (use milliseconds or specify a unit;
# -1 for infinite)
cpa.conditions.global.time.wall = -1

# Hard limit for wall time used by CPAchecker (use milliseconds or specify a
# unit; -1 for infinite)
# When using adjustable conditions, analysis will end after this threshold
cpa.conditions.global.time.wall.hardlimit = -1

# Number of times the path condition may be adjusted, i.e., the path
# condition threshold may be increased (-1 to always adjust)
cpa.conditions.path.adjustment.threshold = -1

# determines if there should be one single assignment state per state, one
# per path segment between assume edges, or a global one for the whole
# program.
cpa.conditions.path.assignments.scope = STATE
  enum:     [STATE, PATH, PROGRAM]

# sets the threshold for assignments (-1 for infinite), and it is up to,
# e.g., ValueAnalysisPrecisionAdjustment to act accordingly to this threshold
# value.
cpa.conditions.path.assignments.threshold = DISABLED

# maximum number of assume edges length (-1 for infinite)
cpa.conditions.path.assumeedges.limit = -1

# The condition
cpa.conditions.path.condition = no default value

# maximum path length (-1 for infinite)
cpa.conditions.path.length.limit = -1

# maximum repetitions of any edge in a path (-1 for infinite)
cpa.conditions.path.repetitions.limit = -1

# Generate congruences for sums of variables (<=> x and y have same/different
# evenness)
cpa.congruence.trackCongruenceSum = false

# Cache formulas produced by path formula manager
cpa.congruence.useCachingPathFormulaManager = true

# Whether to perform caching of constraint satisfiability results
cpa.constraints.cache = true

# Whether to use subset caching
cpa.constraints.cacheSubsets = true

# Whether to use superset caching
cpa.constraints.cacheSupersets = true

# Type of less-or-equal operator to use
cpa.constraints.lessOrEqualType = SUBSET
  enum:     [SUBSET]

# Type of merge operator to use
cpa.constraints.mergeType = SEP
  enum:     [SEP, JOIN_FITTING_CONSTRAINT]

# Whether to perform SAT checks only for the last added constraint
cpa.constraints.minimalSatCheck = true

# Type of precision to use. Has to be LOCATION if PredicateExtractionRefiner
# is used.
cpa.constraints.refinement.precisionType = CONSTRAINTS
  enum:     [CONSTRAINTS, LOCATION]

# Whether to remove constraints that can't add any more information to
# analysis during simplification
cpa.constraints.removeOutdated = true

# Whether to remove trivial constraints from constraints states during
# simplification
cpa.constraints.removeTrivial = false

# Resolve definite assignments
cpa.constraints.resolveDefinites = true

# When to check the satisfiability of constraints
cpa.constraints.satCheckStrategy = AT_ASSUME
  enum:     [AT_ASSUME, AT_TARGET]

# Export the trace-abstraction automaton to a file in dot-format.
cpa.dca.refiner.dotExport = false

# Filename that the interpolation automaton will be written to. %s will get
# replaced by the automaton name.
cpa.dca.refiner.dotExportFile = "%s.dot"

# If set to true, all infeasible dummy states will be kept in the ARG.
cpa.dca.refiner.keepInfeasibleStates = false

# The max amount of refinements for the trace abstraction algorithm. Setting
# it to 0 leads to an analysis of the ARG without executing any refinements.
# This is used for debugging purposes.
cpa.dca.refiner.maxRefinementIterations = 10

# Skip the analysis (including the refinement) entirely, so that the ARG is
# left unmodified. This is used for debugging purposes.
cpa.dca.refiner.skipAnalysis = false

# which merge operator to use for DefUseCPA
cpa.defuse.merge = "sep"
  allowed values: [sep, join]

# Which strategy to use for forced coverings (empty for none)
cpa.forcedCovering = no default value

# When an invalid function pointer is called, do not assume all functions as
# possible targets and instead call no function.
cpa.functionpointer.ignoreInvalidFunctionPointerCalls = false

# When an unknown function pointer is called, do not assume all functions as
# possible targets and instead call no function (this is unsound).
cpa.functionpointer.ignoreUnknownFunctionPointerCalls = false

# whether function pointers with invalid targets (e.g., 0) should be tracked
# in order to find calls to such pointers
cpa.functionpointer.trackInvalidFunctionPointers = false

# which type of merge operator to use for IntervalAnalysisCPA
cpa.interval.merge = "SEP"
  allowed values: [SEP, JOIN]

# decides whether one (false) or two (true) successors should be created when
# an inequality-check is encountered
cpa.interval.splitIntervals = false

# at most that many intervals will be tracked per variable, -1 if number not
# restricted
cpa.interval.threshold = -1

# controls whether to use abstract evaluation always, never, or depending on
# entering edges.
cpa.invariants.abstractionStateFactory = ENTERING_EDGES
  enum:     [ALWAYS, ENTERING_EDGES, NEVER]

# enables the over-approximation of unsupported features instead of failing
# fast; this is imprecise
cpa.invariants.allowOverapproximationOfUnsupportedFeatures = true

# determine variables relevant to the decision whether or not a target path
# assume edge is taken and limit the analysis to those variables.
cpa.invariants.analyzeRelevantVariablesOnly = true

# determine target locations in advance and analyse paths to the target
# locations only.
cpa.invariants.analyzeTargetPathsOnly = true

# controls the condition adjustment logic: STATIC means that condition
# adjustment is a no-op, INTERESTING_VARIABLES increases the interesting
# variable limit, MAXIMUM_FORMULA_DEPTH increases the maximum formula depth,
# ABSTRACTION_STRATEGY tries to choose a more precise abstraction strategy,
# COMPOUND combines the other strategies (minus STATIC).
cpa.invariants.conditionAdjusterFactory = COMPOUND
  enum:     [STATIC, INTERESTING_VARIABLES, MAXIMUM_FORMULA_DEPTH,
             ABSTRACTION_STRATEGY, COMPOUND]

# include type information for variables, such as x >= MIN_INT && x <=
# MAX_INT
cpa.invariants.includeTypeInformation = true

# the maximum number of variables to consider as interesting. -1 disables
# the limit, but this is not recommended. 0 means that no variables are
# considered to be interesting.
cpa.invariants.interestingVariableLimit = 2

# the maximum number of adjustments of the interestingVariableLimit. -1
# disables the limit
cpa.invariants.maxInterestingVariableAdjustments = -1

# the maximum tree depth of a formula recorded in the environment.
cpa.invariants.maximumFormulaDepth = 4

# which merge operator to use for InvariantCPA
cpa.invariants.merge = "PRECISIONDEPENDENT"
  allowed values: [JOIN, SEP, PRECISIONDEPENDENT]

# use modulo-2 template during widening if applicable.
cpa.invariants.useMod2Template = false

# use pointer-alias information in strengthening, if available.
cpa.invariants.usePointerAliasStrengthening = true

# With this option the handling of global variables during the analysis can
# be fine-tuned. For example while doing a function-wise analysis it is
# important to assume that all global variables are live. In contrast to
# that, while doing a global analysis, we do not need to assume global
# variables being live.
cpa.liveVar.assumeGlobalVariablesAreAlwaysLive = true

# functions, which allocate new free memory
cpa.local.allocateFunctionPattern = {}
cpa.local.allocatefunctions = {}

# functions, which do not change sharedness of parameters
cpa.local.conservativefunctions = {}

# variables, which are always local
cpa.local.localvariables = {}

# With this option enabled, function calls that occur in the CFA are
# followed. By disabling this option one can traverse a function without
# following function calls (in this case FunctionSummaryEdges are used)
cpa.location.followFunctionCalls = true

# What are we searching for: race or deadlock
cpa.lock.analysisMode = RACE
  enum:     [RACE, DEADLOCK]

# annotated functions, which are known to work correctly
cpa.lock.annotate = no default value

# contains all lock names
cpa.lock.lockinfo = {}

# which merge operator to use for LockCPA
cpa.lock.merge = "SEP"
  allowed values: [SEP, JOIN]

# reduce recursive locks to a single access
cpa.lock.reduceLockCounters = BLOCK
  enum:     [NONE, BLOCK, ALL]

# reduce unused locks
cpa.lock.reduceUselessLocks = false

# Enable refinement procedure
cpa.lock.refinement = false

# stop path exploration if a lock limit is reached
cpa.lock.stopAfterLockLimit = false

# Consider or not special cases with empty lock sets
cpa.lock.stopMode = DEFAULT
  enum:     [DEFAULT, EMPTYLOCKSET]

# Only checks for targets after loops were unrolled exactly a number of times
# that is contained in this list. The default is an empty list, which means
# targets are checked in every iteration
cpa.loopbound.checkOnlyAtBounds = []

# Use a stop operator that will identify loop states whose depth is
# congruent regarding the modulus of this number. Values smaller or equal
# to zero will deactivate this feature.
cpa.loopbound.cyclicStopModulus = -1

# Number of loop iterations before the loop counter is abstracted. Zero is
# equivalent to no limit.
cpa.loopbound.loopIterationsBeforeAbstraction = 0

# this option controls how the maxLoopIterations condition is adjusted when a
# condition adjustment is invoked.
cpa.loopbound.maxLoopIterationAdjusterFactory = STATIC
  enum:     [STATIC, INCREMENT, DOUBLE]

# threshold for unrolling loops of the program (0 is infinite)
# works only if assumption storage CPA is enabled, because otherwise it would
# be unsound
cpa.loopbound.maxLoopIterations = 0

# threshold for adjusting the threshold for unrolling loops of the program (0
# is infinite).
# only relevant in combination with a non-static maximum loop iteration
# adjuster.
cpa.loopbound.maxLoopIterationsUpperBound = 0

# Only checks for error after loops were unrolled at least this amount of
# times.
cpa.loopbound.startAtBound = 0

# enable stack-based tracking of loops
cpa.loopbound.trackStack = false

# Where to perform abstraction
cpa.lpi.abstractionLocations = LOOPHEAD
  enum:     [ALL, LOOPHEAD, MERGE]

# Attach extra invariant from other CPAs during the value determination
# computation
cpa.lpi.attachExtraInvariantDuringValueDetermination = true

# Check whether the policy depends on the initial value
cpa.lpi.checkPolicyInitialCondition = true

# Check target states reachability
cpa.lpi.checkTargetStates = true

# Compute abstraction for larger templates using decomposition
cpa.lpi.computeAbstractionByDecomposition = false

# Do not compute the abstraction until strengthen is called. This speeds up
# the computation, but does not let other CPAs use the output of LPI.
cpa.lpi.delayAbstractionUntilStrengthen = false

# Value to substitute for the epsilon
cpa.lpi.epsilon = Rational.ONE

# Generate new templates using polyhedra convex hull
cpa.lpi.generateTemplatesUsingConvexHull = false

# Remove UFs and ITEs from policies.
cpa.lpi.linearizePolicy = true

# Attempt to weaken interpolants in order to make them more general
cpa.lpi.refinement.generalizeInterpolants = true

# Run naive value determination first, switch to namespaced if it fails.
cpa.lpi.runHopefulValueDetermination = true

# Remove redundant items when abstract values.
cpa.lpi.simplifyDotOutput = false

# Algorithm for converting a formula to a set of lemmas
cpa.lpi.toLemmasAlgorithm = "RCNF"
  allowed values: [CNF, RCNF, NONE]

# Number of refinements after which the unrolling depth is increased. Set
# to -1 to never increase the depth.
cpa.lpi.unrollingRefinementThreshold = 2

# Cache formulas produced by path formula manager
cpa.lpi.useCachingPathFormulaManager = true

# Syntactically pre-compute dependencies for value determination
cpa.lpi.valDetSyntacticCheck = true

# Number of value determination steps allowed before widening is run. Value
# of '-1' runs value determination until convergence.
cpa.lpi.wideningThreshold = -1

# time limit for a single post computation (use milliseconds or specify a
# unit; 0 for infinite)
cpa.monitor.limit = 0

# time limit for all computations on a path in milliseconds (use milliseconds
# or specify a unit; 0 for infinite)
cpa.monitor.pathcomputationlimit = 0

# keep tracking nondeterministically-assigned variables even if they are used
# in assumptions
cpa.nondeterminism.acceptConstrained = true

# this option determines which initial precision should be used
cpa.octagon.initialPrecisionType = "STATIC_FULL"
  allowed values: [STATIC_FULL, REFINEABLE_EMPTY]

# with this option enabled the states are only merged at loop heads
cpa.octagon.mergeop.onlyMergeAtLoopHeads = false

# of which type should the merge be?
cpa.octagon.mergeop.type = "SEP"
  allowed values: [SEP, JOIN, WIDENING]

# with this option the number representation in the library will be changed
# between floats and ints.
cpa.octagon.octagonLibrary = "INT"
  allowed values: [INT, FLOAT]

# Timelimit for the backup feasibility check with the octagon analysis.
# (use seconds or specify a unit; 0 for infinite)
cpa.octagon.refiner.timeForOctagonFeasibilityCheck = 0ns

# which merge operator to use for PointerCPA
cpa.pointer2.merge = "JOIN"
  allowed values: [JOIN, SEP]

# which merge operator to use for PointerACPA
cpa.pointerA.merge = "JOIN"
  allowed values: [SEP, JOIN]

# which stop operator to use for PointerACPA
cpa.pointerA.stop = "SEP"
  allowed values: [SEP, JOIN, NEVER]

# Whether to give up immediately if a very large array is encountered
# (heuristic, often we would just waste time otherwise)
cpa.predicate.abortOnLargeArrays = true

# Predicate ordering
cpa.predicate.abs.predicateOrdering.method = CHRONOLOGICAL
  enum:     [CHRONOLOGICAL, FRAMEWORK_RANDOM, FRAMEWORK_SIFT, FRAMEWORK_SIFTITE,
             FRAMEWORK_WIN2, FRAMEWORK_WIN2ITE, FRAMEWORK_WIN3, FRAMEWORK_WIN3ITE]

# use caching of abstractions
# use caching of region to formula conversions
cpa.predicate.abs.useCache = true

# DEPRECATED: whether to use Boolean (false) or Cartesian (true) abstraction
cpa.predicate.abstraction.cartesian = false

# whether to use Boolean or Cartesian abstraction or both
cpa.predicate.abstraction.computation = BOOLEAN
  enum:     [CARTESIAN, CARTESIAN_BY_WEAKENING, BOOLEAN, COMBINED, ELIMINATION]

# dump the abstraction formulas if they took too long
cpa.predicate.abstraction.dumpHardQueries = false

# Identify those predicates where the result is trivially known before
# abstraction computation and omit them.
cpa.predicate.abstraction.identifyTrivialPredicates = false

# get an initial map of predicates from a list of files (see source
# doc/examples/predmap.txt for an example)
cpa.predicate.abstraction.initialPredicates = []

# Apply location-specific predicates to all locations in their function
cpa.predicate.abstraction.initialPredicates.applyFunctionWide = false

# Apply location- and function-specific predicates globally (to all locations
# in the program)
cpa.predicate.abstraction.initialPredicates.applyGlobally = false

# when reading predicates from file, convert them from Integer- to BV-theory
# or reverse.
cpa.predicate.abstraction.initialPredicates.encodePredicates = DISABLE
  enum:     [DISABLE, INT2BV, BV2INT]

# initial predicates are added as atomic predicates
cpa.predicate.abstraction.initialPredicates.splitIntoAtoms = false

# An initial set of computed abstractions that might be reusable
cpa.predicate.abstraction.reuseAbstractionsFrom = no default value

# Simplify the abstraction formula that is stored to represent the state
# space. Helpful when debugging (formulas get smaller).
cpa.predicate.abstraction.simplify = false

# What to use for storing abstractions
cpa.predicate.abstraction.type = "BDD"
  allowed values: [BDD, FORMULA]

# Export one abstraction formula for each abstraction state into a file?
cpa.predicate.abstractions.export = true

# file that consists of one abstraction formula for each abstraction state
cpa.predicate.abstractions.file = "abstractions.txt"

# Add constraints for the range of the return-value of a nondet-method. For
# example the assignment 'X=nondet_int()' produces the constraint
# 'MIN<=X<=MAX', where MIN and MAX are computed from the type of the method
# (signature, not name!).
cpa.predicate.addRangeConstraintsForNondet = false

# Allow the given extern functions and interpret them as pure functions
# although the predicate analysis does not support their semantics and this
# can produce wrong results.
cpa.predicate.allowedUnsupportedFunctions = {}

# Check satisfiability for plain conjunction of edge and assumptions.
cpa.predicate.assumptionStrengtheningSatCheck = false

# Enable/disable abstraction reduction at the BAM block entry
cpa.predicate.bam.useAbstractionReduction = true

# Enable/disable precision reduction at the BAM block entry
cpa.predicate.bam.usePrecisionReduction = true

# The bitsize is used to encode integers as bitvectors.
cpa.predicate.bitsize = 32

# force abstractions immediately after threshold is reached (no effect if
# threshold = 0)
cpa.predicate.blk.alwaysAfterThreshold = true

# abstraction always and only on explicitly computed abstraction nodes.
cpa.predicate.blk.alwaysAndOnlyAtExplicitNodes = false

# force abstractions at each branch node, regardless of threshold
cpa.predicate.blk.alwaysAtBranch = false

# force abstractions at the head of the analysis-entry function (first node
# in the body), regardless of threshold
cpa.predicate.blk.alwaysAtEntryFunctionHead = false

# abstraction always at explicitly computed abstraction nodes.
cpa.predicate.blk.alwaysAtExplicitNodes = false

# force abstractions at each function call (node before entering the body),
# regardless of threshold
cpa.predicate.blk.alwaysAtFunctionCallNodes = false

# force abstractions at each function head (first node in the body),
# regardless of threshold
cpa.predicate.blk.alwaysAtFunctionHeads = false

# force abstractions at each function calls/returns, regardless of threshold
cpa.predicate.blk.alwaysAtFunctions = true

# force abstractions at each join node, regardless of threshold
cpa.predicate.blk.alwaysAtJoin = false

# force abstractions at loop heads, regardless of threshold
cpa.predicate.blk.alwaysAtLoops = true

# force abstractions at program exit (program end, abort, etc.), regardless
# of threshold
cpa.predicate.blk.alwaysAtProgramExit = false

# abstractions at function calls/returns if threshold has been reached (no
# effect if threshold = 0)
cpa.predicate.blk.functions = false

# abstractions at CFA nodes with more than one incoming edge if threshold has
# been reached (no effect if threshold = 0)
cpa.predicate.blk.join = false

# abstractions at loop heads if threshold has been reached (no effect if
# threshold = 0)
cpa.predicate.blk.loops = false

# maximum blocksize before abstraction is forced
# (non-negative number, special values: 0 = don't check threshold, 1 = SBE)
cpa.predicate.blk.threshold = 0

# use caching of path formulas
cpa.predicate.blk.useCache = true

# always check satisfiability at end of block, even if precision is empty
cpa.predicate.checkBlockFeasibility = false

# The default size in bytes for memory allocations when the value cannot be
# determined.
cpa.predicate.defaultAllocationSize = 4

# The length for arrays we assume for variably-sized arrays.
cpa.predicate.defaultArrayLength = 20

# Use deferred allocation heuristic that tracks void * variables until the
# actual type of the allocation is figured out.
cpa.predicate.deferUntypedAllocations = true

# Direction of the analysis?
cpa.predicate.direction = FORWARD
  enum:     [FORWARD, BACKWARD]

# Enable the possibility to precompute explicit abstraction locations.
cpa.predicate.enableBlockreducer = false

# Theory to use as backend for bitvectors. If different from BITVECTOR, the
# specified theory is used to approximate bitvectors. This can be used for
# solvers that do not support bitvectors, or for increased performance. If
# UNSUPPORTED, solvers can be used that support none of the possible
# alternatives, but CPAchecker will crash if bitvectors are required by the
# analysis.
cpa.predicate.encodeBitvectorAs = BITVECTOR
  enum:     [UNSUPPORTED, INTEGER, RATIONAL, BITVECTOR, FLOAT]

# Theory to use as backend for floats. If different from FLOAT, the specified
# theory is used to approximate floats. This can be used for solvers that do
# not support floating-point arithmetic, or for increased performance. If
# UNSUPPORTED, solvers can be used that support none of the possible
# alternatives, but CPAchecker will crash if floats are required by the
# analysis.
cpa.predicate.encodeFloatAs = FLOAT
  enum:     [UNSUPPORTED, INTEGER, RATIONAL, BITVECTOR, FLOAT]

# Theory to use as backend for integers. If different from INTEGER, the
# specified theory is used to approximate integers. This can be used for
# solvers that do not support integers, or for increased performance. If
# UNSUPPORTED, solvers can be used that support none of the possible
# alternatives, but CPAchecker will crash if integers are required by the
# analysis.
cpa.predicate.encodeIntegerAs = INTEGER
  enum:     [UNSUPPORTED, INTEGER, RATIONAL, BITVECTOR, FLOAT]

# Replace possible overflows with an ITE-structure, which returns either the
# normal value or an UF representing the overflow.
cpa.predicate.encodeOverflowsWithUFs = false

# Name of an external function that will be interpreted as if the function
# call would be replaced by an externally defined expression over the program
# variables. This will only work when all variables referenced by the dimacs
# file are global and declared before this function is called.
cpa.predicate.externModelFunctionName = "__VERIFIER_externModelSatisfied"

# where to dump interpolation and abstraction problems (format string)
cpa.predicate.formulaDumpFilePattern = "%s%04d-%s%03d.smt2"

# Handle field access via extract and concat instead of new variables.
cpa.predicate.handleFieldAccess = false

# If disabled, all implicitly initialized fields and elements are treated as
# non-dets
cpa.predicate.handleImplicitInitialization = true

# Handle aliasing of pointers. This adds disjunctions to the formulas, so be
# careful when using cartesian abstraction.
cpa.predicate.handlePointerAliasing = true

# When a string literal initializer is encountered, initialize the contents
# of the char array with the contents of the string literal instead of just
# assigning a fresh non-det address to it
cpa.predicate.handleStringLiteralInitializers = false

# Allows ignoring Concat and Extract calls when Bitvector theory was
# replaced with Integer or Rational.
cpa.predicate.ignoreExtractConcat = true

# Ignore fields that are not relevant for reachability properties. This is
# unsound in case fields are accessed by pointer arithmetic with hard-coded
# field offsets. Only relevant if ignoreIrrelevantVariables is enabled.
cpa.predicate.ignoreIrrelevantFields = true

# Ignore variables that are not relevant for reachability properties.
cpa.predicate.ignoreIrrelevantVariables = true

# do not include assumptions of states into path formula during strengthening
cpa.predicate.ignoreStateAssumptions = false

# Add computed invariants to the precision. Invariants do not need to be
# generated with the PredicateCPA they can also be given from outside.
cpa.predicate.invariants.addToPrecision = false

# Strengthen the abstraction formula during abstraction with invariants if
# some are generated. Invariants do not need to be generated with the
# PredicateCPA they can also be given from outside.
cpa.predicate.invariants.appendToAbstractionFormula = false

# Strengthen the pathformula during abstraction with invariants if some are
# generated. Invariants do not need to be generated with the PredicateCPA
# they can also be given from outside.
cpa.predicate.invariants.appendToPathFormula = false

# Should the automata used for invariant generation be dumped to files?
cpa.predicate.invariants.dumpInvariantGenerationAutomata = false

# Where to dump the automata that are used to narrow the analysis used for
# invariant generation.
cpa.predicate.invariants.dumpInvariantGenerationAutomataFile = "invgen.%d.spc"

# export final loop invariants
cpa.predicate.invariants.export = true

# export invariants as precision file?
cpa.predicate.invariants.exportAsPrecision = true

# file for exporting final loop invariants
cpa.predicate.invariants.file = "invariants.txt"

# Which strategy should be used for generating invariants, a comma separated
# list can be specified. Usually later specified strategies serve as fallback
# for earlier ones. (default is no invariant generation at all)
cpa.predicate.invariants.generationStrategy = new ArrayList<>()

# How often should generating invariants from sliced prefixes with
# k-induction be tried?
cpa.predicate.invariants.kInductionTries = 3

# file for precision that consists of invariants.
cpa.predicate.invariants.precisionFile = "invariantPrecs.txt"

# Timelimit for invariant generation which may be used during refinement.
# (Use seconds or specify a unit; 0 for infinite)
cpa.predicate.invariants.timeForInvariantGeneration = 10s

# Should the strategies be used all-together or only as fallback. If all
# together, the computation is done until the timeout is hit and the results
# up to this point are taken.
cpa.predicate.invariants.useAllStrategies = false

# Provide invariants generated with other analyses via the
# PredicateCPAInvariantsManager.
cpa.predicate.invariants.useGlobalInvariants = true

# Invariants that are not strong enough to refute the counterexample can be
# ignored with this option. (Weak invariants will lead to repeated
# counterexamples, thus taking time which could be used for the rest of the
# analysis, however, the found invariants may also be better for loops as
# interpolation.)
cpa.predicate.invariants.useStrongInvariantsOnly = true

# Max. number of edge of the abstraction tree to prescan for reuse
cpa.predicate.maxAbstractionReusePrescan = 1

# The maximum length up to which bulk assignments (e.g., initialization) for
# arrays will be handled. With option useArraysForHeap=false, elements beyond
# this bound will be ignored completely. Use -1 to disable the limit.
cpa.predicate.maxArrayLength = -1

# When builtin functions like memcmp/strlen/etc. are called, unroll them up
# to this bound. If the passed arguments are longer, the return value will
# be overapproximated.
cpa.predicate.maxPreciseStrFunctionSize = 100

# Set of functions that non-deterministically provide new memory on the heap,
# i.e. they can return either a valid pointer or zero.
cpa.predicate.memoryAllocationFunctions = {"malloc", "__kmalloc", "kmalloc", "alloca", "__builtin_alloca"}

# Memory allocation functions of which all parameters but the first should be
# ignored.
cpa.predicate.memoryAllocationFunctionsWithSuperfluousParameters = {"__kmalloc", "kmalloc", "kzalloc"}

# Set of functions that non-deterministically provide new zeroed memory on
# the heap, i.e. they can return either a valid pointer or zero.
cpa.predicate.memoryAllocationFunctionsWithZeroing = {"kzalloc", "calloc"}

# Setting this to true makes memoryAllocationFunctions always return a valid
# pointer.
cpa.predicate.memoryAllocationsAlwaysSucceed = false

# Function that is used to free allocated memory.
cpa.predicate.memoryFreeFunctionName = "free"

# which merge operator to use for predicate cpa (usually ABE should be used)
cpa.predicate.merge = "ABE"
  allowed values: [SEP, ABE]

# merge two abstraction states if their preceding abstraction states are
# the same
cpa.predicate.merge.mergeAbstractionStatesWithSamePredecessor = false

# Set of functions that should be considered as giving a non-deterministic
# return value. If you specify this option, the default values are not added
# automatically to the list, so you need to specify them explicitly if you
# need them. Mentioning a function in this list has only an effect, if it is
# an 'external function', i.e., no source is given in the code for this
# function.
cpa.predicate.nondetFunctions = {"sscanf", "rand", "random"}

# Regexp pattern for functions that should be considered as giving a
# non-deterministic return value (c.f. cpa.predicate.nondetFunctions)
cpa.predicate.nondetFunctionsRegexp = "^(__VERIFIER_)?nondet_[a-zA-Z0-9_]*"

# Do not ignore variables that could lead to an overflow (only makes sense if
# ignoreIrrelevantVariables is set to true)
cpa.predicate.overflowVariablesAreRelevant = false

# Which path-formula builder to use. Depending on this setting additional
# terms are added to the path formulas, e.g. SYMBOLICLOCATIONS will track
# the program counter symbolically with a special variable %pc
cpa.predicate.pathFormulaBuilderVariant = DEFAULT
  enum:     [DEFAULT, SYMBOLICLOCATIONS]

# Where to apply the found predicates to?
cpa.predicate.precision.sharing = LOCATION
  enum:     [GLOBAL, SCOPE, FUNCTION, LOCATION, LOCATION_INSTANCE]

# generate statistics about precisions (may be slow)
cpa.predicate.precisionStatistics = true

# export final predicate map
cpa.predicate.predmap.export = true

# file for exporting final predicate map
cpa.predicate.predmap.file = "predmap.txt"

# Format for exporting predicates from precisions.
cpa.predicate.predmap.predicateFormat = SMTLIB2
  enum:     [PLAIN, SMTLIB2]

# Specify whether to overapproximate quantified formula, if one or more
# quantifiers couldn't be eliminated. (Otherwise an exception will be
# thrown)
cpa.predicate.pseudoExistQE.overapprox = false

# Which solver tactic to use for Quantifier Elimination (Only used if
# useRealQuantifierElimination=true)
cpa.predicate.pseudoExistQE.solverQeTactic = LIGHT
  enum:     [NONE, LIGHT, FULL]

# Use Destructive Equality Resolution as simplification method
cpa.predicate.pseudoExistQE.useDER = true

# Use Unconnected Parameter Drop as simplification method
cpa.predicate.pseudoExistQE.useUPD = true

# If an abstraction is computed during refinement, use only the interpolant
# as input, not the concrete block.
cpa.predicate.refinement.abstractInterpolantOnly = false

# use only the atoms from the interpolants as predicates, and not the whole
# interpolant
cpa.predicate.refinement.atomicInterpolants = true

# Direction for doing counterexample analysis: from start of trace, from end
# of trace, or in more complex patterns. In combination with
# incrementalCexTraceCheck=true the generated interpolants will refer to the
# minimal infeasible part of the trace according to this strategy (e.g., with
# FORWARDS a minimal infeasible prefix is found).
cpa.predicate.refinement.cexTraceCheckDirection = ZIGZAG
  enum:     [FORWARDS, BACKWARDS, ZIGZAG, LOOP_FREE_FIRST, RANDOM, LOWEST_AVG_SCORE,
             HIGHEST_AVG_SCORE, LOOP_FREE_FIRST_BACKWARDS]

# Actually compute an abstraction, otherwise just convert the interpolants to
# BDDs as they are.
cpa.predicate.refinement.doAbstractionComputation = false

# dump all interpolation problems
cpa.predicate.refinement.dumpInterpolationProblems = false

# After each refinement, dump the newly found predicates.
cpa.predicate.refinement.dumpPredicates = false

# File name for the predicates dumped after refinements.
cpa.predicate.refinement.dumpPredicatesFile = "refinement%04d-predicates.prec"

# apply deletion-filter to the abstract counterexample, to get a minimal set
# of blocks, before applying interpolation-based refinement
cpa.predicate.refinement.getUsefulBlocks = false

# Do a complete restart (clearing the reached set) after the refinement
cpa.predicate.refinement.global.restartAfterRefinement = false

# Instead of updating precision and arg we say that the refinement was not
# successful after N times of refining. A real error state is not necessary
# to be found. Use 0 for unlimited refinements (default).
cpa.predicate.refinement.global.stopAfterNRefinements = 0

# BlockFormulaStrategy for graph-like ARGs (e.g. Slicing Abstractions)
cpa.predicate.refinement.graphblockformulastrategy = false

# Enable/Disable adding partial state invariants into the PathFormulas
cpa.predicate.refinement.includePartialInvariants = AbstractionPosition.BOTH

# Use incremental search in counterexample analysis to find a minimal
# infeasible part of the trace. This will typically lead to interpolants that
# refer to this part only. The option cexTraceCheckDirection defines in which
# order the blocks of the trace are looked at.
cpa.predicate.refinement.incrementalCexTraceCheck = true

# Max. number of prefixes to extract
cpa.predicate.refinement.maxPrefixCount = 64

# Max. length of feasible prefixes to extract from if at least one prefix was
# already extracted
cpa.predicate.refinement.maxPrefixLength = 1024

# skip refinement if input formula is larger than this amount of bytes
# (ignored if 0)
cpa.predicate.refinement.maxRefinementSize = 0

# sets the level of the pathformulas to use for abstraction. 
#   EDGE : Based on Pathformulas of every edge in ARGPath
#   BLOCK: Based on Pathformulas at Abstractionstates
cpa.predicate.refinement.newtonrefinement.abstractionLevel = EDGE
  enum:     [BLOCK, EDGE]

# Activate fallback to interpolation. Typically in case of a repeated
# counterexample.
cpa.predicate.refinement.newtonrefinement.fallback = false

# use unsatisfiable Core in order to abstract the predicates produced while
# NewtonRefinement
cpa.predicate.refinement.newtonrefinement.infeasibleCore = true

# use live variables in order to abstract the predicates produced while
# NewtonRefinement
cpa.predicate.refinement.newtonrefinement.liveVariables = true

# use heuristic to extract predicates from the CFA statically on first
# refinement
cpa.predicate.refinement.performInitialStaticRefinement = false

# Which predicates should be used as basis for the new precision that will be
# attached to the refined part of the ARG:
# ALL: Collect predicates from the complete ARG.
# SUBGRAPH: Collect predicates from the removed subgraph of the ARG.
# CUTPOINT: Only predicates from the cut-point's (pivot state) precision are
# kept.
# TARGET: Only predicates from the target state's precision are kept.
cpa.predicate.refinement.predicateBasisStrategy = SUBGRAPH
  enum:     [ALL, SUBGRAPH, TARGET, CUTPOINT]

# which sliced prefix should be used for interpolation
cpa.predicate.refinement.prefixPreference = PrefixSelector.NO_SELECTION

# Do a complete restart (clearing the reached set) after N refinements. 0 to
# disable, 1 for always.
cpa.predicate.refinement.restartAfterRefinements = 0

# Use a single SMT solver environment for all interpolation queries and keep
# formulas pushed on solver stack between interpolation queries.
cpa.predicate.refinement.reuseInterpolationEnvironment = false

# In case we apply sequential interpolation, forward and backward directions
# return valid interpolants. We can either choose one of the directions,
# fallback to the other if one does not succeed, or even combine the
# interpolants.
cpa.predicate.refinement.sequentialStrategy = FWD
  enum:     [FWD, FWD_FALLBACK, BWD, BWD_FALLBACK, CONJUNCTION, DISJUNCTION, WEIGHTED,
             RANDOM]

# During refinement, add all new predicates to the precisions of all abstract
# states in the reached set.
cpa.predicate.refinement.sharePredicates = false

# slice block formulas, experimental feature!
cpa.predicate.refinement.sliceBlockFormulas = false

# split each arithmetic equality into two inequalities when extracting
# predicates from interpolants
cpa.predicate.refinement.splitItpAtoms = false

# Stop after refining the n-th spurious counterexample and export that. If 0,
# stop after finding the first spurious counterexample but before refinement.
# If -1, never stop. If this option is used with a value different from -1,
# option counterexample.export.alwaysUseImpreciseCounterexamples=true should
# be set. Then, an actually infeasible counterexample will be handed to
# export. So this option will also not work with additional counterexample
# checks or similar, because these may reject the (infeasible)
# counterexample.
cpa.predicate.refinement.stopAfter = -1

# Strategy how to interact with the interpolating prover. The analysis must
# support the strategy, otherwise the result will be useless!
# - SEQ_CPACHECKER: Generate an inductive sequence of interpolants by asking
# the solver individually for each of them. This allows us to fine-tune the
# queries with the option sequentialStrategy and is supported by all solvers.
# - SEQ: Generate an inductive sequence of interpolants by asking the solver
# for the whole sequence at once.
# - TREE: Use the tree-interpolation feature of the solver to get
# interpolants.
# - TREE_WELLSCOPED: Return each interpolant for i={0..n-1} for the
# partitions A=[lastFunctionEntryIndex..i] and
# B=[0..lastFunctionEntryIndex-1]+[i+1..n]. Based on a tree-like scheme.
# - TREE_NESTED: Use callstack and previous interpolants for next
# interpolants (cf. 'Nested Interpolants').
# - TREE_CPACHECKER: similar to TREE_NESTED, but the algorithm is taken from
# 'Tree Interpolation in Vampire'
cpa.predicate.refinement.strategy = SEQ_CPACHECKER
  enum:     [SEQ, SEQ_CPACHECKER, TREE, TREE_WELLSCOPED, TREE_NESTED, TREE_CPACHECKER]

# time limit for refinement (use milliseconds or specify a unit; 0 for
# infinite)
cpa.predicate.refinement.timelimit = 0ms

# After a failed interpolation query, try to solve the formulas again with
# different options instead of giving up immediately.
cpa.predicate.refinement.tryAgainOnInterpolationError = true

# When interpolation query fails, attempt to check feasibility of the current
# counterexample without interpolation
cpa.predicate.refinement.tryWithoutInterpolation = true

# Use BDDs to simplify interpolants (removing irrelevant predicates)
cpa.predicate.refinement.useBddInterpolantSimplification = false

# use Newton-based Algorithm for the CPA-Refinement, experimental feature!
cpa.predicate.refinement.useNewtonRefinement = false

# Should the path invariants be created and used (potentially additionally to
# the other invariants)
cpa.predicate.refinement.usePathInvariants = false

# use UCB predicates for the CPA-Refinement, experimental feature!
cpa.predicate.refinement.useUCBRefinement = false

# verify if the interpolants fulfill the interpolant properties
cpa.predicate.refinement.verifyInterpolants = false

# Enable the option to allow detecting the allocation type by type of the LHS
# of the assignment, e.g. char *arr = malloc(size) is detected as char[size]
cpa.predicate.revealAllocationTypeFromLhs = true

# maximum blocksize before a satisfiability check is done
# (non-negative number, 0 means never, if positive should be smaller than
# blocksize)
cpa.predicate.satCheck = 0

# Enables sat checks at abstraction location.
# Infeasible paths are already excluded by transfer relation and not later by
# precision adjustment. This property is required in proof checking.
cpa.predicate.satCheckAtAbstraction = false

# Call 'simplify' on generated formulas.
cpa.predicate.simplifyGeneratedPathFormulas = false

# Whether to perform dynamic block encoding as part of each refinement
# iteration
cpa.predicate.slicingabstractions.dynamicBlockEncoding = false

# Only slices the minimal amount of edges to guarantee progress
cpa.predicate.slicingabstractions.minimalslicing = false

# Reduces the amount of solver calls by directly slicing some edges that
# are mathematically proven to be infeasible in any case
cpa.predicate.slicingabstractions.optimizeslicing = true

# Whether to remove parts of the ARG from which no target state is reachable
cpa.predicate.slicingabstractions.removeSafeRegions = true

# C99 only defines the overflow of unsigned integer type.
cpa.predicate.solver.ufCheckingProver.isSignedOverflowSafe = true

# How often should we try to get a better evaluation?
cpa.predicate.solver.ufCheckingProver.maxIterationNum = 5

# which stop operator to use for predicate cpa (usually SEP should be used in
# analysis). SEPNAA works the same as SEP, except that it Never stops At
# Abstraction states. SEPNAA is used in bmc-IMC.properties for config
# bmc-incremental-ABEl to keep exploring covered states.
cpa.predicate.stop = "SEP"
  allowed values: [SEP, SEPPCC, SEPNAA]

# Use formula reporting states for strengthening.
cpa.predicate.strengthenWithFormulaReportingStates = false

# try to reuse old abstractions from file during strengthening
cpa.predicate.strengthenWithReusedAbstractions = false

# file that consists of old abstractions, to be used during strengthening
cpa.predicate.strengthenWithReusedAbstractionsFile = "abstractions.txt"

# The function used to model successful heap object allocation. This is only
# used, when pointer analysis with UFs is enabled.
cpa.predicate.successfulAllocFunctionName = "__VERIFIER_successful_alloc"

# The function used to model successful heap object allocation with zeroing.
# This is only used, when pointer analysis with UFs is enabled.
cpa.predicate.successfulZallocFunctionName = "__VERIFIER_successful_zalloc"

# whether to include the symbolic path formula in the coverage checks or do
# only the fast abstract checks
cpa.predicate.symbolicCoverageCheck = false

# check satisfiability when a target state has been found
cpa.predicate.targetStateSatCheck = false

# Whether to track values stored in variables of function-pointer type.
cpa.predicate.trackFunctionPointers = true

# Use SMT arrays for encoding heap memory instead of uninterpreted function
# (ignored if useByteArrayForHeap=true). This is more precise but may lead to
# interpolation failures.
cpa.predicate.useArraysForHeap = true

# try to add some useful static-learning-like axioms for bitwise operations
# (which are encoded as UFs): essentially, we simply collect all the numbers
# used in bitwise operations, and add axioms like (0 & n = 0)
cpa.predicate.useBitwiseAxioms = false

# Use SMT byte array for encoding heap memory instead of uninterpreted
# function. This is closer to the C heap implementation but may be too
# expensive.
cpa.predicate.useByteArrayForHeap = false

# Use an optimisation for constraint generation
cpa.predicate.useConstraintOptimization = true

# For multithreaded programs this is an overapproximation of possible values
# of shared variables.
cpa.predicate.useHavocAbstraction = false

# Use regions for pointer analysis. So called Burstall&Bornat (BnB) memory
# regions will be used for pointer analysis. BnB regions are based not only
# on type, but also on structure field names. If the field is not accessed by
# an address then it is placed into a separate region.
cpa.predicate.useMemoryRegions = false

# add special information to formulas about non-deterministic functions
cpa.predicate.useNondetFlags = false

# Insert tmp-variables for parameters at function-entries. The variables are
# similar to return-variables at function-exit.
cpa.predicate.useParameterVariables = false

# Insert tmp-parameters for global variables at function-entries. The global
# variables are also encoded with return-variables at function-exit.
cpa.predicate.useParameterVariablesForGlobals = false

# Use quantifiers when encoding heap accesses. This requires an SMT solver
# that is capable of quantifiers (e.g. Z3 or PRINCESS).
cpa.predicate.useQuantifiersOnArrays = false

# Do not follow states which can not syntactically lead to a target location
cpa.property_reachability.noFollowBackwardsUnreachable = true

# Qualified name for class which checks that the computed abstraction adheres
# to the desired property.
cpa.propertychecker.className = org.sosy_lab.cpachecker.pcc.propertychecker.DefaultPropertyChecker.class

# List of parameters for constructor of propertychecker.className. Parameter
# values are specified in the order the parameters are defined in the
# respective constructor. Every parameter value is finished with ",". The
# empty string represents an empty parameter list.
cpa.propertychecker.parameters = ""

# Whether to consider constraints on program variables (e.g., x > 10) as
# definitions)
cpa.reachdef.constraintIsDef = false

# which merge operator to use for ReachingDefCPA
cpa.reachdef.merge = "JOIN"
  allowed values: [SEP, JOIN, IGNORECALLSTACK]

# which stop operator to use for ReachingDefCPA
cpa.reachdef.stop = "SEP"
  allowed values: [SEP, JOIN, IGNORECALLSTACK]

# Do not report 'False' result, return UNKNOWN instead.  Useful for
# incomplete analysis with no counterexample checking.
cpa.reportFalseAsUnknown = false

# which merge operator to use for SignCPA
cpa.sign.merge = "JOIN"
  allowed values: [SEP, JOIN]

# which stop operator to use for SignCPA
cpa.sign.stop = "SEP"
  allowed values: [SEP, JOIN]

# max length of a chain of states, -1 for infinity
cpa.singleSuccessorCompactor.maxChainLength = -1

# Apply AND- LBE transformation to loop transition relation.
cpa.slicing.applyLBETransformation = true

# Check target states reachability
cpa.slicing.checkTargetStates = true

# Filter lemmas by liveness
cpa.slicing.filterByLiveness = true

# Depth limit for the 'LEAST_REMOVALS' strategy.
cpa.slicing.leastRemovalsDepthLimit = 2

# Pre-run syntactic weakening
cpa.slicing.preRunSyntacticWeakening = true

# Whether to use a refinable slicing precision that starts with an empty
# slice, or a statically computed, fixed slicing precision
cpa.slicing.refinableSlice = false

# Allow counterexamples that are valid only on the program slice. If you set
# this to `false`, you may have to set takeEagerSlice=true to avoid failed
# refinements. If this is set to true, the counterexample check won't work
# (in general), so you have to turn it off.
cpa.slicing.refinement.counterexampleCheckOnSlice = false

# Which prefix provider to use? (give class name) If the package name starts
# with 'org.sosy_lab.cpachecker.', this prefix can be omitted.
cpa.slicing.refinement.prefixProvider = no default value

# How to refine the slice:
# - CEX_ASSUME_DEPS: Add the dependencies of all counterexample assume edges
# to the slice.
# - INFEASIBLE_PREFIX_ASSUME_DEPS: Find an infeasible prefix and add the
# dependencies of all assume edges that are part of the infeasible prefix to
# the slice. Requires a prefix provider
# ('cpa.slicing.refinement.prefixProvider').
# - CEX_FIRST_ASSUME_DEPS: Add the dependencies of the first counterexample
# assume edges, that is not already part of the slice, to the slice.
# - CEX_LAST_ASSUME_DEPS: Add the dependencies of the last counterexample
# assume edges, that is not already part of the slice, to the slice.
cpa.slicing.refinement.refineStrategy = CEX_ASSUME_DEPS
  enum:     [CEX_ASSUME_DEPS, INFEASIBLE_PREFIX_ASSUME_DEPS, CEX_FIRST_ASSUME_DEPS,
             CEX_LAST_ASSUME_DEPS]

# What kind of restart to do after a successful refinement
cpa.slicing.refinement.restartStrategy = PIVOT
  enum:     [PIVOT, ROOT]

# Use all assumptions of a target path as slicing criteria, not just the edge
# to the target location.
cpa.slicing.refinement.takeEagerSlice = false

# Strategy for abstracting children during CEX weakening
cpa.slicing.removalSelectionStrategy = ALL
  enum:     [ALL, FIRST, RANDOM, LEAST_REMOVALS]

# Time for loop generation before aborting.
# (Use seconds or specify a unit; 0 for infinite)
cpa.slicing.timeForLoopGeneration = 0s

# Inductive weakening strategy
cpa.slicing.weakeningStrategy = CEX
  enum:     [SYNTACTIC, DESTRUCTIVE, CEX]

# Enable GCC extension 'Arrays of Length Zero'.
cpa.smg.GCCZeroLengthArray = false

# Allocate memory on declaration of external variable
cpa.smg.allocateExternalVariables = true

# Array allocation functions
cpa.smg.arrayAllocationFunctions = {"calloc"}

# with this option enabled, a check for unreachable memory occurs whenever a
# function returns, and not only at the end of the main function
cpa.smg.checkForMemLeaksAtEveryFrameDrop = true

# Crash on unknown array dereferences
cpa.smg.crashOnUnknown = false

# Deallocation functions
cpa.smg.deallocationFunctions = {"free"}

# with this option enabled, heap abstraction will be enabled.
cpa.smg.enableHeapAbstraction = false

# If this Option is enabled, failure of malloc is simulated
cpa.smg.enableMallocFail = true

# Filename format for SMG graph dumps
cpa.smg.exportSMG.file = "smg/smg-%s.dot"

# Describes when SMG graphs should be dumped.
cpa.smg.exportSMGwhen = NEVER
  enum:     [NEVER, LEAF, INTERESTING, EVERY]

# Functions which indicate externally allocated memory
cpa.smg.externalAllocationFunction = {"ext_allocation"}

# Default size of externally allocated memory
cpa.smg.externalAllocationSize = Integer.MAX_VALUE

# Allocation size of memory that cannot be calculated.
cpa.smg.guessSize = 2

# Size of memory that cannot be calculated will be guessed.
cpa.smg.guessSizeOfUnknownMemorySize = false

# Handle external variables with incomplete type (extern int array[]) as
# external allocation
cpa.smg.handleIncompleteExternalVariableAsExternalAllocation = false

# with this option enabled, memory that is not freed before the end of main
# is reported as memleak even if it is reachable from local variables in main
cpa.smg.handleNonFreedMemoryInMainAsMemLeak = true

# Handle unknown dereference as safe and check error based on error
# predicate, depends on trackPredicates
cpa.smg.handleUnknownDereferenceAsSafe = false

# Sets how unknown functions are handled.
cpa.smg.handleUnknownFunctions = STRICT
  enum:     [STRICT, ASSUME_SAFE, ASSUME_EXTERNAL_ALLOCATED]

# Perform merge SMGStates by SMGJoin on ends of code block. Works with
# 'merge=JOIN'
cpa.smg.joinOnBlockEnd = true

# Memory allocation functions
cpa.smg.memoryAllocationFunctions = {"malloc", "__kmalloc", "kmalloc", "realloc"}

# Size parameter of memory allocation functions
cpa.smg.memoryAllocationFunctionsSizeParameter = 0

# Position of element size parameter for array allocation functions
cpa.smg.memoryArrayAllocationFunctionsElemSizeParameter = 1

# Position of number of element parameter for array allocation functions
cpa.smg.memoryArrayAllocationFunctionsNumParameter = 0

# Determines if memory errors are target states
cpa.smg.memoryErrors = true

# which merge operator to use for the SMGCPA
cpa.smg.merge = "SEP"
  allowed values: [SEP, JOIN]

# export interpolant smgs for every path interpolation to this path template
cpa.smg.refinement.exportInterpolantSMGs = "smg/interpolation-%d/%s"

# when to export the interpolation tree
# NEVER:   never export the interpolation tree
# FINAL:   export the interpolation tree once after each refinement
# ALWAYS:  export the interpolation tree once after each interpolation, i.e.
# multiple times per refinement
cpa.smg.refinement.exportInterpolationTree = "NEVER"
  allowed values: [NEVER, FINAL, ALWAYS]

# export interpolant smgs for every path interpolation to this path template
cpa.smg.refinement.exportRefinementSMGs = "smg/refinement-%d/smg-%s"

# export interpolation trees to this file template
cpa.smg.refinement.interpolationTreeExportFile = "interpolationTree.%d-%d.dot"

# Sets the level of runtime checking: NONE, HALF, FULL
cpa.smg.runtimeCheck = NONE
  enum:     [FORCED, NONE, HALF, FULL]

# Patterns of unknown functions which are always considered as safe
# functions, i.e., free of memory-related side-effects.
cpa.smg.safeUnknownFunctionsPatterns = {"abort"}

# which stop operator to use for the SMGCPA
cpa.smg.stop = "SEP"
  allowed values: [SEP, NEVER, END_BLOCK]

# Enable track predicates for possible memory safety error on SMG state
cpa.smg.trackErrorPredicates = false

# Enable track predicates on SMG state
cpa.smg.trackPredicates = false

# Emit messages when we encounter non-target undefined behavior
cpa.smg.unknownOnUndefined = true

# Allow SMG to check predicates
cpa.smg.verifyPredicates = false

# Allocation functions which set memory to zero
cpa.smg.zeroingMemoryAllocation = {"calloc", "kzalloc"}

# Enable GCC extension 'Arrays of Length Zero'.
cpa.smg2.GCCZeroLengthArray = false

# If heap values are to be abstracted based on CEGAR.
cpa.smg2.abstraction.abstractHeapValues = false

# Abstraction of all detected linked lists at loop heads.
cpa.smg2.abstraction.abstractLinkedLists = true

# Abstraction of program variables via CEGAR.
cpa.smg2.abstraction.abstractProgramVariables = false

# restrict abstraction computations to branching points
cpa.smg2.abstraction.alwaysAtBranch = false

# restrict abstraction computations to function calls/returns
cpa.smg2.abstraction.alwaysAtFunction = false

# restrict abstraction computations to join points
cpa.smg2.abstraction.alwaysAtJoin = false

# If enabled, abstraction computations at loop-heads are enabled. List
# abstraction has to be enabled for this.
cpa.smg2.abstraction.alwaysAtLoop = false

# threshold for level of determinism, in percent, up-to which abstraction
# computations are performed (and iteration threshold was reached)
cpa.smg2.abstraction.determinismThreshold = 85

# toggle liveness abstraction
cpa.smg2.abstraction.doLivenessAbstraction = false

# skip abstraction computations until the given number of iterations are
# reached, after that decision is based on the current level of determinism,
# setting the option to -1 always performs abstraction computations
cpa.smg2.abstraction.iterationThreshold = -1

# The minimum list segments directly following each other with the same value
# needed to abstract them. Minimum is 2.
cpa.smg2.abstraction.listAbstractionMinimumLengthThreshhold = 12

# restrict liveness abstractions to nodes with more than one entering and/or
# leaving edge
cpa.smg2.abstraction.onlyAtNonLinearCFA = false

# Allocate memory on declaration of external variable
cpa.smg2.allocateExternalVariables = true

# Array allocation functions
cpa.smg2.arrayAllocationFunctions = {"calloc"}

# Use equality assumptions to assign values (e.g., (x == 0) => x = 0)
cpa.smg2.assignEqualityAssumptions = true

# Treat symbolic values as unknowns and assign new concrete values to them.
cpa.smg2.assignSymbolicValues = true

# with this option enabled, a check for unreachable memory occurs whenever a
# function returns, and not only at the end of the main function
cpa.smg2.checkForMemLeaksAtEveryFrameDrop = true

# Crash on unknown array dereferences
cpa.smg2.crashOnUnknown = false

# Deallocation functions
cpa.smg2.deallocationFunctions = {"free"}

# with this option enabled, heap abstraction will be enabled.
cpa.smg2.enableHeapAbstraction = false

# If this Option is enabled, failure of malloc is simulated
cpa.smg2.enableMallocFail = true

# Filename format for SMG graph dumps
cpa.smg2.exportSMG.file = "smg/smg-%s.dot"

# Describes when SMG graphs should be dumped.
cpa.smg2.exportSMGwhen = NEVER
  enum:     [NEVER, LEAF, INTERESTING, EVERY]

# Functions which indicate externally allocated memory
cpa.smg2.externalAllocationFunction = {"ext_allocation"}

# Default size of externally allocated memory
cpa.smg2.externalAllocationSize = Integer.MAX_VALUE

# Allocation size of memory that cannot be calculated.
cpa.smg2.guessSize = BigInteger.valueOf(2)

# Size of memory that cannot be calculated will be guessed.
cpa.smg2.guessSizeOfUnknownMemorySize = false

# Handle external variables with incomplete type (extern int array[]) as
# external allocation
cpa.smg2.handleIncompleteExternalVariableAsExternalAllocation = false

# with this option enabled, memory that is not freed before the end of main
# is reported as memleak even if it is reachable from local variables in main
cpa.smg2.handleNonFreedMemoryInMainAsMemLeak = true

# Handle unknown dereference as safe and check error based on error
# predicate, depends on trackPredicates
cpa.smg2.handleUnknownDereferenceAsSafe = false

# Sets how unknown functions are handled.
cpa.smg2.handleUnknownFunctions = STRICT
  enum:     [STRICT, ASSUME_SAFE, ASSUME_EXTERNAL_ALLOCATED]

# If this option is enabled, a memory allocation (e.g. malloc or array
# declaration) for unknown memory sizes does not abort, but also does not
# create any memory.
cpa.smg2.ignoreUnknownMemoryAllocation = false

# if there is an assumption like (x!=0), this option sets unknown
# (uninitialized) variables to 1L, when the true-branch is handled.
cpa.smg2.initAssumptionVars = false

# get an initial precision from file
cpa.smg2.initialPrecisionFile = no default value

# get an initial precision from a predicate precision file
cpa.smg2.initialPredicatePrecisionFile = no default value

# Perform merge SMGStates by SMGJoin on ends of code block. Works with
# 'merge=JOIN'
cpa.smg2.joinOnBlockEnd = true

# Memory allocation functions
cpa.smg2.memoryAllocationFunctions = {"malloc", "__kmalloc", "kmalloc", "realloc"}

# Size parameter of memory allocation functions
cpa.smg2.memoryAllocationFunctionsSizeParameter = 0

# Position of element size parameter for array allocation functions
cpa.smg2.memoryArrayAllocationFunctionsElemSizeParameter = 1

# Position of number of element parameter for array allocation functions
cpa.smg2.memoryArrayAllocationFunctionsNumParameter = 0

# Determines if memory errors are target states
cpa.smg2.memoryErrors = true

# which merge operator to use for the SMGCPA
cpa.smg2.merge = "SEP"
  allowed values: [SEP]

# Assume that variables used only in a boolean context are either zero or
# one.
cpa.smg2.optimizeBooleanVariables = true

# target file to hold the exported precision
cpa.smg2.precisionFile = no default value

# whether or not to use heuristic to avoid similar, repeated refinements
cpa.smg2.refinement.avoidSimilarRepeatedRefinement = false

# Which base precision should be used for a new precision? ALL: During
# refinement, collect precisions from the complete ARG. SUBGRAPH: During
# refinement, keep precision from all removed parts (subgraph) of the ARG.
# CUTPOINT: Only the cut-point's precision is kept. TARGET: Only the target
# state's precision is kept.
cpa.smg2.refinement.basisStrategy = SUBGRAPH
  enum:     [ALL, SUBGRAPH, TARGET, CUTPOINT]

# whether or not to do lazy-abstraction
cpa.smg2.refinement.doLazyAbstraction = true

# whether to perform (more precise) edge-based interpolation or (more
# efficient) path-based interpolation
cpa.smg2.refinement.performEdgeBasedInterpolation = true

# whether or not to do lazy-abstraction
cpa.smg2.refinement.restart = PIVOT
  enum:     [ROOT, PIVOT, COMMON]

# Sets the level of runtime checking: NONE, HALF, FULL
cpa.smg2.runtimeCheck = NONE
  enum:     [FORCED, NONE, HALF, FULL]

# Which unknown function are always considered as safe functions, i.e., free
# of memory-related side-effects?
cpa.smg2.safeUnknownFunctions = {"abort"}

# which stop operator to use for the SMGCPA
cpa.smg2.stop = "SEP"
  allowed values: [SEP, NEVER, END_BLOCK]

# Enable track predicates for possible memory safety error on SMG state
cpa.smg2.trackErrorPredicates = false

# Enable track predicates on SMG state
cpa.smg2.trackPredicates = false

# Emit messages when we encounter non-target undefined behavior
cpa.smg2.unknownOnUndefined = true

# Allocation functions which set memory to zero
cpa.smg2.zeroingMemoryAllocation = {"calloc", "kzalloc"}

# set this to true when you only want to do a code analysis. If StatisticsCPA
# is combined with other CPAs to do queries use false.
cpa.statistics.analysis = true

# which merge operator to use for StatisticsCPA? Ignored when analysis is set
# to true
cpa.statistics.mergeSep = "sep"
  allowed values: [sep, join]

# count the number of traversed arithmetic operations.
cpa.statistics.metric.arithmeticOperationCount = true

# count the number of traversed variable definitions with array type.
cpa.statistics.metric.arrayVariablesCount = true

# count the number of traversed assume statements.
cpa.statistics.metric.assumeCount = true

# count the number of traversed bitwise operations.
cpa.statistics.metric.bitwiseOperationCount = true

# count the number of traversed edges with more than one outgoing edge.
cpa.statistics.metric.branchCount = true

# count the number of traversed dereference operations.
cpa.statistics.metric.dereferenceCount = true

# count the number of traversed variable definitions with floating type
# (float or double).
cpa.statistics.metric.floatVariablesCount = true

# count the number of traversed function calls.
cpa.statistics.metric.functionCallCount = true

# count the number of traversed function definitions.
cpa.statistics.metric.functionDefCount = true

# count the number of traversed global variable definitions.
cpa.statistics.metric.globalVariablesCount = true

# count the number of traversed gotos.
cpa.statistics.metric.gotoCount = true

# count the number of traversed variable definitions with integer type.
cpa.statistics.metric.integerVariablesCount = true

# count the number of traversed jumps.
cpa.statistics.metric.jumpCount = true

# count the number of traversed local variable definitions.
cpa.statistics.metric.localVariablesCount = true

# count the number of traversed loops.
cpa.statistics.metric.loopCount = true

# count the number of traversed nodes.
cpa.statistics.metric.nodeCount = true

# count the number of traversed variable definitions with pointer type.
cpa.statistics.metric.pointerVariablesCount = true

# count the number of traversed variable definitions with a complex structure
# type.
cpa.statistics.metric.structVariablesCount = true

# target file to hold the statistics
cpa.statistics.statisticsCPAFile = no default value

# Which refinement algorithm to use? (give class name, required for
# termination algorithm with CEGAR) If the package name starts with
# 'org.sosy_lab.cpachecker.', this prefix can be omitted.
cpa.termination.refiner = no default value

# Simple thread analysis from theory paper
cpa.thread.simpleMode = false

# We do not support the case when the same thread is created several times.
# We may skip or fail in this case.
cpa.thread.skipTheSameThread = false

# We do not support the case when the same thread is created several times.
# We may try to support it with self-parallelism.
cpa.thread.supportSelfCreation = false

# allow assignments of a new thread to the same left-hand-side as an existing
# thread.
cpa.threading.allowMultipleLHS = false

# the maximal number of parallel threads, -1 for infinite. When combined with
# 'useClonedFunctions=true', we need at least N cloned functions. The option
# 'cfa.cfaCloner.numberOfCopies' should be set to N.
cpa.threading.maxNumberOfThreads = 5

# in case of witness validation we need to check all possible function calls
# of cloned CFAs.
cpa.threading.useAllPossibleClones = false

# atomic locks are used to simulate atomic statements, as described in the
# rules of SV-Comp.
cpa.threading.useAtomicLocks = true

# do not use the original functions from the CFA, but cloned ones. See
# cfa.postprocessing.CFACloner for detail.
cpa.threading.useClonedFunctions = true

# local access locks are used to avoid expensive interleaving, if a thread
# only reads and writes its own variables.
cpa.threading.useLocalAccessLocks = true

# The max amount of refinements for the trace abstraction algorithm. Setting
# it to 0 leads to an analysis of the ARG without executing any refinements.
# This is used for debugging purposes.
cpa.traceabstraction.refinementStrategy.maxRefinementIterations = -1

# which merge operator to use for UninitializedVariablesCPA?
cpa.uninitvars.merge = "sep"
  allowed values: [sep, join]

# print warnings during analysis when uninitialized variables are used
cpa.uninitvars.printWarnings = "true"

# which stop operator to use for UninitializedVariablesCPA?
cpa.uninitvars.stop = "sep"
  allowed values: [sep, join]

# functions which stop the analysis
cpa.usage.abortfunctions = {}

# functions which are used to bind variables (e.g., list elements are bound
# to a list variable)
cpa.usage.binderFunctions = {}

# export counterexample core as text file
cpa.usage.export.witnessTemplate = "witness.%s.graphml"

# path to write results
cpa.usage.falseUnsafesOutput = "FalseUnsafes"

# if a file does not exist, do not include the corresponding edge
cpa.usage.filterMissedFiles = true

# filtered unsafes, which can not be removed using precision, may be hidden
cpa.usage.hideFilteredUnsafes = false

# The functions, which cannot be executed in parallel with themselves
cpa.usage.notSelfParallelFunctions = new HashSet<>()

# path to write results
cpa.usage.output = "unsafe_rawdata"

# whether all variables should be printed to one file or to different files
cpa.usage.outputType = KLEVER
  enum:     [ETV, KLEVER, KLEVER_OLD]

# The way how to identify two paths as equal
cpa.usage.pathEquality = CFANodeId
  enum:     [ARGStateId, CFANodeId]

# The value of marked unsafes, after which the precision should be cleaned
cpa.usage.precisionReset = Integer.MAX_VALUE

# print all unsafe cases in report
cpa.usage.printFalseUnsafes = false

# output only true unsafes
cpa.usage.printOnlyTrueUnsafes = false

# print found unsafes in case of unknown verdict
cpa.usage.printUnsafesIfUnknown = true

# The order of refinement blocks
cpa.usage.refinementChain = no default value

# use single file for output or dump every error trace to its own file
cpa.usage.singleFileOutput = false

# The functions, which are executed in one thread
cpa.usage.singleThreadFunctions = new HashSet<>()

# functions which we do not analyze
cpa.usage.skippedfunctions = {}

# variables, which will be filtered by function location
cpa.usage.skippedvariables.byFunction = {}

# variables, which will be filtered by function prefix
cpa.usage.skippedvariables.byFunctionPrefix = {}

# variables, which will be filtered by its name
cpa.usage.skippedvariables.byName = {}

# variables, which will be filtered by its name prefix
cpa.usage.skippedvariables.byNamePrefix = {}

# variables, which will be filtered by its type
cpa.usage.skippedvariables.byType = {}

# clean all ARG or try to reuse some parts of it (memory consuming)
cpa.usage.totalARGCleaning = true

# ignore unsafes only with empty callstacks
cpa.usage.unsafedetector.ignoreEmptyLockset = true

# A name of interrupt lock for checking deadlock free
cpa.usage.unsafedetector.intLock = no default value

# defines what is unsafe
cpa.usage.unsafedetector.unsafeMode = RACE
  enum:     [RACE, DEADLOCKCIRCULAR, DEADLOCKDISPATCH]

# functions, which are marked as write access
cpa.usage.writeAccessFunctions = {}

# which merge operator to use for ValidVarsCPA
cpa.validVars.merge = "JOIN"
  allowed values: [SEP, JOIN]

# restrict abstraction computations to branching points
cpa.value.abstraction.alwaysAtBranch = false

# restrict abstraction computations to function calls/returns
cpa.value.abstraction.alwaysAtFunction = false

# restrict abstraction computations to join points
cpa.value.abstraction.alwaysAtJoin = false

# restrict abstraction computations to loop heads
cpa.value.abstraction.alwaysAtLoop = false

# threshold for level of determinism, in percent, up-to which abstraction
# computations are performed (and iteration threshold was reached)
cpa.value.abstraction.determinismThreshold = 85

# toggle liveness abstraction
cpa.value.abstraction.doLivenessAbstraction = false

# skip abstraction computations until the given number of iterations are
# reached, after that decision is based on the current level of determinism,
# setting the option to -1 always performs abstraction computations
cpa.value.abstraction.iterationThreshold = -1

# restrict liveness abstractions to nodes with more than one entering and/or
# leaving edge
cpa.value.abstraction.onlyAtNonLinearCFA = false

# Allow the given extern functions and interpret them as pure functions
# although the value analysis does not support their semantics and this can
# produce wrong results.
cpa.value.allowedUnsupportedFunctions = {}

# Use equality assumptions to assign values (e.g., (x == 0) => x = 0)
cpa.value.assignEqualityAssumptions = true

# Fixed set of values for function calls to VERIFIER_nondet_*. Only works
# if ignoreFunctionValueExceptRandom is enabled
cpa.value.functionValuesForRandom = no default value

# Track or not function pointer values
cpa.value.ignoreFunctionValue = true

# If 'ignoreFunctionValue' is set to true, this option allows to provide a
# fixed set of values in the TestComp format. It is used for function-calls
# to calls of VERIFIER_nondet_*. The file is provided via the option
# functionValuesForRandom 
cpa.value.ignoreFunctionValueExceptRandom = false

# if there is an assumption like (x!=0), this option sets unknown
# (uninitialized) variables to 1L, when the true-branch is handled.
cpa.value.initAssumptionVars = false

# get an initial precision from file
cpa.value.initialPrecisionFile = no default value

# get an initial precision from a predicate precision file
cpa.value.initialPredicatePrecisionFile = no default value

# apply optimizations based on equality of input interpolant and candidate
# interpolant
cpa.value.interpolation.applyItpEqualityOptimization = true

# apply optimizations based on CFA edges with only variable-renaming
# semantics
cpa.value.interpolation.applyRenamingOptimization = true

# apply optimizations based on infeasibility of suffix
cpa.value.interpolation.applyUnsatSuffixOptimization = true

# whether or not to manage the callstack, which is needed for BAM
cpa.value.interpolation.manageCallstack = true

# which merge operator to use for ValueAnalysisCPA
cpa.value.merge = "SEP"
  allowed values: [SEP, JOIN]

# Assume that variables used only in a boolean context are either zero or
# one.
cpa.value.optimizeBooleanVariables = true

# target file to hold the exported precision
cpa.value.precisionFile = no default value

# whether or not to add assumptions to counterexamples, e.g., for supporting
# counterexample checks
cpa.value.refinement.addAssumptionsToCex = true

# whether or not to use heuristic to avoid similar, repeated refinements
cpa.value.refinement.avoidSimilarRepeatedRefinement = false

# Which base precision should be used for a new precision? ALL: During
# refinement, collect precisions from the complete ARG. SUBGRAPH: During
# refinement, keep precision from all removed parts (subgraph) of the ARG.
# CUTPOINT: Only the cut-point's precision is kept. TARGET: Only the target
# state's precision is kept.
cpa.value.refinement.basisStrategy = SUBGRAPH
  enum:     [ALL, SUBGRAPH, TARGET, CUTPOINT]

# completely disable the tracking of found error paths in the refiner, i.e.,
# disable the detection of repeated counterexamples
cpa.value.refinement.disableErrorPathTracking = false

# whether or not to do lazy-abstraction
cpa.value.refinement.doLazyAbstraction = true

# when to export the interpolation tree
# NEVER:   never export the interpolation tree
# FINAL:   export the interpolation tree once after each refinement
# ALWAYS:  export the interpolation tree once after each interpolation, i.e.
# multiple times per refinement
cpa.value.refinement.exportInterpolationTree = "NEVER"
  allowed values: [NEVER, FINAL, ALWAYS]

# export interpolation trees to this file template
cpa.value.refinement.interpolationTreeExportFile = "interpolationTree.%d-%d.dot"

# heuristic to sort targets based on the quality of interpolants derivable
# from them
cpa.value.refinement.itpSortedTargets = false

# File to which path constraints should be written.
cpa.value.refinement.pathConstraintsFile = "Counterexample.%d.symbolic-trace.txt"

# whether or not to perform path slicing before interpolation
cpa.value.refinement.pathSlicing = true

# whether to perform (more precise) edge-based interpolation or (more
# efficient) path-based interpolation
cpa.value.refinement.performEdgeBasedInterpolation = true

# which prefix of an actual counterexample trace should be used for
# interpolation
cpa.value.refinement.prefixPreference = [PrefixPreference.DOMAIN_MIN, PrefixPreference.LENGTH_MIN]

# whether or not to do lazy-abstraction
cpa.value.refinement.restart = PIVOT
  enum:     [ROOT, PIVOT, COMMON]

# instead of reporting a repeated counter-example, search and refine another
# error-path for the same target-state.
cpa.value.refinement.searchForFurtherErrorPaths = false

# store all refined paths
cpa.value.refinement.storeAllRefinedPaths = false

# if this option is set to false, constraints are never kept
cpa.value.refinement.trackConstraints = true

# whether to use the top-down interpolation strategy or the bottom-up
# interpolation strategy
cpa.value.refinement.useTopDownInterpolationStrategy = true

# Whether to write symbolic trace (including path constraints) for found
# counterexamples
cpa.value.refinement.writePathConstraints = true

# Overall timelimit for computing initial value precision from given
# predicate precision (use seconds or specify a unit; 0 for infinite)
cpa.value.reuse.precision.predicate.adaptionLimit = 0ns

# also consider other binary operators than ==, != when considering control
# dependencies while adapting predicate precision
cpa.value.reuse.precision.predicate.includeControlNonEquiv = false

# comma-separated list of files with property specifications that should be
# considered when determining the relevant edges for predicate precision
# adaption
cpa.value.reuse.precision.predicate.relevantProperties = []

# which strategy to use to convert predicate to value precision
cpa.value.reuse.precision.predicate.strategy = CONVERT_ONLY
  enum:     [CONVERT_ONLY, CONVERT_AND_ADD_FLOW_BACKWARD,
             CONVERT_AND_ADD_FLOW_BIDIRECTED]

# also consider control dependencies during adaption of predicate precision
cpa.value.reuse.precision.predicate.useControl = false

# which stop operator to use for ValueAnalysisCPA
cpa.value.stop = "SEP"
  allowed values: [SEP, JOIN, NEVER, EQUALS]

# Default size of arrays whose length can't be determined.
cpa.value.symbolic.defaultArraySize = 20

# If this option is set to true, an own symbolic identifier is assigned to
# each array slot when handling non-deterministic arrays of fixed length. If
# the length of the array can't be determined, it won't be handled in either
# cases.
cpa.value.symbolic.handleArrays = false

# Whether to handle non-deterministic pointers in symbolic value analysis.
cpa.value.symbolic.handlePointers = true

# If this option is set to true, an own symbolic identifier is assigned to
# each struct member when handling non-deterministic structs.
cpa.value.symbolic.handleStructs = true

# Whether to try to not use any constraints in refinement
cpa.value.symbolic.refinement.avoidConstraints = true

# The refinement strategy to use
cpa.value.symbolic.refinement.strategy = CONSTRAINTS_FIRST
  enum:     [CONSTRAINTS_FIRST, VALUES_FIRST, ALTERNATING, VALUES_ONLY]

# Whether to simplify symbolic expressions, if possible.
cpa.value.symbolic.simplifySymbolics = true

# Track Java array values in explicit value analysis. This may be costly if
# the verified program uses big or lots of arrays. Arrays in C programs will
# always be tracked, even if this value is false.
cpa.value.trackJavaArrayValues = true

# Tells the value analysis how to handle unknown values.
cpa.value.unknownValueHandling = DISCARD
  enum:     [DISCARD, INTRODUCE_SYMBOLIC]

# Specify simple custom instruction by specifying the binary operator op. All
# simple cis are of the form r = x op y. Leave empty (default) if you specify
# a more complex custom instruction within code.
custominstructions.binaryOperatorForSimpleCustomInstruction = PLUS
  enum:     [MULTIPLY, DIVIDE, MODULO, PLUS, MINUS, SHIFT_LEFT, SHIFT_RIGHT, LESS_THAN,
             GREATER_THAN, LESS_EQUAL, GREATER_EQUAL, BINARY_AND, BINARY_XOR, BINARY_OR,
             EQUALS, NOT_EQUALS]

# Name of function containing the custom instruction definition
custominstructions.ciFun = no default value

# Signature for custom instruction, describes names and order of input and
# output variables of a custom instruction
custominstructions.ciSignature = "ci_spec.txt"

# File specifying start locations of custom instruction applications
# File to dump start location of identified custom instruction applications
custominstructions.definitionFile = "ci_def.txt"

# Where to dump the requirements on custom instruction extracted from
# analysis
custominstructions.dumpCIRequirements = "ci%d.smt"

# Try to remove parts of requirements that are not related to custom
# instruction and are, thus, irrelevant for custom instruction behavior
custominstructions.enableRequirementsSlicing = false

# Specifies the mode how custom instruction applications in program are
# identified.
custominstructions.mode = OPERATOR
  enum:     [MANUAL, OPERATOR, AUTOMATIC]

# Try to remove requirements that are covered by another requirement and are,
# thus, irrelevant for custom instruction behavior
custominstructions.removeCoveredRequirements = false

# Qualified name of class for abstract state which provides custom
# instruction requirements.
custominstructions.requirementsStateClassName = no default value

# Option to change the behaviour of the loop detection for generating the
# Counterexample-C-Code that will probably be used to generate invariants.
# Note that last loop means the first loop encountered when backwards
# traversing the given ARGPath, thus, the last loop may contain other loops,
# which are in turn also counted to the last loop.
cwriter.withLoops.loopDetectionStrategy = ALL_LOOPS
  enum:     [ALL_LOOPS, ONLY_LAST_LOOP]

# When checking for the data race property, use this configuration file
# instead of the current one.
datarace.config = no default value

# Whether to consider pointees. Only if this option is set to true, a pointer
# analysis is run during system dependence graph (SDG) construction and
# dependencies of pointees are inserted into the SDG. If this option is set
# to false, pointers are completely ignored and the resulting SDG is an
# under-approximation that lacks all pointee dependencies.
dependencegraph.considerPointees = true

# Whether to take an assumption edge 'p' as control dependence if edge 'not
# p' is a control dependence. This creates a larger slice, but may reduce the
# size of the state space for deterministic programs. This behavior is also
# closer to the static program slicing based on control-flow graphs (CFGs),
# where branching is represented by a single assumption (with true- and
# false-edges)
dependencegraph.controldeps.considerInverseAssumption = true

# Whether to consider control dependencies.
dependencegraph.controldeps.use = true

# File to export dependence graph to. If `null`, dependence graph will not be
# exported as dot.
dependencegraph.exportDot = "DependenceGraph.dot"

# Whether to consider (data-)flow dependencies.
dependencegraph.flowdeps.use = true

# Whether to include only functions reachable from the main function in the
# dependence graph.
dependencegraph.onlyReachableFunctions = true

# The maximum duration a single pointer analysis method is allowed to run
# (use seconds or specify a unit; 0 for infinite).
dependencegraph.pointerAnalysisTime = 0s

# The computation methods used for pointer analysis. If no method is
# specified, an imprecise over-approximation of the global pointer state is
# created without running any actual pointer analysis. If at least one
# computation method is specified, the first one in the list is run with the
# time limit set by 'dependencegraph.pointerAnalysisTime'. If this method is
# able to create a valid global pointer state in time, the state is used and
# no other methods are run. Otherwise, if a second computation method is
# specified, the second method is run with the same time limit. If the method
# is able to create a valid global pointer state in time, the state is used
# and no other methods are run. The same is true for all subsequent
# computation methods specified in the list. If no computation method is able
# to create a valid global pointer state in time, an imprecise
# over-approximation of the global pointer state is created without running
# any actual pointer analysis. A pointer analysis is only run if
# 'dependencegraph.considerPointees' is set to true. Available computation
# methods: PointerStateComputationMethod.FLOW_SENSITIVE,
# PointerStateComputationMethod.FLOW_INSENSITIVE
dependencegraph.pointerStateComputationMethods = [PointerStateComputationMethod.FLOW_SENSITIVE]

# comma-separated list of files with property specifications that should be
# considered when determining the nodes that are in the reachability
# property.
differential.badstateProperties = []

# ignore declarations when detecting modifications, be careful when variables
# are renamed (could be unsound)
differential.ignoreDeclarations = false

# perform assumption implication check
differential.implicationCheck = true

# perform preprocessing to detect states from which error locations are
# reachable
differential.performPreprocessing = false

# Program to check against
differential.program = no default value

# safely stop analysis on pointer accesses and similar
differential.stopOnPointers = false

# Switch on/off to form the union of variable sets at identical location
# pairs. Set cpa.automaton.deleteDoubleEdges as well!
differential.variableSetMerge = false

# Allows to set the algorithm for decomposing the CFA. BLOCK_OPERATOR creates
# blocks from each merge/branching point to the next merge/branching point.
# GIVEN_SIZE merges blocks obtained by BLOCK_OPERATOR until
# distributedSummaries.desiredNumberOfBlocks blocks are present. SINGLE_BLOCK
# creates one block around the complete CFA.
distributedSummaries.decompositionType = BLOCK_OPERATOR
  enum:     [BLOCK_OPERATOR, GIVEN_SIZE, SINGLE_BLOCK]

# desired number of BlockNodes
distributedSummaries.desiredNumberOfBlocks = 5

# Whether to spawn util workers. Util workers listen to every message and
# create visual output for debugging. Workers consume resources and should
# not be used for benchmarks.
distributedSummaries.spawnUtilWorkers = true

# Configuration for backward analysis in computation of distributed summaries
distributedSummaries.worker.backwardConfiguration = "config/distributed-block-summaries/predicateAnalysis-block-backward.properties"

# whether error conditions are always checked for unsatisfiability
distributedSummaries.worker.checkEveryErrorCondition = true

# Configuration for forward analysis in computation of distributed summaries
distributedSummaries.worker.forwardConfiguration = "config/distributed-block-summaries/predicateAnalysis-block-forward.properties"

# Destination directory for the logfiles of all BlockSummaryWorkers. The
# logfiles have the same name as the ID of the worker.
distributedSummaries.worker.logDirectory = "block_summary/logfiles"

# Whether loop free programs have to deny all possible error messages. Enable
# this option to eagerly process every possible error message that occurs
# after a precondition update.
distributedSummaries.worker.sendEveryErrorMessage = false

# whether analysis worker store circular post conditions
distributedSummaries.worker.storeCircularPostConditions = false

# Choose the workers that are spawned for each block. Contrary to DEFAULT
# workers, SMART workers consume multiple messages at once.
distributedSummaries.workerType = DEFAULT
  enum:     [DEFAULT, SMART]

# Enable to use lazy refinement in current analysis instead of restarting
# from root after each refinement.
enabledanalysis.allowLazyRefinement = false

# Which CPA is used as enabler in the current analysis.
enabledanalysis.enablerCPA = PREDICATE
  enum:     [APRON, INTERVAL, OCTAGON, PREDICATE, VALUE]

# Ranking algorithm to use for fault localization
faultLocalization.by_coverage.type = TARANTULA
  enum:     [TARANTULA, DSTAR, OCHIAI]

# Configuration to use for initial program-state exploration
faultLocalization.by_distance.analysis = no default value

# The distance metric that ought to be used for the computation of the
# distance
faultLocalization.by_distance.metric = ADM
  enum:     [ADM, CFDM, PG]

# Maximum number of explorations to run for collecting error paths, before
# performing fault localization.  Exploration runs stop when the program
# under analysis is fully explored or the specified number of runs is
# reached. Fault localization may be more precise if more error paths are
# available.
faultLocalization.by_distance.stopAfter = 40

# Do not show faults that contain a certain variable. Use, e.g., 'main::x' to
# ban variable 'x' in the main function. Use, e.g., '::x' to ban all
# variables named 'x'. This is especially useful to filter specific faults if
# the first run results in many candidates. Provide a comma separated string
# to add variables, e.g., main::x,doStuff::y,::z
faultLocalization.by_traceformula.maxsat.ban = []

# which post-condition type to use
faultLocalization.by_traceformula.postConditionType = LAST_ASSUME_EDGES_ON_SAME_LINE
  enum:     [LAST_ASSUME_EDGE, LAST_ASSUME_EDGES_ON_SAME_LINE,
             LAST_ASSUME_EDGE_CLUSTER]

# By default, the precondition only contains the failing variable assignment
# of all nondet variables. Choose INITIAL_ASSIGNMENT to add assignments like
# '<datatype> <variable-name> = <value>' to the precondition.
faultLocalization.by_traceformula.preconditionType = NONDETERMINISTIC_VARIABLES_ONLY
  enum:     [NONDETERMINISTIC_VARIABLES_ONLY, INITIAL_ASSIGNMENT, ALWAYS_TRUE]

# which algorithm to use
faultLocalization.by_traceformula.type = UNSAT
  enum:     [UNSAT, MAXSAT, MAXORG, ERRINV]

# Configuration for programs containing more than @Option addressedRatio
# addressed vars.
heuristicSelection.addressedConfig = no default value

# Ratio of addressed vars. Values bigger than the passed value lead to
# @option addressedConfig.
heuristicSelection.addressedRatio = 0

# Configuration for programs containing arrays.
heuristicSelection.arrayConfig = no default value

# Configuration for programs with loops and complex data structures.
heuristicSelection.complexLoopConfig = no default value

# Configuration for programs containing composite types.
heuristicSelection.compositeTypeConfig = no default value

# Configuration for programs with loops.
heuristicSelection.loopConfig = no default value

# Configuration for loop-free programs.
heuristicSelection.loopFreeConfig = no default value

# Configuration for programs containing only relevant bool vars.
heuristicSelection.onlyBoolConfig = no default value

# Configuration for preliminary algorithm.
heuristicSelection.preAnalysisAlgorithmConfig = no default value

# Configuration for programs containing recursion.
heuristicSelection.recursionConfig = no default value

# Configuration for programs with a single loop.
heuristicSelection.singleLoopConfig = no default value

# toggle asserting targets at every iteration for IMC
imc.assertTargetsAtEveryIteration = false

# toggle checking forward conditions
imc.checkForwardConditions = true

# toggle checking whether the safety property is inductive
imc.checkPropertyInductiveness = false

# toggle falling back if interpolation or forward-condition is disabled
imc.fallBack = true

# toggle which strategy is used for computing fixed points in order to verify
# programs with loops. ITP enables IMC algorithm, and ITPSEQ enables ISMC
# algorithm. ITPSEQ_AND_ITP runs ISMC first, and if a fixed point is not
# reached by ISMC, IMC is invoked.
imc.fixedPointComputeStrategy = ITP
  enum:     [NONE, ITP, ITPSEQ, ITPSEQ_AND_ITP]

# toggle Impact-like covering for the ISMC fixed-point check
imc.impactLikeCovering = false

# toggle the strategy to determine the next loop iteration
# to execute BMC phase of IMC or ISMC
# CONST: increased by one (to guarantee a shortest counterexample)
# EAGER: skip all iterations where a bug cannot be found
imc.loopBoundIncrementStrategyForBMC = CONST
  enum:     [CONST, EAGER]

# toggle the strategy to determine the next loop iteration
# to execute interpolation phase of IMC
# CONST: increased by a constant (specified via
# loopBoundIncrementValueForIMC)
# EAGER: skip all iterations where a bug cannot be found
imc.loopBoundIncrementStrategyForIMC = CONST
  enum:     [CONST, EAGER]

# toggle the strategy to determine the next loop iteration
# to execute k-inductive check if "checkPropertyInductiveness" is enabled
# CONST: increased by a constant (specified via
# loopBoundIncrementValueForKI)
# EAGER: skip all iterations where a bug cannot be found
imc.loopBoundIncrementStrategyForKI = CONST
  enum:     [CONST, EAGER]

# toggle the value to increment the loop bound by at each step for IMC
imc.loopBoundIncrementValueForIMC = 1

# toggle the value to increment the loop bound by at each step for KI
imc.loopBoundIncrementValueForKI = 1

# toggle removing unreachable stop states in ARG
imc.removeUnreachableStopStates = false

# enable the Forced Covering optimization
impact.useForcedCovering = true

# Configuration file for the K-Induction algorithm for checking candidates on
# invariance.
invariantChecker.kInductionConfig = "config/bmc-invgen.properties"

# configuration file for invariant generation
invariantGeneration.config = no default value

# Check candidate invariants in a separate thread asynchronously.
invariantGeneration.kInduction.async = true

# Guess some candidates for the k-induction invariant generator from the CFA.
invariantGeneration.kInduction.guessCandidatesFromCFA = ASSUME_EDGES_PLAIN
  enum:     [NONE, ASSUME_EDGES_PLAIN, ASSUME_EDGE_TEMPLATES, LINEAR_TEMPLATES]

# Provides additional candidate invariants to the k-induction invariant
# generator.
invariantGeneration.kInduction.invariantsAutomatonFile = no default value

# For correctness-witness validation: Shut down if a candidate invariant is
# found to be incorrect.
invariantGeneration.kInduction.terminateOnCounterexample = false

# The directory where the invariants are stored.
invariantStore.export.outDir = "invariantWitnesses"
invariantStore.import.storeDirectory = "invariantWitnesses"

# Strategy for generating invariants
invariantStore.invariantGenerationStrategy = REACHED_SET
  enum:     [INDUCTION, REACHED_SET, DO_NOTHING, INVARIANT_STORE]

# The witness from which invariants should be generated.
invariantStore.witness = "invariantwitness.yaml"

# Specify the class code path to search for java class or interface
# definitions
java.classpath = ""

# use the following encoding for java files
java.encoding = StandardCharsets.UTF_8

# export TypeHierarchy as .dot file
java.exportTypeHierarchy = true

# Specify the source code path to search for java class or interface
# definitions
java.sourcepath = ""

# export TypeHierarchy as .dot file
java.typeHierarchyFile = "typeHierarchy.dot"

# Specifies the java version of source code accepted
java.version = JavaCore.VERSION_1_7

# Programming language of the input program. If not given explicitly,
# auto-detection will occur
# C, Java, or LLVM IR?
language = C
  enum:     [C, JAVA, LLVM]
language = no default value
  enum:     [C, JAVA, LLVM]
language = C
  enum:     [C, JAVA, LLVM]

# Limit for cpu time used by CPAchecker (use seconds or specify a unit; -1
# for infinite)
limits.time.cpu = -1ns

# Limit for thread cpu time used by CPAchecker. This option will in general
# not work when multi-threading is used in more than one place, use only with
# great caution! (use seconds or specify a unit; -1 for infinite)
limits.time.cpu.thread = -1ns

# Enforce that the given CPU time limit is set as the value of
# limits.time.cpu.
limits.time.cpu::required = -1ns

# Limit for wall time used by CPAchecker (use seconds or specify a unit; -1
# for infinite)
limits.time.wall = -1ns

# By changing this option one can adjust the way how live variables are
# created. Function-wise means that each function is handled separately,
# global means that the whole cfa is used for the computation.
liveVar.evaluationStrategy = FUNCTION_WISE
  enum:     [FUNCTION_WISE, GLOBAL]

# Overall timelimit for collecting the liveness information (use seconds or
# specify a unit; 0 for infinite)
liveVar.overallLivenessCheckTime = 0ns

# Timelimit for collecting the liveness information with one approach, (p.e.
# if global analysis is selected and fails in the specified timelimit the
# function wise approach will have the same time-limit afterwards to compute
# the live variables).(use seconds or specify a unit; 0 for infinite)
liveVar.partwiseLivenessCheckTime = 20s

# Write the tokenized version of the input program to this file.
locmapper.dumpTokenizedProgramToFile = no default value

# all used options are printed
log.usedOptions.export = false

# When checking for memory cleanup properties, use this configuration file
# instead of the current one.
memorycleanup.config = no default value

# When checking for memory safety properties, use this configuration file
# instead of the current one.
memorysafety.config = no default value

# which merge operator to use for LiveVariablesCPA
merge = "JOIN"
  allowed values: [SEP, JOIN]

# List of property-files to be run by the subprocesses.
mpiAlgorithm.configFiles = no default value

# The MCA parameter ('Modular Component Architecture') is available only on
# Open MPI frameworks. It might thus need to be disabled if unavailable on
# the working machine.
mpiAlgorithm.disableMCAOptions = no default value

# File containing the ip addresses to be used by MPI.
mpiAlgorithm.hostfile = no default value

# Max. amount of processes to be used by MPI.
mpiAlgorithm.numberProcesses = no default value

# Find all violations of each checked property.
mpv.findAllViolations = false

# Ignore exceptions, which may be caused by checking of some properties, to
# successfully check the others.
mpv.ignoreInnerExceptions = false

# Adjust resource limitations during the analysis.
# - NONE: do not adjust resource limitations (default).
# - DISTRIBUTE_REMAINING: distribute resources, which were allocated for some
# already checked property, but were not fully spent, between other
# properties, which are still checking.
# - DISTRIBUTE_BY_PROPERTY: scale resources for each property in accordance
# with the given ratio in the property distribution file.
mpv.limits.adjustmentStrategy = NONE
  enum:     [NONE, DISTRIBUTE_REMAINING, DISTRIBUTE_BY_PROPERTY]

# Set CPU time limit per each property in multi-property verification (use
# seconds or specify a unit; -1 to disable)
mpv.limits.cpuTimePerProperty = -1ns

# Change resource limitations for the first partition by the given ratio.
# This option will be ignored if NONE limits adjustment strategy is used.
mpv.limits.firstPartitionRatio = 1.0

# The ratio of CPU time limit in the first phase of Joint partitioning
# operator to CPU time limit per each property.
mpv.limits.joint.firstPhaseRatio = 1.3

# Get a resource limitation distribution per property from file. This option
# should be used only together with DISTRIBUTE_BY_PROPERTY limits adjustment
# strategy. The following format should be used in the file:
# '<property name>':<ratio>
mpv.limits.propertyDistributionFile = no default value

# The ratio of CPU time limit in the first phase of Relevance partitioning
# operator to CPU time limit per each property.
mpv.limits.relevance.firstPhaseRatio = 0.2

# The ratio of CPU time limit in the second phase of Relevance partitioning
# operator to CPU time limit per each property.
mpv.limits.relevance.secondPhaseRatio = 1.3

# Partitioning operator for multi-property verification.
mpv.partitionOperator = no default value

# Specifies how to separate a single property.
# - FILE: each .spc file represent a single property (i.e., property is
# represented by several automata).
# - AUTOMATON: each automaton represent a single property.
mpv.propertySeparator = FILE
  enum:     [FILE, AUTOMATON]

# When checking for the overflow property, use this configuration file
# instead of the current one.
overflow.config = no default value

# Simplify overflow assumptions.
overflow.simplifyExpressions = true

# Track overflows in additive(+/-) operations.
overflow.trackAdditiveOperations = true

# Track overflows in division(/ or %) operations.
overflow.trackDivisions = true

# Track overflows in left-shift operations.
overflow.trackLeftShifts = true

# Track overflows in multiplication operations.
overflow.trackMultiplications = true

# Track overflows in binary expressions involving pointers.
overflow.trackPointers = false

# Only check live variables for overflow, as compiler can remove dead
# variables.
overflow.useLiveness = true

# List of files with configurations to use. Files can be suffixed with
# ::supply-reached this signalizes that the (finished) reached set of an
# analysis can be used in other analyses (e.g. for invariants computation).
# If you use the suffix ::supply-reached-refinable instead this means that
# the reached set supplier is additionally continuously refined (so one of the
# analysis has to be instanceof ReachedSetAdjustingCPA) to make this work
# properly.
parallelAlgorithm.configFiles = no default value

# The command line for calling the clang preprocessor. May contain binary
# name and arguments, but won't be expanded by a shell. The source file name
# will be appended to this string. Clang needs to print the output to stdout.
parser.clang = "clang-" + LlvmUtils.extractVersionNumberFromLlvmJ() + " -S -emit-llvm -o /dev/stdout"

# Whether to dump the results of the preprocessor to disk.
parser.clang.dumpResults = true

# Whether to collect ACSL annotations if present
parser.collectACSLAnnotations = false

# C dialect for parser
parser.dialect = GNUC
  enum:     [C99, GNUC]

# The command line for calling the preprocessor. May contain binary name and
# arguments, but won't be expanded by a shell. The source file name will be
# appended to this string. The preprocessor needs to print the output to
# stdout.
parser.preprocessor = "cpp"

# Directory where to dump the results of the preprocessor.
parser.preprocessor.dumpDirectory = "preprocessed"

# Whether to dump the results of the preprocessor to disk for debugging.
parser.preprocessor.dumpResults = false

# For C files, read #line preprocessor directives and use their information
# for outputting line numbers. (Always enabled when pre-processing is used.)
parser.readLineDirectives = false

# Preprocess the given C files before parsing: Put every single token onto a
# new line. Then the line number corresponds to the token number.
parser.transformTokensToLines = false

# For C files, convert to LLVM IR with clang first and then use the LLVM
# parser.
parser.useClang = false

# For C files, run the preprocessor on them before parsing. Note that all
# line numbers printed by CPAchecker will refer to the pre-processed file,
# not the original input file.
parser.usePreprocessor = false

# Specifies the mode how HW requirements are detected in the proof.
pcc.HWrequirements.extraction.mode = OPERATOR
  enum:     [MANUAL, OPERATOR, AUTOMATIC]

# Enable if used property checker implements satisfiesProperty(AbstractState)
# and checked property is violated for a set iff an element in this set
# exists which violates the property
pcc.arg.checkPropertyPerElement = false

# Enable to store ARG states instead of abstract states wrapped by ARG state
pcc.backwardtargets.certificateStatesAsARGStates = false

# List of files with configurations to use. 
pcc.cmc.configFiles = no default value

# write collected assumptions to file
pcc.cmc.file = "AssumptionAutomaton.txt"

# collects information about value analysis states in proof
pcc.collectValueAnalysisStateInfo = false

# The number of cores used exclusively for proof reading. Must be less than
# pcc.useCores and may not be negative. Value 0 means that the cores used for
# reading and checking are shared
pcc.interleaved.useReadCores = 0

# enables parallel checking of partial certificate
pcc.parallel.io.enableParallelCheck = false

# Selects the strategy used for partial certificate construction
pcc.partial.certificateType = HEURISTIC
  enum:     [ALL, HEURISTIC, ARG, MONOTONESTOPARG]

# If enabled, distributes checking of partial elements depending on actual
# checking costs, else uses the number of elements
pcc.partial.enableLoadDistribution = false

# Enables proper PCC but may not work correctly for heuristics. Stops adding
# newly computed elements to reached set if size saved in proof is reached.
# If another element must be added, stops certificate checking and returns
# false.
pcc.partial.stopAddingAtReachedSetSize = false

# [Best-first] Balance criterion for pairwise optimization of partitions
pcc.partitioning.bestfirst.balancePrecision = 1.0d

# Evaluation function to determine exploration order of best-first-search
pcc.partitioning.bestfirst.chosenFunction = BEST_IMPROVEMENT_FIRST
  enum:     [BREADTH_FIRST, DEPTH_FIRST, BEST_IMPROVEMENT_FIRST]

# Balance criterion for pairwise optimization of partitions
pcc.partitioning.fm.balanceCriterion = 1.5d

# Heuristic for computing an initial partitioning of proof
pcc.partitioning.fm.initialPartitioningStrategy = RANDOM
  enum:     [RANDOM]

# [FM-k-way] Balance criterion for pairwise optimization of partitions
pcc.partitioning.kwayfm.balancePrecision = 1.3d

# [FM-k-way] Partitioning method to compute initial partitioning.
pcc.partitioning.kwayfm.globalHeuristic = BEST_IMPROVEMENT_FIRST
  enum:     [RANDOM, DFS, BFS, BEST_IMPROVEMENT_FIRST]

# [FM-k-way] Local optimization criterion to be minimized during
# Fiduccia/Mattheyses refinement
pcc.partitioning.kwayfm.optimizationCriterion = NODECUT
  enum:     [EDGECUT, NODECUT]

# Specifies the maximum size of the partition. This size is used to compute
# the number of partitions if a proof (reached set) should be written.
# Default value 0 means always a single partition.
pcc.partitioning.maxNumElemsPerPartition = 0

# Partitioning method applied in multilevel heuristic to compute initial
# partitioning.
pcc.partitioning.multilevel.globalHeuristic = BEST_IMPROVEMENT_FIRST
  enum:     [RANDOM, DFS, BFS, BEST_IMPROVEMENT_FIRST]

# Matching method applied to coarsen graph down in multilevel heuristic.
pcc.partitioning.multilevel.matchingGenerator = HEAVY_EDGE
  enum:     [RANDOM, HEAVY_EDGE]

# Refinement method applied in multilevel heuristic's uncoarsening phase.
pcc.partitioning.multilevel.refinementHeuristic = FM_NODECUT
  enum:     [FM_NODECUT, FM_EDGECUT]

# Heuristic for computing partitioning of proof (partial reached set).
pcc.partitioning.partitioningStrategy = RANDOM
  enum:     [RANDOM, DFS, BFS, OPTIMAL, BEST_FIRST, FM, FM_K_WAY, MULTILEVEL]

# If enabled uses the number of nodes saved in certificate to compute
# partition number otherwise the number of states explored during analysis
pcc.partitioning.useGraphSizeToComputePartitionNumber = false

# file in which proof representation needed for proof checking is stored
pcc.proof = "arg.obj"

# file in which proof representation will be stored
pcc.proofFile = "arg.obj"

# Generate and dump a proof
pcc.proofgen.doPCC = false

# Configuration for proof checking if differs from analysis configuration
pcc.resultcheck.checkerConfig = no default value

# Enable to write proof and read it again for validation instead of using the
# in memory solution
pcc.resultcheck.writeProof = false

# Make proof more abstract, remove some of the information not needed to
# prove the property.
pcc.sliceProof = false

# writes the validation configuration required for checking to proof
pcc.storeConfig = false

# Qualified name for class which implements certification strategy, hence
# proof writing, to be used.
pcc.strategy = no default value

# number of cpus/cores which should be used in parallel for proof checking
pcc.useCores = 1

# Which strategy to use to perform abstraction of successful proof results or
# when lifting with the lifting strategy ABSTRACTION_BASED_LIFTING.
pdr.abstractionStrategy = NO_ABSTRACTION
  enum:     [NO_ABSTRACTION, ALLSAT_BASED_PREDICATE_ABSTRACTION]

# Whether to adjust conditions (i.e. increment k) after frontier extension.
pdr.conditionAdjustmentCriterion = NEVER
  enum:     [NEVER, ALWAYS]

# Which strategy to use to perform invariant refinement on successful proof
# results.
pdr.invariantRefinementStrategy = NO_STRENGTHENING
  enum:     [NO_STRENGTHENING, UNSAT_CORE_BASED_STRENGTHENING]

# Maximum number of ignored lifting abstraction failures within a
# proof-obligation trace.
pdr.liftingAbstractionFailureThreshold = 0

# Which strategy to use to abstract counterexamples to inductivity.
pdr.liftingStrategy = NO_LIFTING
  enum:     [NO_LIFTING, UNSAT_CORE_BASED_LIFTING, ABSTRACTION_BASED_LIFTING]

# Maximum number of accepted spurious transitions within a proof-obligation
# trace before a consecution abstraction failure triggers a refinement.
pdr.spuriousTransitionCountThreshold = 0

# Format to use for image output
pixelgraphic.export.format = "svg"

# Height of the bitmap in pixels. If set to -1, height is  computed in
# relation to the width. If both are set to -1, the optimal bitmap size to
# represent the graph is used. The final height is height*scaling
pixelgraphic.export.height = -1

# Scaling of the bitmap. If set to 1, 1 pixel represents one graph node. If
# set to 2, 2 * 2 pixels represent one graph node, and so on.
pixelgraphic.export.scaling = 2

# Highlight not only corresponding graph nodes, but background of
# corresponding line, too. This may give a better overview, but also
# introduces more clutter
pixelgraphic.export.strongHighlight = true

# Width of the bitmap in pixels. If set to -1, width is computed in relation
# to the height. If both are set to -1, the optimal bitmap size to represent
# the graph is used. The final width is width*scaling
pixelgraphic.export.width = -1

# Padding of the bitmap on the left and right (each) in pixels
pixelgraphic.export.xPadding = 2

# Padding of the bitmap on the top and bottom (each) in pixels
pixelgraphic.export.yPadding = 2

# A path to a precision output
# A path to precision
precision.path = "localsave"

# whether to track relevant variables only at the exact program location
# (sharing=location), or within their respective (function-/global-) scope
# (sharing=scoped).
precision.sharing = SCOPE
  enum:     [SCOPE, LOCATION]

# Allowed coefficients in a template.
precision.template.allowedCoefficients = {Rational.NEG_ONE, Rational.ONE}

# Generate difference constraints. This option is redundant for
# `maxExpressionSize` >= 2.
precision.template.generateDifferences = false

# Generate templates from assert statements
precision.template.generateFromAsserts = true

# Generate templates from all program statements
precision.template.generateFromStatements = false

# Force the inclusion of function parameters into the generated templates.
# Required for summaries computation.
precision.template.includeFunctionParameters = false

# Maximum size for the generated template
precision.template.maxExpressionSize = 1

# Perform refinement using enumerative template synthesis.
precision.template.performEnumerativeRefinement = true

# Do not generate templates with threshold larger than specified. Set to '-1'
# for no limit.
precision.template.templateConstantThreshold = 100

# Strategy for filtering variables out of templates using liveness
precision.template.varFiltering = ALL_LIVE
  enum:     [INTERPOLATION_BASED, ALL_LIVE, ONE_LIVE, ALL]

# If this option is used, variables that are addressed may get tracked
# depending on the rest of the precision. When this option is disabled, a
# variable that is addressed is definitely not tracked.
precision.trackAddressedVariables = true

# If this option is used, booleans from the cfa are tracked.
precision.trackBooleanVariables = true

# If this option is used, variables that have type double or float are
# tracked.
precision.trackFloatVariables = true

# If this option is used, variables, that are only used in simple
# calculations (add, sub, lt, gt, eq) are tracked.
precision.trackIntAddVariables = true

# If this option is used, variables that are only compared for equality are
# tracked.
precision.trackIntEqualVariables = true

# If this option is used, variables that are irrelevant are also tracked.
precision.trackIrrelevantVariables = true

# If this option is used, all variables that are of a different
# classification than IntAdd, IntEq and Boolean get tracked by the precision.
precision.trackVariablesBesidesEqAddBool = true

# blacklist regex for variables that won't be tracked by the CPA using this
# precision
precision.variableBlacklist = ""

# whitelist regex for variables that will always be tracked by the CPA using
# this precision
precision.variableWhitelist = ""

# where to export conditions
program.splitter.conditionFile = "Condition.%d.txt"

# export program splitting as conditions (assumption automata)
program.splitter.exportAsCondition = true

# Which program split heuristic to use
program.splitter.heuristic = no default value

# maximal number
program.splitter.max = 2

# Quantifier elimination strategy
rcnf.boundVarsHandling = QE_LIGHT_THEN_DROP
  enum:     [QE_LIGHT_THEN_DROP, QE, DROP]

# Expand equality atoms. E.g. 'x=a' gets expanded into 'x >= a AND x <= a'.
# Can lead to stronger weakenings.
rcnf.expandEquality = false

# Limit on the size of the resulting number of lemmas from the explicit
# expansion
rcnf.expansionResultSizeLimit = 100

# print reached set to graph file
reachedSet.dot = "reached.dot"

# print reached set to text file
reachedSet.export = false
reachedSet.file = "reached.txt"

# Generate HTML report with analysis result.
report.export = true

# File name for analysis report in case no counterexample was found.
report.file = "Report.html"

# output file for visualizing message exchange
reportFile = "block_analysis/block_analysis.json"

# set path to file which contains the condition
residualprogram.assumptionFile = no default value

# set specification file to automaton which guides analysis along assumption
# produced by incomplete analysis,e.g.,
# config/specification/AssumptionGuidingAutomaton.spc, to enable residual
# program from combination of program and assumption condition
residualprogram.assumptionGuider = no default value

# Export CFA of residual program as pixel graphic to the given file name. The
# suffix is added corresponding to the value of option
# pixelgraphic.export.format. If set to 'null', no pixel graphic is exported.
residualprogram.cfa.pixelGraphicFile = "residProgPixel"

# Export residual program as pixel graphic
residualprogram.export.pixel = false

# write residual program to file
residualprogram.file = "residualProgram.c"

# Define kind of folder to use when combining condition with folding approach
# in residual program generation
residualprogram.folderType = CFA
  enum:     [CFA, FOLD_EXCEPT_LOOPS, LOOP_ALWAYS, LOOP_BOUND, LOOP_BOUND_SAME_CONTEXT,
             LOOP_SAME_CONTEXT]

# Collect statistical data about size of residual program
residualprogram.statistics.size = false

# which strategy to use to generate the residual program
residualprogram.strategy = CONDITION
  enum:     [REACHABILITY, SLICING, CONDITION, CONDITION_PLUS_FOLD, COMBINATION]

# How often may a loop be unrolled before it must be folded
residualprogram.unrollBound = 2

# whether to start the next algorithm independently from the previous result
restartAlgorithm.alwaysRestart = false

# combine (partial) ARGs obtained by restarts of the analysis after an
# unknown result with a different configuration
restartAlgorithm.combineARGsAfterRestart = false

# List of files with configurations to use. A filename can be suffixed with
# :if-interrupted, :if-failed, and :if-terminated which means that this
# configuration will only be used if the previous configuration ended with a
# matching condition. What also can be added is :use-reached then the reached
# set of the preceding analysis is taken and provided to the next analysis.
restartAlgorithm.configFiles = no default value

# print the statistics of each component of the restart algorithm directly
# after the components computation is finished
restartAlgorithm.printIntermediateStatistics = true

# let each component of the restart algorithm write output files and not only
# the last one that is executed
restartAlgorithm.writeIntermediateOutputFiles = false

# path to condition file
slicing.conditionFile = "output/AssumptionAutomaton.txt"

# path to condition files plus additional assumption guiding automaton when
# condition itself is in proprietary format and not in witness format
slicing.conditionFiles = {
          Path.of("output/AssumptionAutomaton.txt"),
          Classes.getCodeLocation(ReducerExtractor.class)
              .resolveSibling("config/specification/AssumptionGuidingAutomaton.spc")}

# Export the used slicing criteria to file
slicing.exportCriteria.enable = false

# File template for export of used slicing criteria
slicing.exportCriteria.file = "programSlice.%d.criteria.txt"

# Whether to export slices as C program files
slicing.exportToC.enable = false

# File template for exported C program slices
slicing.exportToC.file = "programSlice.%d.c"

# Whether to export program slices as DOT files.
slicing.exportToDot.enable = true

# File template for exported program slice DOT files.
slicing.exportToDot.file = "programSlice.%d.dot"

# which type of extractor for slicing criteria to use
slicing.extractor = ALL
  enum:     [ALL, REDUCER, SYNTAX]

# Whether to allow edges in the resulting slice that are only partially
# relevant (e.g. function calls where not every parameter is relevant).
# Setting this parameter to true can decrease the size of the resulting
# slice.
slicing.partiallyRelevantEdges = true

# what kind of slicing to use
slicing.type = STATIC
  enum:     [STATIC, IDENTITY]

# Extract and cache unsat cores for satisfiability checking
solver.cacheUnsatCores = true

# improve sat-checks with additional constraints for UFs
solver.checkUFs = false

# whether CPAchecker's logger should be used as logger for the solver,
# otherwise nothing is logged from the solver.
solver.enableLoggingInSolver = false

# Which solver to use specifically for interpolation (default is to use the
# main one).
solver.interpolationSolver = no default value
  enum:     [MATHSAT5, SMTINTERPOL, Z3, PRINCESS, BOOLECTOR, CVC4, CVC5, YICES2]

# Which SMT solver to use.
solver.solver = MATHSAT5
  enum:     [MATHSAT5, SMTINTERPOL, Z3, PRINCESS, BOOLECTOR, CVC4, CVC5, YICES2]

# Comma-separated list of files with specifications that should be checked
# (cf. config/specification/ for examples). Property files as used in SV-COMP
# can also be used here, but when these are specified inside a configuration
# file instead of on the command line, CPAchecker will ignore the entry
# function in the property file.
specification = []

# export abstract states as formula, e.g. for re-using them as
# PredicatePrecision.
statesToFormulas.exportFile = no default value

# export formulas for all program locations or just the important
# locations, which include loop-heads, function-calls and function-exits.
statesToFormulas.exportOnlyImporantLocations = false

# instead of writing the exact state-representation as a single formula,
# write its atoms as a list of formulas. Therefore we ignore operators for
# conjunction and disjunction.
statesToFormulas.splitFormulas = LOCATION
  enum:     [LOCATION, STATE, ATOM]

# Add all assumptions from the control flow automaton to the precision.
staticRefiner.addAllControlFlowAssumes = false

# Add all assumptions along a error trace to the precision.
staticRefiner.addAllErrorTraceAssumes = false

# Add all assumptions along the error trace to the precision.
staticRefiner.addAssumesByBoundedBackscan = true

# Apply mined predicates on the corresponding scope. false = add them to the
# global precision.
staticRefiner.applyScoped = true

# Dump CFA assume edges as SMTLIB2 formulas to a file.
staticRefiner.assumePredicatesFile = no default value

# split generated heuristic predicates into atoms
staticRefiner.atomicPredicates = true

# collect at most this number of assumes along a path, backwards from each
# target (= error) location
staticRefiner.maxBackscanPathAssumes = 1

# write some statistics to disk
statistics.export = true
statistics.file = "Statistics.txt"

# track memory usage of JVM during runtime
statistics.memory = true

# print statistics to console
statistics.print = false

# which stop operator to use for LiveVariablesCPA
stop = "SEP"
  allowed values: [SEP, JOIN, NEVER]

# compress the produced violation-witness automata using GZIP compression.
termination.compressWitness = true

# When checking for the termination property, use this configuration file
# instead of the current one.
termination.config = no default value

# enable to also analyze whether recursive calls terminate
termination.considerRecursion = false

# Number of generalized eigenvectors in the geometric nontermination
# argument.
termination.lassoAnalysis.eigenvectors = 3

# Shell command used to call the external SMT solver.
termination.lassoAnalysis.externalSolverCommand = NativeLibraries.getNativeLibraryPath().resolve("z3") + " -smt2 -in SMTLIB2_COMPLIANT=true "

# Analysis type used for synthesis of linear termination arguments.
termination.lassoAnalysis.linear.analysisType = LINEAR_WITH_GUESSES
  enum:     [DISABLED, LINEAR, LINEAR_WITH_GUESSES, NONLINEAR]

# If true, an external tool is used as SMT solver instead of SMTInterpol.
# This affects only synthesis of linear termination arguments.
termination.lassoAnalysis.linear.externalSolver = false

# Maximal number of functions used in a ranking function template.
termination.lassoAnalysis.maxTemplateFunctions = 3

# Number of non-strict supporting invariants for each Motzkin transformation
# during synthesis of termination arguments.
termination.lassoAnalysis.nonStrictInvariants = 3

# Analysis type used for synthesis of non-linear termination arguments.
termination.lassoAnalysis.nonlinear.analysisType = LINEAR_WITH_GUESSES
  enum:     [DISABLED, LINEAR, LINEAR_WITH_GUESSES, NONLINEAR]

# If true, an external tool is used as SMT solver instead of SMTInterpol.
# This affects only synthesis of non-linear termination arguments and
# non-termination arguments.
termination.lassoAnalysis.nonlinear.externalSolver = false

# Number of strict supporting invariants for each Motzkin transformation
# during synthesis of termination arguments.
termination.lassoAnalysis.strictInvariants = 2

# Simplifies loop and stem formulas.
termination.lassoBuilder.simplify = false

# maximal number of repeated ranking functions per loop before stopping
# analysis
termination.maxRepeatedRankingFunctionsPerLoop = 10

# Strategy used to prepare reached set and ARG for next iteration after
# successful refinement of the termination argument.
termination.resetReachedSetStrategy = REMOVE_LOOP
  enum:     [REMOVE_TARGET_STATE, REMOVE_LOOP, RESET]

# A human readable representation of the synthesized (non-)termination
# arguments is exported to this file.
termination.resultFile = "terminationAnalysisResult.txt"

# consider counterexamples for loops for which only pointer variables are
# relevant or which check that pointer is unequal to null pointer to be
# imprecise
termination.useCexImpreciseHeuristic = false

# Export termination counterexample to file as GraphML automaton 
termination.violation.witness = "nontermination_witness.graphml"

# Export termination counterexample to file as dot/graphviz automaton 
termination.violation.witness.dot = "nontermination_witness.dot"

# Only generate for __VERIFIER_nondet calls
testHarnessExport.onlyVerifierNondet = false

# Use the counterexample model to provide test-vector values
testHarnessExport.useModel = true

# zip all exported test cases into a single file
testcase.compress = false

# Do not output values for variables that are not initialized when declared
testcase.excludeInitialization = false

# export test harness to file as code
testcase.file = no default value

# set to true if run multiple test case generation instances in parallel
testcase.generate.parallel = false

# display all test targets and non-covered test targets in statistics
testcase.inStats = false

# how many mutated test cases should be additionally generated (disabled if
# <= 0)
testcase.mutants = 0

# Random seed for mutation of test cases
testcase.mutationSeed = 0

# Number of random test cases that should be generated
testcase.numRandomTests = 1

# Only convert literal value and do not add suffix, e.g., for unsigned, etc.
testcase.plainLiteralValue = false

# defines how progress is computed
testcase.progress = RELATIVE_TOTAL
  enum:     [ABSOLUTE, RELATIVE_TOTAL]

# Maximum value randomly generated
testcase.random.max = 20

# Number of random test cases that should be generated
testcase.random.maxLength = 20

# Minimum value randomly generated
testcase.random.min = 0

# Random seed for random test-case generation
testcase.randomInputSeed = 0

# when generating tests covering an error call, stop as soon as one test case
# has been generated and report false (only possible in combination with an
# error call property specification)
testcase.reportCoveredErrorCallAsError = false

# CFA edge if only a specific edge should be considered, e.g., in
# counterexample check
testcase.targets.edge = no default value

# Name of target function if target type is FUN_CALL
testcase.targets.funName = no default value

# Which strategy to use to optimize set of test target edges
testcase.targets.optimization.strategy = NONE
  enum:     [NONE, COVERED_NEXT_EDGE, BASIC_ESSENTIAL_EDGE, ESSENTIAL_EDGE_ORIGINAL,
             ESSENTIAL_EDGE, PORTFOLIO, SPANNING_SET, TESTCOMP]

# Which CFA edges to use as test targets
testcase.targets.type = ASSUME
  enum:     [ASSUME, TEST_COMP_ASSUME, ERROR_CALL, FUN_CALL, STATEMENT]

# export test values to file (line separated)
testcase.values = no default value

# export test cases to xm file (Test-Comp format)
testcase.xml = no default value

# Zip file into which all test case files are bundled
testcase.zip.file = no default value

# Usually every statement that is not part of the precondition gets a
# selector. If a certain variable is known to not cause the error, add it to
# this option, e.g., main::x,doStuff::y
traceformula.disable = []

# The alternative precondition consists of all initial variable assignments
# and a failing variable assignment for all nondet variables. By default,
# only variables in the main function are part of the precondition. Overwrite
# the default by adding functions to this option, e.g., "main,doStuff"
traceformula.filter = ["main"]

# The alternative precondition consists of all initial variable assignments.
# If a variable assignment seems suspicious, it might be useful to exclude it
# from the precondition. To do this, add these variables to this option,
# e.g., main::x,doStuff::y. Make sure to add the function in which the
# variable is used as prefix, separated by two ':'
traceformula.ignore = []

# Make trace formula flow-sensitive, i.e., assume edges imply the edges that
# are only reachable through the assume edge. Flow-sensitive traces remove
# assume edges from the trace. Hence, no assume edge will be part of a fault.
traceformula.makeFlowSensitive = false

# By default, every executed statement gets its own selector. If a loop is
# part of the program to analyze, the number of selectors can increase which
# also increases the run time of max-sat drastically. To use the same
# selector for equal statements (on the same line), set this option to true.
# Note that enabling this option also decreases the quality of results.

# Ignore functions that are defined by C11
undefinedFunctionsCollector.allowC11Functions = true

# Ignore functions that are defined by GNU C and not by C11/POSIX
undefinedFunctionsCollector.allowGnuCFunctions = true

# Ignore functions that are defined by POSIX
undefinedFunctionsCollector.allowPosixFunctions = true

# Set of functions that should be ignored
undefinedFunctionsCollector.allowedFunctions = ImmutableSet.of(

# Regexp matching function names that are allowed to be undefined
undefinedFunctionsCollector.allowedFunctionsRegexp = "^(__VERIFIER|pthread)_[a-zA-Z0-9_]*"

# Regexp matching function names that need not be declared
undefinedFunctionsCollector.allowedUndeclaredFunctionsRegexp = "^__builtin_[a-zA-Z0-9_]*"

# Memory-allocation function that will be used in stubs
undefinedFunctionsCollector.externAllocFunction = "external_alloc"

# export undefined functions as C file
undefinedFunctionsCollector.stubsFile = "stubs.c"

# select an analysis from a set of analyses after unknown result
useCompositionAnalysis = false

# Instead of comments, output the assertions into the original program as
# violations to unreach_call.prp
wacsl.makeDirectAssertions = false

# The directory where generated, ACSL annotated programs are stored.
wacsl.outDir = "annotated"

# Makes the annotated file's name identical to the original source file's
# name.
wacsl.useSameFileName = false

# The witness from which ACSL annotations should be generated.
wacsl.witness = no default value

# File for exporting the witness automaton in DOT format.
witness.automatonDumpFile = no default value

# remove assumptions from transitions in the ISA where they are not strictly
# necessary. This option is intended to be used with an ISA (cf. option
# witness.invariantsSpecificationAutomaton)
witness.checkInvariantViolations = true

# Check that the value of the programhash field of the witness matches the
# SHA-256 hash value computed for the source code.
witness.checkProgramHash = true

# Consider assumptions that are provided with the path automaton?
witness.considerAssumptions = true

# Fail-fast if invariants in the witness exist that would not be accounted
# for. There are cases where unaccounted invariants are perfectly fine, e.g.
# if those states in the witness automaton are actually unreachable in the
# program. This is however rarely the intention of the original producer of
# the witness, so this options can be used to debug those cases.
witness.debug.checkForMissedInvariants = false

# Validate correctness witness by specifying an invariants specification
# automaton
witness.invariantsSpecificationAutomaton = NO_ISA
  enum:     [NO_ISA, WITNESSBASED_ISA, TWOSTATES_ISA, CFABASED_ISA]

# Match the branching information at a branching location.
witness.matchAssumeCase = true

# Match the character offset within the file.
witness.matchOffset = true

# Match the line numbers within the origin (mapping done by preprocessor line
# markers).
witness.matchOriginLine = true

# This option can be used to ensure that no correctness witnesses are
# checked.
witness.noCorrectnessValidation = false

# This option can be used to ensure that no violation witnesses are checked.
witness.noViolationValidation = false

# remove assumptions from transitions in the ISA where they are not strictly
# necessary. This option is intended to be used with an ISA (cf. option
# witness.invariantsSpecificationAutomaton)
witness.optimizeInvariantsSpecificationAutomaton = true

# Represent sink states by bottom state instead of break state
witness.stopNotBreakAtSinkStates = true

# Enforce strict validity checks regarding the witness format, such as
# checking for the presence of required fields.
witness.strictChecking = true

# remove assumptions from transitions in the ISA where they are not strictly
# necessary. This option is intended to be used with an ISA (cf. option
# witness.invariantsSpecificationAutomaton)
witness.useInvariantsAsAssumptions = true

# Validate program using invariants from ACSL annotations.
witness.validation.correctness.acsl = false

# When validating a correctness witness, use this configuration file instead
# of the current one.
witness.validation.correctness.config = no default value

# Use correctness witness as invariants specification automaton (ISA).
witness.validation.correctness.isa = false

# The witness to validate.
witness.validation.file = no default value

# Use this configuration when checking that, once the recurrent set is
# reached, execution can be extended to an infinite one
witness.validation.termination.inspectCycle.config = no default value

# Use this configuration when checking that recurrent set (at cycle head) is
# reachable. Configuration must be precise, i.e., may only report real
# counterexamples
witness.validation.termination.reachCycle.config = no default value

# Report a successful validation of the witness, i.e., a confirmation of the
# nontermination, as termination violation.
witness.validation.termination.successAsViolation = true

# Path to automaton specification describing which statements let the program
# terminate.
witness.validation.termination.terminatingStatements = "config/specification/TerminatingStatements.spc"

# When validating a violation witness, use this configuration file instead of
# the current one.
witness.validation.violation.config = no default value
