exprinterpret_cppad.cpp
1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2/* */
3/* This file is part of the program and library */
4/* SCIP --- Solving Constraint Integer Programs */
5/* */
6/* Copyright (c) 2002-2024 Zuse Institute Berlin (ZIB) */
7/* */
8/* Licensed under the Apache License, Version 2.0 (the "License"); */
9/* you may not use this file except in compliance with the License. */
10/* You may obtain a copy of the License at */
11/* */
12/* http://www.apache.org/licenses/LICENSE-2.0 */
13/* */
14/* Unless required by applicable law or agreed to in writing, software */
15/* distributed under the License is distributed on an "AS IS" BASIS, */
16/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
17/* See the License for the specific language governing permissions and */
18/* limitations under the License. */
19/* */
20/* You should have received a copy of the Apache-2.0 license */
21/* along with SCIP; see the file LICENSE. If not visit scipopt.org. */
22/* */
23/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
24
25/**@file exprinterpret_cppad.cpp
26 * @brief methods to interpret (evaluate) an expression "fast" using CppAD
27 * @ingroup DEFPLUGINS_EXPRINT
28 * @author Stefan Vigerske
29 */
30
31/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
32
33#include "scip/exprinterpret.h"
34#include "scip/def.h"
35#include "scip/intervalarith.h"
36#include "scip/pub_expr.h"
37#include "scip/scip_expr.h"
38#include "scip/expr_pow.h"
39#include "scip/expr_exp.h"
40#include "scip/expr_log.h"
41#include "scip/expr_varidx.h"
42
43#include <cmath>
44#include <cstring>
45#include <algorithm>
46#include <vector>
47using std::vector;
48
49/* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
50 * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
51 * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
52 * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
53 * vector<*>::operator[] only. */
54/*lint --e{747,732}*/
55
56/* defining EVAL_USE_EXPRHDLR_ALWAYS uses the exprhdlr methods for the evaluation and differentiation of all expressions (except for var and value)
57 * instead of only those that are not recognized in this code
58 * this is incomplete (no hessians) and slow (only forward-mode gradients), but can be useful for testing
59 */
60// #define EVAL_USE_EXPRHDLR_ALWAYS
61
62/* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
63 * via CppAD's user-atomic function feature
64 * our customized implementation should give better results (tighter intervals) for the interval data type
65 * but we don't use interval types here anymore and the atomic operator does not look threadsafe (might be better in newer CppAD version)
66 */
67#define NO_CPPAD_USER_ATOMIC
68
69/* fallback to non-thread-safe version if C++ is too old to have std::atomic */
70#if __cplusplus < 201103L && defined(SCIP_THREADSAFE)
71#undef SCIP_THREADSAFE
72#endif
73
74/* CppAD needs to know a fixed upper bound on the number of threads at compile time.
75 * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult to debug errors.
76 */
77#ifndef CPPAD_MAX_NUM_THREADS
78#ifdef SCIP_THREADSAFE
79#define CPPAD_MAX_NUM_THREADS 64
80#else
81#define CPPAD_MAX_NUM_THREADS 1
82#endif
83#endif
84
85/* disable -Wshadow warnings for upcoming includes of CppAD if using some old GCC
86 * -Wshadow was too strict with some versions of GCC 4 (https://stackoverflow.com/questions/2958457/gcc-wshadow-is-too-strict)
87 * disable -Wimplicit-fallthrough as I don't want to maintain extra comments in CppAD code to suppress these
88 */
89#ifdef __GNUC__
90#if __GNUC__ == 4
91#pragma GCC diagnostic ignored "-Wshadow"
92#endif
93#if __GNUC__ >= 7
94#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
95#endif
96#endif
97
98// MS compiler doesn't have log2() - define an expensive workaround
99#ifdef _MSC_VER
100#define log2(x) (log((double)(x)) / log(2.0))
101#endif
102
103#include <cppad/cppad.hpp>
104#include <cppad/utility/error_handler.hpp>
105
106/* CppAD is not thread-safe by itself, since it uses some static data structures.
107 * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
108 * This allocator needs to know the number of threads and a thread number for each thread.
109 * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
110 */
111#ifdef SCIP_THREADSAFE
112
113#include <atomic>
114
115/** currently registered number of threads */
116static std::atomic_size_t ncurthreads{0};
117static thread_local int thread_number{-1};
118
119/** CppAD callback function that indicates whether we are running in parallel mode */
120static
121bool in_parallel(void)
122{
123 return ncurthreads > 1;
124}
125
126/** CppAD callback function that returns the number of the current thread
127 *
128 * assigns a new number to the thread if new
129 */
130static
131size_t thread_num(void)
132{
133 size_t threadnum;
134
135 /* if no thread_number for this thread yet, then assign a new thread number to the current thread
136 */
137 if( thread_number == -1 )
138 {
139 thread_number = static_cast<int>(ncurthreads.fetch_add(1, std::memory_order_relaxed));
140 }
141
142 threadnum = static_cast<size_t>(thread_number);
143
144 return threadnum;
145}
146
147/** sets up CppAD's datastructures for running in multithreading mode
148 *
149 * It must be called once before multithreading is started.
150 * For GCC-compatible compilers, this will happen automatically.
151 */
152extern "C" SCIP_EXPORT char SCIPexprintCppADInitParallel(void);
153#ifdef __GNUC__
154__attribute__((constructor))
155#endif
156char SCIPexprintCppADInitParallel(void)
157{
158 CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
159 CppAD::parallel_ad<double>();
160
161 return 0;
162}
163
164#if !defined(__GNUC__)
165/** a dummy variable that is initialized to the result of SCIPexprintCppADInitParallel()
166 *
167 * The purpose is to make sure that SCIPexprintCppADInitParallel() is called before any multithreading is started.
168 */
169static char init_parallel_return = SCIPexprintCppADInitParallel();
170#endif
171
172#endif // SCIP_THREADSAFE
173
174using CppAD::AD;
175
176class atomic_userexpr;
177
178/** expression specific interpreter data */
179struct SCIP_ExprIntData
180{
181public:
182 /** constructor */
183 SCIP_ExprIntData()
184 : val(0.0),
185 need_retape(true),
186 need_retape_always(false),
187 userevalcapability(SCIP_EXPRINTCAPABILITY_ALL),
188 hesrowidxs(NULL),
189 hescolidxs(NULL),
190 hesnnz(0),
191 hesconstant(false)
192 { }
193
194 /** destructor */
195 ~SCIP_ExprIntData()
196 { }/*lint --e{1540}*/
197
198 /** gives position of index in varidxs vector */
199 int getVarPos(
200 int varidx /**< variable index to look for */
201 ) const
202 {
203 // varidxs is sorted, so can use binary search functions
204 assert(std::binary_search(varidxs.begin(), varidxs.end(), varidx));
205 return std::lower_bound(varidxs.begin(), varidxs.end(), varidx) - varidxs.begin();
206 }
207
208 vector< int > varidxs; /**< variable indices used in expression (unique and sorted) */
209 vector< AD<double> > X; /**< vector of independent variables (same size as varidxs) */
210 vector< AD<double> > Y; /**< result vector (size 1) */
211 CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
212
213 vector<double> x; /**< current values of independent variables (same size as varidxs) */
214 double val; /**< current function value */
215 bool need_retape; /**< will retaping be required for the next point evaluation? */
216 bool need_retape_always; /**< will retaping be always required? */
217
218 vector<atomic_userexpr*> userexprs; /**< vector of atomic_userexpr that are created during eval() and need to be kept as long as the tape exists */
219 SCIP_EXPRINTCAPABILITY userevalcapability;/**< (intersection of) capabilities of evaluation routines of user expressions */
220
221 int* hesrowidxs; /**< row indices of Hessian sparsity: indices are the variables used in expression */
222 int* hescolidxs; /**< column indices of Hessian sparsity: indices are the variables used in expression */
223 vector<SCIP_Real> hesvalues; /**< values of Hessian (a vector<> so we can pass it to CppAD) */
224 int hesnnz; /**< number of nonzeros in Hessian */
225 SCIP_Bool hesconstant; /**< whether Hessian is constant (because expr is at most quadratic) */
226
227 // Hessian data in CppAD style: indices are 0..n-1 and elements in both the lower and the upper triangle of the matrix are considered
228 CppAD::local::internal_sparsity<bool>::pattern_type hessparsity_pattern; /**< packed sparsity pattern of Hessian in CppAD-internal form */
229 CppAD::vector<size_t> hessparsity_row; /**< row indices of sparsity pattern of Hessian in CppAD-internal form */
230 CppAD::vector<size_t> hessparsity_col; /**< column indices of sparsity pattern of Hessian in CppAD-internal form */
231 CppAD::sparse_hessian_work heswork; /**< work memory of CppAD for sparse Hessians */
232};
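/* Illustrative sketch (not part of the original file): getVarPos() maps a variable index to its
 * position in the sorted varidxs vector, e.g., with hypothetical data
 *
 *   exprintdata->varidxs = { 3, 7, 12 };   // unique and sorted
 *   exprintdata->getVarPos(7);             // returns 1, since varidxs[1] == 7
 *
 * This position is then used to address X[], x[], and the Hessian sparsity structures above.
 */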
233
234#ifndef NO_CPPAD_USER_ATOMIC
235
236/** computes sparsity of jacobian for a univariate function during a forward sweep
237 *
238 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
239 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
240 */
241static
242bool univariate_for_sparse_jac(
243 size_t q, /**< number of columns in R */
244 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
245 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
246 )
247{
248 assert(r.size() == q);
249 assert(s.size() == q);
250
251 s = r;
252
253 return true;
254}
255
256/** Computes sparsity of jacobian during a reverse sweep
257 *
258 * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
259 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
260 */
261static
262bool univariate_rev_sparse_jac(
263 size_t q, /**< number of rows in R */
264 const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
265 CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
266 )
267{
268 assert(r.size() == q);
269 assert(s.size() == q);
270
271 s = r;
272
273 return true;
274}
275
276/** computes sparsity of hessian during a reverse sweep
277 *
278 * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
279 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
280 */ /*lint -e715*/
281static
282bool univariate_rev_sparse_hes(
283 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
284 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
285 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
286 size_t q, /**< number of columns in R, U, and V */
287 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
288 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
289 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
290 )
291{ /*lint --e{439,715}*/ /* @todo take vx into account */
292 assert(r.size() == q);
293 assert(s.size() == 1);
294 assert(t.size() == 1);
295 assert(u.size() == q);
296 assert(v.size() == q);
297
298 // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
299 t[0] = s[0];
300
301 // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
302 // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
303 v = u;
304 if( s[0] )
305 for( size_t j = 0; j < q; ++j )
306 if( r[j] )
307 v[j] = true;
308
309 return true;
310}
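/* Illustrative sketch (not part of the original file): for q = 2 with hypothetical inputs
 * r = [1,0] (sparsity of R), s = [1] (g' nonzero), and u = [0,1] (sparsity of U),
 * the function above returns t = [1] and v = [1,1]: the first column enters via the
 * g'(f(x)) f''(x) R term (pattern of R), the second column is inherited from U.
 */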
311
312
313/** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
314 *
315 * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
316 * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetics.
317 *
318 * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
319 */
320template<class Type>
321class atomic_posintpower : public CppAD::atomic_base<Type>
322{
323public:
324 atomic_posintpower()
325 : CppAD::atomic_base<Type>("posintpower"),
326 exponent(0)
327 {
328 /* indicate that we want to use bool-based sparsity pattern */
329 this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
330 }
331
332private:
333 /** exponent value for next call to forward or reverse */
334 int exponent;
335
336 /** stores exponent value corresponding to next call to forward or reverse
337 *
338 * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
339 * TODO using this function is deprecated, do as with atomic_userexpr instead
340 */
341 virtual void set_old(size_t id)
342 {
343 exponent = (int) id;
344 }
345
346 /** forward sweep of positive integer power
347 *
348 * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
349 * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
350 * in the taylor expansion of f(x) = x^p.
351 * Thus, y = x^p
352 * = tx[0]^p,
353 * y' = p * x^(p-1) * x'
354 * = p * tx[0]^(p-1) * tx[1],
355 * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
356 * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
357 */
358 bool forward(
359 size_t q, /**< lowest order Taylor coefficient that we are evaluating */
360 size_t p, /**< highest order Taylor coefficient that we are evaluating */
361 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
362 CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
363 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
364 CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
365 )
366 {
367 assert(exponent > 1);
368 assert(tx.size() >= p+1);
369 assert(ty.size() >= p+1);
370 assert(q <= p);
371
372 if( vx.size() > 0 )
373 {
374 assert(vx.size() == 1);
375 assert(vy.size() == 1);
376 assert(p == 0);
377
378 vy[0] = vx[0];
379 }
380
381 if( q == 0 /* q <= 0 && 0 <= p */ )
382 {
383 ty[0] = CppAD::pow(tx[0], exponent);
384 }
385
386 if( q <= 1 && 1 <= p )
387 {
388 ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
389 ty[1] *= double(exponent);
390 }
391
392 if( q <= 2 && 2 <= p )
393 {
394 if( exponent > 2 )
395 {
396 // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
397 ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
398 ty[2] *= (exponent-1) / 2.0;
399 ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
400 ty[2] *= exponent;
401 }
402 else
403 {
404 assert(exponent == 2);
405 // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
406 ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
407 }
408 }
409
410 /* higher order derivatives not implemented */
411 if( p > 2 )
412 return false;
413
414 return true;
415 }
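/* Illustrative sketch (not part of the original file): for exponent = 3 and Taylor input
 * tx = (2.0, 1.0, 0.0), i.e. x = 2 with x' = 1 and x'' = 0, the formulas above give
 *   ty[0] = 2^3         = 8,
 *   ty[1] = 3 * 2^2 * 1 = 12,
 *   ty[2] = 0.5 * 3 * 2 * 2 * 1^2 + 3 * 2^2 * 0 = 6,
 * which are exactly the Taylor coefficients of t -> (2 + t)^3 = 8 + 12 t + 6 t^2 + t^3.
 */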
416
417 /** reverse sweep of positive integer power
418 *
419 * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
420 * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
421 * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
422 * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
423 * That is, we have to compute
424 *\f$
425 * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
426 * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
427 * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
428 * \f$
429 *
430 * For k = 0, this means
431 *\f$
432 * px[0] = py[0] * (\partial y_0 / \partial x^[0])
433 * = py[0] * (\partial x^p / \partial x)
434 * = py[0] * p * tx[0]^(p-1)
435 *\f$
436 *
437 * For k = 1, this means
438 * \f$
439 * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
440 * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
441 * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
442 * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
443 * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
444 * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
445 * \f$
446 */ /*lint -e715*/
447 bool reverse(
448 size_t p, /**< highest order Taylor coefficient that we are evaluating */
449 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
450 const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
451 CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
452 const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
453 )
454 { /*lint --e{715}*/
455 assert(exponent > 1);
456 assert(px.size() >= p+1);
457 assert(py.size() >= p+1);
458 assert(tx.size() >= p+1);
459
460 switch( p )
461 {
462 case 0:
463 // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
464 px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
465 px[0] *= exponent;
466 break;
467
468 case 1:
469 // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
470 px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
471 px[0] *= exponent-1;
472 px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
473 px[0] *= exponent;
474 // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
475 px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
476 px[1] *= exponent;
477 break;
478
479 default:
480 return false;
481 }
482
483 return true;
484 }
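/* Illustrative sketch (not part of the original file): for exponent = 3, tx[0] = 2.0, p = 0,
 * and py = (1.0), the case above gives px[0] = 1.0 * 3 * 2^2 = 12, i.e. the derivative of x^3 at x = 2.
 */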
485
486 using CppAD::atomic_base<Type>::for_sparse_jac;
487
488 /** computes sparsity of jacobian during a forward sweep
489 *
490 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
491 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
492 */
493 bool for_sparse_jac(
494 size_t q, /**< number of columns in R */
495 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
496 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
497 )
498 {
499 return univariate_for_sparse_jac(q, r, s);
500 }
501
502 using CppAD::atomic_base<Type>::rev_sparse_jac;
503
504 /** computes sparsity of jacobian during a reverse sweep
505 *
506 * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
507 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
508 */
509 bool rev_sparse_jac(
510 size_t q, /**< number of rows in R */
511 const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
512 CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
513 )
514 {
515 return univariate_rev_sparse_jac(q, r, s);
516 }
517
518 using CppAD::atomic_base<Type>::rev_sparse_hes;
519
520 /** computes sparsity of hessian during a reverse sweep
521 *
522 * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
523 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
524 */
525 bool rev_sparse_hes(
526 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
527 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
528 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
529 size_t q, /**< number of columns in R, U, and V */
530 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
531 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
532 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
533 )
534 {
535 return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
536 }
537};
538
539/** power function with natural exponents */
540template<class Type>
541static
542void posintpower(
543 const vector<Type>& in, /**< vector which first argument is base */
544 vector<Type>& out, /**< vector where to store result in first argument */
545 size_t exponent /**< exponent */
546 )
547{
548 static atomic_posintpower<typename Type::value_type> pip;
549 pip(in, out, exponent);
550}
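/* Illustrative usage sketch (not part of the original file), showing how posintpower() would be
 * recorded on a CppAD tape; the names and values are made up for the example:
 *
 *   vector< AD<double> > X(1), Y(1), tmp(1);
 *   X[0] = 2.0;
 *   CppAD::Independent(X);
 *   posintpower(X, tmp, 3);          // records the atomic operation x -> x^3
 *   Y[0] = tmp[0];
 *   CppAD::ADFun<double> f(X, Y);    // f can now evaluate x^3 and its derivatives
 */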
551
552#else
553
554/** power function with natural exponents */
555template<class Type>
556void posintpower(
557 const vector<Type>& in, /**< vector which first argument is base */
558 vector<Type>& out, /**< vector where to store result in first argument */
559 size_t exponent /**< exponent */
560 )
561{
562 out[0] = pow(in[0], (int)exponent);
563}
564
565#endif
566
567
568#ifndef NO_CPPAD_USER_ATOMIC
569
570/** sign of a value (-1 or +1)
571 *
572 * 0.0 has sign +1
573 */
574#define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
575
576/** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
577 *
578 * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
579 * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows to provide
580 * a continuously differentiable function.
581 *
582 * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
583 */
584template<class Type>
585class atomic_signpower : public CppAD::atomic_base<Type>
586{
587public:
588 atomic_signpower()
589 : CppAD::atomic_base<Type>("signpower"),
590 exponent(0.0)
591 {
592 /* indicate that we want to use bool-based sparsity pattern */
593 this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
594 }
595
596private:
597 /** exponent for use in next call to forward or reverse */
598 SCIP_Real exponent;
599
600 /** stores exponent corresponding to next call to forward or reverse
601 *
602 * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
603 * TODO using this function is deprecated, do as with atomic_userexpr instead
604 */
605 virtual void set_old(size_t id)
606 {
607 exponent = SCIPgetExponentExprPow((SCIP_EXPR*)(void*)id);
608 }
609
610 /** forward sweep of signpower
611 *
612 * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
613 * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
614 * in the taylor expansion of f(x) = sign(x)abs(x)^p.
615 * Thus, y = sign(x)abs(x)^p
616 * = sign(tx[0])abs(tx[0])^p,
617 * y' = p * abs(x)^(p-1) * x'
618 * = p * abs(tx[0])^(p-1) * tx[1],
619 * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
620 * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
621 */
622 bool forward(
623 size_t q, /**< lowest order Taylor coefficient that we are evaluating */
624 size_t p, /**< highest order Taylor coefficient that we are evaluating */
625 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
626 CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
627 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
628 CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
629 )
630 {
631 assert(exponent > 0.0);
632 assert(tx.size() >= p+1);
633 assert(ty.size() >= p+1);
634 assert(q <= p);
635
636 if( vx.size() > 0 )
637 {
638 assert(vx.size() == 1);
639 assert(vy.size() == 1);
640 assert(p == 0);
641
642 vy[0] = vx[0];
643 }
644
645 if( q == 0 /* q <= 0 && 0 <= p */ )
646 {
647 ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
648 }
649
650 if( q <= 1 && 1 <= p )
651 {
652 ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
653 ty[1] *= exponent;
654 }
655
656 if( q <= 2 && 2 <= p )
657 {
658 if( exponent != 2.0 )
659 {
660 ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
661 ty[2] *= (exponent - 1.0) / 2.0;
662 ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
663 ty[2] *= exponent;
664 }
665 else
666 {
667 // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
668 ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
669 ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
670 }
671 }
672
673 /* higher order derivatives not implemented */
674 if( p > 2 )
675 return false;
676
677 return true;
678 }
679
680 /** reverse sweep of signpower
681 *
682 * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
683 * y(x) = [ f(x), f'(x), f''(x), ... ].
684 * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
685 * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
686 * That is, we have to compute
687 *\f$
688 * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
689 * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
690 * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
691 *\f$
692 *
693 * For k = 0, this means
694 *\f$
695 * px[0] = py[0] * (\partial y_0 / \partial x^[0])
696 * = py[0] * (\partial f(x) / \partial x)
697 * = py[0] * p * abs(tx[0])^(p-1)
698 * \f$
699 *
700 * For k = 1, this means
701 *\f$
702 * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
703 * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
704 * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
705 * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
706 * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
707 * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
708 * \f$
709 */
710 bool reverse(
711 size_t p, /**< highest order Taylor coefficient that we are evaluating */
712 const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
713 const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
714 CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
715 const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
716 )
717 { /*lint --e{715}*/
718 assert(exponent > 1);
719 assert(px.size() >= p+1);
720 assert(py.size() >= p+1);
721 assert(tx.size() >= p+1);
722
723 switch( p )
724 {
725 case 0:
726 // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
727 px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
728 px[0] *= exponent;
729 break;
730
731 case 1:
732 if( exponent != 2.0 )
733 {
734 // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
735 px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
736 px[0] *= exponent - 1.0;
737 px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
738 px[0] *= exponent;
739 // px[1] = py[1] * p * abs(tx[0])^(p-1)
740 px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
741 px[1] *= exponent;
742 }
743 else
744 {
745 // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
746 px[0] = py[1] * tx[1] * SIGN(tx[0]);
747 px[0] += py[0] * REALABS(tx[0]);
748 px[0] *= 2.0;
749 // px[1] = py[1] * 2.0 * abs(tx[0])
750 px[1] = py[1] * REALABS(tx[0]);
751 px[1] *= 2.0;
752 }
753 break;
754
755 default:
756 return false;
757 }
758
759 return true;
760 }
761
762 using CppAD::atomic_base<Type>::for_sparse_jac;
763
764 /** computes sparsity of jacobian during a forward sweep
765 *
766 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
767 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
768 */
769 bool for_sparse_jac(
770 size_t q, /**< number of columns in R */
771 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
772 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
773 )
774 {
775 return univariate_for_sparse_jac(q, r, s);
776 }
777
778 using CppAD::atomic_base<Type>::rev_sparse_jac;
779
780 /** computes sparsity of jacobian during a reverse sweep
781 *
782 * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
783 * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
784 */
785 bool rev_sparse_jac(
786 size_t q, /**< number of rows in R */
787 const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
788 CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
789 )
790 {
791 return univariate_rev_sparse_jac(q, r, s);
792 }
793
794 using CppAD::atomic_base<Type>::rev_sparse_hes;
795
796 /** computes sparsity of hessian during a reverse sweep
797 *
798 * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
799 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
800 */
801 bool rev_sparse_hes(
802 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
803 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
804 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
805 size_t q, /**< number of columns in S and R */
806 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
807 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
808 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
809 )
810 {
811 return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
812 }
813
814};
815
816/** template for evaluation for signpower operator */
817template<class Type>
818static
819void evalSignPower(
820 Type& resultant, /**< resultant */
821 const Type& arg, /**< operand */
822 SCIP_EXPR* expr /**< expression that holds the exponent */
823 )
824{
825 vector<Type> in(1, arg);
826 vector<Type> out(1);
827
828 static atomic_signpower<typename Type::value_type> sp;
829 sp(in, out, (size_t)(void*)expr);
830
831 resultant = out[0];
832 return;
833}
834
835#else
836
837/** specialization of signpower evaluation for real numbers */
838template<class Type>
839static
840void evalSignPower(
841 CppAD::AD<Type>& resultant, /**< resultant */
842 const CppAD::AD<Type>& arg, /**< operand */
843 SCIP_EXPR* expr /**< expression that holds the exponent */
844 )
845{
846 AD<Type> adzero(0.);
847 SCIP_Real exponent;
848
849 exponent = SCIPgetExponentExprPow(expr);
850 assert(exponent >= 1.0);
851
852 if( EPSISINT(exponent, 0.0) )
853 {
854 resultant = CppAD::CondExpGe(arg, adzero, pow(arg, (int)exponent), -pow(-arg, (int)exponent));
855 }
856 else
857 {
858 /* pow(0,fractional>1) is not differentiable in the CppAD world (https://github.com/coin-or/CppAD/discussions/93)
859 * we work around this by evaluating pow(arg+eps,exponent) - pow(eps,exponent) in this case
860 */
861 resultant = CppAD::CondExpEq(arg, adzero, pow(arg+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
862 CppAD::CondExpGe(arg, adzero, pow(arg, exponent), -pow(-arg, exponent)));
863 }
864}
865#endif
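/* Illustrative sketch (not part of the original file): with the specialization above,
 * evalSignPower(res, x, expr) for an exponent of 2.5 records
 *   res = x^2.5        if x >= 0,
 *   res = -(-x)^2.5    if x <  0,
 * where the CondExpEq branch only shifts the evaluation slightly away from 0 so that
 * CppAD's derivative of pow() stays well defined at x = 0.
 */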
866
867/** Automatic differentiation of user expression as CppAD user-atomic function.
868 *
869 * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
870 */
871class atomic_userexpr : public CppAD::atomic_base<SCIP_Real>
872{
873public:
874 atomic_userexpr(
875 SCIP* scip_, /**< SCIP data structure */
876 SCIP_EXPR* expr_ /**< expression to use */
877 )
878 : CppAD::atomic_base<SCIP_Real>(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr_)), CppAD::atomic_base<SCIP_Real>::bool_sparsity_enum),
879 scip(scip_),
880 expr(expr_)
881 { }
882
883private:
884 /** SCIP data structure */
885 SCIP* scip;
886
887 /** expression */
888 SCIP_EXPR* expr;
889
890 /** forward sweep of userexpr
891 *
892 * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
893 * Note that p and q are interchanged!
894 *
895 * For a scalar variable t, let
896 * Y(t) = f(X(t))
897 * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
898 * where for x^i the i is an index, while for t^i the i is an exponent.
899 * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
900 *
901 * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
902 * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
903 * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
904 * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
905 *
906 * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[(n-1)*(p+1)+k]), we get
907 * ty[0] = y^0 = f(x^0) = f(tx[{0..n-1}*(p+1)])
908 * ty[1] = y^1 = f'(x^0) * tx[{0..n-1}*(p+1)+1] = sum(i=0..n-1, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
909 * ty[2] = 1/2 sum(i,j=0..n-1, tx[i*(p+1)+1] * tx[j*(p+1)+1] * hessian[i,j]) + sum(i=0..n-1, grad[i] * tx[i*(p+1)+2])
910 */
911 /* cppcheck-suppress unusedPrivateFunction */
912 bool forward(
913 size_t q, /**< lowest order Taylor coefficient that we are evaluating */
914 size_t p, /**< highest order Taylor coefficient that we are evaluating */
915 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
916 CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
917 const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
918 CppAD::vector<SCIP_Real>& ty /**< vector to store taylor coefficients of y */
919 )
920 {
921 assert(scip != NULL);
922 assert(expr != NULL);
923 assert(ty.size() == p+1);
924 assert(q <= p);
925
926 size_t n = tx.size() / (p+1);
927 assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
928 assert(n >= 1);
929
930 SCIPdebugMsg(scip, "expr_%s:forward, q=%zd, p=%zd\n", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), q, p);
931
932 if( vx.size() > 0 )
933 {
934 assert(vx.size() == n);
935 assert(vy.size() == 1);
936 assert(p == 0);
937
938 /* y_0 is a variable if at least one of the x_i is a variable */
939 vy[0] = false;
940 for( size_t i = 0; i < n; ++i )
941 if( vx[i] )
942 {
943 vy[0] = true;
944 break;
945 }
946 }
947
948 if( p == 0 )
949 {
950 /* only eval requested
951 * arguments are in tx[i * (p+1) + 0], i == 0..n-1, that is tx[0]..tx[n-1]
952 */
953 if( SCIPcallExprEval(scip, expr, const_cast<double*>(tx.data()), &ty[0]) != SCIP_OKAY )
954 return false;
955
956 if( ty[0] == SCIP_INVALID )
957 ty[0] = std::numeric_limits<double>::quiet_NaN();
958
959 return true;
960 }
961
962 if( p == 1 )
963 {
964 /* eval and forward-diff requested
965 *
966 * point to eval is in tx[i * (p+1) + 0], i == 0..n-1
967 * direction is in tx[i * (p+1) + 1], i == 0..n-1
968 */
969 SCIP_Real* x = new SCIP_Real[n];
970 SCIP_Real* dir = new SCIP_Real[n];
971 for( size_t i = 0; i < n; ++i )
972 {
973 x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
974 dir[i] = tx[i * (p+1) + 1]; /*lint !e835*/
975 }
976
977 SCIP_RETCODE rc = SCIPcallExprEvalFwdiff(scip, expr, x, dir, &ty[0], &ty[1]);
978
979 if( ty[0] == SCIP_INVALID )
980 ty[0] = std::numeric_limits<double>::quiet_NaN();
981 if( ty[1] == SCIP_INVALID )
982 ty[1] = std::numeric_limits<double>::quiet_NaN();
983
984 delete[] dir;
985 delete[] x;
986
987 return rc == SCIP_OKAY;
988 }
989
990 /* higher order derivatives not implemented yet (TODO) */
991 SCIPABORT();
992 return false;
993
994#if SCIP_DISABLED_CODE
995 /* cppcheck-suppress unreachableCode */
996 SCIP_Real* gradient = NULL;
997 SCIP_Real* hessian = NULL;
998
999 if( q <= 2 && 1 <= p )
1000 gradient = new SCIP_Real[n];
1001 if( q <= 2 && 2 <= p )
1002 hessian = new SCIP_Real[n*n];
1003
1004 if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1005 {
1006 delete[] x;
1007 delete[] gradient;
1008 delete[] hessian;
1009 return false;
1010 }
1011
1012 if( gradient != NULL )
1013 {
1014 ty[1] = 0.0;
1015 for( size_t i = 0; i < n; ++i )
1016 ty[1] += gradient[i] * tx[i * (p+1) + 1];
1017 }
1018
1019 if( hessian != NULL )
1020 {
1021 assert(gradient != NULL);
1022
1023 ty[2] = 0.0;
1024 for( size_t i = 0; i < n; ++i )
1025 {
1026 for( size_t j = 0; j < n; ++j )
1027 ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1028
1029 ty[2] += gradient[i] * tx[i * (p+1) + 2];
1030 }
1031 }
1032
1033 delete[] x;
1034 delete[] gradient;
1035 delete[] hessian;
1036
1037 /* higher order derivatives not implemented */
1038 if( p > 2 )
1039 return false;
1040
1041 return true;
1042#endif
1043 }
1044
1045 /** reverse sweep of userexpr
1046 *
1047 * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1048 * Note that there q is our p.
1049 *
1050 * For a scalar variable t, let
1051 * Y(t) = f(X(t))
1052 * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1053 * where for x^i the i is an index, while for t^i the i is an exponent.
1054 * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1055 *
1056 * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1057 * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1058 * y^0, y^1, ... are the taylor coefficients of f(x).
1059 *
1060 * Further, let F(x^0,..,x^p) be given as F^k(x) = y^k. Thus,
1061 * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1062 * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1063 * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1064 *
1065 * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1066 * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1067 * \f$
1068 * px^l = \partial H / \partial x^l
1069 * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1070 * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1071 * \f$
1072 *
1073 * For p = 0, this means
1074 * \f$
1075 * px^0 = py[0] * \partial F^0 / \partial x^0
1076 * = py[0] * \partial f(x^0) / \partial x^0
1077 * = py[0] * f'(x^0)
1078 * \f$
1079 *
1080 * For p = 1, this means (for l = 0):
1081 * \f[
1082 * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1083 * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1084 * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1085 * \f]
1086 * and (for l=1):
1087 * \f[
1088 * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1089 * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^1
1090 * = py[0] * 0 + py[1] * f'(x^0)
1091 * \f]
1092 *
1093 * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[(n-1)*(p+1)+k]) and
1094 * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[(n-1)*(p+1)+k]), we get
1095 * for p = 0:
1096 * px[i] = (px^0)_i = py[0] * grad[i]
1097 * for p = 1:
1098 * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1099 * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1100 */
1101 /* cppcheck-suppress unusedPrivateFunction */
1102 bool reverse(
1103 size_t p, /**< highest order Taylor coefficient that we are evaluating */
1104 const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
1105 const CppAD::vector<SCIP_Real>& ty, /**< values for taylor coefficients of y */
1106 CppAD::vector<SCIP_Real>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1107 const CppAD::vector<SCIP_Real>& py /**< values for partial derivatives of g(x) w.r.t. y */
1108 )
1109 {
1110 assert(expr != NULL);
1111 assert(px.size() == tx.size());
1112 assert(py.size() == p+1);
1113
1114 // TODO implement this again and then remove check for userexprs in SCIPexprintGrad
1115 SCIPABORT();
1116 return false;
1117
1118#ifdef SCIP_DISABLED_CODE
1119 /* cppcheck-suppress unreachableCode */
1120 size_t n = tx.size() / (p+1);
1121 assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1122 assert(n >= 1);
1123
1124 SCIP_Real* x = new SCIP_Real[n];
1125 SCIP_Real funcval;
1126 SCIP_Real* gradient = new SCIP_Real[n];
1127 SCIP_Real* hessian = NULL;
1128
1129 if( p == 1 )
1130 hessian = new SCIP_Real[n*n];
1131
1132 for( size_t i = 0; i < n; ++i )
1133 x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1134
1135 if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1136 {
1137 delete[] x;
1138 delete[] gradient;
1139 delete[] hessian;
1140 return false;
1141 }
1142
1143 switch( p )
1144 {
1145 case 0:
1146 // px[j] = (px^0)_j = py[0] * grad[j]
1147 for( size_t i = 0; i < n; ++i )
1148 px[i] = py[0] * gradient[i];
1149 break;
1150
1151 case 1:
1152 // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1153 // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1154 assert(hessian != NULL);
1155 for( size_t i = 0; i < n; ++i )
1156 {
1157 px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1158 for( size_t j = 0; j < n; ++j )
1159 px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1160
1161 px[i*2+1] = py[1] * gradient[i];
1162 }
1163 break;
1164
1165 default:
1166 return false;
1167 }
1168
1169 return true;
1170#endif
1171 } /*lint !e715*/
1172
1173 using CppAD::atomic_base<SCIP_Real>::for_sparse_jac;
1174
1175 /** computes sparsity of jacobian during a forward sweep
1176 * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1177 * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1178 */
1179 /* cppcheck-suppress unusedPrivateFunction */
1180 bool for_sparse_jac(
1181 size_t q, /**< number of columns in R */
1182 const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1183 CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1184 )
1185 {
1186 assert(expr != NULL);
1187 assert(s.size() == q);
1188
1189 size_t n = r.size() / q;
1190 assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1191
1192 // sparsity for S(x) = f'(x) * R
1193 for( size_t j = 0; j < q; j++ )
1194 {
1195 s[j] = false;
1196 for( size_t i = 0; i < n; i++ )
1197 s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1198 }
1199
1200 return true;
1201 }
1202
1203 using CppAD::atomic_base<SCIP_Real>::rev_sparse_jac;
1204
1205 /** computes sparsity of jacobian during a reverse sweep
1206 * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1207 * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1208 */
1209 /* cppcheck-suppress unusedPrivateFunction */
1210 bool rev_sparse_jac(
1211 size_t q, /**< number of rows in R */
1212 const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1213 CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1214 )
1215 {
1216 assert(expr != NULL);
1217 assert(rt.size() == q);
1218
1219 size_t n = st.size() / q;
1220 assert(n == (size_t)SCIPexprGetNChildren(expr));
1221
1222 // sparsity for S(x)^T = f'(x)^T * R^T
1223 for( size_t j = 0; j < q; j++ )
1224 for( size_t i = 0; i < n; i++ )
1225 st[i * q + j] = rt[j];
1226
1227 return true;
1228 }
1229
1230 using CppAD::atomic_base<SCIP_Real>::rev_sparse_hes;
1231
1232 /** computes sparsity of hessian during a reverse sweep
1233 * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1234 * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1235 */
1236 /* cppcheck-suppress unusedPrivateFunction */
1237 bool rev_sparse_hes(
1238 const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1239 const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1240 CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1241 size_t q, /**< number of columns in S and R */
1242 const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1243 const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1244 CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1245 )
1246 {
1247 assert(expr != NULL);
1248 size_t n = vx.size();
1249 assert((size_t)SCIPexprGetNChildren(expr) == n);
1250 assert(s.size() == 1);
1251 assert(t.size() == n);
1252 assert(r.size() == n * q);
1253 assert(u.size() == q);
1254 assert(v.size() == n * q);
1255
1256 size_t i, j, k;
1257
1258 // sparsity for T(x) = S(x) * f'(x)
1259 for( i = 0; i < n; ++i )
1260 t[i] = s[0];
1261
1262 // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1263 // U(x) = g''(y) * f'(x) * R
1264 // S(x) = g'(y)
1265
1266 // back propagate the sparsity for U
1267 for( j = 0; j < q; j++ )
1268 for( i = 0; i < n; i++ )
1269 v[ i * q + j] = u[j];
1270
1271 // include forward Jacobian sparsity in Hessian sparsity
1272 // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1273 if( s[0] )
1274 for( j = 0; j < q; j++ )
1275 for( i = 0; i < n; i++ )
1276 for( k = 0; k < n; ++k )
1277 v[ i * q + j] |= (bool) r[ k * q + j];
1278
1279 return true;
1280 }
1281};
1282
1283
1284/** integer power operation for arbitrary integer exponents */
1285template<class Type>
1286static
1287void evalIntPower(
1288 Type& resultant, /**< resultant */
1289 const Type& arg, /**< operand */
1290 const int exponent /**< exponent */
1291 )
1292{
1293 if( exponent > 1 )
1294 {
1295 vector<Type> in(1, arg);
1296 vector<Type> out(1);
1297
1298 posintpower(in, out, exponent);
1299
1300 resultant = out[0];
1301 return;
1302 }
1303
1304 if( exponent < -1 )
1305 {
1306 vector<Type> in(1, arg);
1307 vector<Type> out(1);
1308
1309 posintpower(in, out, -exponent);
1310
1311 resultant = Type(1.0)/out[0];
1312 return;
1313 }
1314
1315 if( exponent == 1 )
1316 {
1317 resultant = arg;
1318 return;
1319 }
1320
1321 if( exponent == 0 )
1322 {
1323 resultant = Type(1.0);
1324 return;
1325 }
1326
1327 assert(exponent == -1);
1328 resultant = Type(1.0)/arg;
1329}
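/* Illustrative sketch (not part of the original file): the case split above means, e.g.,
 *   evalIntPower(res, x,  3)  ->  res = x^3 via posintpower
 *   evalIntPower(res, x, -3)  ->  res = 1 / x^3 via posintpower on the negated exponent
 *   evalIntPower(res, x,  1)  ->  res = x
 *   evalIntPower(res, x,  0)  ->  res = 1
 *   evalIntPower(res, x, -1)  ->  res = 1 / x
 */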
1330
1331/** CppAD compatible evaluation of an expression for given arguments */
1332template<class Type>
1333static
1334SCIP_RETCODE eval(
1335 SCIP* scip, /**< SCIP data structure */
1336 SCIP_EXPR* expr, /**< expression */
1337 SCIP_EXPRINTDATA* exprintdata, /**< interpreter data for root expression */
1338 const vector<Type>& x, /**< values of variables */
1339 Type& val /**< buffer to store expression value */
1340 )
1341{
1342 Type* buf = NULL;
1343
1344 assert(expr != NULL);
1345
1346 // TODO this should iterate instead of using recursion
1347 // but the iterdata wouldn't work to hold Type at the moment
1348 // they could hold Type*, but then we need to alloc small portions all the time
1349 // or we have a big Type-array outside and point to it in iterdata
1350
1351 if( SCIPisExprVaridx(scip, expr) )
1352 {
1353 val = x[exprintdata->getVarPos(SCIPgetIndexExprVaridx(expr))];
1354 return SCIP_OKAY;
1355 }
1356 if( SCIPisExprValue(scip, expr) )
1357 {
1358 val = SCIPgetValueExprValue(expr);
1359 return SCIP_OKAY;
1360 }
1361
1362 buf = new Type[SCIPexprGetNChildren(expr)];
1363 for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1364 {
1365 SCIP_CALL( eval(scip, SCIPexprGetChildren(expr)[i], exprintdata, x, buf[i]) );
1366 }
1367
1368#ifndef EVAL_USE_EXPRHDLR_ALWAYS
1369 if( SCIPisExprSum(scip, expr) )
1370 {
1371 val = SCIPgetConstantExprSum(expr);
1372 for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1373 val += SCIPgetCoefsExprSum(expr)[i] * buf[i];
1374 }
1375 else if( SCIPisExprProduct(scip, expr) )
1376 {
1377 val = SCIPgetCoefExprProduct(expr);
1378 for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1379 val *= buf[i];
1380 }
1381 else if( SCIPisExprPower(scip, expr) )
1382 {
1383 SCIP_Real exponent = SCIPgetExponentExprPow(expr);
1384 if( EPSISINT(exponent, 0.0) )
1385 evalIntPower(val, buf[0], (int)SCIPgetExponentExprPow(expr));
1386 else if( exponent == 0.5 )
1387 val = sqrt(buf[0]);
1388 else if( exponent < 1.0 )
1389 val = CppAD::pow(buf[0], SCIPgetExponentExprPow(expr));
1390 else
1391 {
1392 // workaround bug where CppAD claims pow(x,fractional>0) is nondiff at x=0
1393 // https://github.com/coin-or/CppAD/discussions/93#discussioncomment-327876
1394 AD<double> adzero(0.);
1395 val = CppAD::CondExpEq(buf[0], adzero, pow(buf[0]+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
1396 pow(buf[0], exponent));
1397 }
1398 }
1399 else if( SCIPisExprSignpower(scip, expr) )
1400 {
1401 evalSignPower(val, buf[0], expr);
1402 }
1403 else if( SCIPisExprExp(scip, expr) )
1404 {
1405 val = exp(buf[0]);
1406 }
1407 else if( SCIPisExprLog(scip, expr) )
1408 {
1409 val = log(buf[0]);
1410 }
1411 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "sin") == 0 )
1412 {
1413 val = sin(buf[0]);
1414 }
1415 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "cos") == 0 )
1416 {
1417 val = cos(buf[0]);
1418 }
1419 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "erf") == 0 )
1420 {
1421 val = erf(buf[0]);
1422 }
1423 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "abs") == 0 )
1424 {
1425 val = abs(buf[0]);
1426 }
1427 else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "entropy") == 0 )
1428 {
1429 /* -x*log(x) if x > 0, else 0
1430 * https://coin-or.github.io/CppAD/doc/cond_exp.cpp.htm suggest to use 0 for the x=0 case
1431 * but then derivatives at 0 are 0, while they are actually infinite (see expr_entropy.c:bwdiff)
1432 * so we use -sqrt(x) for the x=0 case, as this also has value 0 and first derivative -inf at 0
1433 */
1434 val = CppAD::CondExpGt(buf[0], AD<double>(0.), -buf[0] * log(buf[0]), -sqrt(buf[0]));
1435 }
1436 else
1437#endif
1438 {
1439 //SCIPerrorMessage("using derivative methods of exprhdlr %s in CppAD is not implemented yet", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)));
1440 //CppAD::ErrorHandler::Call(true, __LINE__, __FILE__, "evalUser()", "Error: user expressions in CppAD not possible without CppAD user atomic facility");
1441 vector<Type> in(buf, buf + SCIPexprGetNChildren(expr));
1442 vector<Type> out(1);
1443
1444 exprintdata->userexprs.push_back(new atomic_userexpr(scip, expr));
1445 (*exprintdata->userexprs.back())(in, out);
1446
1447 val = out[0];
1448 }
1449
1450 delete[] buf;
1451
1452 return SCIP_OKAY;
1453}
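/* Illustrative sketch (not part of the original file): for a hypothetical expression
 * 2*x_0 + x_1^2 with varidxs = {0, 1}, a call eval(scip, expr, exprintdata, X, val) recurses into
 * the two children and records
 *   val = 2.0 * X[0] + posintpower(X[1], 2)
 * on the CppAD tape, so that the ADFun created from (X, val) further below can deliver
 * gradients and Hessians of the expression.
 */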
1454
1455/** replacement for CppAD's default error handler
1456 *
1457 * In debug mode, CppAD gives an error when an evaluation contains a nan.
1458 * We do not want to stop execution in such a case, since the calling routine should check for nan's and decide what to do.
1459 * Since we cannot ignore this particular error, we ignore all.
1460 * @todo find a way to check whether the error corresponds to a nan and communicate this back
1461 */
1462static
1463void cppaderrorcallback(
1464 bool known, /**< is the error from a known source? */
1465 int line, /**< line where error occurred */
1466 const char* file, /**< file where error occurred */
1467 const char* cond, /**< error condition */
1468 const char* msg /**< error message */
1469 )
1470{
1471 SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
1472}
1473
1474/* install our error handler */
1475static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
1476
1477/** gets name and version of expression interpreter */
1478const char* SCIPexprintGetName(void)
1479{
1480 return CPPAD_PACKAGE_STRING;
1481}
1482
1483/** gets descriptive text of expression interpreter */
1484const char* SCIPexprintGetDesc(void)
1485{
1486 return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (github.com/coin-or/CppAD)";
1487}
1488
1489/** gets capabilities of expression interpreter (using bitflags) */
1490SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
1491 void
1492 )
1493{
1494 return SCIP_EXPRINTCAPABILITY_ALL;
1495}
1496
1497/** creates an expression interpreter object */
1498SCIP_RETCODE SCIPexprintCreate(
1499 SCIP* scip, /**< SCIP data structure */
1500 SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
1501 )
1502{
1503 assert(exprint != NULL);
1504
1505 *exprint = (SCIP_EXPRINT*)1u; /* some code checks that a non-NULL pointer is returned here, even though it may not point anywhere */
1506
1507 return SCIP_OKAY;
1508}
1509
1510/** frees an expression interpreter object */
1511SCIP_RETCODE SCIPexprintFree(
1512 SCIP* scip, /**< SCIP data structure */
1513 SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
1514 )
1515{
1516 assert(exprint != NULL);
1517
1518 *exprint = NULL;
1519
1520 return SCIP_OKAY;
1521}
1522
1523/** compiles an expression and returns interpreter-specific data for expression
1524 *
1525 * can be called again with existing exprintdata if expression has been changed
1526 *
1527 * @attention *exprintdata needs to be initialized to NULL at first call
1528 * @attention the expression is assumed to use varidx expressions instead of var expressions
1529 */
1530SCIP_RETCODE SCIPexprintCompile(
1531 SCIP* scip, /**< SCIP data structure */
1532 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1533 SCIP_EXPR* rootexpr, /**< expression */
1534 SCIP_EXPRINTDATA** exprintdata /**< buffer to store pointer to compiled data */
1535 )
1536{
1537 SCIP_EXPRITER* it;
1538 SCIP_EXPR* expr;
1539
1540 assert(rootexpr != NULL);
1541 assert(exprintdata != NULL);
1542
1543 if( *exprintdata == NULL )
1544 {
1545 *exprintdata = new SCIP_EXPRINTDATA();
1546 assert(*exprintdata != NULL);
1547 }
1548 else
1549 {
1550 (*exprintdata)->need_retape = true;
1551 (*exprintdata)->need_retape_always = false;
1552 (*exprintdata)->hesconstant = false;
1553 }
1554
1555 SCIP_CALL( SCIPcreateExpriter(scip, &it) );
1556 SCIP_CALL( SCIPexpriterInit(it, rootexpr, SCIP_EXPRITER_DFS, FALSE) );
1557
1558 std::set<int> varidxs;
1559 for( expr = SCIPexpriterGetCurrent(it); !SCIPexpriterIsEnd(it); expr = SCIPexpriterGetNext(it) )
1560 {
1561 /* cannot handle var-expressions in exprint so far, should be varidx expressions */
1562 assert(!SCIPisExprVar(scip, expr));
1563
1564 if( SCIPisExprVaridx(scip, expr) )
1565 {
1566 varidxs.insert(SCIPgetIndexExprVaridx(expr));
1567 continue;
1568 }
1569
1570 /* check whether expr is of a type we don't recognize */
1571#ifndef EVAL_USE_EXPRHDLR_ALWAYS
1572 if( !SCIPisExprSum(scip, expr) &&
1573 !SCIPisExprProduct(scip, expr) &&
1574 !SCIPisExprPower(scip, expr) &&
1575 !SCIPisExprSignpower(scip, expr) &&
1576 !SCIPisExprExp(scip, expr) &&
1577 !SCIPisExprLog(scip, expr) &&
1578 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "abs") != 0 &&
1579 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "sin") != 0 &&
1580 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "cos") != 0 &&
1581 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "entropy") != 0 &&
1582 strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "erf") != 0 &&
1583 !SCIPisExprValue(scip, expr) )
1584#endif
1585 {
1586 /* an expression for which we have no taping implemented in eval()
1587 * so we will try to use CppAD's user-atomic function facility and the expression handler methods
1588 * however, only atomic_userexpr:forward() is implemented for p=0,1 at the moment, so we cannot do Hessians
1589 * also the exprhdlr needs to implement the fwdiff callback for derivatives
1590 */
1591 if( SCIPexprhdlrHasFwdiff(SCIPexprGetHdlr(expr)) )
1592 (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_GRADIENT;
1593 else
1594 (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE;
1595 }
1596
1597 //if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "xyz") == 0 )
1598 // (*exprintdata)->need_retape_always = true;
1599 }
1600
1601 SCIPfreeExpriter(&it);
1602
1603 (*exprintdata)->varidxs.reserve(varidxs.size());
1604 (*exprintdata)->varidxs.insert((*exprintdata)->varidxs.begin(), varidxs.begin(), varidxs.end());
1605
1606 size_t n = (*exprintdata)->varidxs.size();
1607 (*exprintdata)->X.resize(n);
1608 (*exprintdata)->x.resize(n);
1609 (*exprintdata)->Y.resize(1);
1610
1611 // check whether we are quadratic (or linear), so we can save time when computing the Hessian
1612 // assumes simplified and skips over x^2 and x*y cases
1613 // not using SCIPcheckExprQuadratic(), because we don't need the quadratic form
1614 if( SCIPisExprSum(scip, rootexpr) )
1615 {
1616 (*exprintdata)->hesconstant = true;
1617 for( int i = 0; i < SCIPexprGetNChildren(rootexpr); ++i )
1618 {
1619 SCIP_EXPR* child;
1620 child = SCIPexprGetChildren(rootexpr)[i];
1621 /* linear term is ok */
1622 if( SCIPisExprVaridx(scip, child) )
1623 continue;
1624 /* square term is ok */
1625 if( SCIPisExprPower(scip, child) && SCIPgetExponentExprPow(child) == 2.0 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) )
1626 continue;
1627 /* bilinear term is ok */
1628 if( SCIPisExprProduct(scip, child) && SCIPexprGetNChildren(child) == 2 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[1]) )
1629 continue;
1630 /* everything else means not quadratic (or not simplified) */
1631 (*exprintdata)->hesconstant = false;
1632 break;
1633 }
1634 SCIPdebugMsg(scip, "Hessian found %sconstant\n", (*exprintdata)->hesconstant ? "" : "not ");
1635 }
1636
1637 return SCIP_OKAY;
1638}
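/* Editorial note (illustrative example, not part of the original source): the hesconstant detection above
 * accepts, e.g., the simplified sum f(x0,x1) = 2*x0 + 3*x0^2 + 4*x0*x1, whose Hessian
 *    [ 6  4 ]
 *    [ 4  0 ]
 * does not depend on the point, so SCIPexprintHessian() needs to compute the Hessian values only once.
 * A term like x0^3 or exp(x0) in the sum would reset hesconstant to false.
 */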
1639
1640/** frees interpreter data for expression */
1641SCIP_RETCODE SCIPexprintFreeData(
1642 SCIP* scip, /**< SCIP data structure */
1643 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1644 SCIP_EXPR* expr, /**< expression */
1645 SCIP_EXPRINTDATA** exprintdata /**< pointer to pointer to compiled data to be freed */
1646 )
1647{
1648 assert( exprintdata != NULL);
1649 assert(*exprintdata != NULL);
1650
1651 SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hesrowidxs, (*exprintdata)->hesnnz);
1652 SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hescolidxs, (*exprintdata)->hesnnz);
1653
1654 delete *exprintdata;
1655 *exprintdata = NULL;
1656
1657 return SCIP_OKAY;
1658}
1659
1660/** gives the capability to evaluate an expression by the expression interpreter
1661 *
1662 * In cases of user-given expressions, higher order derivatives may not be available for the user-expression,
1663 * even if the expression interpreter could handle these. This method allows to recognize that, e.g., the
1664 * Hessian for an expression is not available because it contains a user expression that does not provide
1665 * Hessians.
1666 */
1667SCIP_EXPRINTCAPABILITY SCIPexprintGetExprCapability(
1668 SCIP* scip, /**< SCIP data structure */
1669 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1670 SCIP_EXPR* expr, /**< expression */
1671 SCIP_EXPRINTDATA* exprintdata /**< interpreter-specific data for expression */
1672 )
1673{
1674 assert(exprintdata != NULL);
1675
1676 return exprintdata->userevalcapability;
1677}/*lint !e715*/
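/* Editorial sketch (illustrative, not part of the original source): a caller could use the capability mask
 * returned above to decide whether second derivatives may be requested for a compiled expression;
 * the helper name below is hypothetical.
 */
#if 0
static
SCIP_Bool exampleHasHessianCapability(
   SCIP*                 scip,               /**< SCIP data structure */
   SCIP_EXPRINT*         exprint,            /**< interpreter data structure */
   SCIP_EXPR*            expr,               /**< expression */
   SCIP_EXPRINTDATA*     exprintdata         /**< interpreter-specific data for expression */
   )
{
   /* the returned mask is the intersection of what the interpreter and all user expressions support */
   return (SCIPexprintGetExprCapability(scip, exprint, expr, exprintdata) & SCIP_EXPRINTCAPABILITY_HESSIAN) != 0;
}
#endif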
1678
1679/** evaluates an expression */
1680SCIP_RETCODE SCIPexprintEval(
1681 SCIP* scip, /**< SCIP data structure */
1682 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1683 SCIP_EXPR* expr, /**< expression */
1684 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1685 SCIP_Real* varvals, /**< values of variables */
1686 SCIP_Real* val /**< buffer to store value of expression */
1687 )
1688{
1689 assert(expr != NULL);
1690 assert(exprintdata != NULL);
1691 assert(varvals != NULL);
1692 assert(val != NULL);
1693
1694 size_t n = exprintdata->varidxs.size();
1695
1696 if( n == 0 )
1697 {
1698 SCIP_CALL( SCIPevalExpr(scip, expr, NULL, 0L) );
1699 exprintdata->val = *val = SCIPexprGetEvalValue(expr);
1700 return SCIP_OKAY;
1701 }
1702
1703 if( exprintdata->need_retape_always || exprintdata->need_retape )
1704 {
1705 /* free old Hessian data, if any */
1706 SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz);
1707 SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hescolidxs, exprintdata->hesnnz);
1708 exprintdata->hesvalues.clear();
1709 exprintdata->hesnnz = 0;
1710
1711 for( size_t i = 0; i < n; ++i )
1712 {
1713 int idx = exprintdata->varidxs[i];
1714 exprintdata->X[i] = varvals[idx];
1715 exprintdata->x[i] = varvals[idx]; /* need this for a following grad or hessian eval with new_x = false */
1716 }
1717
1718 // delete old atomic_userexprs before we start collecting new ones
1719 for( vector<atomic_userexpr*>::iterator it(exprintdata->userexprs.begin()); it != exprintdata->userexprs.end(); ++it )
1720 delete *it;
1721 exprintdata->userexprs.clear();
1722
1723 CppAD::Independent(exprintdata->X);
1724
1725 SCIP_CALL( eval(scip, expr, exprintdata, exprintdata->X, exprintdata->Y[0]) );
1726
1727 exprintdata->f.Dependent(exprintdata->X, exprintdata->Y);
1728
1729 exprintdata->val = Value(exprintdata->Y[0]);
1730 SCIPdebugMessage("Eval retaped and computed value %g\n", exprintdata->val);
1731
1732 // the following is required if the gradient shall be computed by a reverse sweep later
1733 // exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1734
1735 // https://coin-or.github.io/CppAD/doc/optimize.htm
1736 exprintdata->f.optimize();
1737
1738 exprintdata->need_retape = false;
1739 }
1740 else
1741 {
1742 assert(exprintdata->x.size() >= n);
1743 for( size_t i = 0; i < n; ++i )
1744 exprintdata->x[i] = varvals[exprintdata->varidxs[i]];
1745
1746 exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1747 SCIPdebugMessage("Eval used forward sweep to compute value %g\n", exprintdata->val);
1748 }
1749
1750 *val = exprintdata->val;
1751
1752 return SCIP_OKAY;
1753}
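/* Editorial sketch (illustrative, not part of the original source): typical call sequence for a single
 * point evaluation; varvals must be indexed by the variable indices of the varidx expressions in expr,
 * and the first evaluation after compiling tapes the expression as done above. The helper name is hypothetical.
 */
#if 0
static
SCIP_RETCODE exampleEvalOnce(
   SCIP*                 scip,               /**< SCIP data structure */
   SCIP_EXPRINT*         exprint,            /**< interpreter data structure */
   SCIP_EXPR*            expr,               /**< expression using varidx (not var) expressions */
   SCIP_Real*            varvals,            /**< point to evaluate, indexed by variable index */
   SCIP_Real*            val                 /**< buffer to store value of expr at varvals */
   )
{
   SCIP_EXPRINTDATA* exprintdata = NULL;

   SCIP_CALL( SCIPexprintCompile(scip, exprint, expr, &exprintdata) );

   /* the first eval after compile retapes the expression with CppAD and stores the function object */
   SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );

   SCIP_CALL( SCIPexprintFreeData(scip, exprint, expr, &exprintdata) );

   return SCIP_OKAY;
}
#endif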
1754
1755/** computes value and gradient of an expression */
1756SCIP_RETCODE SCIPexprintGrad(
1757 SCIP* scip, /**< SCIP data structure */
1758 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1759 SCIP_EXPR* expr, /**< expression */
1760 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1761 SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
1762 SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
1763 SCIP_Real* val, /**< buffer to store expression value */
1764 SCIP_Real* gradient /**< buffer to store expression gradient */
1765 )
1766{
1767 assert(expr != NULL);
1768 assert(exprintdata != NULL);
1769 assert(varvals != NULL || new_varvals == FALSE);
1770 assert(val != NULL);
1771 assert(gradient != NULL);
1772
1773 if( new_varvals )
1774 {
1775 SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
1776 }
1777 else
1778 *val = exprintdata->val;
1779
1780 size_t n = exprintdata->varidxs.size();
1781
1782 if( n == 0 )
1783 return SCIP_OKAY;
1784
1785 vector<double> jac;
1786 if( exprintdata->userexprs.empty() )
1787 jac = exprintdata->f.Jacobian(exprintdata->x);
1788 else
1789 {
1790 // atomic_userexpr::reverse() is not implemented yet (not even for p=0)
1791 // so force use of forward-eval for gradient (though that seems pretty inefficient)
1792 exprintdata->f.Forward(0, exprintdata->x);
1793 jac.resize(n);
1794 CppAD::JacobianFor(exprintdata->f, exprintdata->x, jac);
1795 }
1796
1797 for( size_t i = 0; i < n; ++i )
1798 gradient[exprintdata->varidxs[i]] = jac[i];
1799
1800#ifdef SCIP_DEBUG
1801 SCIPinfoMessage(scip, NULL, "Grad for ");
1802 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1803 SCIPinfoMessage(scip, NULL, "\nx = ");
1804 for( size_t i = 0; i < n; ++i )
1805 {
1806 SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
1807 }
1808 SCIPinfoMessage(scip, NULL, "\ngrad = ");
1809 for( size_t i = 0; i < n; ++i )
1810 {
1811 SCIPinfoMessage(scip, NULL, "\t %g", jac[i]);
1812 }
1813 SCIPinfoMessage(scip, NULL, "\n");
1814#endif
1815
1816 return SCIP_OKAY;
1817}
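/* Editorial sketch (illustrative, not part of the original source): value and gradient at a point;
 * the interpreter writes only the gradient entries of variables that appear in expr, so the buffer is
 * cleared first. The helper name and the nvars parameter are hypothetical.
 */
#if 0
static
SCIP_RETCODE exampleEvalGradient(
   SCIP*                 scip,               /**< SCIP data structure */
   SCIP_EXPRINT*         exprint,            /**< interpreter data structure */
   SCIP_EXPR*            expr,               /**< compiled expression */
   SCIP_EXPRINTDATA*     exprintdata,        /**< interpreter data from SCIPexprintCompile() */
   SCIP_Real*            varvals,            /**< point to evaluate, indexed by variable index */
   int                   nvars,              /**< length of varvals and gradient */
   SCIP_Real*            val,                /**< buffer to store value */
   SCIP_Real*            gradient            /**< buffer of length nvars to store gradient */
   )
{
   int i;

   for( i = 0; i < nvars; ++i )
      gradient[i] = 0.0;

   /* new_varvals = TRUE lets SCIPexprintGrad() call SCIPexprintEval() first (forward sweep or retape) */
   SCIP_CALL( SCIPexprintGrad(scip, exprint, expr, exprintdata, varvals, TRUE, val, gradient) );

   return SCIP_OKAY;
}
#endif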
1818
1819/** gives sparsity pattern of lower-triangular part of Hessian
1820 *
1821 * Since the AD code might need to do a forward sweep, variable values need to be passed in here.
1822 *
1823 * Result will have `(*colidxs)[i] <= (*rowidxs)[i]` for `i = 0..*nnz-1`.
1824 */
1825SCIP_RETCODE SCIPexprintHessianSparsity(
1826 SCIP* scip, /**< SCIP data structure */
1827 SCIP_EXPRINT* exprint, /**< interpreter data structure */
1828 SCIP_EXPR* expr, /**< expression */
1829 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1830 SCIP_Real* varvals, /**< values of variables */
1831 int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
1832 int** colidxs, /**< buffer to return array with column indices of Hessian elements */
1833 int* nnz /**< buffer to return length of arrays */
1834 )
1835{
1836 assert(expr != NULL);
1837 assert(exprintdata != NULL);
1838 assert(varvals != NULL);
1839 assert(rowidxs != NULL);
1840 assert(colidxs != NULL);
1841 assert(nnz != NULL);
1842
1843 if( exprintdata->hesrowidxs == NULL )
1844 {
1845 assert(exprintdata->hescolidxs == NULL);
1846 assert(exprintdata->hesvalues.size() == 0);
1847 assert(exprintdata->hesnnz == 0);
1848
1849 size_t n = exprintdata->varidxs.size();
1850 if( n == 0 )
1851 {
1852 *nnz = 0;
1853 return SCIP_OKAY;
1854 }
1855
1856 size_t nn = n*n;
1857
1858 if( exprintdata->need_retape_always )
1859 {
1860 // pretend dense
1861 exprintdata->hesnnz = (n * (n+1))/2;
1862 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1863 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1864
1865 int k = 0;
1866 for( size_t i = 0; i < n; ++i )
1867 for( size_t j = 0; j <= i; ++j )
1868 {
1869 exprintdata->hesrowidxs[k] = exprintdata->varidxs[i];
1870 exprintdata->hescolidxs[k] = exprintdata->varidxs[j];
1871 k++;
1872 }
1873
1874#ifdef SCIP_DEBUG
1875 SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1876 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1877 SCIPinfoMessage(scip, NULL, ": all elements, due to discontinuities\n");
1878#endif
1879
1880 *rowidxs = exprintdata->hesrowidxs;
1881 *colidxs = exprintdata->hescolidxs;
1882 *nnz = exprintdata->hesnnz;
1883
1884 return SCIP_OKAY;
1885 }
1886
1887 if( exprintdata->need_retape )
1888 {
1889 SCIP_Real val;
1890 SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, &val) );
1891 } /*lint !e438*/
1892
1893 // following https://github.com/coin-or/CppAD/blob/20180000.0/cppad_ipopt/src/vec_fun_pattern.cpp
1894
1895 SCIPdebugMessage("calling ForSparseJac\n");
1896
1897 vector<bool> r(nn, false);
1898 for( size_t i = 0; i < n; ++i )
1899 r[i*n+i] = true;
1900 (void) exprintdata->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
1901
1902 SCIPdebugMessage("calling RevSparseHes\n");
1903
1904 // this was originally
1905 // vector<bool> s(1, true);
1906 // hessparsity = exprintdata->f.RevSparseHes(n, s);
1907 // RevSparseHes is just calling RevSparseHesCase
1908 // to avoid copying hessparsity, call RevSparseHesCase directly
1909 vector<bool> hessparsity;
1910 exprintdata->f.RevSparseHesCase(true, false, n, vector<bool>(1, true), hessparsity);
1911
1912 // count number of hessian elements and setup hessparsity_pattern
1913 exprintdata->hessparsity_pattern.resize(0, 0); // clear old data, if any
1914 exprintdata->hessparsity_pattern.resize(n, n);
1915 size_t hesnnz_full = 0; // number of nonzeros in full matrix, that is, not only lower-diagonal
1916 for( size_t i = 0; i < nn; ++i )
1917 if( hessparsity[i] )
1918 {
1919 size_t row = i / n;
1920 size_t col = i % n;
1921
1922 ++hesnnz_full;
1923
1924 if( col > row )
1925 continue;
1926 ++exprintdata->hesnnz;
1927 }
1928
1929 // hessian sparsity in sparse form
1930 // - hesrowidxs,hescolidxs are nonzero entries in the lower-diagonal of the Hessian and are returned to the caller; indices are variable indices as in expression
1931 // - hessparsity_row,hessparsity_col are nonzero entries in the full Hessian and are used in SCIPexprintHessian(); indices are 0..n-1 (n=dimension of f)
1932 // - hessparsity_pattern is the same information as hessparsity_row,hessparsity_col, but stored in different form
1933 // originally we were calling CppAD::local::sparsity_user2internal(exprintdata->hessparsity_pattern, exprintdata->hessparsity, n, n, false, ""),
1934 // which was again looping over hessparsity, so this is included into one loop here
1935 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1936 SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1937
1938 exprintdata->hessparsity_row.resize(hesnnz_full);
1939 exprintdata->hessparsity_col.resize(hesnnz_full);
1940
1941 for( size_t i = 0, j = 0, k = 0; i < nn; ++i )
1942 if( hessparsity[i] )
1943 {
1944 size_t row = i / n;
1945 size_t col = i % n;
1946
1947 if( (size_t)exprintdata->hesnnz <= nn/4 )
1948 {
1949 // we need hessparsity_pattern only if SCIPexprintHessian is doing a sparse hessian eval; nn/4 is the threshold for that
1950 exprintdata->hessparsity_pattern.add_element(row, col);
1951 }
1952
1953 if( col > row )
1954 {
1955 // add above-diagonal elements into end part of hessparsity_row/col, so we have a 1:1 correspondence
1956 // between hessparsity_row/col and hesrowidxs/hescolidxs
1957 assert(exprintdata->hesnnz + k < hesnnz_full);
1958 exprintdata->hessparsity_row[exprintdata->hesnnz + k] = row;
1959 exprintdata->hessparsity_col[exprintdata->hesnnz + k] = col;
1960 ++k;
1961 continue;
1962 }
1963
1964 exprintdata->hessparsity_row[j] = row;
1965 exprintdata->hessparsity_col[j] = col;
1966
1967 assert(j < (size_t)exprintdata->hesnnz);
1968 exprintdata->hesrowidxs[j] = exprintdata->varidxs[row];
1969 exprintdata->hescolidxs[j] = exprintdata->varidxs[col];
1970 ++j;
1971 }
1972
1973#ifdef SCIP_DEBUG
1974 SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1975 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1976 SCIPinfoMessage(scip, NULL, ":");
1977 for( int i = 0; i < exprintdata->hesnnz; ++i )
1978 {
1979 SCIPinfoMessage(scip, NULL, " (%d,%d)", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i]);
1980 }
1981 SCIPinfoMessage(scip, NULL, "\n");
1982#endif
1983 }
1984
1985 *rowidxs = exprintdata->hesrowidxs;
1986 *colidxs = exprintdata->hescolidxs;
1987 *nnz = exprintdata->hesnnz;
1988
1989 return SCIP_OKAY;
1990}
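/* Editorial note (illustrative example, not part of the original source): for f = x0^2 + x0*x2 over
 * variable indices {0,2}, the routine above would return *nnz = 2 with (rowidx,colidx) pairs (0,0) and (2,0),
 * i.e., only the lower-triangular part of the Hessian, with indices referring to the variable indices
 * used in the expression.
 */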
1991
1992/** computes value and Hessian of an expression
1993 *
1994 * Returned arrays `rowidxs` and `colidxs` and number of elements `nnz` are the same as given by SCIPexprintHessianSparsity().
1995 * Returned array `hessianvals` will contain the corresponding Hessian elements.
1996 */
1997SCIP_RETCODE SCIPexprintHessian(
1998 SCIP* scip, /**< SCIP data structure */
1999 SCIP_EXPRINT* exprint, /**< interpreter data structure */
2000 SCIP_EXPR* expr, /**< expression */
2001 SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
2002 SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2003 SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2004 SCIP_Real* val, /**< buffer to store function value */
2005 int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
2006 int** colidxs, /**< buffer to return array with column indices of Hessian elements */
2007 SCIP_Real** hessianvals, /**< buffer to return array with Hessian elements */
2008 int* nnz /**< buffer to return length of arrays */
2009 )
2010{
2011 assert(expr != NULL);
2012 assert(exprintdata != NULL);
2013
2014 if( exprintdata->hesrowidxs == NULL )
2015 {
2016 /* setup sparsity if not done yet */
2017 int dummy1;
2018 int* dummy2;
2019
2020 assert(exprintdata->hescolidxs == NULL);
2021 assert(exprintdata->hesvalues.size() == 0);
2022 assert(exprintdata->hesnnz == 0);
2023
2024 SCIP_CALL( SCIPexprintHessianSparsity(scip, exprint, expr, exprintdata, varvals, &dummy2, &dummy2, &dummy1) );
2025
2026 new_varvals = FALSE;
2027 }
2028
2029 if( new_varvals )
2030 {
2031 SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
2032 }
2033 else
2034 *val = exprintdata->val;
2035
2036 size_t n = exprintdata->varidxs.size();
2037
2038 // eval hessian; if constant, then only if not evaluated yet (hesvalues has size 0, but hesnnz > 0)
2039 if( n > 0 && (!exprintdata->hesconstant || exprintdata->hesvalues.size() < (size_t)exprintdata->hesnnz) )
2040 {
2041 size_t nn = n*n;
2042
2043 if( exprintdata->hesvalues.size() == 0 )
2044 {
2045 // using here hessparsity_row.size() instead of hesnnz as we want to pass hesvalues to CppAD and it prefers to calculate a full Hessian
2046 exprintdata->hesvalues.resize(exprintdata->hessparsity_row.size());
2047 }
2048
2049 // these use reverse mode
2050 if( (size_t)exprintdata->hesnnz > nn/4 )
2051 {
2052 // use a dense Hessian if there are many nonzero elements (approx. more than half of the lower-triangular part, which is all that hesnnz counts)
2053 // because it seems faster than the sparse Hessian if there isn't actually much sparsity
2054 vector<double> hess = exprintdata->f.Hessian(exprintdata->x, 0);
2055 for( int i = 0; i < exprintdata->hesnnz; ++i )
2056 exprintdata->hesvalues[i] = hess[exprintdata->hessparsity_row[i] * n + exprintdata->hessparsity_col[i]];
2057 }
2058 else
2059 {
2060 // originally, this was hess = exprintdata->f.SparseHessian(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity),
2061 // where hess was a dense nxn matrix as in the case above
2062 // to reuse the coloring of the sparsity pattern and use also a sparse matrix for the Hessian values, we now call SparseHessianCompute directly
2063 exprintdata->f.SparseHessianCompute(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity_pattern, exprintdata->hessparsity_row, exprintdata->hessparsity_col, exprintdata->hesvalues, exprintdata->heswork);
2064
2065#ifdef NDEBUG
2066 // hessparsity_row, hessparsity_col, hessparsity_pattern are no longer used by SparseHessianCompute after coloring has been computed in first call, except for some asserts, so we free some mem
2067 exprintdata->hessparsity_row.clear();
2068 exprintdata->hessparsity_col.clear();
2069 exprintdata->hessparsity_pattern.resize(0,0);
2070#endif
2071 }
2072 }
2073
2074#ifdef SCIP_DEBUG
2075 SCIPinfoMessage(scip, NULL, "Hessian for ");
2076 SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
2077 SCIPinfoMessage(scip, NULL, "\nat x = ");
2078 for( size_t i = 0; i < n; ++i )
2079 {
2080 SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
2081 }
2082 SCIPinfoMessage(scip, NULL, "\nis ");
2083 for( int i = 0; i < exprintdata->hesnnz; ++i )
2084 {
2085 SCIPinfoMessage(scip, NULL, " (%d,%d)=%g", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i], exprintdata->hesvalues[i]);
2086 }
2087 SCIPinfoMessage(scip, NULL, "\n");
2088#endif
2089
2090 *rowidxs = exprintdata->hesrowidxs;
2091 *colidxs = exprintdata->hescolidxs;
2092 *hessianvals = exprintdata->hesvalues.data();
2093 *nnz = exprintdata->hesnnz;
2094
2095 return SCIP_OKAY;
2096}
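/* Editorial sketch (illustrative, not part of the original source): assembling a dense symmetric Hessian
 * from the sparse lower-triangular output of SCIPexprintHessian(); the helper name and the nvars parameter
 * are hypothetical, and nvars must be larger than every variable index used in expr.
 */
#if 0
static
SCIP_RETCODE exampleDenseHessian(
   SCIP*                 scip,               /**< SCIP data structure */
   SCIP_EXPRINT*         exprint,            /**< interpreter data structure */
   SCIP_EXPR*            expr,               /**< compiled expression */
   SCIP_EXPRINTDATA*     exprintdata,        /**< interpreter data from SCIPexprintCompile() */
   SCIP_Real*            varvals,            /**< point to evaluate, indexed by variable index */
   int                   nvars,              /**< dimension of the dense Hessian */
   SCIP_Real*            val,                /**< buffer to store function value */
   SCIP_Real*            dense               /**< buffer of length nvars*nvars, filled row-major */
   )
{
   int* rowidxs;
   int* colidxs;
   SCIP_Real* hessianvals;
   int nnz;
   int i;

   SCIP_CALL( SCIPexprintHessian(scip, exprint, expr, exprintdata, varvals, TRUE, val,
         &rowidxs, &colidxs, &hessianvals, &nnz) );

   for( i = 0; i < nvars*nvars; ++i )
      dense[i] = 0.0;

   /* only the lower-triangular part is returned (colidxs[i] <= rowidxs[i]), so mirror each entry */
   for( i = 0; i < nnz; ++i )
   {
      dense[rowidxs[i]*nvars + colidxs[i]] = hessianvals[i];
      dense[colidxs[i]*nvars + rowidxs[i]] = hessianvals[i];
   }

   return SCIP_OKAY;
}
#endif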