<  *  | *** >

Parameters 

  • desc parameter
  • set

-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success

-- ###########################
-- RUNNING clear.sh

/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/

-- ###########################
-- RUNNING doc_data_init.sql
create col_type t_site_id as text;
reponse
success

create col_type t_dept_id as text;
reponse
success

create col_type t_item_id as text;
reponse
success

create col_type t_customer_id as text;
reponse
success

create col_type t_date as text;
reponse
success

create col_type t_customer_info as text;
reponse
success

create col_type end_user as text;
reponse
success

create merge table items( item_id t_item_id, art_label text, dept t_dept_id, avg_week_sales number, sales_price number);
reponse
success

create merge table customers( customer_id t_customer_id, customer_name text);
reponse
success

create table item_tags( item_id t_item_id, tag text);
reponse
success

create table fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success

create table item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success

create big table sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number, line_id text, packaging_id t_item_id);
reponse
success

create big table inventory( item_id t_item_id, inv_qty number);
reponse
success

create view v_items as select * from items;
reponse
success

create view v_item_tags as select * from item_tags;
reponse
success

create view v_fidelity_cards as select * from fidelity_cards;
reponse
success

create view v_item_customer_infos as select * from item_customer_infos;
reponse
success

create view v_sales as select * from sales, items, customers where items.item_id=sales.item_id and customers.customer_id=sales.customer_id;
reponse
success

create view v_inventory as select * from inventory, items where items.item_id=inventory.item_id;
reponse
success

insert into items values('artA','the article A','dept #1',10,1.5);
reponse
success

insert into items values('artB','the article B','dept #2',10,3.2);
reponse
success

insert into items values('box1','a box','packaging',10,0);
reponse
success

insert into customers values('C1','customer #1')('C2','customer #2');
reponse
success

insert into item_tags values('artA','tag #1');
reponse
success

insert into item_tags values('artA','tag #2');
reponse
success

insert into fidelity_cards values('C1','SILVER','20191201','20191231');
reponse
success

insert into fidelity_cards values('C1','GOLD','20201201','20201231');
reponse
success

insert into item_customer_infos values('C1','artA','FREQUENT BUYER of artA in 2019','20190101','20191231');
reponse
success

insert into item_customer_infos values('C1','artB','FREQUENT BUYER of artB in 2020','20200101','20201231');
reponse
success

insert into item_customer_infos values('C2','artB','FREQUENT BUYER of artB in 2020','20200101','20201231');
reponse
success

insert into sales values('artA','C1','20191231',5,'ID01','box1');
reponse
success

insert into sales values('artB','C2','20200102',6,'ID02','');
reponse
success

insert into sales values('artB','C1','20191231',4,'ID03','');
reponse
success

insert into sales values('artB','C2','20200102',7,'ID04','box1');
reponse
success

insert into sales values('artC','C1','20200102',8,'ID05','');
reponse
success

insert into sales values('artA','C1','20191231',5,'ID06','box1');
reponse
success

insert into sales values('artA','C2','20191231',5,'ID07','box1');
reponse
success

insert into inventory values('artA',32);
reponse
success

insert into inventory values('artC',12);
reponse
success

refresh dirty view;
reponse
success

-- ###########################
-- RUNNING doc_parameters.sql
--

Parameters

desc parameter

desc <|parameter> <|verbose>
--ACCEPT_DIFF_START
--show non default parameter names
desc callback where(2,'parameter');
object_nameobject_type
TCP_PORT_TRANSACparameter
LOG_VERBOSEparameter
CPU_COUNTparameter
SLEEP_AFTER_SQLparameter
ARRAY_BLOCK_SIZE_BIGparameter
ARRAY_BLOCK_SIZE_SMALLparameter
PROD_SERVERparameter
MAX_PERF_USE_COMPRESSIONparameter
PARETO_LIMITparameter
PARTITION_LINE_COUNTparameter
CACHEparameter
MAX_PERF_CHANGE_COUNTparameter
INIT_FILE_PATHparameter

--show non default parameters information
desc parameter;
param_namecurrent_valuecommentis_defaultdefault_value
TCP_PORT_TRANSAC3319integer, SB listening port for transaction moden-1
LOG_VERBOSEyy/n, y: increase verbosity in stormbase.lognn
CPU_COUNT10integer, number of threads that SB will create for most operations (select, refresh, insert)n20
SLEEP_AFTER_SQLny/n, y: a pause of 0.1 sec is added after each sql executionny
ARRAY_BLOCK_SIZE_BIG2integer, array size in B, this parameter should not be modified if there is already datan10000
ARRAY_BLOCK_SIZE_SMALL2integer, small array size in B, this parameter should not be modified if there is already datan100
PROD_SERVERny/n, n: reduces SB internal memory for non prod serverny
MAX_PERF_USE_COMPRESSIONyy/n, y: SB will compress (put several ival in one integer) columns in memorynn
PARETO_LIMIT80 %float<=1, during max_perf ival are split between rare and often value, the x% more frequent ivals are the often ivaln100 %
PARTITION_LINE_COUNT2integer, maximum #line in a table partition (SB will create a new partition during insert if needed), 0 means no partitionn3000000000
CACHEninteger, SB listening port for transaction modeny
MAX_PERF_CHANGE_COUNTny/n, y: count becomes countdistinctny
INIT_FILE_PATHinit.sbstring, path of script executed on startup and after "refresh dirty view"n./init.sb

--show all parameters information
desc parameter verbose;
param_namecurrent_valuecommentis_defaultdefault_value
TCP_PORT2219integer, SB listening porty2219
TCP_PORT_TRANSAC3319integer, SB listening port for transaction moden-1
LOG_VERBOSEyy/n, y: increase verbosity in stormbase.lognn
GENERATE_REPLAYny/n, y: sql are written in stormbase_replay.logyn
CPU_COUNT10integer, number of threads that SB will create for most operations (select, refresh, insert)n20
SLEEP_AFTER_SQLny/n, y: a pause of 0.1 sec is added after each sql executionny
ACTIVITY_SECURITYny/n, y: a dynamic pause (from 0 to 10 sec depending on #active sessions) is added before select executionyn
SESSION_SECURITYyy/n, y: no more connections is accepted if #active sessions >= 30yy
CSV_FILES_DIR../STORAGE/CSVstring, CSV file location used in "insert from file" when file path does not start with /y../STORAGE/CSV
INTERNAL_FILES_DIR../STORAGE/INTstring, SB internal storage locationy../STORAGE/INT
TMP_FILES_DIR../STORAGE/TMPstring, SB temporary storage locationy../STORAGE/TMP
MAX_PERF_FILES_DIR../STORAGE/MAX_PERFstring, SB "memory image" storage locationy../STORAGE/MAX_PERF
TRANSAC_FILES_DIR../STORAGE/TRANSACstring, SB internal storage location for transaction modey../STORAGE/TRANSAC
CELL_BLOCK_INCREMENT_SIZE_GB0.10 GBfloat, CELL size, this parameter should not be modified if there is already datay0.10 GB
ARRAY_BLOCK_SIZE_BIG2integer, array size in B, this parameter should not be modified if there is already datan10000
ARRAY_BLOCK_SIZE_SMALL2integer, small array size in B, this parameter should not be modified if there is already datan100
MEMORY_MAX_GB20integer, SB allows defragmentation if "CELL memory" exceed this numbery20
UNFRAG_BLOCK_PER_BLOCKny/n, y : SB will defrag block per block hence defrag will be longer but it won't consume more memoryyn
SPARSE_TEXT_KEEP10000000integer, number of values than can be displayed (last inserted) for sparse_text columnsy10000000
PROD_SERVERny/n, n: reduces SB internal memory for non prod serverny
IN_MEMORY_BIG_TABLES,*,string, list (comma separated) of in memory big tables or *y,*,
NOT_IN_MEMORY_BIG_TABLES,-1,string, list (comma separated) of "not in memory" big tablesy,-1,
NOT_IN_MEMORY_COLUMNS,-1,string, list (comma separated) of "not in memory columns" big tablesy,-1,
MAX_PERF_INDEX_FOR_NOT_IN_MEMORY_BIG_TABLESny/n, y: compute indexes for non in memory tablesyn
MAX_PERF_INDEX_ONLYny/n, y: indexes are in memory but data is on diskyn
NOT_INDEXED_DIMENSIONS,-1,string, list (comma separated) of dimensions on which indexes won't be createdy,-1,
MAX_PERF_FILTER_COL_TYPE-1string, col_type used to reduce amount of data in memory (for sandbox environments)y-1
MAX_PERF_FILTER_FN-1string, Lua boolean function used to reduce amount of data in memory (for sandbox environments)y-1
MAX_PERF_USE_COMPRESSIONyy/n, y: SB will compress (put several ival in one integer) columns in memorynn
PARETO_LIMIT80 %float<=1, during max_perf ival are split between rare and often value, the x% more frequent ivals are the often ivaln100 %
MAX_PERF_USE_IVAL_OFTEN_LIMIT60000integer, pareto ival are not computed during max_perf if the ival sparsity exceed this numbery60000
PIN_MEMORYny/n, y: memory is pin hence virtual memory becomes residentyn
MAX_PERF_COMPUTE_BIG_TABLES_JOINyy/n, n: joined columns in big tables are not stored in memoryyy
MAX_COUNT_DISTINCT_THREAD_SIZE_GB30integer, SB will return an error if a countdistinct requires too much memory, the limit is per thread (see CPU_COUNT)y30
QUERY_TABLE_ALLOWEDny/n, y: select from table allowed (non in memory volatile views will be created on top of each table)yn
SKIP_REFRESH_FOR_NOT_IN_MEMORY_BIG_TABLESny/n, y: (NOT_)IN_MEMORY_BIG_TABLES parameter applies also to view refresh (not only to MAX_PERF)yn
HEADERyy/n, y: CSV files have a header lineyy
FLOAT_PRECISION3integer, number of digits kept when a number is insertedy3
ALLOW_ORPHANyy/n, y: SB will not refresh a view if its big tables has an orphan value (not found in dimension)yy
NEW_COLUMN_NULL_STRINGNO_DATAstring, default value when a column is added to a table or when a column is not provided in a CSV fileyNO_DATA
FILE_SEPARATOR,one character, CSV file separatory,
INSERT_FORCE_VALUE.col_type_nameforced_valuestring: forced value during insert of this col_type in a big table, unset means "not active"yforced_value
SHOW_LINE100000integer, progression is log every x lines during insertsy100000
PARTITION_LINE_COUNT2integer, maximum #line in a table partition (SB will create a new partition during insert if needed), 0 means no partitionn3000000000
FILE_LOAD_WITHOUT_BUFFERyy/n, y: insert from file does not use buffer, hence it is faster but the full file is loaded in memoryyy
ESCAPE_SEPARATOR_INSIDE_DOUBLE_QUOTEny/n, y: ,"abc,def", in a csv file with , separator is interpreted as one value: abc,defyn
SKIP_LINE_KOny/n, y: bad lines in csv are skipped, n: bad lines generate error in insertyn
CACHEninteger, SB listening port for transaction modeny
MAX_PERF_USE_WHERE_INDEXyy/n, y: index are created on dimensions for each view and put in memoryyy
USE_INDEX_LIMIT2integer (>0 and <100), if a where clause targets less than x% of the lines then index scan is doney2
USE_INDEX_WITH_SORT_LIMIT10integer, if a where clause targets less than #lines/this_parameter of the lines then index scan is done in sequence contexty10
REPLACE_COUNTDISTINCT_BY_COUNTSEQUENCEny/n, countdistinct are replaced by countsequence for sequence columnsyn
MAX_PERF_CHANGE_COUNTny/n, y: count becomes countdistinctny
MAX_GBY_BOOSTER_LEN200000integer, a select with group by will be computed with optimum performance if the combined sparsity of the group by columns doesn't exceed this number, otherwise it will be slowery200000
SEQUENCE_COLUMNS,-1,string, list (comma separated) of sequence columnsy,-1,
SEQUENCE_COLUMN_COMPANIONS,-1,string, list (comma separated) of sequence columns companionsy,-1,
ALLOW_WHERE_ERRORny/n, y: where clause on bad column is ignored in select and does not return an erroryn
ALLOW_GROUP_BY_ERRORny/n, y: group by clause on bad column is replaced by * in select resultset and does not return an erroryn
ALLOW_EXP_ERRORny/n, y: sum/count/etc.. on bad column is replaced by 0 in select resultset and does not return an erroryn
ACTIVATE_NEW_DEVny/n, used to test a new dev on demandyn
INIT_FILE_PATHinit.sbstring, path of script executed on startup and after "refresh dirty view"n./init.sb
COMPUTED_COLUMNS,-1,string, list (comma separated) of computed columnsy,-1,
SO_FILE_NAMEstormbase.sostring, file under _SO_LINUX that contains the custom C codeystormbase.so
DEBUGny/n, y: triggers debug mode (dev only)yn
CELL_BLOCK_INCREMENT_SIZE_B1000 Bfloat, CELL size, this parameter should not be modified if there is already datay1000 B

--ACCEPT_DIFF_END

set

set param_name='param_value'
set param_name=default
set LOG_VERBOSE='n';
reponse
success

desc parameter verbose callback where(1,'LOG_VERBOSE');
param_namecurrent_valuecommentis_defaultdefault_value
LOG_VERBOSEny/n, y: increase verbosity in stormbase.logyn

set LOG_VERBOSE='y';
reponse
success

desc parameter verbose callback where(1,'LOG_VERBOSE');
param_namecurrent_valuecommentis_defaultdefault_value
LOG_VERBOSEyy/n, y: increase verbosity in stormbase.lognn

set LOG_VERBOSE=default;
reponse
success

desc parameter verbose callback where(1,'LOG_VERBOSE');
param_namecurrent_valuecommentis_defaultdefault_value
LOG_VERBOSEny/n, y: increase verbosity in stormbase.logyn

--error management
continue_on_error(202,203,204);
reponse
success

set LOG_VERBOSE='bad_value';
reponse
error 203 (continue): invalid parameter value

set BAD_PARAMETER='value';
reponse
error 202 (continue): invalid parameter name

set TCP_PORT_TRANSAC='2324';
reponse
error 204 (continue): parameter can only be set in stormbase.conf

stop_on_error;
reponse
success

-- ###########################
-- RUNNING shutdown.sql
shutdown;