

-- ########################### -- RUNNING _v1.17.10_F28stop_on_error.sql stop_on_error;
reponse
success
-- ########################### -- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ########################### -- RUNNING _v1.17.10_F28doc_data_init.sql create col_type t_site_id as text;
reponse
success
create col_type t_dept_id as text;
reponse
success
create col_type t_item_id as text;
reponse
success
create col_type t_customer_id as text;
reponse
success
create col_type t_date as text;
reponse
success
create col_type t_customer_info as text;
reponse
success
create col_type end_user as text;
reponse
success
create merge table items( item_id t_item_id, art_label text, dept t_dept_id, avg_week_sales number, sales_price number);
reponse
success
create merge table customers( customer_id t_customer_id, customer_name text);
reponse
success
create table item_tags( item_id t_item_id, tag text);
reponse
success
create table fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create table item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create big table sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number, line_id text, packaging_id t_item_id);
reponse
success
create big table inventory( item_id t_item_id, inv_qty number);
reponse
success
create view v_items as select * from items;
reponse
success
create view v_item_tags as select * from item_tags;
reponse
success
create view v_fidelity_cards as select * from fidelity_cards;
reponse
success
create view v_item_customer_infos as select * from item_customer_infos;
reponse
success
create view v_sales as select * from sales, items, customers where items.item_id=sales.item_id and customers.customer_id=sales.customer_id;
reponse
success
create view v_inventory as select * from inventory, items where items.item_id=inventory.item_id;
reponse
success
insert into items values('artA','the article A','dept #1',10,1.5);
reponse
success
insert into items values('artB','the article B','dept #2',10,3.2);
reponse
success
insert into items values('box1','a box','packaging',10,0);
reponse
success
insert into customers values('C1','customer #1')('C2','customer #2');
reponse
success
insert into item_tags values('artA','tag #1');
reponse
success
insert into item_tags values('artA','tag #2');
reponse
success
insert into fidelity_cards values('C1','SILVER','20191201','20191231');
reponse
success
insert into fidelity_cards values('C1','GOLD','20201201','20201231');
reponse
success
insert into item_customer_infos values('C1','artA','FREQUENT BUYER of artA in 2019','20190101','20191231');
reponse
success
insert into item_customer_infos values('C1','artB','FREQUENT BUYER of artB in 2020','20200101','20201231');
reponse
success
insert into item_customer_infos values('C2','artB','FREQUENT BUYER of artB in 2020','20200101','20201231');
reponse
success
insert into sales values('artA','C1','20191231',5,'ID01','box1');
reponse
success
insert into sales values('artB','C2','20200102',6,'ID02','');
reponse
success
insert into sales values('artB','C1','20191231',4,'ID03','');
reponse
success
insert into sales values('artB','C2','20200102',7,'ID04','box1');
reponse
success
insert into sales values('artC','C1','20200102',8,'ID05','');
reponse
success
insert into sales values('artA','C1','20191231',5,'ID06','box1');
reponse
success
insert into sales values('artA','C2','20191231',5,'ID07','box1');
reponse
success
insert into inventory values('artA',32);
reponse
success
insert into inventory values('artC',12);
reponse
success
refresh dirty view;
reponse
success
-- ########################### -- RUNNING _v1.17.10_F28doc_others.sql -- desc parameter verbose cb where(1,'QUERY_TABLE_ALLOWED');
param_name current_value comment is_default default_value
QUERY_TABLE_ALLOWED n y/n, y: select from table allowed (non in memory volatile views will be created on top of each table) y n
continue_on_error(49);
reponse
success
select * from sales;
reponse
error 49 (continue): <text> is not a view
stop_on_error;
reponse
success
--the volatile views used to query table are created at startup, hence SB must be saved/bounced to take into account QUERY_TABLE_ALLOWED system '(echo "QUERY_TABLE_ALLOWED:y">>stormbase.conf)';
reponse
save;
reponse
success
bounce; desc parameter verbose cb where(1,'QUERY_TABLE_ALLOWED');
param_name current_value comment is_default default_value
QUERY_TABLE_ALLOWED y y/n, y: select from table allowed (non in memory volatile views will be created on top of each table) n n
--volatile views are created desc view cb like(1,'volatile');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
volatile#items 1 3 n y y 10 0 0 0 0 0
volatile#customers 1 2 n y y 10 0 0 0 0 0
volatile#item_tags 1 2 n y y 10 0 0 0 0 0
volatile#fidelity_cards 1 2 n y y 10 0 0 0 0 0
volatile#item_customer_infos 1 3 n y y 10 0 0 0 0 0
volatile#sales 1 7 n y y 10 0 0 0 0 0
volatile#inventory 1 2 n y y 10 0 0 0 0 0
select * from sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
artC C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
select sales.* from v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
# C1 20200102 8 ID05 #
set skip_refresh_for_not_in_memory_big_tables='y';
reponse
success
set in_memory_big_tables='-1';
reponse
success
refresh_force dirty view;
reponse
success
select sales.* from v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
select * from sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
artC C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
save;
reponse
success
--volatile views are not saved, they are created at startup (or during table creation) system 'ls -ltr ../STORAGE/INT/*|grep volatile|wc -l';
reponse
0
system 'cat ../STORAGE/INT/_DESC|grep volatile|wc -l';
reponse
0
system '(echo "QUERY_TABLE_ALLOWED:n">>stormbase.conf)';
reponse
save;
reponse
success
bounce; set 'INSERT_FORCE_VALUE.t_item_id'='-1';
reponse
success
desc parameter verbose cb like(1,'INSERT_FORCE_VALUE.T_ITEM_ID');
param_name current_value comment is_default default_value
INSERT_FORCE_VALUE.T_ITEM_ID -1 string: forced value during insert of this col_type in a big table, unset means "not active" y -1
insert into sales values('artZ','C2','20191231',5,'ID_TEST','box1');
reponse
success
refresh dirty view;
reponse
success
select sales#item_id from v_sales and line_id='ID_TEST';
sales#item_id
-1
set 'INSERT_FORCE_VALUE.t_item_id'='unset';
reponse
success
desc parameter verbose cb like(1,'INSERT_FORCE_VALUE.T_ITEM_ID');
param_name current_value comment is_default default_value
INSERT_FORCE_VALUE.T_ITEM_ID unset string: forced value during insert of this col_type in a big table, unset means "not active" n -1
insert into sales values('artZ','C2','20191231',5,'ID_TEST','box1');
reponse
success
refresh dirty view;
reponse
success
select sales#item_id from v_sales and line_id='ID_TEST';
sales#item_id
-1
artZ
loop table_name(columns) function sb_export('path/to/directory','file_tag',thread_count) ... loop sales(*) function sb_export('./foo','ABC',4);
LOOP_RESULT
DONE
--ACCEPT_DIFF_START__v1.17.10_F28 system 'for f in `ls ./foo/*.csv`; do echo "### $f ###"; cat $f; done';
reponse
### ./foo/ABC_001.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
artA,C1,20191231,5.000,ID01,box1
artC,C1,20200102,8.000,ID05,
-1,C2,20191231,5.000,ID_TEST,-1
### ./foo/ABC_002.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
artB,C2,20200102,6.000,ID02,
artB,C1,20191231,4.000,ID03,
artA,C1,20191231,5.000,ID06,box1
artA,C2,20191231,5.000,ID07,box1
### ./foo/ABC_003.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
### ./foo/ABC_004.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
artB,C2,20200102,7.000,ID04,box1
artZ,C2,20191231,5.000,ID_TEST,box1
### ./foo/aa_001.csv ###
col
a1
a2
a3
### ./foo/bb_001.csv ###
col
a1
a2
a3
--ACCEPT_DIFF_END__v1.17.10_F28 continue_on_error(20);
reponse
success
drop view v_foo;
reponse
error 20 (continue): "<object_name>" is not an object or "<type_name>" is not correct
drop table foo;
reponse
error 20 (continue): "<object_name>" is not an object or "<type_name>" is not correct
refresh dirty view;
reponse
success
stop_on_error;
reponse
success
create table foo(col number,str text);
reponse
success
create view v_foo as select * from foo;
reponse
success
refresh dirty view;
reponse
success
insert into foo values(111.11,'A')(222.22,'B');
reponse
success
refresh dirty view;
reponse
success
select * from v_foo;
col str
111.110 A
222.220 B
desc table foo;
table_name column_name column_type col_type_name
foo col number sys#type#foo#col
foo str text sys#type#foo#str
set_text sys#type#foo#col;
reponse
success
desc table foo;
table_name column_name column_type col_type_name
foo col text sys#type#foo#col
foo str text sys#type#foo#str
select * from v_foo;
col str
111.110 A
222.220 B
insert into foo values('True','C');
reponse
success
refresh dirty view;
reponse
success
select * from v_foo;
col str
111.110 A
222.220 B
True C
-- create table foo(a text, b number); -- create view v_foo as select * from foo; --ival behaves "normally" insert into foo values('aa3','bb3');
reponse
success
insert into foo values('aa3','bb3');
reponse
success
insert into foo values('aa3','bb3');
reponse
success
refresh dirty view;
reponse
success
select col,ival(col),count(*) from v_foo group by col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
set_sparse_text sys#type#foo#col;
reponse
success
select col,ival(col),count(*) from v_foo group by col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
save;
reponse
success
--ACCEPT_DIFF_START__v1.17.10_F28 system 'ls -l ../STORAGE/INT/*.cell_array';
reponse
ls: ../STORAGE/INT/*.cell_array: No such file or directory
--bounce will finalize the migration bounce; system 'ls -l ../STORAGE/INT/*.cell_array';
reponse
-rw-r--r-- 1 philippe staff 117 10 jui 11:43 ../STORAGE/INT/0099_sys#type#foo#col.ival_to_val.cell_array
--ACCEPT_DIFF_END__v1.17.10_F28 system 'cat ../STORAGE/INT/*.cell_array';
reponse
0;0.000
1;0.000
2;0.000
3;0.000
4;0.000
5;0.000
6;0.000
7;0.000
8;0.000
9;0.000
10;111.110
11;222.220
12;True
13;aa3
-- select col,ival(col),count(*) from v_foo group by col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
--each insert gets a new ival insert into foo values('aa3','bb3')('aa3','bb3');
reponse
success
insert into foo values('aa4','bb4')('aa4','bb4');
reponse
success
refresh dirty view;
reponse
success
select col,ival(col),count(*) from v_foo group by col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
aa3 14 1
aa3 15 1
aa4 16 1
aa4 17 1
save;
reponse
success
--ACCEPT_DIFF_START__v1.17.10_F28 system 'ls -l ../STORAGE/INT/*.cell_array';
reponse
-rw-r--r-- 1 philippe staff 145 10 jui 11:43 ../STORAGE/INT/0099_sys#type#foo#col.ival_to_val.cell_array
system 'cat ../STORAGE/INT/*.cell_array';
reponse
0;0.000
1;0.000
2;0.000
3;0.000
4;0.000
5;0.000
6;0.000
7;0.000
8;0.000
9;0.000
10;111.110
11;222.220
12;True
13;aa3
14;aa3
15;aa3
16;aa4
17;aa4
--ACCEPT_DIFF_END__v1.17.10_F28 loop sales(sales_date,sales_qty) function fn_compute_info('my_info');
column_name abs_avg abs_min abs_max
sales_date nan nan nan
sales_qty 5.56 4.00 8.00
select sum(sales_qty),count(sales_qty),avg(sales_qty) from v_sales;
sum(sales_qty) count(sales_qty) avg(sales_qty)
50 9 5.556
continue_on_error(53,120,150);
reponse
success
select sales.* from v_sales and bad_column='1' and item_id='artA';
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
select * from v_items and v_fidelity_cards.card_label='bad_value';
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
with a as (select card_label,count(card_label) from v_fidelity_cards group by card_label) select * from v_items,a;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
stop_on_error;
reponse
success
set allow_where_error='y';
reponse
success
select sales.* from v_sales and bad_column='1' and item_id='artA';
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
select sales.* from v_sales and bad_column in ('1','2','3') and item_id='artA';
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
select sales.* from v_sales and gt(bad_column,1) and item_id='artA';
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
select sales.* from v_sales and bad_column in ('1','2','3') and item_id='artA' and bad_column in ('1','2','3') and item_id='artA' and gt(bad_column,1) and item_id='artA';
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
select * from v_items and bad_column='bad_value';
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
select * from v_items and v_fidelity_cards.card_label='bad_value';
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
with a as (select card_label,count(card_label) from v_fidelity_cards group by card_label) select * from v_items,a;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
continue_on_error(53);
reponse
success
select bad_column,count(*) from v_sales group by bad_column;
bad_column count(*)
* 9
stop_on_error;
reponse
success
set allow_group_by_error='y';
reponse
success
select count(*) from v_sales;
count(*)
9
select bad_column,count(*) from v_sales group by bad_column;
bad_column count(*)
* 9
select item_id,bad_column,item_id,count(*) from v_sales group by item_id,bad_column,item_id;
item_id bad_column item_id count(*)
artA * artA 3
artB * artB 3
# * # 3
continue_on_error(53);
reponse
success
select sum(sales_qty),sum(bad_column) from v_sales;
reponse
error 53 (continue): <column_name> not part of <view_name> definition
stop_on_error;
reponse
success
set allow_exp_error='y';
reponse
success
select sum(sales_qty),sum(bad_column) from v_sales;
sum(sales_qty) sum(bad_column)
50 0
--only one sb_parallel clause is authorized continue_on_error(206);
reponse
success
select sales.* from v_sales and sb_parallel(item_id) and sb_parallel(sales_date);
reponse
error 206 (continue): only one sb_parallel clause is allowed
stop_on_error;
reponse
success
set pareto_limit='1';
reponse
success
refresh_force dirty view;
reponse
success
select sales.* from v_sales and sb_parallel(item_id);
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
--without the sb_parallel there is no logic between item_id and thread_pos select p(item_id) from v_sales function fn_custom( '(void* c, U_INT thread_pos, U_INT iline, int key1){printf("## item_id ival: %d, thread_pos: %u\n",key1,thread_pos);return 0.;}', 'fn_key', 'fn_hello_world_before','fn_hello_world_after' ) ; item_id ival: 11, thread_pos: 0 item_id ival: 12, thread_pos: 0 item_id ival: 12, thread_pos: 0 item_id ival: 12, thread_pos: 0 item_id ival: 11, thread_pos: 0 item_id ival: 11, thread_pos: 0
txt
hello world
--with the sb_parallel one item_id receives one thread_pos select p(item_id) from v_sales function fn_custom( '(void* c, U_INT thread_pos, U_INT iline, int key1){printf("## item_id ival: %d, thread_pos: %u\n",key1,thread_pos);return 0.;}', 'fn_key', 'fn_hello_world_before','fn_hello_world_after' ) and sb_parallel(item_id) ; item_id ival: 11, thread_pos: 0 item_id ival: 11, thread_pos: 0 item_id ival: 11, thread_pos: 0 item_id ival: 12, thread_pos: 0 item_id ival: 12, thread_pos: 0 item_id ival: 12, thread_pos: 0
txt
hello world
loop sales(sales_date,sales_qty) function fn_compute_info('my_info');
column_name abs_avg abs_min abs_max
sales_date nan nan nan
sales_qty 5.56 4.00 8.00
select sales_qty from v_sales callback sort(1,'desc');
sales_qty
8
7
6
5
5
5
5
5
4
with a as (select avg(sales_qty) as v from v_sales function fn_store), del_count as (select count(sales_qty) as v from v_sales function fn_store and gte(math.abs(sales_qty),`a.v*1.1`)), kept_count as (select count(sales_qty) as v from v_sales function fn_store and lt(math.abs(sales_qty),`a.v*1.1`)), select del_count.v, kept_count.v, a.v*1.1 from no_view, * function fn_merge ;
del_count.v kept_count.v a.v*1.1
2 7 6.111
loop sales(sales_qty) function fn_smart_delete('my_info','1.1') update(item_id);
LOOP_RESULT
DONE
refresh dirty view;
reponse
success
select sales_qty from v_sales callback sort(1,'desc');
sales_qty
6
5
5
5
5
5
4
-- refresh a particular view set_dirty <the_view> desc view callback like(1,'v_sales');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 n n n 10 0 17 17 0 0
set_dirty v_sales;
reponse
success
desc view callback like(1,'v_sales');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 y n n 10 0 17 17 0 0
refresh dirty view;
reponse
success
desc view callback like(1,'v_sales');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 n n n 10 0 17 17 0 0
--but in a partition context you must specify the partitions save;
reponse
success
set PARTITION_LINE_COUNT=1;
reponse
success
insert into sales values('artA','C1','20191231',5,'_','box1');
reponse
success
insert into sales values('artA','C1','20191231',5,'_','box1');
reponse
success
insert into sales values('artA','C1','20191231',5,'_','box1');
reponse
success
refresh dirty view;
reponse
success
desc view callback like(1,'v_sales') sort(1,'asc');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 n n n 10 0 17 17 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00003 3 1 n n n 10 0 11 11 0 0
set_dirty v_sales;
reponse
success
set_dirty v_sales_#partition#_00002;
reponse
success
desc view callback like(1,'v_sales') sort(1,'asc');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 y n n 10 0 17 17 0 0
v_sales_#partition#_00001 3 1 y n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 1 y n n 10 0 11 11 0 0
v_sales_#partition#_00003 3 1 y n n 10 0 11 11 0 0
refresh dirty view;
reponse
success
desc view callback like(1,'v_sales') sort(1,'asc');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 n n n 10 0 17 17 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00003 3 1 n n n 10 0 11 11 0 0
bounce; -- backup 'the_directory' --ACCEPT_DIFF_START__v1.17.10_F28 system 'ls -l ../STORAGE/INT| wc -l';
reponse
302
save;
reponse
success
save;
reponse
success
system 'ls -l ../STORAGE/INT| wc -l';
reponse
304
system 'rm -Rf ./toto';
reponse
system 'mkdir ./toto';
reponse
backup './toto';
reponse
BACKUP OK
system 'ls -l ./toto| wc -l';
reponse
285
system 'ls -l ../STORAGE/INT| wc -l';
reponse
304
--ACCEPT_DIFF_END__v1.17.10_F28 bounce; -- --ACCEPT_DIFF_START__v1.17.10_F28 select now(), sysdate(), to_char(now(),'yyyymmdd') from no_view function fn_merge;
now() sysdate() to_char(now(),'yyyymmdd')
1749548672 1749548672 20250610
--ACCEPT_DIFF_END__v1.17.10_F28 select to_date('20200101','yyyymmdd') as d, to_char(line.d,'%a'), to_char(line.d,'%j'), from no_view function fn_merge;
d to_char(line.d,'%a') to_char(line.d,'%j')
1577833216 Wed 001
create table my_data (date_yyyymmdd text);
reponse
success
create view v_my_data as select * from my_data;
reponse
success
insert into my_data values('20191230')('20191231')('20200101')('20200102')('20200103')('20200104')('20200105');
reponse
success
insert into my_data values('20200107')('20200108')('20200109')('20200110')('20200111')('20200112');
reponse
success
refresh dirty view;
reponse
success
select * from v_my_data;
date_yyyymmdd
20191230
20191231
20200101
20200102
20200103
20200104
20200105
20200107
20200108
20200109
20200110
20200111
20200112
set log_verbose='y';
reponse
success
--working with week numbers (TODO) --#SB no_cache log_verbose with a as(select date_yyyymmdd,unique(date_yyyymmdd) as v from v_my_data function fn_store group by date_yyyymmdd) select col_type.val as yyyymmdd, to_date(a.v,'yyyymmdd') as d, -1 as prio_lag, 196 as keep_count, --20000103 is a Monday to_date('20000103','yyyymmdd')+line.prio_lag*24*3600 as prio_day, to_char(line.prio_day,'%a') as prio_dow, to_char(line.d,'%a') as d_dow, math.floor((now()-line.d)/24/3600) as delta_today, (line.d-line.prio_day)/24/3600 as delta, math.floor(line.delta/7) as delta_week, line.delta%7 as delta_dow, from no_view, a function fn_merge callback sub_select('group(delta_week) min(delta_dow)') add_text('keep_yn','if line.delta_today>line.keep_count then return "n" end if math.floor(line.min_delta_dow-line.delta_dow)==0 then return "y" else return "n" end') select_from('group(yyyymmdd) group(keep_yn) group(d_dow)') sort(1,'asc') ;
group_yyyymmdd group_keep_yn group_d_dow
20191230 n Mon
20191231 n Tue
20200101 n Wed
20200102 n Thu
20200103 n Fri
20200104 n Sat
20200105 n Sun
20200107 n Tue
20200108 n Wed
20200109 n Thu
20200110 n Fri
20200111 n Sat
20200112 n Sun
--build a calendar table (TODO) select `fn_calendar('20200101','20200230','yyyymmdd',{"%Y%m%d","%A","%B","%Y"})` as `"date"..sep().."week_day"..sep().."month_name"..sep().."full_year"`, from no_view function fn_merge export to tmp_file('calendar.csv') ; -- select .. from ... --no_cache: obvious --the explain tag tells SB to return the logs instead of the values --the idea is that the logs "explain" what SB does --note that without no_cache SB will "do nothing" (read result from cache) select --#SB no_cache explain * from v_sales ;
logs
--handy to understand errors continue_on_error(53);
reponse
success
--#SB explain select bad_column from v_sales;
logs
reponse
y
success
stop_on_error;
reponse
success
--log_verbose: obvious --explain is always used with log_verbose select --#SB no_cache explain log_verbose * from v_sales callback limit(10) ;
logs
--no_index: obvious select --#SB explain no_cache log_verbose no_index * from v_sales and dept like 'dept #1' callback like(1,'where_index|function') ;
logs
--use_index: obvious select --#SB explain no_cache log_verbose use_index * from v_sales and dept like 'dept #1' callback like(1,'where_index|function') ;
logs
--index read versus no_index (aka sequential read or full scan) read set PARETO_LIMIT='1';
reponse
success
set CPU_COUNT='1';
reponse
success
refresh_force dirty view;
reponse
success
--a no_index (full scan) reads all lines sequentially and filters select --#SB log_verbose no_index no_cache customer_id, line_id, sales#item_id from v_sales and item_id in ('artA','artB') ;
customer_id line_id sales#item_id
C1 ID01 artA
C2 ID02 artB
C1 ID03 artB
C1 ID06 artA
C2 ID07 artA
--an index read does that (lines corresponding to artA, then artB) select --#SB log_verbose use_index no_cache customer_id, line_id, sales#item_id from v_sales and item_id in ('artA','artB') ;
customer_id line_id sales#item_id
C1 ID01 artA
C1 ID06 artA
C2 ID07 artA
C2 ID02 artB
C1 ID03 artB
--SB's logic to do index or sequential read is this: --X=total #lines --Y=#lines targeted by index --SB will use the index if Y < X / USE_INDEX_LIMIT set USE_INDEX_LIMIT=1;
reponse
success
select --#SB log_verbose no_cache explain customer_id, line_id, sales#item_id from v_sales and item_id in ('artA','artB') callback like(1,'where_index') ;
logs
set USE_INDEX_LIMIT=10;
reponse
success
select --#SB log_verbose no_cache explain customer_id, line_id, sales#item_id from v_sales and item_id in ('artA','artB') callback like(1,'where_index') ;
logs
--if SB needs to sort the lines before (countsequence context, explained after) then USE_INDEX_WITH_SORT_LIMIT is used --SB will use the index if Y < X / USE_INDEX_WITH_SORT_LIMIT set USE_INDEX_LIMIT=1;
reponse
success
set USE_INDEX_WITH_SORT_LIMIT=1;
reponse
success
select --#SB log_verbose no_cache explain countsequence(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'where_index') ;
logs
set USE_INDEX_WITH_SORT_LIMIT=10;
reponse
success
select --#SB log_verbose no_cache explain countsequence(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'where_index') ;
logs
--countsequence(the_column) counts the number of value changes during reading countsequence can be an alternative to countdistinct !!! countsequence will return incorrect result if PARETO_LIMIT!=1 !!! select --#SB no_cache no_index customer_id, line_id, sales#item_id from v_sales and item_id in ('artA','artB') ;
customer_id line_id sales#item_id
C1 ID01 artA
C2 ID02 artB
C1 ID03 artB
C1 ID06 artA
C2 ID07 artA
select --#SB no_cache no_index countsequence(customer_id) from v_sales and item_id in ('artA','artB') ;
countsequence(customer_id)
4
--same result (of course) with index select --#SB no_cache use_index countsequence(customer_id) from v_sales and item_id in ('artA','artB') ;
countsequence(customer_id)
4
--but when an index read is done, SB must sort the lines to get correct result select --#SB log_verbose use_index no_cache explain countsequence(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'sequence') ;
logs
save;
reponse
success
select customer_id,count(*) from v_sales group by customer_id;
customer_id count(*)
C1 3
C2 4
desc view callback like(1,'sales');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 n n n 10 0 17 17 0 0
set MAX_PERF_FILTER_COL_TYPE='t_customer_id';
reponse
success
set MAX_PERF_FILTER_FN='e(x,''C1'')';
reponse
success
refresh_force dirty view;
reponse
success
desc view callback like(1,'sales');
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 7 n n n 10 0 13 13 0 0
select customer_id,count(*) from v_sales group by customer_id;
customer_id count(*)
C1 3
select sales.* from v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C1 20191231 4 ID03 #
artA C1 20191231 5 ID06 box1
set MAX_PERF_FILTER_COL_TYPE='-1';
reponse
success
refresh_force dirty view;
reponse
success
select customer_id,count(*) from v_sales group by customer_id;
customer_id count(*)
C1 3
C2 4
set SEQUENCE_COLUMNS='customer_id';
reponse
success
--the view must be refreshed, otherwise sql will fail refresh_force dirty view;
reponse
success
--we can see that lines have a new order, which is visible with the no_index tag select --#SB no_cache no_index customer_id, line_id, sales#item_id from v_sales and item_id in ('artA','artB') ;
customer_id line_id sales#item_id
C1 ID01 artA
C1 ID03 artB
C1 ID06 artA
C2 ID02 artB
C2 ID07 artA
--and countsequence(customer_id) has changed select --#SB no_cache countsequence(customer_id) from v_sales and item_id in ('artA','artB') ;
countsequence(customer_id)
2
--index read will see lines in this order select --#SB no_cache use_index item_id, customer_id from v_sales and item_id in ('artA','artB') ;
item_id customer_id
artA C1
artA C1
artA C2
artB C1
artB C2
--which is why index read will need a resort operation to get correct countsequence result select --#SB no_cache explain use_index log_verbose countsequence(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'sort') ;
logs
--but this sort operation can be long --some dimensions are functionally linked to a column --common case is transaction_id and dates, you expect a given transaction (with several lines) to have a unique date --in SB's jargon date dimension is a companion of transaction_id --in such cases (you know something about your data), you can tell SB with param SEQUENCE_COLUMN_COMPANIONS --here we will tell SB that items is a companion of customer_id --this is incorrect, I know, but you will understand what happens without sorting set SEQUENCE_COLUMN_COMPANIONS='customer_id=items';
reponse
success
--no more sorting select --#SB no_cache explain use_index log_verbose countsequence(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'sort') ;
logs
--but result is incorrect select --#SB no_cache use_index countsequence(customer_id) from v_sales and item_id in ('artA','artB') ;
countsequence(customer_id)
4
-- countdistinct is greedy against high sparsity columns -- countsequence is not -- in some context both are equivalent -- so SB will replace countdistinct(the_col) by countsequence(the_col) if: -- SEQUENCE_COLUMNS is set to the_col -- and REPLACE_COUNTDISTINCT_BY_COUNTSEQUENCE=y insert into sales values('artA','CX','20191231',5,'ID11','box1');
reponse
success
insert into sales values('artA','CY','20191231',5,'ID11','box1');
reponse
success
refresh_online dirty view;
reponse
success
select --#SB log_verbose no_cache explain countdistinct(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'sequence');
logs
set REPLACE_COUNTDISTINCT_BY_COUNTSEQUENCE='y';
reponse
success
--no_index context select --#SB log_verbose no_cache no_index explain countdistinct(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'sequence');
logs
select --#SB log_verbose no_cache no_index countdistinct(customer_id) from v_sales and item_id in ('artA','artB') ;
countdistinct(customer_id)
2
--use_index context select --#SB log_verbose no_cache use_index explain countdistinct(customer_id) from v_sales and item_id in ('artA','artB') callback like(1,'sequence');
logs
select --#SB log_verbose no_cache use_index countdistinct(customer_id) from v_sales and item_id in ('artA','artB') ;
countdistinct(customer_id)
2
--remember that USE_INDEX_WITH_SORT_LIMIT can be used (see before) set SEQUENCE_COLUMNS='-1';
reponse
success
refresh_force dirty view;
reponse
success
continue_on_error(20);
reponse
success
drop view v_foo;
reponse
success
drop table foo;
reponse
success
refresh dirty view;
reponse
success
stop_on_error;
reponse
success
create table foo(col number,str text);
reponse
success
create view v_foo as select * from foo;
reponse
success
set float_precision=1;
reponse
success
system '(printf "col,str\n1234,1234\n0.01234,0.01234\n12.43,12.43">./foo.csv)';
reponse
insert into foo select * from file('./foo.csv');
reponse
success
set float_precision=4;
reponse
success
system '(printf "col,str\n1234,1234\n0.01234,0.01234\n12.43,12.43">./foo.csv)';
reponse
insert into foo select * from file('./foo.csv');
reponse
success
refresh dirty view;
reponse
success
--numbers inserted with float_precision 1 are truncated in SB select * from v_foo;
col str
1234 1234
0 0.01234
12.4000 12.43
1234 1234
0.0123 0.01234
12.4300 12.43
--float_precision param also affects the display set float_precision=0;
reponse
success
select * from v_foo;
col str
1234 1234
0 0.01234
12 12.43
1234 1234
0 0.01234
12 12.43
--scientific notation is allowed, float_precision affects only display system '(printf "col,str\n4.04e-02,4.04e-02">./foo.csv)';
reponse
insert into foo select * from file('./foo.csv');
reponse
success
refresh dirty view;
reponse
success
select * from v_foo;
col str
1234 1234
0 0.01234
12 12.43
1234 1234
0 0.01234
12 12.43
0 4.04e-02
set float_precision=4;
reponse
success
select * from v_foo;
col str
1234 1234
0 0.01234
12.4000 12.43
1234 1234
0.0123 0.01234
12.4300 12.43
0 4.04e-02
set float_precision=2;
reponse
success
-- ########################### -- RUNNING _v1.17.10_F28shutdown.sql shutdown;