rrs-commit: r30 - trunk

decibel at decibel.org
Sun Mar 6 15:29:21 GMT 2005


Author: decibel
Date: Sun Mar  6 15:29:20 2005
New Revision: 30

Modified:
   trunk/rrs.sql
   trunk/rrs_functions.sql
Log:
Rate limiting code now runs, but it's currently stuck using very small update times
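
For context, the idea behind the new rate limiting in calculate_end_time() appears
to be to look at the (run time, data processed) pairs recorded in
rrs.history_data_interval and pick a data_interval that should finish in roughly
the 'desired run time' setting. A rough sketch of that estimate using a textbook
least-squares fit is below; it is illustrative only (the committed query in
calculate_end_time() below differs) and assumes the rrs.interval_to_seconds() /
rrs.seconds_to_interval() helpers added in this revision:

    -- Illustrative sketch, not the committed code: fit data_interval (y, in
    -- seconds) against run time (x, in seconds) over past runs, then evaluate
    -- the fit at the 'desired run time' setting to estimate how much data the
    -- next run can handle.
    SELECT coalesce(
               rrs.seconds_to_interval(
                   ( sum_y - b * sum_x ) / n        -- intercept of the fit
                   + b * rrs.interval_to_seconds( rrs.setting_get('desired run time')::interval )
               )
               , rrs.setting_get('desired run time')::interval  -- fall back when there is no history yet
           ) AS estimated_data_interval
        FROM (
                SELECT count(*)::double precision AS n
                        , sum(x) AS sum_x
                        , sum(y) AS sum_y
                        , ( count(*) * sum(x*y) - sum(x) * sum(y) )
                          / nullif( count(*) * sum(x*x) - sum(x) * sum(x), 0 ) AS b
                    FROM (
                            SELECT rrs.interval_to_seconds( data_interval ) AS y
                                    , rrs.interval_to_seconds( end_time - start_time ) AS x
                                FROM rrs.history_data_interval
                        ) raw
            ) fit;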

Modified: trunk/rrs.sql
==============================================================================
--- trunk/rrs.sql	(original)
+++ trunk/rrs.sql	Sun Mar  6 15:29:20 2005
@@ -16,8 +16,8 @@
 ) WITHOUT OIDS;
 
 COPY setting ( setting_name, setting ) FROM stdin;
-history length	10
-history_data_interval length	10
+history length	100
+history_data_interval length	100
 \.
 
 CREATE TABLE rrs (
@@ -68,7 +68,7 @@
 
 CREATE TABLE history_data_interval (
 	data_interval	interval	NOT NULL
-) INHERITS ( history ) WITHOUT OIDs;
+) INHERITS( history ) WITHOUT OIDs;
 
 COPY rrs (rrs_id, keep_buckets, parent, parent_buckets, time_per_bucket, rrs_name) FROM stdin;
 1	60	\N	\N	00:01:00	last hour
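
The two settings bumped here feed the history trimming in log_time() further down:
they cap how many rows are kept in rrs.history and rrs.history_data_interval. The
values appear to come back from rrs.setting_get() as text (hence the new ::bigint
casts), so they need a cast before use, roughly like this (illustrative only,
mirroring the log_time() change in rrs_functions.sql below):

    -- Keep only the most recent 'history length' rows of run history.
    SELECT start_time
        FROM rrs.history
        ORDER BY start_time DESC
        LIMIT rrs.setting_get('history length')::bigint;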

Modified: trunk/rrs_functions.sql
==============================================================================
--- trunk/rrs_functions.sql	(original)
+++ trunk/rrs_functions.sql	Sun Mar  6 15:29:20 2005
@@ -22,6 +22,7 @@
     v_sql text;
 
     v_start_time rrs.history.start_time%TYPE;
+    v_run_time interval;
     v_my_oid oid;
 BEGIN
     -- Figure out our OID and try to aquire a lock
@@ -82,7 +83,10 @@
                                             JOIN '' || v_source.source_table || '' s
                                                 ON (
                                                     b.prev_end_time  < '' || quote_ident(v_source.source_timestamptz_field) || ''
-                                                    AND b.end_time >= '' || quote_ident(v_source.source_timestamptz_field) || '' )
+                                                    AND b.end_time >= '' || quote_ident(v_source.source_timestamptz_field) || ''
+                                                    AND '' || quote_ident(v_source.source_timestamptz_field) || '' <= '' || quote_literal(v_max_end_time) || ''
+                                                    AND '' || quote_ident(v_source.source_timestamptz_field) || '' > '' || quote_literal(v_last_end_time) || ''
+                                                    )
                                         WHERE b.rrs_id = '' || quote_literal(v_rrs.rrs_id) || ''
                                             AND b.end_time <= '' || quote_literal(v_max_end_time) || ''
                                             AND b.end_time > '' || quote_literal(v_last_end_time) || ''
@@ -144,6 +148,9 @@
         END LOOP;
     END LOOP;
 
+    v_run_time := log_time( v_start_time );
+    RAISE INFO ''% rows updated in %'', v_total_rows, v_run_time;
+
     --debug.f(''alert_rrs exit'');
     PERFORM rrs.update_lock(v_my_oid, 0);
     RETURN v_total_rows;
@@ -222,7 +229,7 @@
 -- Name: update_buckets(); Type: FUNCTION; Schema: rrs; Owner: pgsql
 --
 
-CREATE OR REPLACE FUNCTION update_buckets( history_bucket.start_time%TYPE ) RETURNS integer
+CREATE OR REPLACE FUNCTION update_buckets( history_data_interval.start_time%TYPE ) RETURNS integer
     AS '
 DECLARE
     p_start_time ALIAS FOR $1;
@@ -382,8 +389,9 @@
 CREATE OR REPLACE FUNCTION calculate_end_time ( rrs.bucket.end_time%TYPE, rrs.history.start_time%TYPE ) RETURNS timestamp with time zone AS '
 DECLARE
     p_first_end_time ALIAS FOR $1;
-    p_start_time ALIAS FOR $3;
+    p_start_time ALIAS FOR $2;
 
+    v_data_interval rrs.history_data_interval.data_interval%TYPE;
     v_last_end_time rrs.bucket.end_time%TYPE;
 BEGIN
     /*
@@ -400,37 +408,58 @@
     */
     
     SELECT INTO v_data_interval
-            sum_y - b * sum_x ) * rss.setting_get(''desired run time'')::interval + b
+            coalesce(
+                    seconds_to_interval(
+                            ( sum_y - b * sum_x )
+                            * rrs.interval_to_seconds( rrs.setting_get(''desired run time'')::interval )
+                            + b
+                        )
+                    , rrs.setting_get(''desired run time'')::interval
+                )
         FROM (
                 SELECT sum(x) AS sum_x, sum(y) AS sum_y
-                        , ( sum( x * y ) - count(*) * sum(x) * sum(y) ) / ( sum(x*x) - count(*) * sum(x) * sum(x) ) AS b
+                        , CASE WHEN ( sum(x*x) - count(*) * sum(x) * sum(x) ) = 0
+                            THEN 1
+                            ELSE ( sum( x * y ) - count(*) * sum(x) * sum(y) ) / ( sum(x*x) - count(*) * sum(x) * sum(x) )
+                            END
+                        AS b
                     FROM (
-                            SELECT data_interval AS y, end_time - start_time AS x
-                                FROM history_bucket
+                            SELECT rrs.interval_to_seconds(data_interval) AS y
+                                    , rrs.interval_to_seconds( end_time - start_time) AS x
+                                FROM history_data_interval
                         ) raw
             ) sums_b
     ;
+    IF v_data_interval <= 0 THEN
+        RAISE INFO ''v_data_interval % <= 0, forcing to desired run time'', v_data_interval;
+        v_data_interval = rrs.setting_get(''desired run time'');
+    END IF;
 
     -- Now we have an upper limit on how many buckets to process, but we don''t want to go beyond current_timestamp
     v_last_end_time := min( current_timestamp, p_first_end_time + v_data_interval );
 
     -- Log how much time we''ll actually be processing
-    INSERT INTO rrs.history_bucket( start_time, end_time, buckets )
-        VALUES( p_start_time, p_start_time, v_last_end_time - v_first_end_time )
+    INSERT INTO rrs.history_data_interval( start_time, end_time, data_interval )
+        VALUES( p_start_time, p_start_time, v_last_end_time - p_first_end_time )
     ;
 
+    RETURN v_last_end_time;
 END;
 ' LANGUAGE plpgsql;
 
+CREATE OR REPLACE FUNCTION interval_to_seconds(interval) RETURNS double precision AS '
+    SELECT extract( EPOCH FROM $1 );
+' LANGUAGE sql IMMUTABLE STRICT;
+
+CREATE OR REPLACE FUNCTION seconds_to_interval(double precision) RETURNS interval AS '
+    SELECT ( $1::text || '' seconds'' )::interval(6);
+' LANGUAGE sql IMMUTABLE STRICT;
+
 CREATE OR REPLACE FUNCTION interval_time(timestamp with time zone, interval) RETURNS timestamp with time zone
     AS '
-SELECT ''1970-01-01 GMT''::timestamptz +
-            (
-                (
-                    floor( extract( EPOCH FROM $1 ) / extract( EPOCH FROM $2 ) ) * extract( EPOCH FROM $2 )::int
-                )::text || '' seconds''
-            )::interval
-;
+SELECT ''1970-01-01 GMT''::timestamp with time zone + rrs.seconds_to_interval(
+            floor( extract( EPOCH FROM $1 ) / rrs.interval_to_seconds($2) ) * rrs.interval_to_seconds($2)::int
+        ) ;
 '
     LANGUAGE sql IMMUTABLE STRICT;
 
@@ -497,7 +526,8 @@
     v_end_time rrs.history.end_time%TYPE;
     v_min_keep_time rrs.history.start_time%TYPE;
 BEGIN
-    v_end_time := current_timestamp;
+    v_end_time := timeofday();
+    RAISE DEBUG ''log_time: p_start_time %, v_end_time %'', p_start_time, v_end_time;
 
     -- Trim the history table
     SELECT INTO v_min_keep_time
@@ -509,7 +539,7 @@
                                 FROM rrs.history
                                 ORDER BY start_time DESC
                         ) ordered
-                    LIMIT rrs.setting_get( ''history length'' )
+                    LIMIT rrs.setting_get( ''history length'' )::bigint
             ) limited
     ;
     IF FOUND THEN
@@ -521,7 +551,7 @@
     ;
 
     -- IF add_buckets decided not to limit bucket creation, don''t bother processing anything
-    IF EXISTS (SELECT * FROM rrs.history_bucket WHERE start_time = p_start_time) THEN
+    IF EXISTS (SELECT * FROM rrs.history_data_interval WHERE start_time = p_start_time) THEN
         -- Trim the history table
         SELECT INTO v_min_keep_time
                 min( start_time )
@@ -529,23 +559,23 @@
                     SELECT start_time
                         FROM (
                                 SELECT start_time
-                                    FROM rrs.history_bucket
+                                    FROM rrs.history_data_interval
                                     ORDER BY start_time DESC
                             ) ordered
-                        LIMIT rrs.setting_get( ''history length'' )
+                        LIMIT rrs.setting_get( ''history_data_interval length'' )::bigint
                 ) limited
         ;
         IF FOUND THEN
-            DELETE FROM rrs.history_bucket WHERE start_time < v_min_keep_time;
+            DELETE FROM rrs.history_data_interval WHERE start_time < v_min_keep_time;
         END IF;
         
-        UPDATE rrs.history_bucket
+        UPDATE rrs.history_data_interval
             SET end_time = v_end_time
             WHERE start_time = p_start_time
         ;
     END IF;
 
-    RETURN v_end_time - v_start_time;
+    RETURN v_end_time - p_start_time;
 END;
 '
     LANGUAGE plpgsql
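
The new rrs.interval_to_seconds() / rrs.seconds_to_interval() helpers and the
reworked interval_time() can be sanity-checked from psql with something like the
following (illustrative; assumes the functions are created in the rrs schema,
which is how the bodies above reference them):

    SELECT rrs.interval_to_seconds( '1 hour'::interval );    -- 3600
    SELECT rrs.seconds_to_interval( 90 );                     -- 00:01:30
    SELECT rrs.interval_time( '2005-03-06 15:29:20 GMT'::timestamptz
                            , '00:01:00'::interval );         -- floored to the start of its one-minute bucket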

