max_connections = 150 # A compromise :)
effective_cache_size = 2048MB # Old value 439MB --> Even older : 128MB
#Is this too high?
maintenance_work_mem = 96MB # Old 16MB. Would 64MB be better? Updates
# and therefore re-indexing of tuples happen quite frequently.
work_mem = 3MB
# Old was 1MB!? That is too low.
# Scott, you mentioned an example with 1 GB. I guess this is the working
# memory available per user query for sorting, joining and so on. I will
# be doing those things quite often.
# After all, if I understand the concept correctly, it will only use
# it if it needs to; otherwise performance will take a hit.
# Scott, you say that I might need to change this later on when I have
# several gigs of data. But will it hurt when I don't?
# I think 4-8MB should be enough and relatively safe to start with. I
# am scared of going higher. But 1MB is low.
shared_buffers = 1024MB # Kept it
random_page_cost = 3 # I have pretty fast disks.
wal_buffers = 1024KB
Scott, you mentioned :
You can also use the pg_stat_all_indexes table to look at index scans
vs. tuples being read, this can sometimes hint at index 'bloat'. I
would also recommend pg_stattuple which has a pg_statindex function
for looking at index fragmentation.
From where can I see these stats? Is there any graphical tool?
Thanks all / Jennifer