# to convert a rating number into rating images out of 10 stars
def html_format_rating (rating):
    """Return an HTML snippet of 10 star images for the given rating.

    The first *rating* images are gold stars, the remainder (up to 10)
    are grey stars.  A rating of -1 means "unrated" and that plain text
    is returned instead of any images.
    """
    if rating == -1:
        return "unrated"
    # string repetition builds the gold stars then the grey filler;
    # a negative repeat count simply yields an empty string, matching
    # the behaviour of an empty range() loop
    gold_stars = '<img src="star.gif" alt="*" />' * rating
    grey_stars = '<img src="star-grey.gif" alt="-" />' * (10 - rating)
    return gold_stars + grey_stars
+
# function to generate the search index file
def generate_search_index (dbname, conf, full_text_index = True):
    """Build and pickle a word -> set of (title, url) search index.

    dbname          -- site database handle/path passed to biaweb_db
    conf            -- site configuration tuple; conf[0] is the site URL
                       and conf[5] the destination directory
    full_text_index -- index the article content (field 4) when True,
                       otherwise the article keywords (field 3)

    Writes <dest>/cgi-bin/searchwords.idx (binary pickle) plus an
    .htaccess that blocks direct browser access to it.  Returns True on
    success, False if the articles cannot be read or the files cannot
    be written.
    """
    # get all the articles; biaweb_db signals failure with False
    arts = biaweb_db.site_articles (dbname)
    if arts == False:
        return False

    # field to index: article content for full text, otherwise keywords
    searchfield = 4 if full_text_index else 3

    # maps a cleaned-up lowercase word to a set of (title, url) tuples
    search_index = dict ()

    # run through the articles and collect unique words (except stop words)
    for art in arts:
        # strip out the HTML tags so only visible text is indexed
        parser = HTMLTagRemover ()
        parser.feed (art[searchfield])
        parser.close ()
        word_list = parser.get_raw_string ().split ()

        for word in word_list:
            # lowercase the word and trim surrounding punctuation/cruft
            cleanword = word.strip (":;?!_<>,.+-\"'=`!@#$%^&*()[]{}/= \n\r\t").lower ()
            # skip "stop words"; everything else goes into the index
            if cleanword not in biaweb_strings.stopwords:
                # title of the search entry is the article title
                title = art[1]
                # url is the article URL: http://siteurl/Category/Article.html
                url = "http://" + conf[0] + art[13] + "/" + art[8] + ".html"
                # setdefault replaces the deprecated has_key() check and
                # creates the set on first sight of the word
                search_index.setdefault (cleanword, set ()).add ((title, url))

    # write the search database as a pickle of search_index
    search_index_path = os.path.join (conf[5], "cgi-bin", "searchwords.idx")
    htaccess_path = os.path.join (conf[5], "cgi-bin", ".htaccess")
    try:
        # open the index file in binary mode; protocol 2 = binary pickle
        fsearchindex = open (search_index_path, "wb")
        try:
            cPickle.dump (search_index, fsearchindex, 2)
        finally:
            # close even if the dump fails
            fsearchindex.close ()
        # htaccess prevents opening the index file from a web browser
        fhtaccess = open (htaccess_path, "w+")
        try:
            fhtaccess.write (biaweb_strings.searchindex_htaccess)
        finally:
            fhtaccess.close ()
    # BUG FIX: "except OSError, IOError" caught only OSError and rebound the
    # name IOError to the exception instance; a tuple catches both types
    except (OSError, IOError):
        return False

    # finally return true
    return True
+
# function to copy additional files and folders to the destination path
def copy_files_folders (conf, files_to_copy, folders_to_copy):
    """Copy the search script, rating images and extra files/folders.

    conf            -- site configuration tuple; conf[5] is the
                       destination directory
    files_to_copy   -- list of (source path, relative dest path) files
    folders_to_copy -- list of (source path, relative dest path) folders

    The search script and star images are copied only if they exist next
    to the program (sys.path[0]); otherwise the user must copy them
    manually.  Returns True on success, False on any copy error.
    """
    # create the cgi-bin directory and copy search.py into it if possible
    search_script_path = os.path.join (sys.path[0], "search.py")
    if os.path.exists (search_script_path):
        try:
            os.mkdir (os.path.join (conf[5], "cgi-bin"))
            shutil.copy2 (search_script_path, os.path.join (conf[5], "cgi-bin"))
        # BUG FIX: "except IOError, OSError" caught only IOError and rebound
        # the name OSError to the exception; a tuple catches both types
        except (IOError, OSError):
            return False

    # copy the star rating images to the destination directory if present
    rating_img_star = os.path.join (sys.path[0], "star.gif")
    rating_img_greystar = os.path.join (sys.path[0], "star-grey.gif")
    for rating_img in (rating_img_star, rating_img_greystar):
        if os.path.exists (rating_img):
            try:
                shutil.copy2 (rating_img, conf[5])
            except (IOError, OSError):
                return False

    # additional files to copy; iterating an empty list is a no-op, so the
    # old "<> []" guard (removed-in-Py3 operator) is unnecessary
    for src, dest in files_to_copy:
        # get full path from relative path in dest
        full_dest = os.path.join (conf[5], dest)
        try:
            shutil.copy2 (src, full_dest)
        except (IOError, OSError):
            return False

    # additional folders to copy
    for src, dest in folders_to_copy:
        # get full path from relative path in dest
        full_dest = os.path.join (conf[5], dest)
        try:
            # note: copytree requires full_dest to not already exist
            shutil.copytree (src, full_dest)
        except (IOError, OSError):
            return False

    # finally return true
    return True
+
# function to generate article pages
def generate_article_pages (dbname, conf, templates, category_str, bestrated_str):
    """Generate one HTML page per article inside its category folder.

    dbname        -- site database handle/path passed to biaweb_db
    conf          -- site configuration tuple (site url, title, keywords,
                     description, ..., destination dir, copyright)
    templates     -- template list; templates[0][1] is the main page
                     template, templates[1][1] the article fragment
    category_str  -- pre-rendered category list HTML
    bestrated_str -- pre-rendered best-rated list HTML

    Returns True on success, False when the articles cannot be read or a
    page cannot be written.
    """
    # main template and article fragment template
    tpl_main = string.Template (templates[0][1])
    tpl_articlebit = string.Template (templates[1][1])

    # get all articles from the database
    articles = biaweb_db.site_articles (dbname)
    if articles == False:
        # BUG FIX: this previously returned None; return False so callers
        # comparing with == False (as elsewhere in this file) see the failure
        return False

    # walk through each article and generate the file in the appropriate
    # category folder
    for art in articles:
        art_cdate = time.ctime (art[5])
        art_mdate = time.ctime (art[6])
        rating_str = html_format_rating (art[9])
        # build the article fragment from the article bit template
        article_str = tpl_articlebit.safe_substitute (article_title = art[1],
                            article_cdate = art_cdate,
                            article_mdate = art_mdate,
                            rating = rating_str,
                            article_contents = art[4])

        # build the full article page around the fragment
        articlepage_str = tpl_main.safe_substitute (site_title = conf[1],
                            site_url = "http://" + conf[0],
                            meta_keywords = art[3],
                            meta_description = art[2],
                            page_title = conf[1],
                            page_desc = conf[3],
                            contents_bit = article_str,
                            list_of_categories = category_str,
                            list_best_rated = bestrated_str,
                            copyright = conf[6])
        # write to <dest>/<category dir>/<article>.html
        try:
            farticle = open (os.path.join (conf[5], art[13], art[8] + ".html"), "w+")
            try:
                farticle.write (articlepage_str)
            finally:
                # BUG FIX: the file handle was never closed before
                farticle.close ()
        # BUG FIX: "except OSError, IOError" caught only OSError and rebound
        # the name IOError to the exception; a tuple catches both types
        except (OSError, IOError):
            return False

    # finally return true
    return True
+
# function to generate category directories and indices
def generate_category_indices (dbname, conf, templates, category_str, bestrated_str, category_list):
    """Create each category directory and its index.html listing page.

    dbname        -- site database handle/path passed to biaweb_db
    conf          -- site configuration tuple; conf[5] is the destination dir
    templates     -- template list; [0][1] main page, [3][1] table,
                     [4][1] table row templates
    category_str  -- pre-rendered category list HTML
    bestrated_str -- pre-rendered best-rated list HTML
    category_list -- category rows; cat[0] id, cat[1] title, cat[2]
                     description, cat[3] directory name

    Returns True on success, False if a directory cannot be created, the
    articles cannot be read, or an index page cannot be written.
    """
    # main template, article table template and table row template
    tpl_main = string.Template (templates[0][1])
    tpl_tablebit = string.Template (templates[3][1])
    tpl_trowbit = string.Template (templates[4][1])

    # run through each category and generate its index page
    for cat in category_list:
        try:
            # create the category directory
            os.mkdir (os.path.join (conf[5], cat[3]))
        # BUG FIX: "except IOError, OSError" caught only IOError and rebound
        # the name OSError — so the OSError that os.mkdir actually raises
        # (e.g. directory already exists) escaped uncaught.  A tuple catches
        # both exception types.
        except (IOError, OSError):
            return False

        # get the list of articles for this category
        articles_list = biaweb_db.site_articles (dbname, cat[0])
        if articles_list == False:
            return False

        # build one table row per article in the category
        tableitems = []
        for art in articles_list:
            url = art[13] + "/" + art[8] + ".html"
            creattime = time.ctime (art[5])
            rating_str = html_format_rating (art[9])
            tableitem_str = tpl_trowbit.safe_substitute (article_url = url,
                                title = art[1],
                                created = creattime,
                                rating = rating_str)
            tableitems.append (tableitem_str)
        # join the rows into a single string
        tablerows_str = "".join (tableitems)

        # wrap the rows in the category table template
        table_str = tpl_tablebit.safe_substitute (category_title = cat[1],
                            category_desc = cat[2],
                            table_rows = tablerows_str)

        # build the full category index page
        categoryindex_str = tpl_main.safe_substitute (site_title = conf[1],
                            site_url = "http://" + conf[0],
                            meta_keywords = conf[2],
                            meta_description = cat[2],
                            page_title = conf[1],
                            page_desc = conf[3],
                            contents_bit = table_str,
                            list_of_categories = category_str,
                            list_best_rated = bestrated_str,
                            copyright = conf[6])

        # write to <dest>/<Category>/index.html
        try:
            fcatindex = open (os.path.join (conf[5], cat[3], "index.html"), "w+")
            try:
                fcatindex.write (categoryindex_str)
            finally:
                fcatindex.close ()
        # BUG FIX: same broken two-name except clause as above
        except (OSError, IOError):
            return False

    # finally return true
    return True
+