cleaning some code (from MR #9)

Carl Chenet 2017-07-31 15:58:30 +02:00
parent 67b7e72507
commit 37ee1e5a67
6 changed files with 27 additions and 29 deletions

View file

@@ -20,7 +20,7 @@
 # standard library imports
 from operator import itemgetter

-class AddTags(object):
+class AddTags:
     '''Add as many tags as possible depending on the tweet length'''
     def __init__(self, tweet, tags):
         '''Constructor of AddTags class'''
@@ -34,14 +34,14 @@ class AddTags(object):
        tweetlength = len(self.tweet)
        # sort list of tags, the ones with the greater length first
-        tagswithindices = ({'text':i, 'length':len(i)} for i in self.tags)
+        tagswithindices = ({'text':i, 'length': len(i)} for i in self.tags)
        sortedtagswithindices = sorted(tagswithindices, key=itemgetter('length'), reverse=True)
        self.tags = (i['text'] for i in sortedtagswithindices)
        # add tags is space is available
        for tag in self.tags:
            taglength = len(tag)
-            if (tweetlength + (taglength +1)) <= maxlength:
+            if (tweetlength + (taglength + 1)) <= maxlength:
                self.tweet = ' '.join([self.tweet, tag])
                tweetlength += (taglength + 1)
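
The hunk above only touches spacing: tags are still sorted longest-first and appended while the text stays under the length limit. A minimal standalone sketch of that idea (not the project's code; the function name and maxlength=280 are assumptions):

    from operator import itemgetter

    def add_tags(tweet, tags, maxlength=280):  # maxlength value is an assumption
        # sort candidate tags, the longest first
        bylength = sorted(({'text': t, 'length': len(t)} for t in tags),
                          key=itemgetter('length'), reverse=True)
        tweetlength = len(tweet)
        for tag in (t['text'] for t in bylength):
            # +1 accounts for the separating space
            if tweetlength + len(tag) + 1 <= maxlength:
                tweet = ' '.join([tweet, tag])
                tweetlength += len(tag) + 1
        return tweet

    print(add_tags('New release out', ['#python', '#rss', '#foss']))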

View file

@@ -25,7 +25,7 @@ import sys
 __version__ = '0.5'

-class CliParse(object):
+class CliParse:
     '''CliParse class'''
     def __init__(self):
         '''Constructor for the CliParse class'''
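
The only change here is dropping the explicit object base class. In Python 3 every class is new-style, so both spellings are equivalent; a quick check (illustrative names only):

    class CliParseOld(object):   # pre-cleanup spelling
        pass

    class CliParseNew:           # post-cleanup spelling, same behaviour
        pass

    # both ultimately inherit from object
    assert CliParseOld.__mro__[-1] is object
    assert CliParseNew.__mro__[-1] is object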

View file

@@ -17,18 +17,17 @@
 '''Get values of the configuration file'''

 # standard library imports
-from configparser import SafeConfigParser, NoOptionError, NoSectionError
+from configparser import SafeConfigParser
 import logging
 import os
 import os.path
-import socket
 import sys
 import re

 # 3rd party library imports
 import feedparser

-class ConfParse(object):
+class ConfParse:
     '''ConfParse class'''
     def __init__(self, clioptions):
         '''Constructor of the ConfParse class'''
@@ -46,7 +45,6 @@ class ConfParse(object):
         config = SafeConfigParser()
         if not config.read(os.path.expanduser(pathtoconfig)):
             sys.exit('Could not read config file')
-
         # The feedparser section
         if config.has_option('feedparser', 'accept_bozo_exceptions'):
             self.accept_bozo_exceptions = config.getboolean('feedparser', 'accept_bozo_exceptions')
@@ -185,7 +183,7 @@ class ConfParse(object):
                    sys.exit('The parent directory of the cache file does not exist: {cachefileparent}'.format(cachefileparent=cachefileparent))
            else:
                options['cachefile'] = self.clioptions.cachefile
-            ### cache limit
+            # cache limit
            if config.has_section(section):
                confoption = 'cache_limit'
                if config.has_option(section, confoption):
@@ -225,7 +223,7 @@ class ConfParse(object):
            # host, port, user, pass, database options
            ##########################################
            plugins[section] = {}
-            for currentoption in ['host','port','user','pass','database']:
+            for currentoption in ['host', 'port', 'user', 'pass', 'database']:
                if config.has_option(section, currentoption):
                    plugins[section][currentoption] = config.get(section, currentoption)
            if 'host' not in plugins[section]:
@@ -234,7 +232,7 @@ class ConfParse(object):
                plugins[section]['port'] = 8086
            if 'measurement' not in plugins[section]:
                plugins[section]['measurement'] = 'tweets'
-            for field in ['user','pass','database']:
+            for field in ['user', 'pass', 'database']:
                if field not in plugins[section]:
                    sys.exit('Parsing error for {field} in the [{section}] section: {field} is not defined'.format(field=field, section=section))
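
These hunks only change spacing and a comment; the option parsing itself is untouched. As a rough sketch of the pattern (section name, config path, and the 'host' default are assumptions; only the port and measurement defaults and the error message are visible in the diff), reading a plugin section with configparser looks like this:

    from configparser import ConfigParser  # SafeConfigParser is a deprecated alias of this
    import sys

    config = ConfigParser()
    config.read('example.ini')        # hypothetical config path
    section = 'influxdb'              # assumed section name
    plugin = {}
    for currentoption in ['host', 'port', 'user', 'pass', 'database']:
        if config.has_option(section, currentoption):
            plugin[currentoption] = config.get(section, currentoption)
    plugin.setdefault('host', 'localhost')      # assumed default
    plugin.setdefault('port', 8086)             # default visible in the diff
    plugin.setdefault('measurement', 'tweets')  # default visible in the diff
    for field in ['user', 'pass', 'database']:
        if field not in plugin:
            sys.exit('Parsing error for {field} in the [{section}] section: '
                     '{field} is not defined'.format(field=field, section=section))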

View file

@@ -38,9 +38,9 @@ class FeedCache:
            with open(self.options['cachefile']) as dbdsc:
                dbfromfile = dbdsc.readlines()
            dblist = [i.strip() for i in dbfromfile]
-            self.dbfeed = deque(dblist, self.options['cache_limit'] )
+            self.dbfeed = deque(dblist, self.options['cache_limit'])
        else:
-            self.dbfeed = deque([], self.options['cache_limit'] )
+            self.dbfeed = deque([], self.options['cache_limit'])

    def append(self, rssid):
        '''Append a rss id to the cache'''
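
The fix here is just a stray space before the closing parenthesis. The second argument to deque is maxlen, which is what enforces cache_limit; a quick illustration (values assumed):

    from collections import deque

    cache_limit = 3                      # assumed value for the example
    dbfeed = deque([], cache_limit)      # bounded cache
    for rssid in ['id1', 'id2', 'id3', 'id4']:
        dbfeed.append(rssid)
    print(list(dbfeed))                  # ['id2', 'id3', 'id4'] - oldest entry evicted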

View file

@@ -16,9 +16,6 @@
 # Push values to a influxdb database
 '''Push values to a influxdb database'''

-# standard libraries imports
-import json
-
 # 3rd party libraries imports
 from influxdb import InfluxDBClient
@@ -38,5 +35,8 @@ class InfluxdbPlugin(object):
     def main(self):
         '''Main of the PiwikModule class'''
-        self.datatoinfluxdb.append({'measurement': self.plugininfo['measurement'], 'fields': {'value': self.data}})
+        self.datatoinfluxdb.append({
+            'measurement': self.plugininfo['measurement'],
+            'fields': {'value': self.data}
+        })
         self.client.write_points(self.datatoinfluxdb)
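
Besides dropping the unused json import, the point dictionary is simply reflowed over several lines before being handed to write_points. A self-contained sketch of that write path with the influxdb 1.x Python client (host, credentials, and database name are placeholders):

    from influxdb import InfluxDBClient

    client = InfluxDBClient(host='localhost', port=8086,
                            username='user', password='pass', database='metrics')
    points = [{
        'measurement': 'tweets',
        'fields': {'value': 1}
    }]
    client.write_points(points)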

View file

@@ -17,7 +17,7 @@
 # Remove duplicates from the final string before sending the tweet
 '''Remove duplicates from the final string before sending the tweet'''

-class RemoveDuplicates(object):
+class RemoveDuplicates:
     '''Remove duplicates from the final string before sending the tweet'''
     def __init__(self, tweet):
         '''Constructor of RemoveDuplicates class'''
@@ -32,7 +32,7 @@ class RemoveDuplicates(object):
            if element != ' ' and (element.startswith('http://') or element.startswith('https://')):
                newlink = True
                # if we already found this link, increment the counter
-                for i,_ in enumerate(links):
+                for i, _ in enumerate(links):
                    if links[i]['link'] == element:
                        newlink = False
                        links[i]['count'] += 1
@@ -49,7 +49,7 @@ class RemoveDuplicates(object):
            # needed for not inversing the order of links if it is a duplicate
            # and the second link is not one
            if i == 0:
-                self.tweet = self.tweet.replace(element['link'], wildcard, 1 )
+                self.tweet = self.tweet.replace(element['link'], wildcard, 1)
            else:
                self.tweet = self.tweet.replace(element['link'], '', 1)
            # finally
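
The whitespace fixes above sit inside the duplicate-link detection, which counts how many times each http(s) link occurs before deciding what to replace. A toy version of that counting pass (simplified, standalone names):

    tweet = 'news https://example.org again https://example.org'
    links = []
    for element in tweet.split(' '):
        if element.startswith(('http://', 'https://')):
            newlink = True
            # if we already found this link, increment the counter
            for i, _ in enumerate(links):
                if links[i]['link'] == element:
                    newlink = False
                    links[i]['count'] += 1
            if newlink:
                links.append({'link': element, 'count': 1})
    print(links)  # [{'link': 'https://example.org', 'count': 2}]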