Twitter data-mining script in Python

I've written a simple script that searches Twitter for a set of keywords and saves the matching tweets to a CSV file. It can be found on my GitHub here.

How can I improve this code so that it is generally more efficient and conforms to coding standards?

"""
Collect English tweets matching security-related keywords posted recently,
and record the polarity, id, date/time, query, user name and text of each
matching tweet in a csv file.
"""

import codecs  # NOTE(review): not used in the visible code — confirm before removing
import csv
import datetime
import time

import tweepy
from textblob import TextBlob

import cleanit  # local helper module providing tweet_cleaner_updated()

## twitter api credentials and authenticated client ##
# NOTE(review): real credentials must never be committed to source control —
# load them from environment variables or a config file instead.
consumer_key = "xxx"
consumer_secret = "xxx"
access_token = "xxx"
access_token_secret = "xxx"

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

## accumulator lists: one entry per stored tweet, later zipped into csv rows ##
big_list = []      # NOTE(review): never used in the visible code — confirm before removing
text_list = []     # cleaned tweet text (utf-8 encoded)
id_list = []       # tweet ids
name_list = []     # author screen names
created_list = []  # creation timestamps
query_list = []    # search term that matched the tweet
polarity = []      # sentiment bucket: "4" for polarity in [-1, 0], else "0"

# index of the next row to write to csv; persists across polling passes so
# previously-written rows are not duplicated
t = 0

# search terms fed one at a time to tweepy.Cursor
security_words = ['phishing', 'dos', 'botnet', 'xss', 'smb', 'wannacry',
                  'heartbleed', 'ransomware', 'trojan', 'spyware', 'exploit',
                  'virus', 'malware', 'mitm']

# ambiguous terms: a tweet matching one of these is stored only if its text
# also contains at least one supporting keyword from gen_words
double_meaning_words = ['petya', 'smb', 'dos', 'infosec', 'hacker', 'backdoor']
gen_words = ["attack", "security", "hit", "detected", "protected", "injection",
             "data", "exploit", "router", 'ransomware', 'phishing', 'wannacry',
             'security']

def storing_data(status):
    """Record a filtered tweet in the module-level accumulator lists.

    Appends the cleaned text, id, screen name, formatted timestamp and a
    sentiment bucket; the lists are written out to csv by the main loop.

    status: a tweepy Status object.
    """
    # clean the tweet text before storing; encode so the csv writer is safe
    text_list.append(str(cleanit.tweet_cleaner_updated(status.text)).encode("utf-8"))
    id_list.append(str(status.id))                  # tweet identifier
    name_list.append(str(status.user.screen_name))  # author's handle
    created_list.append(status.created_at.strftime("%c"))  # locale-formatted timestamp

    # TextBlob polarity is a float in [-1, 1]; bucket non-positive scores as
    # "4" and positive ones as "0"
    # NOTE(review): label values inherited from the original — confirm meaning
    score = TextBlob(status.text).sentiment.polarity
    polarity.append("4" if -1 <= score <= 0 else "0")

def rejects(status):
    """Append the raw text of a tweet that failed the filters to reject.csv.

    status: a tweepy Status object (only .text is read).
    """
    # open in append mode so rejected tweets accumulate across passes;
    # newline="" is required so csv.writer controls line endings itself
    with open("reject.csv", "a", newline="", encoding="utf-8") as reject_file:
        csv.writer(reject_file).writerow([status.text])


in truth:
print ('running', datetime.datetime.now ())
with open (& # 39; sec_tweet_dataset_5.csv & # 39 ;, & quot; a & quot ;, newline = & quot; encoding = & utf-8 & # 39;) as a log file:
logger = csv.writer (log file)
for i in security_words:
alex = []
            for status in tweepy.Cursor (api.search, i, lang = "en"). items (40): #search twitter for word in the list of security words in English
if (status.retweeted == False) or (& # 39; RT @ not in status.text): # this tweet is retweeted, do not store it
if i in double_meaning_words and i in status.text: #if search term used in the list of security words, also in the two-way words, check it also contains the word -
for words in gen_words: # - in the gen_words list. If this continues to store otherwise do not store.
if the words are in status.text:
storage_data (status)
Pause
other:
rejects (status)
other:
storage_data (status)

rejects (status)

while t <len (polarity):
alex = ([polarityy
t + = 1
logger.writerow (alex)
hour (1800)