# -*- coding: utf-8 -*-
# 2. Create-timeseries-from-bookid.py
#
# READ BOOK
# Create timeseries of books
import os
import pickle
from os import listdir
from os.path import isfile, join, isdir
from json import loads
from sys import path
import pandas as pd
from numpy import dot, cumsum, floor, zeros, sum, array, random, ones
from bookclass import Book_raw_data
from labMTsimple.speedy import LabMT
# NOTE: Book below is the project's Django model for Gutenberg books; it is
# assumed to be importable (with Django configured) in this environment, e.g.:
# from books.models import Book

my_LabMT = LabMT()
book_ids = 1
# Slide the raw text of each book through labMT and get its sentiment timeseries.
# Builds a matrix of timeseries, one per selected book.
filteredbooks = [907]
#plays = [1787,1533,2263,2235,2253,1128,1777,1110,1118,1134]
timeseries = []
for book in filteredbooks:
    b = Book.objects.get(gutenberg_id=book)
    b_data = Book_raw_data(b)
    print(b.txt_file_path)
    try:
        b_data.chopper_sliding(my_LabMT, num_points=200, stop_val=1.0, randomize=False, use_cache=True)
    except Exception:
        print("couldn't find", b.title)
        continue  # skip books whose timeseries could not be computed
    # print(b_data.timeseries)
    timeseries.append(b_data.timeseries)
# Store the timeseries matrix as a pickle so it can be loaded in other programs.
big_time_series_matrix = timeseries
# The file name below is an assumption taken from the original variable name.
with open('big-time-series-matrix.p', 'wb') as pf:
    pickle.dump(big_time_series_matrix, pf)
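# A minimal sketch of how another program would read the matrix back;
# 'big-time-series-matrix.p' is the assumed file name from the dump above,
# and reloaded_matrix is a hypothetical name for the result.
with open('big-time-series-matrix.p', 'rb') as pf:
    reloaded_matrix = pickle.load(pf)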
#USE book_raw_data directory
#ORIGINAL working directory: /Users/home/Desktop/NN\ Projects/Emotional\ Arcs/core-stories
os.chdir(os.path.join("/Volumes/NewVolume/books"))
# OPEN a book id text file from the book_raw_data database
with open('1.txt', 'r') as f:
    # READ the book text into memory
    book_raw_data = f.read()
# STORE the book text in the library
library = [book_raw_data]
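# A minimal sketch, using the listdir/isfile/join imports above, of reading
# every .txt file in the books directory into a list; full_library is a
# hypothetical name and the one-.txt-file-per-book layout is an assumption.
books_dir = "/Volumes/NewVolume/books"
full_library = []
for fname in sorted(listdir(books_dir)):
    full_path = join(books_dir, fname)
    if isfile(full_path) and fname.endswith('.txt'):
        with open(full_path, 'r') as fh:
            full_library.append(fh.read())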